From c65bfa62b7185bdeb063c2a637f501f00997d068 Mon Sep 17 00:00:00 2001
From: Mian Yousaf Kaukab
Date: Tue, 4 Jan 2011 12:47:02 +0100
Subject: [PATCH] usb: musb: maintain three states for buffer mappings instead
 of two

With only a boolean musb_request.mapped, a buffer that was mapped by a
higher layer can still be passed to dma_sync_single_for_device() from
musb_g_giveback(), even after txstate()/rxstate() has called
unmap_dma_buffer() and fallen back to pio mode. Maintain three states
(UN_MAPPED, PRE_MAPPED, MUSB_MAPPED) instead of two, so that a buffer
which has already been unmapped is never synced again.

In addition, move the check for musb_ep->dma into map_dma_buffer() and
drop the now-redundant checks at its call sites. Where possible, merge
the checks for is_dma_capable() into the new is_buffer_mapped() state
check.

Signed-off-by: Mian Yousaf Kaukab
Signed-off-by: Felipe Balbi
---
 drivers/usb/musb/musb_gadget.c | 45 +++++++++++++++++++---------------
 drivers/usb/musb/musb_gadget.h |  8 +++++-
 2 files changed, 32 insertions(+), 21 deletions(-)

diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 23dad4c8785..65775039d6b 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,11 +92,19 @@
 
 /* ----------------------------------------------------------------------- */
 
+#define is_buffer_mapped(req) (is_dma_capable() && \
+		(req->map_state != UN_MAPPED))
+
 /* Maps the buffer to dma  */
 
 static inline void map_dma_buffer(struct musb_request *request,
-			struct musb *musb)
+			struct musb *musb, struct musb_ep *musb_ep)
 {
+	request->map_state = UN_MAPPED;
+
+	if (!is_dma_capable() || !musb_ep->dma)
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		request->request.dma = dma_map_single(
 				musb->controller,
@@ -105,7 +113,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 				request->tx
 					? DMA_TO_DEVICE
 					: DMA_FROM_DEVICE);
-		request->mapped = 1;
+		request->map_state = MUSB_MAPPED;
 	} else {
 		dma_sync_single_for_device(musb->controller,
 			request->request.dma,
@@ -113,7 +121,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-		request->mapped = 0;
+		request->map_state = PRE_MAPPED;
 	}
 }
 
@@ -121,11 +129,14 @@ static inline void map_dma_buffer(struct musb_request *request,
 static inline void unmap_dma_buffer(struct musb_request *request,
 				struct musb *musb)
 {
+	if (!is_buffer_mapped(request))
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		DBG(20, "not unmapping a never mapped buffer\n");
 		return;
 	}
-	if (request->mapped) {
+	if (request->map_state == MUSB_MAPPED) {
 		dma_unmap_single(musb->controller,
 			request->request.dma,
 			request->request.length,
@@ -133,16 +144,15 @@ static inline void unmap_dma_buffer(struct musb_request *request,
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 		request->request.dma = DMA_ADDR_INVALID;
-		request->mapped = 0;
-	} else {
+	} else { /* PRE_MAPPED */
 		dma_sync_single_for_cpu(musb->controller,
 			request->request.dma,
 			request->request.length,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-
 	}
+	request->map_state = UN_MAPPED;
 }
 
 /*
@@ -172,8 +182,7 @@ __acquires(ep->musb->lock)
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable() && ep->dma)
-		unmap_dma_buffer(req, musb);
+	unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p, %d/%d\n",
 				ep->end_point.name, request,
@@ -335,7 +344,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 			csr);
 
 #ifndef CONFIG_MUSB_PIO_ONLY
-	if (is_dma_capable() && musb_ep->dma) {
+	if (is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		size_t request_size;
 
@@ -436,8 +445,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 		 * Unmap the dma buffer back to cpu if dma channel
 		 * programming fails
 		 */
-		if (is_dma_capable() && musb_ep->dma)
-			unmap_dma_buffer(req, musb);
+		unmap_dma_buffer(req, musb);
 
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
@@ -627,7 +635,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		return;
 	}
 
-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (is_cppi_enabled() && is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		struct dma_channel	*channel = musb_ep->dma;
 
@@ -658,7 +666,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		len = musb_readw(epio, MUSB_RXCOUNT);
 		if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				struct dma_controller	*c;
 				struct dma_channel	*channel;
 				int			use_dma = 0;
@@ -742,7 +750,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			fifo_count = min_t(unsigned, len, fifo_count);
 
 #ifdef CONFIG_USB_TUSB_OMAP_DMA
-			if (tusb_dma_omap() && musb_ep->dma) {
+			if (tusb_dma_omap() && is_buffer_mapped(req)) {
 				struct dma_controller *c = musb->dma_controller;
 				struct dma_channel *channel = musb_ep->dma;
 				u32 dma_addr = request->dma + request->actual;
@@ -762,7 +770,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			 * programming fails. This buffer is mapped if the
 			 * channel allocation is successful
 			 */
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				unmap_dma_buffer(req, musb);
 
 				/*
@@ -1222,10 +1230,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma)
-		map_dma_buffer(request, musb);
-	else
-		request->mapped = 0;
+	map_dma_buffer(request, musb, musb_ep);
 
 	spin_lock_irqsave(&musb->lock, lockflags);
 
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index dec8dc00819..a55354fbccf 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -35,13 +35,19 @@
 #ifndef __MUSB_GADGET_H
 #define __MUSB_GADGET_H
 
+enum buffer_map_state {
+	UN_MAPPED = 0,
+	PRE_MAPPED,
+	MUSB_MAPPED
+};
+
 struct musb_request {
 	struct usb_request	request;
 	struct musb_ep		*ep;
 	struct musb		*musb;
 	u8 tx;			/* endpoint direction */
 	u8 epnum;
-	u8 mapped;
+	enum buffer_map_state map_state;
};
 
 static inline struct musb_request *to_musb_request(struct usb_request *req)
-- 
2.46.0
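
Note (not part of the patch): the state machine above can be condensed into a
standalone sketch for review purposes. Everything driver-specific is stubbed
out here; struct fake_request, pre_mapped_by_gadget, and the printf stand-ins
are illustrative inventions, and only the enum values and the transition rules
are taken from the patch itself.

/*
 * Minimal model of the three-state buffer mapping logic. The DMA calls
 * are replaced with printf so the transitions can be observed.
 */
#include <stdbool.h>
#include <stdio.h>

enum buffer_map_state { UN_MAPPED = 0, PRE_MAPPED, MUSB_MAPPED };

struct fake_request {
	bool pre_mapped_by_gadget;	/* stand-in for dma != DMA_ADDR_INVALID */
	enum buffer_map_state map_state;
};

/* Mirrors map_dma_buffer(): record who owns the mapping. */
static void map_buffer(struct fake_request *req)
{
	if (req->pre_mapped_by_gadget) {
		printf("dma_sync_single_for_device()\n");
		req->map_state = PRE_MAPPED;
	} else {
		printf("dma_map_single()\n");
		req->map_state = MUSB_MAPPED;
	}
}

/* Mirrors unmap_dma_buffer(): a no-op once the state is UN_MAPPED. */
static void unmap_buffer(struct fake_request *req)
{
	if (req->map_state == UN_MAPPED)
		return;		/* the old boolean could not express this */
	if (req->map_state == MUSB_MAPPED)
		printf("dma_unmap_single()\n");
	else			/* PRE_MAPPED */
		printf("dma_sync_single_for_cpu()\n");
	req->map_state = UN_MAPPED;
}

int main(void)
{
	struct fake_request req = { .pre_mapped_by_gadget = true };

	map_buffer(&req);	/* gadget pre-mapped: sync for device */
	unmap_buffer(&req);	/* txstate() falls back to pio: sync for cpu */
	unmap_buffer(&req);	/* musb_g_giveback(): no-op, no double sync */
	return 0;
}

With the old boolean, the giveback-time call could not distinguish
"pre-mapped and still owned by the gadget" from "already unmapped by a pio
fallback", so it would sync a buffer the driver no longer owned. Resetting
map_state to UN_MAPPED at the end of unmap_dma_buffer() is what makes the
second call above harmless.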