]> git.openfabrics.org - ~shefty/rdma-dev.git/commitdiff
dmaengine: at_hdmac: replace spin_lock* with irqsave variants
authorNicolas Ferre <nicolas.ferre@atmel.com>
Wed, 27 Jul 2011 12:21:28 +0000 (12:21 +0000)
committerVinod Koul <vinod.koul@intel.com>
Fri, 19 Aug 2011 13:09:48 +0000 (18:39 +0530)
dmaengine routines can be called from interrupt context and with
interrupts disabled, whereas spin_unlock_bh() must not be called from
such contexts. This patch therefore converts all spin_lock* calls
to their irqsave/irqrestore variants.

Also, the plain spin_lock() used in the tasklet is converted to the
irqsave variant: the tasklet can be interrupted by a hardware interrupt,
and DMA requests issued from that interrupt handler may try to take the
same lock, which would deadlock without disabling interrupts.

Idea from dw_dmac patch by Viresh Kumar.

Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
drivers/dma/at_hdmac.c

index 6a483eac7b3f2d971cabae0aceaf022a42866558..fd87b9690e1b73fdb1a63cf93b62a67e38f6cf7a 100644 (file)
@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 {
        struct at_desc *desc, *_desc;
        struct at_desc *ret = NULL;
+       unsigned long flags;
        unsigned int i = 0;
        LIST_HEAD(tmp_list);
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
                i++;
                if (async_tx_test_ack(&desc->txd)) {
@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
                dev_dbg(chan2dev(&atchan->chan_common),
                                "desc %p not ACKed\n", desc);
        }
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
        dev_vdbg(chan2dev(&atchan->chan_common),
                "scanned %u descriptors on freelist\n", i);
 
@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
        if (!ret) {
                ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
                if (ret) {
-                       spin_lock_bh(&atchan->lock);
+                       spin_lock_irqsave(&atchan->lock, flags);
                        atchan->descs_allocated++;
-                       spin_unlock_bh(&atchan->lock);
+                       spin_unlock_irqrestore(&atchan->lock, flags);
                } else {
                        dev_err(chan2dev(&atchan->chan_common),
                                        "not enough descriptors available\n");
@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 {
        if (desc) {
                struct at_desc *child;
+               unsigned long flags;
 
-               spin_lock_bh(&atchan->lock);
+               spin_lock_irqsave(&atchan->lock, flags);
                list_for_each_entry(child, &desc->tx_list, desc_node)
                        dev_vdbg(chan2dev(&atchan->chan_common),
                                        "moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
                dev_vdbg(chan2dev(&atchan->chan_common),
                         "moving desc %p to freelist\n", desc);
                list_add(&desc->desc_node, &atchan->free_list);
-               spin_unlock_bh(&atchan->lock);
+               spin_unlock_irqrestore(&atchan->lock, flags);
        }
 }
 
@@ -471,8 +473,9 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
 static void atc_tasklet(unsigned long data)
 {
        struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+       unsigned long flags;
 
-       spin_lock(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
                atc_handle_error(atchan);
        else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
@@ -480,7 +483,7 @@ static void atc_tasklet(unsigned long data)
        else
                atc_advance_work(atchan);
 
-       spin_unlock(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
        struct at_desc          *desc = txd_to_at_desc(tx);
        struct at_dma_chan      *atchan = to_at_dma_chan(tx->chan);
        dma_cookie_t            cookie;
+       unsigned long           flags;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        cookie = atc_assign_cookie(atchan, desc);
 
        if (list_empty(&atchan->active_list)) {
@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
                list_add_tail(&desc->desc_node, &atchan->queue);
        }
 
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
        return cookie;
 }
@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        int                     chan_id = atchan->chan_common.chan_id;
+       unsigned long           flags;
 
        LIST_HEAD(list);
 
        dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
 
        if (cmd == DMA_PAUSE) {
-               spin_lock_bh(&atchan->lock);
+               spin_lock_irqsave(&atchan->lock, flags);
 
                dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
                set_bit(ATC_IS_PAUSED, &atchan->status);
 
-               spin_unlock_bh(&atchan->lock);
+               spin_unlock_irqrestore(&atchan->lock, flags);
        } else if (cmd == DMA_RESUME) {
                if (!test_bit(ATC_IS_PAUSED, &atchan->status))
                        return 0;
 
-               spin_lock_bh(&atchan->lock);
+               spin_lock_irqsave(&atchan->lock, flags);
 
                dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
                clear_bit(ATC_IS_PAUSED, &atchan->status);
 
-               spin_unlock_bh(&atchan->lock);
+               spin_unlock_irqrestore(&atchan->lock, flags);
        } else if (cmd == DMA_TERMINATE_ALL) {
                struct at_desc  *desc, *_desc;
                /*
@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                 * channel. We still have to poll the channel enable bit due
                 * to AHB/HSB limitations.
                 */
-               spin_lock_bh(&atchan->lock);
+               spin_lock_irqsave(&atchan->lock, flags);
 
                /* disabling channel: must also remove suspend state */
                dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                /* if channel dedicated to cyclic operations, free it */
                clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
-               spin_unlock_bh(&atchan->lock);
+               spin_unlock_irqrestore(&atchan->lock, flags);
        } else {
                return -ENXIO;
        }
@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        dma_cookie_t            last_used;
        dma_cookie_t            last_complete;
+       unsigned long           flags;
        enum dma_status         ret;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
 
        last_complete = atchan->completed_cookie;
        last_used = chan->cookie;
@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
                ret = dma_async_is_complete(cookie, last_complete, last_used);
        }
 
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
        if (ret != DMA_SUCCESS)
                dma_set_tx_state(txstate, last_complete, last_used,
@@ -1046,6 +1052,7 @@ atc_tx_status(struct dma_chan *chan,
 static void atc_issue_pending(struct dma_chan *chan)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
+       unsigned long           flags;
 
        dev_vdbg(chan2dev(chan), "issue_pending\n");
 
@@ -1053,11 +1060,11 @@ static void atc_issue_pending(struct dma_chan *chan)
        if (test_bit(ATC_IS_CYCLIC, &atchan->status))
                return;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        if (!atc_chan_is_enabled(atchan)) {
                atc_advance_work(atchan);
        }
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
        struct at_dma           *atdma = to_at_dma(chan->device);
        struct at_desc          *desc;
        struct at_dma_slave     *atslave;
+       unsigned long           flags;
        int                     i;
        u32                     cfg;
        LIST_HEAD(tmp_list);
@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
                list_add_tail(&desc->desc_node, &tmp_list);
        }
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        atchan->descs_allocated = i;
        list_splice(&tmp_list, &atchan->free_list);
        atchan->completed_cookie = chan->cookie = 1;
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
        /* channel parameters */
        channel_writel(atchan, CFG, cfg);