From 7096f36e5388beb06b80576d3ccc3a0df2760035 Mon Sep 17 00:00:00 2001
From: Kedareswara rao Appana <appana.durga.rao@xilinx.com>
Date: Fri, 26 Feb 2016 19:33:51 +0530
Subject: [PATCH 1/5] dmaengine: xilinx_vdma: Improve SG engine handling

The current driver allows the user to queue up multiple segments
onto a single transaction descriptor. The user submits this single
descriptor, and in issue_pending() we decode the segments and submit
them to the SG HW engine. We free the allocated_desc when it is
submitted to the HW.

The existing code prevents the user from preparing multiple transactions
at the same time, because each new preparation overwrites allocated_desc.

The HW SG engine is best utilized when we collate the whole pending list
at DMA start; this patch updates the driver to do so.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
---
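A minimal client-side sketch of what this enables, using the standard
dmaengine API (nframes, the xt[] interleaved templates and flags are
hypothetical placeholders, not from this driver):

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int i;

	for (i = 0; i < nframes; i++) {
		/* each prep call now gets its own descriptor... */
		tx = dmaengine_prep_interleaved_dma(chan, &xt[i], flags);
		/* ...which is chained onto the channel's pending list */
		cookie = dmaengine_submit(tx);
	}
	/* the collated pending list is handed to the SG engine in one go */
	dma_async_issue_pending(chan);
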
 drivers/dma/xilinx/xilinx_vdma.c | 127 ++++++++++++++++++-------------
 1 file changed, 72 insertions(+), 55 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 6f4b5017ca3b..06bffec934d2 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -190,8 +190,7 @@ struct xilinx_vdma_tx_descriptor {
  * @desc_offset: TX descriptor registers offset
  * @lock: Descriptor operation lock
  * @pending_list: Descriptors waiting
- * @active_desc: Active descriptor
- * @allocated_desc: Allocated descriptor
+ * @active_list: Descriptors ready to submit
  * @done_list: Complete descriptors
  * @common: DMA common channel
  * @desc_pool: Descriptors pool
@@ -206,6 +205,7 @@ struct xilinx_vdma_tx_descriptor {
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
+ * @desc_pendingcount: Descriptor pending count
  */
 struct xilinx_vdma_chan {
 	struct xilinx_vdma_device *xdev;
@@ -213,8 +213,7 @@ struct xilinx_vdma_chan {
 	u32 desc_offset;
 	spinlock_t lock;
 	struct list_head pending_list;
-	struct xilinx_vdma_tx_descriptor *active_desc;
-	struct xilinx_vdma_tx_descriptor *allocated_desc;
+	struct list_head active_list;
 	struct list_head done_list;
 	struct dma_chan common;
 	struct dma_pool *desc_pool;
@@ -229,6 +228,7 @@ struct xilinx_vdma_chan {
 	struct tasklet_struct tasklet;
 	struct xilinx_vdma_config config;
 	bool flush_on_fsync;
+	u32 desc_pendingcount;
 };
 
 /**
@@ -342,19 +342,11 @@ static struct xilinx_vdma_tx_descriptor *
 xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_tx_descriptor *desc;
-	unsigned long flags;
-
-	if (chan->allocated_desc)
-		return chan->allocated_desc;
 
 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 	if (!desc)
 		return NULL;
 
-	spin_lock_irqsave(&chan->lock, flags);
-	chan->allocated_desc = desc;
-	spin_unlock_irqrestore(&chan->lock, flags);
-
 	INIT_LIST_HEAD(&desc->segments);
 
 	return desc;
@@ -412,9 +404,7 @@ static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
 
 	xilinx_vdma_free_desc_list(chan, &chan->pending_list);
 	xilinx_vdma_free_desc_list(chan, &chan->done_list);
-
-	xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
-	chan->active_desc = NULL;
+	xilinx_vdma_free_desc_list(chan, &chan->active_list);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -614,25 +604,26 @@ static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
 static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_config *config = &chan->config;
-	struct xilinx_vdma_tx_descriptor *desc;
+	struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
 	unsigned long flags;
 	u32 reg;
-	struct xilinx_vdma_tx_segment *head, *tail = NULL;
+	struct xilinx_vdma_tx_segment *tail_segment;
 
 	if (chan->err)
 		return;
 
 	spin_lock_irqsave(&chan->lock, flags);
 
-	/* There's already an active descriptor, bail out. */
-	if (chan->active_desc)
-		goto out_unlock;
-
 	if (list_empty(&chan->pending_list))
 		goto out_unlock;
 
 	desc = list_first_entry(&chan->pending_list,
 				struct xilinx_vdma_tx_descriptor, node);
+	tail_desc = list_last_entry(&chan->pending_list,
+				    struct xilinx_vdma_tx_descriptor, node);
+
+	tail_segment = list_last_entry(&tail_desc->segments,
+				       struct xilinx_vdma_tx_segment, node);
 
 	/* If it is SG mode and hardware is busy, cannot submit */
 	if (chan->has_sg && xilinx_vdma_is_running(chan) &&
@@ -645,14 +636,9 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	 * If hardware is idle, then all descriptors on the running lists are
 	 * done, start new transfers
 	 */
-	if (chan->has_sg) {
-		head = list_first_entry(&desc->segments,
-					struct xilinx_vdma_tx_segment, node);
-		tail = list_entry(desc->segments.prev,
-				  struct xilinx_vdma_tx_segment, node);
-
-		vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
-	}
+	if (chan->has_sg)
+		vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC,
+				desc->async_tx.phys);
 
 	/* Configure the hardware using info in the config structure */
 	reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
@@ -694,12 +680,15 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 
 	/* Start the transfer */
 	if (chan->has_sg) {
-		vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
+		vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC,
+				tail_segment->phys);
 	} else {
 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
 		int i = 0;
 
-		list_for_each_entry(segment, &desc->segments, node) {
+		list_for_each_entry(desc, &chan->pending_list, node) {
+			segment = list_first_entry(&desc->segments,
+					   struct xilinx_vdma_tx_segment, node);
 			vdma_desc_write(chan,
 					XILINX_VDMA_REG_START_ADDRESS(i++),
 					segment->hw.buf_addr);
@@ -716,8 +705,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 		vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
 	}
 
-	list_del(&desc->node);
-	chan->active_desc = desc;
+	list_splice_tail_init(&chan->pending_list, &chan->active_list);
+	chan->desc_pendingcount = 0;
 
 out_unlock:
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -742,22 +731,20 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
  */
 static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
 {
-	struct xilinx_vdma_tx_descriptor *desc;
+	struct xilinx_vdma_tx_descriptor *desc, *next;
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->lock, flags);
 
-	desc = chan->active_desc;
-	if (!desc) {
-		dev_dbg(chan->dev, "no running descriptors\n");
+	if (list_empty(&chan->active_list))
 		goto out_unlock;
+
+	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+		list_del(&desc->node);
+		dma_cookie_complete(&desc->async_tx);
+		list_add_tail(&desc->node, &chan->done_list);
 	}
 
-	dma_cookie_complete(&desc->async_tx);
-	list_add_tail(&desc->node, &chan->done_list);
-
-	chan->active_desc = NULL;
-
 out_unlock:
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -878,6 +865,44 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+/**
+ * append_desc_queue - Queue a descriptor onto the pending list
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ */
+static void append_desc_queue(struct xilinx_vdma_chan *chan,
+			      struct xilinx_vdma_tx_descriptor *desc)
+{
+	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_descriptor *tail_desc;
+
+	if (list_empty(&chan->pending_list))
+		goto append;
+
+	/*
+	 * Add the hardware descriptor to the chain of hardware descriptors
+	 * that already exists in memory.
+	 */
+	tail_desc = list_last_entry(&chan->pending_list,
+				    struct xilinx_vdma_tx_descriptor, node);
+	tail_segment = list_last_entry(&tail_desc->segments,
+				       struct xilinx_vdma_tx_segment, node);
+	tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+
+	/*
+	 * Add the software descriptor and all children to the list
+	 * of pending transactions
+	 */
+append:
+	list_add_tail(&desc->node, &chan->pending_list);
+	chan->desc_pendingcount++;
+
+	if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
+		dev_dbg(chan->dev, "desc pendingcount is too high\n");
+		chan->desc_pendingcount = chan->num_frms;
+	}
+}
+
 /**
  * xilinx_vdma_tx_submit - Submit DMA transaction
  * @tx: Async transaction descriptor
@@ -906,11 +931,8 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	cookie = dma_cookie_assign(tx);
 
-	/* Append the transaction to the pending transactions queue. */
-	list_add_tail(&desc->node, &chan->pending_list);
-
-	/* Free the allocated desc */
-	chan->allocated_desc = NULL;
+	/* Put this transaction onto the tail of the pending queue */
+	append_desc_queue(chan, desc);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 
@@ -973,13 +995,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	else
 		hw->buf_addr = xt->src_start;
 
-	/* Link the previous next descriptor to current */
-	if (!list_empty(&desc->segments)) {
-		prev = list_last_entry(&desc->segments,
-				       struct xilinx_vdma_tx_segment, node);
-		prev->hw.next_desc = segment->phys;
-	}
-
 	/* Insert the segment into the descriptor segments list. */
 	list_add_tail(&segment->node, &desc->segments);
 
@@ -988,7 +1003,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	/* Link the last hardware descriptor with the first. */
 	segment = list_first_entry(&desc->segments,
 				   struct xilinx_vdma_tx_segment, node);
-	prev->hw.next_desc = segment->phys;
+	desc->async_tx.phys = segment->phys;
 
 	return &desc->async_tx;
 
@@ -1127,10 +1142,12 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
 	chan->dev = xdev->dev;
 	chan->xdev = xdev;
 	chan->has_sg = xdev->has_sg;
+	chan->desc_pendingcount = 0x0;
 
 	spin_lock_init(&chan->lock);
 	INIT_LIST_HEAD(&chan->pending_list);
 	INIT_LIST_HEAD(&chan->done_list);
+	INIT_LIST_HEAD(&chan->active_list);
 
 	/* Retrieve the channel properties from the device tree */
 	has_dre = of_property_read_bool(node, "xlnx,include-dre");

From e2b538a77d99706a9d2dc12925f3f8477c724dcc Mon Sep 17 00:00:00 2001
From: Kedareswara rao Appana <appana.durga.rao@xilinx.com>
Date: Fri, 26 Feb 2016 19:33:53 +0530
Subject: [PATCH 2/5] dmaengine: xilinx_vdma: Fix issues with non-parking mode

This patch fixes issues with non-parking (circular) mode.
With the existing driver in circular mode, submitting fewer frames than
the h/w is configured for leaves the VDMA h/w misconfigured.
Fix this by programming the frame count register with the number of
frames actually submitted.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
---
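A hypothetical scenario, sketched with the standard dmaengine client API
(the xt[] templates are placeholders): the IP is built with
xlnx,num-fstores = <4>, but the client queues only two frames.

	/* Only two of the four hardware frame stores get real buffers */
	dmaengine_submit(dmaengine_prep_interleaved_dma(chan, &xt[0], 0));
	dmaengine_submit(dmaengine_prep_interleaved_dma(chan, &xt[1], 0));
	dma_async_issue_pending(chan);

Without the fix the engine keeps circulating over all four frame stores,
two of them never written; with it, xilinx_vdma_start_transfer() programs
FRMSTORE with desc_pendingcount (2 here), so only the submitted frames
are cycled.
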
 drivers/dma/xilinx/xilinx_vdma.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 06bffec934d2..e96ff9dea40d 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -648,6 +648,10 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	else
 		reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
 
+	/* Configure the channel with the number of frame buffers in use */
+	vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE,
+			chan->desc_pendingcount);
+
 	/*
 	 * With SG, start with circular mode, so that BDs can be fetched.
 	 * In direct register mode, if not parking, enable circular mode

From 26c5e36931b3fea7461ba6f0d8b65eb25ce2b917 Mon Sep 17 00:00:00 2001
From: Kedareswara rao Appana <appana.durga.rao@xilinx.com>
Date: Fri, 26 Feb 2016 19:33:52 +0530
Subject: [PATCH 3/5] dmaengine: xilinx_vdma: Simplify spin lock handling

This patch simplifies the spin lock handling in the driver
by moving the locking out of xilinx_vdma_start_transfer() and
xilinx_vdma_complete_descriptor() and into their callers.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
---
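The resulting locking convention, condensed from the diff below
(chan->lock is the channel's existing spinlock):

	/* Process context: issue_pending() takes the lock itself */
	spin_lock_irqsave(&chan->lock, flags);
	xilinx_vdma_start_transfer(chan);	/* expects lock held */
	spin_unlock_irqrestore(&chan->lock, flags);

	/* Hard-IRQ context: the plain spin_lock form is sufficient */
	spin_lock(&chan->lock);
	xilinx_vdma_complete_descriptor(chan);
	xilinx_vdma_start_transfer(chan);
	spin_unlock(&chan->lock);
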
 drivers/dma/xilinx/xilinx_vdma.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index e96ff9dea40d..70b2b32cba15 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -605,17 +605,15 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_config *config = &chan->config;
 	struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
-	unsigned long flags;
 	u32 reg;
 	struct xilinx_vdma_tx_segment *tail_segment;
 
+	/* This function must be called with chan->lock held */
 	if (chan->err)
 		return;
 
-	spin_lock_irqsave(&chan->lock, flags);
-
 	if (list_empty(&chan->pending_list))
-		goto out_unlock;
+		return;
 
 	desc = list_first_entry(&chan->pending_list,
 				struct xilinx_vdma_tx_descriptor, node);
@@ -629,7 +627,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	if (chan->has_sg && xilinx_vdma_is_running(chan) &&
 	    !xilinx_vdma_is_idle(chan)) {
 		dev_dbg(chan->dev, "DMA controller still busy\n");
-		goto out_unlock;
+		return;
 	}
 
 	/*
@@ -680,7 +678,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	xilinx_vdma_start(chan);
 
 	if (chan->err)
-		goto out_unlock;
+		return;
 
 	/* Start the transfer */
 	if (chan->has_sg) {
@@ -700,7 +698,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 		}
 
 		if (!last)
-			goto out_unlock;
+			return;
 
 		/* HW expects these parameters to be same for one transaction */
 		vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
@@ -711,9 +709,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
 	chan->desc_pendingcount = 0;
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
@@ -723,8 +718,11 @@ out_unlock:
 static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
 {
 	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+	unsigned long flags;
 
+	spin_lock_irqsave(&chan->lock, flags);
 	xilinx_vdma_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
@@ -736,21 +734,16 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
 static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_tx_descriptor *desc, *next;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->lock, flags);
 
+	/* This function must be called with chan->lock held */
 	if (list_empty(&chan->active_list))
-		goto out_unlock;
+		return;
 
 	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
 		list_del(&desc->node);
 		dma_cookie_complete(&desc->async_tx);
 		list_add_tail(&desc->node, &chan->done_list);
 	}
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
@@ -861,8 +854,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
 	}
 
 	if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+		spin_lock(&chan->lock);
 		xilinx_vdma_complete_descriptor(chan);
 		xilinx_vdma_start_transfer(chan);
+		spin_unlock(&chan->lock);
 	}
 
 	tasklet_schedule(&chan->tasklet);

From 9495f2648287029fb5545c34a0fa318426ebe84c Mon Sep 17 00:00:00 2001
From: Kedareswara rao Appana <appana.durga.rao@xilinx.com>
Date: Fri, 26 Feb 2016 19:33:54 +0530
Subject: [PATCH 4/5] dmaengine: xilinx_vdma: Use readl_poll_timeout instead of
 do/while loops

It is sometimes necessary to poll a memory-mapped register until its
value satisfies some condition. Use the readl_poll_timeout() convenience
macro for this instead of open-coded do/while loops.

This patch updates the driver accordingly.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
---
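For reference, readl_poll_timeout() from <linux/iopoll.h> has the shape
readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) and returns 0
on success or -ETIMEDOUT if cond never became true. A minimal sketch of
the pattern (REG_STATUS, BIT_HALTED and TIMEOUT_US are hypothetical
names, not this driver's):

	u32 val;
	int err;

	/* Re-read the register into val until cond holds or we time out;
	 * sleep_us == 0 means busy-poll between reads.
	 */
	err = readl_poll_timeout(base + REG_STATUS, val,
				 (val & BIT_HALTED), 0, TIMEOUT_US);
	if (err)
		dev_err(dev, "halt timed out, status 0x%x\n", val);
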
 drivers/dma/xilinx/xilinx_vdma.c | 46 +++++++++++++++-----------------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 70b2b32cba15..bc2ca457247e 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -28,6 +28,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_dma.h>
@@ -254,6 +255,9 @@ struct xilinx_vdma_device {
 	container_of(chan, struct xilinx_vdma_chan, common)
 #define to_vdma_tx_descriptor(tx) \
 	container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
+#define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
+			   cond, delay_us, timeout_us)
 
 /* IO accessors */
 static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
@@ -550,18 +554,17 @@ static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
  */
 static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
 {
-	int loop = XILINX_VDMA_LOOP_COUNT;
+	int err = 0;
+	u32 val;
 
 	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
 
 	/* Wait for the hardware to halt */
-	do {
-		if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
-		    XILINX_VDMA_DMASR_HALTED)
-			break;
-	} while (loop--);
+	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+				      (val & XILINX_VDMA_DMASR_HALTED), 0,
+				      XILINX_VDMA_LOOP_COUNT);
 
-	if (!loop) {
+	if (err) {
 		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
 			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
 		chan->err = true;
@@ -576,18 +579,17 @@ static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
  */
 static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
 {
-	int loop = XILINX_VDMA_LOOP_COUNT;
+	int err = 0;
+	u32 val;
 
 	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
 
 	/* Wait for the hardware to start */
-	do {
-		if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
-		      XILINX_VDMA_DMASR_HALTED))
-			break;
-	} while (loop--);
+	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+				      !(val & XILINX_VDMA_DMASR_HALTED), 0,
+				      XILINX_VDMA_LOOP_COUNT);
 
-	if (!loop) {
+	if (err) {
 		dev_err(chan->dev, "Cannot start channel %p: %x\n",
 			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
 
@@ -754,21 +756,17 @@ static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
  */
 static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
 {
-	int loop = XILINX_VDMA_LOOP_COUNT;
+	int err = 0;
 	u32 tmp;
 
 	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
 
-	tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
-		XILINX_VDMA_DMACR_RESET;
-
 	/* Wait for the hardware to finish reset */
-	do {
-		tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
-			XILINX_VDMA_DMACR_RESET;
-	} while (loop-- && tmp);
+	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
+				      !(tmp & XILINX_VDMA_DMACR_RESET), 0,
+				      XILINX_VDMA_LOOP_COUNT);
 
-	if (!loop) {
+	if (err) {
 		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
 			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
 			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
@@ -777,7 +775,7 @@ static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
 
 	chan->err = false;
 
-	return 0;
+	return err;
 }
 
 /**

From 694906348d0c3a0975a0d75f14ceb3239cfeb4e0 Mon Sep 17 00:00:00 2001
From: Kedareswara rao Appana <appana.durga.rao@xilinx.com>
Date: Thu, 3 Mar 2016 23:02:42 +0530
Subject: [PATCH 5/5] dmaengine: xilinx_vdma: Remove unnecessary variable
 initializations

This patch removes the unnecessary variable initializations
in the driver.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
---
 drivers/dma/xilinx/xilinx_vdma.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index bc2ca457247e..0ee0321868d3 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -554,7 +554,7 @@ static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
  */
 static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
 {
-	int err = 0;
+	int err;
 	u32 val;
 
 	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
@@ -579,7 +579,7 @@ static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
  */
 static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
 {
-	int err = 0;
+	int err;
 	u32 val;
 
 	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
@@ -756,7 +756,7 @@ static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
  */
 static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
 {
-	int err = 0;
+	int err;
 	u32 tmp;
 
 	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);