On 3/26/13 3:43 PM, "Dave Jiang" <dave.ji...@intel.com> wrote:

>v3.3 provides support for write back descriptor error status. This allows
>reporting of errors in a descriptor field. In supporting this, certain
>errors such as P/Q validation errors no longer halt the channel. The DMA
>engine can continue to execute until the end of the chain and allow
>software to report the "errors" up the stack. We also mask those error
>interrupts and handle them at the end, once the "chain" has completed.
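
Just for context on "report the errors up the stack": the consumer side ends
up looking roughly like the sketch below.  Illustrative only, not part of this
patch -- validate_stripe()/pq_val_done() are made-up names; the SUM_CHECK_*
bits and the async_tx calls are the existing upstream API.

#include <linux/async_tx.h>
#include <linux/printk.h>

/*
 * Sketch: the caller hands a sum_check_flags pointer to the validate op
 * (that is what desc->result points at in the hunk below) and tests the
 * bits once the chain completes, instead of the channel halting on a
 * P/Q miscompare.  Caller zeroes *pqres before submitting.
 */
static void pq_val_done(void *ctx)
{
        enum sum_check_flags *pqres = ctx;

        if (*pqres & SUM_CHECK_P_RESULT)
                pr_warn("P parity miscompare reported by the engine\n");
        if (*pqres & SUM_CHECK_Q_RESULT)
                pr_warn("Q syndrome miscompare reported by the engine\n");
}

/* blocks[] holds the data disks followed by P and Q; *pqres must stay
 * valid until pq_val_done() runs.
 */
static void validate_stripe(struct page **blocks, int disks, size_t len,
                            struct page *spare, addr_conv_t *scribble,
                            enum sum_check_flags *pqres)
{
        struct async_submit_ctl submit;

        init_async_submit(&submit, ASYNC_TX_ACK, NULL, pq_val_done,
                          pqres, scribble);
        async_syndrome_val(blocks, 0, disks, len, pqres, spare, &submit);
}
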
>
>Signed-off-by: Dave Jiang <dave.ji...@intel.com>
>---
> drivers/dma/ioat/dma_v3.c    |   87 ++++++++++++++++++++++++++++++++++++------
> drivers/dma/ioat/hw.h        |   17 +++++++-
> drivers/dma/ioat/registers.h |    1
> 3 files changed, 90 insertions(+), 15 deletions(-)
>
>diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
>index 230a8bc..83d44f3 100644
>--- a/drivers/dma/ioat/dma_v3.c
>+++ b/drivers/dma/ioat/dma_v3.c
>@@ -498,6 +498,32 @@ static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
>       return true;
> }
> 
>+static void desc_get_errstat(struct ioat_ring_ent *desc)
>+{
>+      struct ioat_dma_descriptor *hw = desc->hw;
>+
>+      switch (hw->ctl_f.op) {
>+      case IOAT_OP_PQ_VAL:
>+      case IOAT_OP_PQ_VAL_16S:
>+      {
>+              struct ioat_pq_descriptor *pq = desc->pq;
>+
>+              /* check if there's error written */
>+              if (!pq->dwbes_f.wbes)
>+                      return;
>+
>+              if (pq->dwbes_f.p_val_err)
>+                      *desc->result |= SUM_CHECK_P_RESULT;
>+
>+              if (pq->dwbes_f.q_val_err)
>+                      *desc->result |= SUM_CHECK_Q_RESULT;
>+              return;
>+      }
>+      default:
>+              return;
>+      }
>+}
>+
> /**
>  * __cleanup - reclaim used descriptors
>  * @ioat: channel (ring) to clean
>@@ -535,6 +561,10 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
>               prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
>               desc = ioat2_get_ring_ent(ioat, idx + i);
>               dump_desc_dbg(ioat, desc);
>+
>+              /* set err stat if we are using dwbes */
>+              desc_get_errstat(desc);
>+
>               tx = &desc->txd;
>               if (tx->cookie) {
>                       dma_cookie_complete(tx);
>@@ -580,14 +610,15 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
> {
>       struct ioat_chan_common *chan = &ioat->base;
>       u64 phys_complete;
>+      u32 chanerr;
> 
>       spin_lock_bh(&chan->cleanup_lock);
> 
>       if (ioat3_cleanup_preamble(chan, &phys_complete))
>               __cleanup(ioat, phys_complete);
> 
>+      chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
>       if (is_ioat_halted(*chan->completion)) {
>-              u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
> 
>               if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
>                       mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);

This is now incurring an mmio read for every cleanup, which somewhat defeats
the point of posting the completion status to memory.  Should be able to
get away with just writel(IOAT_CHANERR_HANDLE_MASK) iff cleanup came
across an error write-back.
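
Something along these lines -- untested sketch against dma_v3.c, reusing the
identifiers from the hunk above; it assumes __cleanup() is changed to
accumulate and return desc_get_errstat()'s result, and ioat3_ack_wbes_errors()
is a made-up helper name:

/*
 * Let the cleanup path report whether a descriptor actually wrote back
 * an error, so the common case never touches CHANERR at all.
 */
static bool desc_get_errstat(struct ioat_ring_ent *desc)
{
        struct ioat_dma_descriptor *hw = desc->hw;

        switch (hw->ctl_f.op) {
        case IOAT_OP_PQ_VAL:
        case IOAT_OP_PQ_VAL_16S:
        {
                struct ioat_pq_descriptor *pq = desc->pq;

                /* check if there's error written */
                if (!pq->dwbes_f.wbes)
                        return false;

                if (pq->dwbes_f.p_val_err)
                        *desc->result |= SUM_CHECK_P_RESULT;

                if (pq->dwbes_f.q_val_err)
                        *desc->result |= SUM_CHECK_Q_RESULT;
                return true;
        }
        default:
                return false;
        }
}

/*
 * __cleanup() would OR the return values together and hand the result
 * back, then ioat3_cleanup() only pays for the mmio access when a
 * descriptor actually reported something:
 */
static void ioat3_ack_wbes_errors(struct ioat_chan_common *chan, bool seen_wbes)
{
        if (!seen_wbes)
                return;

        /* ack the handled errors so the next append is honored */
        writel(IOAT_CHANERR_HANDLE_MASK,
               chan->reg_base + IOAT_CHANERR_OFFSET);
}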


>@@ -595,6 +626,15 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
>               }
>       }
> 
>+      /*
>+       * with DWBES we must clear the chanerr register at the end of the
>+       * chain in order to be able to issue the next command.
>+       */
>+      if (chanerr) {
>+              writel(chanerr & IOAT_CHANERR_HANDLE_MASK,
>+                     chan->reg_base + IOAT_CHANERR_OFFSET);
>+      }
>+

Does this also mean we need to re-write dmacount?  I.e., are writes to
dmacount ignored while chanerr is non-zero?
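
To be concrete about "re-write dmacount" -- hypothetical illustration only,
not in the patch; it just reuses the IOAT_CHAN_DMACOUNT_OFFSET write the
issue-pending path already does:

/*
 * If a non-zero CHANERR really does gate the append, the error path
 * would presumably also have to re-post the ring tail after clearing
 * it, along these lines (ioat3_repost_tail() is a made-up name).
 */
static void ioat3_repost_tail(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;

        writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}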
