CB3.2 and earlier hardware has silicon bugs that require workarounds which
are no longer needed on newer hardware. In particular, we no longer have to
append a NULL op descriptor to signal the interrupt for RAID ops. This
change makes sure the legacy workarounds are applied only on legacy
hardware.

Signed-off-by: Dave Jiang <dave.ji...@intel.com>
---
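Notes: the gating boils down to reserving one extra descriptor slot on
legacy parts. A minimal standalone sketch of the pattern for reference
(descs_needed() and the flag name below are illustrative stand-ins, not
the driver's real structures):

#include <stdio.h>

#define LEGACY_COMPLETION_REQUIRED (1u << 0)

/*
 * Legacy parts need one extra NULL descriptor to carry the
 * interrupt/completion bits; newer parts set those bits on the
 * last real operation instead.
 */
static unsigned int descs_needed(unsigned int num_descs,
				 unsigned int hwbug_flags)
{
	return num_descs + !!(hwbug_flags & LEGACY_COMPLETION_REQUIRED);
}

int main(void)
{
	printf("modern: %u descs\n", descs_needed(4, 0));	/* 4 */
	printf("legacy: %u descs\n",
	       descs_needed(4, LEGACY_COMPLETION_REQUIRED));	/* 5 */
	return 0;
}
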
 drivers/dma/ioat/dma.c    |    6 +++++
 drivers/dma/ioat/dma.h    |    8 +++++++
 drivers/dma/ioat/dma_v3.c |   50 +++++++++++++++++++++++++++++++++++----------
 3 files changed, 53 insertions(+), 11 deletions(-)

diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 17a2393..e2bf3fa 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1042,6 +1042,12 @@ int ioat_probe(struct ioatdma_device *device)
        if (err)
                goto err_setup_interrupts;
 
+       if (device->init_device) {
+               err = device->init_device(device);
+               if (err)
+                       goto err_self_test;
+       }
+
        err = device->self_test(device);
        if (err)
                goto err_self_test;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index b16902c..12eab37 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -92,8 +92,14 @@ struct ioatdma_device {
        void (*cleanup_fn)(unsigned long data);
        void (*timer_fn)(unsigned long data);
        int (*self_test)(struct ioatdma_device *device);
+       int (*init_device)(struct ioatdma_device *device);
 };
 
+/* silicon bugs requiring legacy workarounds, flagged per channel */
+enum ioat_hwbugs {
+       IOAT_LEGACY_COMPLETION_REQUIRED = (1 << 0),
+};
+
 struct ioat_chan_common {
        struct dma_chan common;
        void __iomem *reg_base;
@@ -116,6 +122,8 @@ struct ioat_chan_common {
        u64 *completion;
        struct tasklet_struct cleanup_task;
        struct kobject kobj;
+
+       u32 hwbug_flags;
 };
 
 struct ioat_sysfs_entry {
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 65b912a..e66fead 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -760,7 +760,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
         * order.
         */
        if (likely(num_descs) &&
-           ioat2_check_space_lock(ioat, num_descs+1) == 0)
+           ioat2_check_space_lock(ioat, num_descs + !!(chan->hwbug_flags &
+                           IOAT_LEGACY_COMPLETION_REQUIRED)) == 0)
                idx = ioat->head;
        else
                return NULL;
@@ -814,16 +815,23 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
        pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
        dump_pq_desc_dbg(ioat, desc, ext);
 
-       /* completion descriptor carries interrupt bit */
-       compl_desc = ioat2_get_ring_ent(ioat, idx + i);
-       compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
-       hw = compl_desc->hw;
-       hw->ctl = 0;
-       hw->ctl_f.null = 1;
-       hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-       hw->ctl_f.compl_write = 1;
-       hw->size = NULL_DESC_BUFFER_SIZE;
-       dump_desc_dbg(ioat, compl_desc);
+       if (!(chan->hwbug_flags & IOAT_LEGACY_COMPLETION_REQUIRED)) {
+               /* interrupt/completion bits ride on the last descriptor */
+               pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+               pq->ctl_f.compl_write = 1;
+               compl_desc = desc;
+       } else {
+               /* completion descriptor carries interrupt bit */
+               compl_desc = ioat2_get_ring_ent(ioat, idx + i);
+               compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+               hw = compl_desc->hw;
+               hw->ctl = 0;
+               hw->ctl_f.null = 1;
+               hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+               hw->ctl_f.compl_write = 1;
+               hw->size = NULL_DESC_BUFFER_SIZE;
+               dump_desc_dbg(ioat, compl_desc);
+       }
 
        /* we leave the channel locked to ensure in order submission */
        return &compl_desc->txd;
@@ -1358,6 +1366,25 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
        return err;
 }
 
+static int ioat3_init_device(struct ioatdma_device *device)
+{
+       struct pci_dev *pdev = device->pdev;
+       struct dma_device *dma = &device->common;
+       struct dma_chan *c;
+       struct ioat_chan_common *chan;
+
+       /* only legacy (CB3.2 and earlier) silicon needs the workarounds */
+       if (!is_xeon_cb32(pdev))
+               return 0;
+
+       list_for_each_entry(c, &dma->channels, device_node) {
+               chan = to_chan_common(c);
+               chan->hwbug_flags |= IOAT_LEGACY_COMPLETION_REQUIRED;
+       }
+
+       return 0;
+}
+
 int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
        struct pci_dev *pdev = device->pdev;
@@ -1372,6 +1399,7 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
        device->enumerate_channels = ioat2_enumerate_channels;
        device->reset_hw = ioat3_reset_hw;
        device->self_test = ioat3_dma_self_test;
+       device->init_device = ioat3_init_device;
        dma = &device->common;
        dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
        dma->device_issue_pending = ioat2_issue_pending;
