Rather than using the global dma_direct_offset variable, have the Cell
IOMMU code use its own variable to store the direct DMA offset.
Signed-off-by: Michael Ellerman <[EMAIL PROTECTED]>
Signed-off-by: Arnd Bergmann <[EMAIL PROTECTED]>
---
 arch/powerpc/platforms/cell/iommu.c |   10 ++++++----
 1 files changed, 6 insertions(+), 4 deletions(-)
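
For reference, here is a minimal sketch (not the in-tree implementation)
of how a direct DMA mapping op could consume the per-device offset that
cell_dma_dev_setup() stores in dev->archdata.dma_data in the diff below.
The helper and map function names are hypothetical:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>

/* Hypothetical helper: cell_dma_dev_setup() stores a pointer to the
 * offset variable in archdata->dma_data, so dereference it to recover
 * the platform's direct DMA offset. */
static unsigned long sketch_dma_direct_offset(struct device *dev)
{
	return *(unsigned long *)dev->archdata.dma_data;
}

/* Hypothetical direct map op: with no IOMMU in the way, the bus
 * address is just the physical address plus the platform's fixed
 * DMA offset. */
static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size)
{
	return page_to_phys(page) + offset + sketch_dma_direct_offset(dev);
}

Making the offset variable static to this file, rather than sharing one
global across platforms, keeps the Spider/Axon offset logic local to the
Cell code, which is the point of the patch.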

Index: linux-2.6-new/arch/powerpc/platforms/cell/iommu.c
===================================================================
--- linux-2.6-new.orig/arch/powerpc/platforms/cell/iommu.c
+++ linux-2.6-new/arch/powerpc/platforms/cell/iommu.c
@@ -490,6 +490,8 @@ static struct cbe_iommu *cell_iommu_for_
        return NULL;
 }
 
+static unsigned long cell_dma_direct_offset;
+
 static void cell_dma_dev_setup(struct device *dev)
 {
        struct iommu_window *window;
@@ -497,7 +499,7 @@ static void cell_dma_dev_setup(struct de
        struct dev_archdata *archdata = &dev->archdata;
 
        if (get_pci_dma_ops() == &dma_direct_ops) {
-               archdata->dma_data = &dma_direct_offset;
+               archdata->dma_data = &cell_dma_direct_offset;
                return;
        }
 
@@ -655,7 +657,7 @@ static int __init cell_iommu_init_disabl
 
        /* If we have no Axon, we set up the spider DMA magic offset */
        if (of_find_node_by_name(NULL, "axon") == NULL)
-               dma_direct_offset = SPIDER_DMA_OFFSET;
+               cell_dma_direct_offset = SPIDER_DMA_OFFSET;
 
        /* Now we need to check to see where the memory is mapped
         * in PCI space. We assume that all busses use the same dma
@@ -689,10 +691,10 @@ static int __init cell_iommu_init_disabl
                return -ENODEV;
        }
 
-       dma_direct_offset += base;
+       cell_dma_direct_offset += base;
 
        printk("iommu: disabled, direct DMA offset is 0x%lx\n",
-              dma_direct_offset);
+              cell_dma_direct_offset);
 
        return 0;
 }
