On Fri, Jun 08, 2018 at 03:46:29PM +1000, Alexey Kardashevskiy wrote:
> Right now we have allocation code in pci-ioda.c and traversing code in
> pci.c, let's keep them together. However both files are big enough
> already so let's move this business to a new file.
> 
> While we are at it, move the code which links IOMMU table groups to
> IOMMU tables as it is not specific to any PNV PHB model.
> 
> This puts the exported symbols from the new file together.
> 
> This fixes several warnings from checkpatch.pl like this:
> "WARNING: Prefer 'unsigned int' to bare use of 'unsigned'".
> 
> As this is almost cut-n-paste, there should be no behavioral change.
> 
> Signed-off-by: Alexey Kardashevskiy <a...@ozlabs.ru>

Reviewed-by: David Gibson <da...@gibson.dropbear.id.au>

> ---
>  arch/powerpc/platforms/powernv/Makefile       |   2 +-
>  arch/powerpc/platforms/powernv/pci.h          |  41 ++--
>  arch/powerpc/platforms/powernv/pci-ioda-tce.c | 313 
> ++++++++++++++++++++++++++
>  arch/powerpc/platforms/powernv/pci-ioda.c     | 146 ------------
>  arch/powerpc/platforms/powernv/pci.c          | 158 -------------
>  5 files changed, 340 insertions(+), 320 deletions(-)
>  create mode 100644 arch/powerpc/platforms/powernv/pci-ioda-tce.c
> 
> diff --git a/arch/powerpc/platforms/powernv/Makefile 
> b/arch/powerpc/platforms/powernv/Makefile
> index 703a350..b540ce8e 100644
> --- a/arch/powerpc/platforms/powernv/Makefile
> +++ b/arch/powerpc/platforms/powernv/Makefile
> @@ -6,7 +6,7 @@ obj-y                 += opal-msglog.o opal-hmi.o 
> opal-power.o opal-irqchip.o
>  obj-y                        += opal-kmsg.o opal-powercap.o opal-psr.o 
> opal-sensor-groups.o
>  
>  obj-$(CONFIG_SMP)    += smp.o subcore.o subcore-asm.o
> -obj-$(CONFIG_PCI)    += pci.o pci-ioda.o npu-dma.o
> +obj-$(CONFIG_PCI)    += pci.o pci-ioda.o npu-dma.o pci-ioda-tce.o
>  obj-$(CONFIG_CXL_BASE)       += pci-cxl.o
>  obj-$(CONFIG_EEH)    += eeh-powernv.o
>  obj-$(CONFIG_PPC_SCOM)       += opal-xscom.o
> diff --git a/arch/powerpc/platforms/powernv/pci.h 
> b/arch/powerpc/platforms/powernv/pci.h
> index 1408247..f507baf 100644
> --- a/arch/powerpc/platforms/powernv/pci.h
> +++ b/arch/powerpc/platforms/powernv/pci.h
> @@ -202,13 +202,6 @@ struct pnv_phb {
>  };
>  
>  extern struct pci_ops pnv_pci_ops;
> -extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
> -             unsigned long uaddr, enum dma_data_direction direction,
> -             unsigned long attrs);
> -extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
> -extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
> -             unsigned long *hpa, enum dma_data_direction *direction);
> -extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
>  
>  void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
>                               unsigned char *log_buff);
> @@ -218,14 +211,6 @@ int pnv_pci_cfg_write(struct pci_dn *pdn,
>                     int where, int size, u32 val);
>  extern struct iommu_table *pnv_pci_table_alloc(int nid);
>  
> -extern long pnv_pci_link_table_and_group(int node, int num,
> -             struct iommu_table *tbl,
> -             struct iommu_table_group *table_group);
> -extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
> -             struct iommu_table_group *table_group);
> -extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
> -                                   void *tce_mem, u64 tce_size,
> -                                   u64 dma_offset, unsigned page_shift);
>  extern void pnv_pci_init_ioda_hub(struct device_node *np);
>  extern void pnv_pci_init_ioda2_phb(struct device_node *np);
>  extern void pnv_pci_init_npu_phb(struct device_node *np);
> @@ -273,4 +258,30 @@ extern void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev 
> *pdev);
>  /* phb ops (cxl switches these when enabling the kernel api on the phb) */
>  extern const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops;
>  
> +/* pci-ioda-tce.c */
> +#define POWERNV_IOMMU_DEFAULT_LEVELS 1
> +#define POWERNV_IOMMU_MAX_LEVELS     5
> +
> +extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
> +             unsigned long uaddr, enum dma_data_direction direction,
> +             unsigned long attrs);
> +extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
> +extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
> +             unsigned long *hpa, enum dma_data_direction *direction);
> +extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
> +
> +extern long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
> +             __u32 page_shift, __u64 window_size, __u32 levels,
> +             struct iommu_table *tbl);
> +extern void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
> +
> +extern long pnv_pci_link_table_and_group(int node, int num,
> +             struct iommu_table *tbl,
> +             struct iommu_table_group *table_group);
> +extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
> +             struct iommu_table_group *table_group);
> +extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
> +             void *tce_mem, u64 tce_size,
> +             u64 dma_offset, unsigned int page_shift);
> +
>  #endif /* __POWERNV_PCI_H */
> diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c 
> b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
> new file mode 100644
> index 0000000..700ceb1
> --- /dev/null
> +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
> @@ -0,0 +1,313 @@
> +// SPDX-License-Identifier: GPL-2.0+
> +/*
> + * TCE helpers for IODA PCI/PCIe on PowerNV platforms
> + *
> + * Copyright 2018 IBM Corp.
> + *
> + * This program is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU General Public License
> + * as published by the Free Software Foundation; either version
> + * 2 of the License, or (at your option) any later version.
> + */
> +
> +#include <linux/kernel.h>
> +#include <linux/iommu.h>
> +
> +#include <asm/iommu.h>
> +#include <asm/tce.h>
> +#include "pci.h"
> +
> +void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
> +             void *tce_mem, u64 tce_size,
> +             u64 dma_offset, unsigned int page_shift)
> +{
> +     tbl->it_blocksize = 16;
> +     tbl->it_base = (unsigned long)tce_mem;
> +     tbl->it_page_shift = page_shift;
> +     tbl->it_offset = dma_offset >> tbl->it_page_shift;
> +     tbl->it_index = 0;
> +     tbl->it_size = tce_size >> 3;
> +     tbl->it_busno = 0;
> +     tbl->it_type = TCE_PCI;
> +}
> +
> +static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
> +{
> +     __be64 *tmp = ((__be64 *)tbl->it_base);
> +     int  level = tbl->it_indirect_levels;
> +     const long shift = ilog2(tbl->it_level_size);
> +     unsigned long mask = (tbl->it_level_size - 1) << (level * shift);
> +
> +     while (level) {
> +             int n = (idx & mask) >> (level * shift);
> +             unsigned long tce = be64_to_cpu(tmp[n]);
> +
> +             tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
> +             idx &= ~mask;
> +             mask >>= shift;
> +             --level;
> +     }
> +
> +     return tmp + idx;
> +}
> +
> +int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
> +             unsigned long uaddr, enum dma_data_direction direction,
> +             unsigned long attrs)
> +{
> +     u64 proto_tce = iommu_direction_to_tce_perm(direction);
> +     u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
> +     long i;
> +
> +     if (proto_tce & TCE_PCI_WRITE)
> +             proto_tce |= TCE_PCI_READ;
> +
> +     for (i = 0; i < npages; i++) {
> +             unsigned long newtce = proto_tce |
> +                     ((rpn + i) << tbl->it_page_shift);
> +             unsigned long idx = index - tbl->it_offset + i;
> +
> +             *(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
> +     }
> +
> +     return 0;
> +}
> +
> +#ifdef CONFIG_IOMMU_API
> +int pnv_tce_xchg(struct iommu_table *tbl, long index,
> +             unsigned long *hpa, enum dma_data_direction *direction)
> +{
> +     u64 proto_tce = iommu_direction_to_tce_perm(*direction);
> +     unsigned long newtce = *hpa | proto_tce, oldtce;
> +     unsigned long idx = index - tbl->it_offset;
> +
> +     BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
> +
> +     if (newtce & TCE_PCI_WRITE)
> +             newtce |= TCE_PCI_READ;
> +
> +     oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
> +     *hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
> +     *direction = iommu_tce_direction(oldtce);
> +
> +     return 0;
> +}
> +#endif
> +
> +void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
> +{
> +     long i;
> +
> +     for (i = 0; i < npages; i++) {
> +             unsigned long idx = index - tbl->it_offset + i;
> +
> +             *(pnv_tce(tbl, idx)) = cpu_to_be64(0);
> +     }
> +}
> +
> +unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
> +{
> +     return be64_to_cpu(*(pnv_tce(tbl, index - tbl->it_offset)));
> +}
> +
> +static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
> +             unsigned long size, unsigned int levels)
> +{
> +     const unsigned long addr_ul = (unsigned long) addr &
> +                     ~(TCE_PCI_READ | TCE_PCI_WRITE);
> +
> +     if (levels) {
> +             long i;
> +             u64 *tmp = (u64 *) addr_ul;
> +
> +             for (i = 0; i < size; ++i) {
> +                     unsigned long hpa = be64_to_cpu(tmp[i]);
> +
> +                     if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
> +                             continue;
> +
> +                     pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
> +                                     levels - 1);
> +             }
> +     }
> +
> +     free_pages(addr_ul, get_order(size << 3));
> +}
> +
> +void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
> +{
> +     const unsigned long size = tbl->it_indirect_levels ?
> +                     tbl->it_level_size : tbl->it_size;
> +
> +     if (!tbl->it_size)
> +             return;
> +
> +     pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
> +                     tbl->it_indirect_levels);
> +}
> +
> +static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned int 
> shift,
> +             unsigned int levels, unsigned long limit,
> +             unsigned long *current_offset, unsigned long *total_allocated)
> +{
> +     struct page *tce_mem = NULL;
> +     __be64 *addr, *tmp;
> +     unsigned int order = max_t(unsigned int, shift, PAGE_SHIFT) -
> +                     PAGE_SHIFT;
> +     unsigned long allocated = 1UL << (order + PAGE_SHIFT);
> +     unsigned int entries = 1UL << (shift - 3);
> +     long i;
> +
> +     tce_mem = alloc_pages_node(nid, GFP_KERNEL, order);
> +     if (!tce_mem) {
> +             pr_err("Failed to allocate a TCE memory, order=%d\n", order);
> +             return NULL;
> +     }
> +     addr = page_address(tce_mem);
> +     memset(addr, 0, allocated);
> +     *total_allocated += allocated;
> +
> +     --levels;
> +     if (!levels) {
> +             *current_offset += allocated;
> +             return addr;
> +     }
> +
> +     for (i = 0; i < entries; ++i) {
> +             tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
> +                             levels, limit, current_offset, total_allocated);
> +             if (!tmp)
> +                     break;
> +
> +             addr[i] = cpu_to_be64(__pa(tmp) |
> +                             TCE_PCI_READ | TCE_PCI_WRITE);
> +
> +             if (*current_offset >= limit)
> +                     break;
> +     }
> +
> +     return addr;
> +}
> +
> +long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
> +             __u32 page_shift, __u64 window_size, __u32 levels,
> +             struct iommu_table *tbl)
> +{
> +     void *addr;
> +     unsigned long offset = 0, level_shift, total_allocated = 0;
> +     const unsigned int window_shift = ilog2(window_size);
> +     unsigned int entries_shift = window_shift - page_shift;
> +     unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
> +                     PAGE_SHIFT);
> +     const unsigned long tce_table_size = 1UL << table_shift;
> +
> +     if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
> +             return -EINVAL;
> +
> +     if (!is_power_of_2(window_size))
> +             return -EINVAL;
> +
> +     /* Adjust direct table size from window_size and levels */
> +     entries_shift = (entries_shift + levels - 1) / levels;
> +     level_shift = entries_shift + 3;
> +     level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);
> +
> +     if ((level_shift - 3) * levels + page_shift >= 55)
> +             return -EINVAL;
> +
> +     /* Allocate TCE table */
> +     addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
> +                     levels, tce_table_size, &offset, &total_allocated);
> +
> +     /* addr==NULL means that the first level allocation failed */
> +     if (!addr)
> +             return -ENOMEM;
> +
> +     /*
> +      * First level was allocated but some lower level failed as
> +      * we did not allocate as much as we wanted,
> +      * release partially allocated table.
> +      */
> +     if (offset < tce_table_size) {
> +             pnv_pci_ioda2_table_do_free_pages(addr,
> +                             1ULL << (level_shift - 3), levels - 1);
> +             return -ENOMEM;
> +     }
> +
> +     /* Setup linux iommu table */
> +     pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
> +                     page_shift);
> +     tbl->it_level_size = 1ULL << (level_shift - 3);
> +     tbl->it_indirect_levels = levels - 1;
> +     tbl->it_allocated_size = total_allocated;
> +
> +     pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
> +                     window_size, tce_table_size, bus_offset);
> +
> +     return 0;
> +}
> +
> +static void pnv_iommu_table_group_link_free(struct rcu_head *head)
> +{
> +     struct iommu_table_group_link *tgl = container_of(head,
> +                     struct iommu_table_group_link, rcu);
> +
> +     kfree(tgl);
> +}
> +
> +void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
> +             struct iommu_table_group *table_group)
> +{
> +     long i;
> +     bool found;
> +     struct iommu_table_group_link *tgl;
> +
> +     if (!tbl || !table_group)
> +             return;
> +
> +     /* Remove link to a group from table's list of attached groups */
> +     found = false;
> +     list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
> +             if (tgl->table_group == table_group) {
> +                     list_del_rcu(&tgl->next);
> +                     call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
> +                     found = true;
> +                     break;
> +             }
> +     }
> +     if (WARN_ON(!found))
> +             return;
> +
> +     /* Clean a pointer to iommu_table in iommu_table_group::tables[] */
> +     found = false;
> +     for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
> +             if (table_group->tables[i] == tbl) {
> +                     table_group->tables[i] = NULL;
> +                     found = true;
> +                     break;
> +             }
> +     }
> +     WARN_ON(!found);
> +}
> +
> +long pnv_pci_link_table_and_group(int node, int num,
> +             struct iommu_table *tbl,
> +             struct iommu_table_group *table_group)
> +{
> +     struct iommu_table_group_link *tgl = NULL;
> +
> +     if (WARN_ON(!tbl || !table_group))
> +             return -EINVAL;
> +
> +     tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
> +                     node);
> +     if (!tgl)
> +             return -ENOMEM;
> +
> +     tgl->table_group = table_group;
> +     list_add_rcu(&tgl->next, &tbl->it_group_list);
> +
> +     table_group->tables[num] = tbl;
> +
> +     return 0;
> +}
> diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c 
> b/arch/powerpc/platforms/powernv/pci-ioda.c
> index d4c60b6..9577059 100644
> --- a/arch/powerpc/platforms/powernv/pci-ioda.c
> +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
> @@ -51,12 +51,8 @@
>  #define PNV_IODA1_M64_SEGS   8       /* Segments per M64 BAR */
>  #define PNV_IODA1_DMA32_SEGSIZE      0x10000000
>  
> -#define POWERNV_IOMMU_DEFAULT_LEVELS 1
> -#define POWERNV_IOMMU_MAX_LEVELS     5
> -
>  static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK",
>                                             "NPU_OCAPI" };
> -static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
>  
>  void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
>                           const char *fmt, ...)
> @@ -2464,10 +2460,6 @@ void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, 
> bool enable)
>               pe->tce_bypass_enabled = enable;
>  }
>  
> -static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
> -             __u32 page_shift, __u64 window_size, __u32 levels,
> -             struct iommu_table *tbl);
> -
>  static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
>               int num, __u32 page_shift, __u64 window_size, __u32 levels,
>               struct iommu_table **ptbl)
> @@ -2775,144 +2767,6 @@ static void pnv_pci_ioda_setup_iommu_api(void)
>  static void pnv_pci_ioda_setup_iommu_api(void) { };
>  #endif
>  
> -static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
> -             unsigned levels, unsigned long limit,
> -             unsigned long *current_offset, unsigned long *total_allocated)
> -{
> -     struct page *tce_mem = NULL;
> -     __be64 *addr, *tmp;
> -     unsigned order = max_t(unsigned, shift, PAGE_SHIFT) - PAGE_SHIFT;
> -     unsigned long allocated = 1UL << (order + PAGE_SHIFT);
> -     unsigned entries = 1UL << (shift - 3);
> -     long i;
> -
> -     tce_mem = alloc_pages_node(nid, GFP_KERNEL, order);
> -     if (!tce_mem) {
> -             pr_err("Failed to allocate a TCE memory, order=%d\n", order);
> -             return NULL;
> -     }
> -     addr = page_address(tce_mem);
> -     memset(addr, 0, allocated);
> -     *total_allocated += allocated;
> -
> -     --levels;
> -     if (!levels) {
> -             *current_offset += allocated;
> -             return addr;
> -     }
> -
> -     for (i = 0; i < entries; ++i) {
> -             tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
> -                             levels, limit, current_offset, total_allocated);
> -             if (!tmp)
> -                     break;
> -
> -             addr[i] = cpu_to_be64(__pa(tmp) |
> -                             TCE_PCI_READ | TCE_PCI_WRITE);
> -
> -             if (*current_offset >= limit)
> -                     break;
> -     }
> -
> -     return addr;
> -}
> -
> -static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
> -             unsigned long size, unsigned level);
> -
> -static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
> -             __u32 page_shift, __u64 window_size, __u32 levels,
> -             struct iommu_table *tbl)
> -{
> -     void *addr;
> -     unsigned long offset = 0, level_shift, total_allocated = 0;
> -     const unsigned window_shift = ilog2(window_size);
> -     unsigned entries_shift = window_shift - page_shift;
> -     unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
> -     const unsigned long tce_table_size = 1UL << table_shift;
> -
> -     if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
> -             return -EINVAL;
> -
> -     if (!is_power_of_2(window_size))
> -             return -EINVAL;
> -
> -     /* Adjust direct table size from window_size and levels */
> -     entries_shift = (entries_shift + levels - 1) / levels;
> -     level_shift = entries_shift + 3;
> -     level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
> -
> -     if ((level_shift - 3) * levels + page_shift >= 55)
> -             return -EINVAL;
> -
> -     /* Allocate TCE table */
> -     addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
> -                     levels, tce_table_size, &offset, &total_allocated);
> -
> -     /* addr==NULL means that the first level allocation failed */
> -     if (!addr)
> -             return -ENOMEM;
> -
> -     /*
> -      * First level was allocated but some lower level failed as
> -      * we did not allocate as much as we wanted,
> -      * release partially allocated table.
> -      */
> -     if (offset < tce_table_size) {
> -             pnv_pci_ioda2_table_do_free_pages(addr,
> -                             1ULL << (level_shift - 3), levels - 1);
> -             return -ENOMEM;
> -     }
> -
> -     /* Setup linux iommu table */
> -     pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
> -                     page_shift);
> -     tbl->it_level_size = 1ULL << (level_shift - 3);
> -     tbl->it_indirect_levels = levels - 1;
> -     tbl->it_allocated_size = total_allocated;
> -
> -     pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
> -                     window_size, tce_table_size, bus_offset);
> -
> -     return 0;
> -}
> -
> -static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
> -             unsigned long size, unsigned level)
> -{
> -     const unsigned long addr_ul = (unsigned long) addr &
> -                     ~(TCE_PCI_READ | TCE_PCI_WRITE);
> -
> -     if (level) {
> -             long i;
> -             u64 *tmp = (u64 *) addr_ul;
> -
> -             for (i = 0; i < size; ++i) {
> -                     unsigned long hpa = be64_to_cpu(tmp[i]);
> -
> -                     if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
> -                             continue;
> -
> -                     pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
> -                                     level - 1);
> -             }
> -     }
> -
> -     free_pages(addr_ul, get_order(size << 3));
> -}
> -
> -static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
> -{
> -     const unsigned long size = tbl->it_indirect_levels ?
> -                     tbl->it_level_size : tbl->it_size;
> -
> -     if (!tbl->it_size)
> -             return;
> -
> -     pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
> -                     tbl->it_indirect_levels);
> -}
> -
>  static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
>  {
>       struct pci_controller *hose = phb->hose;
> diff --git a/arch/powerpc/platforms/powernv/pci.c 
> b/arch/powerpc/platforms/powernv/pci.c
> index b265ecc..13aef23 100644
> --- a/arch/powerpc/platforms/powernv/pci.c
> +++ b/arch/powerpc/platforms/powernv/pci.c
> @@ -802,85 +802,6 @@ struct pci_ops pnv_pci_ops = {
>       .write = pnv_pci_write_config,
>  };
>  
> -static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
> -{
> -     __be64 *tmp = ((__be64 *)tbl->it_base);
> -     int  level = tbl->it_indirect_levels;
> -     const long shift = ilog2(tbl->it_level_size);
> -     unsigned long mask = (tbl->it_level_size - 1) << (level * shift);
> -
> -     while (level) {
> -             int n = (idx & mask) >> (level * shift);
> -             unsigned long tce = be64_to_cpu(tmp[n]);
> -
> -             tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
> -             idx &= ~mask;
> -             mask >>= shift;
> -             --level;
> -     }
> -
> -     return tmp + idx;
> -}
> -
> -int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
> -             unsigned long uaddr, enum dma_data_direction direction,
> -             unsigned long attrs)
> -{
> -     u64 proto_tce = iommu_direction_to_tce_perm(direction);
> -     u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
> -     long i;
> -
> -     if (proto_tce & TCE_PCI_WRITE)
> -             proto_tce |= TCE_PCI_READ;
> -
> -     for (i = 0; i < npages; i++) {
> -             unsigned long newtce = proto_tce |
> -                     ((rpn + i) << tbl->it_page_shift);
> -             unsigned long idx = index - tbl->it_offset + i;
> -
> -             *(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
> -     }
> -
> -     return 0;
> -}
> -
> -#ifdef CONFIG_IOMMU_API
> -int pnv_tce_xchg(struct iommu_table *tbl, long index,
> -             unsigned long *hpa, enum dma_data_direction *direction)
> -{
> -     u64 proto_tce = iommu_direction_to_tce_perm(*direction);
> -     unsigned long newtce = *hpa | proto_tce, oldtce;
> -     unsigned long idx = index - tbl->it_offset;
> -
> -     BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
> -
> -     if (newtce & TCE_PCI_WRITE)
> -             newtce |= TCE_PCI_READ;
> -
> -     oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
> -     *hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
> -     *direction = iommu_tce_direction(oldtce);
> -
> -     return 0;
> -}
> -#endif
> -
> -void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
> -{
> -     long i;
> -
> -     for (i = 0; i < npages; i++) {
> -             unsigned long idx = index - tbl->it_offset + i;
> -
> -             *(pnv_tce(tbl, idx)) = cpu_to_be64(0);
> -     }
> -}
> -
> -unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
> -{
> -     return be64_to_cpu(*(pnv_tce(tbl, index - tbl->it_offset)));
> -}
> -
>  struct iommu_table *pnv_pci_table_alloc(int nid)
>  {
>       struct iommu_table *tbl;
> @@ -895,85 +816,6 @@ struct iommu_table *pnv_pci_table_alloc(int nid)
>       return tbl;
>  }
>  
> -long pnv_pci_link_table_and_group(int node, int num,
> -             struct iommu_table *tbl,
> -             struct iommu_table_group *table_group)
> -{
> -     struct iommu_table_group_link *tgl = NULL;
> -
> -     if (WARN_ON(!tbl || !table_group))
> -             return -EINVAL;
> -
> -     tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
> -                     node);
> -     if (!tgl)
> -             return -ENOMEM;
> -
> -     tgl->table_group = table_group;
> -     list_add_rcu(&tgl->next, &tbl->it_group_list);
> -
> -     table_group->tables[num] = tbl;
> -
> -     return 0;
> -}
> -
> -static void pnv_iommu_table_group_link_free(struct rcu_head *head)
> -{
> -     struct iommu_table_group_link *tgl = container_of(head,
> -                     struct iommu_table_group_link, rcu);
> -
> -     kfree(tgl);
> -}
> -
> -void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
> -             struct iommu_table_group *table_group)
> -{
> -     long i;
> -     bool found;
> -     struct iommu_table_group_link *tgl;
> -
> -     if (!tbl || !table_group)
> -             return;
> -
> -     /* Remove link to a group from table's list of attached groups */
> -     found = false;
> -     list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
> -             if (tgl->table_group == table_group) {
> -                     list_del_rcu(&tgl->next);
> -                     call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
> -                     found = true;
> -                     break;
> -             }
> -     }
> -     if (WARN_ON(!found))
> -             return;
> -
> -     /* Clean a pointer to iommu_table in iommu_table_group::tables[] */
> -     found = false;
> -     for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
> -             if (table_group->tables[i] == tbl) {
> -                     table_group->tables[i] = NULL;
> -                     found = true;
> -                     break;
> -             }
> -     }
> -     WARN_ON(!found);
> -}
> -
> -void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
> -                            void *tce_mem, u64 tce_size,
> -                            u64 dma_offset, unsigned page_shift)
> -{
> -     tbl->it_blocksize = 16;
> -     tbl->it_base = (unsigned long)tce_mem;
> -     tbl->it_page_shift = page_shift;
> -     tbl->it_offset = dma_offset >> tbl->it_page_shift;
> -     tbl->it_index = 0;
> -     tbl->it_size = tce_size >> 3;
> -     tbl->it_busno = 0;
> -     tbl->it_type = TCE_PCI;
> -}
> -
>  void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
>  {
>       struct pci_controller *hose = pci_bus_to_host(pdev->bus);

-- 
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson

Attachment: signature.asc
Description: PGP signature

Reply via email to