Hi,

It seems that this patch has been stale for a month, with action needed
from the author, so I'm sending this email as a gentle reminder. Thanks!
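
For anyone catching up on the thread, the calling convention the patch
introduces works roughly as in the minimal caller-side sketch below (based
on the hunks quoted further down, not code taken from the patch itself;
d, dfn, mfn, nr and flush_flags stand in for the caller's usual locals,
and error handling is omitted):

    /*
     * Passing a non-NULL "done" pointer opts the caller into preemption:
     * on -ERESTART the number of pages already handled is returned
     * through it, and the caller resumes from that offset after
     * processing pending softirqs.
     */
    unsigned long done;
    int rc;

    while ( (rc = iommu_map(d, dfn, mfn, nr,
                            IOMMUF_readable | IOMMUF_writable,
                            &flush_flags, &done)) == -ERESTART )
    {
        dfn = dfn_add(dfn, done);
        mfn = mfn_add(mfn, done);
        nr -= done;
        process_pending_softirqs();
    }

    /* Passing NULL instead keeps the old, non-preemptible behaviour. */

Callers of iommu_legacy_{un,}map() are unaffected, as those wrappers keep
passing NULL internally.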

Kind regards,
Henry

> -----Original Message-----
> Subject: [PATCH] iommu: add preemption support to iommu_{un,}map()
> 
> The loop in iommu_{un,}map() can be arbitrarily large, and as such it
> needs to handle preemption.  Introduce a new parameter that allows
> returning the number of pages that have been processed, and whose
> presence also signals whether the function should do preemption
> checks.
> 
> Note that the cleanup done in iommu_map() can now be incomplete if
> preemption has happened, and hence callers would need to take care of
> unmapping the whole range (ie: ranges already mapped by previously
> preempted calls).  So far none of the callers care about having those
> ranges unmapped, so error handling in iommu_memory_setup() and
> arch_iommu_hwdom_init() can be kept as-is.
> 
> Note that iommu_legacy_{un,}map() is left without preemption handling:
> callers of those interfaces are not modified to pass bigger chunks, and
> hence the functions are not changed; they are legacy interfaces and
> should be replaced with iommu_{un,}map() instead if preemption is
> required.
> 
> Fixes: f3185c165d ('IOMMU/x86: perform PV Dom0 mappings in batches')
> Signed-off-by: Roger Pau Monné <roger....@citrix.com>
> ---
>  xen/arch/x86/pv/dom0_build.c        | 15 ++++++++++++---
>  xen/drivers/passthrough/iommu.c     | 26 +++++++++++++++++++-------
>  xen/drivers/passthrough/x86/iommu.c | 13 +++++++++++--
>  xen/include/xen/iommu.h             |  4 ++--
>  4 files changed, 44 insertions(+), 14 deletions(-)
> 
> diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
> index 04a4ea3c18..e5a42870ec 100644
> --- a/xen/arch/x86/pv/dom0_build.c
> +++ b/xen/arch/x86/pv/dom0_build.c
> @@ -77,7 +77,8 @@ static __init void mark_pv_pt_pages_rdonly(struct domain *d,
>           * iommu_memory_setup() ended up mapping them.
>           */
>          if ( need_iommu_pt_sync(d) &&
> -             iommu_unmap(d, _dfn(mfn_x(page_to_mfn(page))), 1, flush_flags) )
> +             iommu_unmap(d, _dfn(mfn_x(page_to_mfn(page))), 1, flush_flags,
> +                         NULL) )
>              BUG();
> 
>          /* Read-only mapping + PGC_allocated + page-table page. */
> @@ -121,13 +122,21 @@ static void __init iommu_memory_setup(struct domain *d, const char *what,
>                                        unsigned int *flush_flags)
>  {
>      int rc;
> +    unsigned long done;
>      mfn_t mfn = page_to_mfn(page);
> 
>      if ( !need_iommu_pt_sync(d) )
>          return;
> 
> -    rc = iommu_map(d, _dfn(mfn_x(mfn)), mfn, nr,
> -                   IOMMUF_readable | IOMMUF_writable, flush_flags);
> +    while ( (rc = iommu_map(d, _dfn(mfn_x(mfn)), mfn, nr,
> +                            IOMMUF_readable | IOMMUF_writable,
> +                            flush_flags, &done)) == -ERESTART )
> +    {
> +        mfn = mfn_add(mfn, done);
> +        nr -= done;
> +        process_pending_softirqs();
> +    }
> +
>      if ( rc )
>      {
>          printk(XENLOG_ERR "pre-mapping %s MFN [%lx,%lx) into IOMMU failed: %d\n",
> diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
> index 75df3aa8dd..5c2a341112 100644
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -310,11 +310,11 @@ static unsigned int mapping_order(const struct domain_iommu *hd,
> 
>  int iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
>                unsigned long page_count, unsigned int flags,
> -              unsigned int *flush_flags)
> +              unsigned int *flush_flags, unsigned long *done)
>  {
>      const struct domain_iommu *hd = dom_iommu(d);
>      unsigned long i;
> -    unsigned int order;
> +    unsigned int order, j = 0;
>      int rc = 0;
> 
>      if ( !is_iommu_enabled(d) )
> @@ -327,6 +327,12 @@ int iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
>          dfn_t dfn = dfn_add(dfn0, i);
>          mfn_t mfn = mfn_add(mfn0, i);
> 
> +        if ( done && !(++j & 0xfffff) && general_preempt_check() )
> +        {
> +            *done = i;
> +            return -ERESTART;
> +        }
> +
>          order = mapping_order(hd, dfn, mfn, page_count - i);
> 
>          rc = iommu_call(hd->platform_ops, map_page, d, dfn, mfn,
> @@ -341,7 +347,7 @@ int iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
>                     d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);
> 
>          /* while statement to satisfy __must_check */
> -        while ( iommu_unmap(d, dfn0, i, flush_flags) )
> +        while ( iommu_unmap(d, dfn0, i, flush_flags, NULL) )
>              break;
> 
>          if ( !is_hardware_domain(d) )
> @@ -365,7 +371,7 @@ int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
>                       unsigned long page_count, unsigned int flags)
>  {
>      unsigned int flush_flags = 0;
> -    int rc = iommu_map(d, dfn, mfn, page_count, flags, &flush_flags);
> +    int rc = iommu_map(d, dfn, mfn, page_count, flags, &flush_flags, NULL);
> 
>      if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
>          rc = iommu_iotlb_flush(d, dfn, page_count, flush_flags);
> @@ -374,11 +380,11 @@ int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
>  }
> 
>  int iommu_unmap(struct domain *d, dfn_t dfn0, unsigned long page_count,
> -                unsigned int *flush_flags)
> +                unsigned int *flush_flags, unsigned long *done)
>  {
>      const struct domain_iommu *hd = dom_iommu(d);
>      unsigned long i;
> -    unsigned int order;
> +    unsigned int order, j = 0;
>      int rc = 0;
> 
>      if ( !is_iommu_enabled(d) )
> @@ -389,6 +395,12 @@ int iommu_unmap(struct domain *d, dfn_t dfn0, unsigned long page_count,
>          dfn_t dfn = dfn_add(dfn0, i);
>          int err;
> 
> +        if ( done && !(++j & 0xfffff) && general_preempt_check() )
> +        {
> +            *done = i;
> +            return -ERESTART;
> +        }
> +
>          order = mapping_order(hd, dfn, _mfn(0), page_count - i);
>          err = iommu_call(hd->platform_ops, unmap_page, d, dfn,
>                           order, flush_flags);
> @@ -425,7 +437,7 @@ int iommu_unmap(struct domain *d, dfn_t dfn0, unsigned long page_count,
>  int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned long page_count)
>  {
>      unsigned int flush_flags = 0;
> -    int rc = iommu_unmap(d, dfn, page_count, &flush_flags);
> +    int rc = iommu_unmap(d, dfn, page_count, &flush_flags, NULL);
> 
>      if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
>          rc = iommu_iotlb_flush(d, dfn, page_count, flush_flags);
> diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
> index 11a4f244e4..546e6dbe2a 100644
> --- a/xen/drivers/passthrough/x86/iommu.c
> +++ b/xen/drivers/passthrough/x86/iommu.c
> @@ -403,9 +403,18 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
>          }
>          else if ( pfn != start + count || perms != start_perms )
>          {
> +            unsigned long done;
> +
>          commit:
> -            rc = iommu_map(d, _dfn(start), _mfn(start), count, start_perms,
> -                           &flush_flags);
> +            while ( (rc = iommu_map(d, _dfn(start), _mfn(start), count,
> +                                    start_perms, &flush_flags,
> +                                    &done)) == -ERESTART )
> +            {
> +                start += done;
> +                count -= done;
> +                process_pending_softirqs();
> +            }
> +
>              if ( rc )
>                  printk(XENLOG_WARNING
>                         "%pd: IOMMU identity mapping of [%lx,%lx) failed: %d\n",
> diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
> index 79529adf1f..e6643bcc1c 100644
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -155,10 +155,10 @@ enum
> 
>  int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
>                             unsigned long page_count, unsigned int flags,
> -                           unsigned int *flush_flags);
> +                           unsigned int *flush_flags, unsigned long *done);
>  int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
>                               unsigned long page_count,
> -                             unsigned int *flush_flags);
> +                             unsigned int *flush_flags, unsigned long *done);
> 
>  int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
>                                    unsigned long page_count,
> --
> 2.36.1
> 
