>>> On 18.04.16 at 16:00, <quan...@intel.com> wrote:
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -678,8 +678,9 @@ static int xenmem_add_to_physmap(struct domain *d,
>      if ( need_iommu(d) )
>      {
>          this_cpu(iommu_dont_flush_iotlb) = 0;
> -        iommu_iotlb_flush(d, xatp->idx - done, done);
> -        iommu_iotlb_flush(d, xatp->gpfn - done, done);
> +        rc = iommu_iotlb_flush(d, xatp->idx - done, done);
> +        if ( !rc )
> +            rc = iommu_iotlb_flush(d, xatp->gpfn - done, done);

And again - best-effort flushing is wanted in all cases. I.e. you
shouldn't bypass the second flush when the first one failed, but
instead properly accumulate the return value, and also again not
clobber any negative value rc may already hold. What's worse (and
makes me wonder whether this got tested) - you also clobber the
positive return value this function may produce, even in the case
the flushes succeed (and hence the function as a whole was
successful).
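
Something along these lines is what I'm after (just a sketch on my
part - the local "ret" variable and the comments are mine, not part
of your patch):

     if ( need_iommu(d) )
     {
         int ret;

         this_cpu(iommu_dont_flush_iotlb) = 0;

         /* Attempt both flushes even if the first one fails. */
         ret = iommu_iotlb_flush(d, xatp->idx - done, done);
         if ( unlikely(ret) && rc >= 0 )
             rc = ret;      /* don't overwrite an earlier error in rc */

         ret = iommu_iotlb_flush(d, xatp->gpfn - done, done);
         if ( unlikely(ret) && rc >= 0 )
             rc = ret;
     }

That way a flush error gets reported unless rc already holds an
error, while successful flushes leave rc - including a positive
continuation value - untouched.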

> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -182,8 +182,8 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
>  int iommu_do_domctl(struct xen_domctl *, struct domain *d,
>                      XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
>  
> -void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
> -void iommu_iotlb_flush_all(struct domain *d);
> +int iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
> +int iommu_iotlb_flush_all(struct domain *d);

__must_check
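
I.e. something like (sketch only, also re-wrapping the overly long
declarations):

int __must_check iommu_iotlb_flush(struct domain *d, unsigned long gfn,
                                   unsigned int page_count);
int __must_check iommu_iotlb_flush_all(struct domain *d);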

Jan

