--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -294,7 +294,9 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
p2m_type_t p2mt;
#endif
mfn_t mfn;
+#ifdef CONFIG_HAS_PASSTHROUGH
bool *dont_flush_p, dont_flush;
+#endif
int rc;

#ifdef CONFIG_X86
@@ -385,13 +387,17 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
* Since we're likely to free the page below, we need to suspend
* xenmem_add_to_physmap()'s suppressing of IOMMU TLB flushes.
*/
+#ifdef CONFIG_HAS_PASSTHROUGH
dont_flush_p = &this_cpu(iommu_dont_flush_iotlb);
dont_flush = *dont_flush_p;
*dont_flush_p = false;
+#endif

rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);

+#ifdef CONFIG_HAS_PASSTHROUGH
*dont_flush_p = dont_flush;
+#endif

/*
* With the lack of an IOMMU on some platforms, domains with DMA-capable
@@ -839,11 +845,13 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
xatp->gpfn += start;
xatp->size -= start;

+#ifdef CONFIG_HAS_PASSTHROUGH
if ( is_iommu_enabled(d) )
{
this_cpu(iommu_dont_flush_iotlb) = 1;
extra.ppage = &pages[0];
}
+#endif

while ( xatp->size > done )
{
@@ -868,6 +876,7 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
}
}

+#ifdef CONFIG_HAS_PASSTHROUGH
if ( is_iommu_enabled(d) )
{
int ret;
@@ -894,6 +903,7 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
if ( unlikely(ret) && rc >= 0 )
rc = ret;
}
+#endif

return rc;
}
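As background for the memory.c hunks above: guest_remove_page() must make guest_physmap_remove_page() flush the IOMMU TLB even when a batched xenmem_add_to_physmap() caller has set the per-CPU iommu_dont_flush_iotlb flag, so it saves the flag, clears it around the removal, and restores it afterwards rather than simply clearing it. The standalone sketch below illustrates only that save/clear/restore pattern; flush_suppressed, do_flush() and remove_page() are invented stand-ins, not Xen symbols, and _Thread_local is used as a portable analogue of a per-CPU variable.

/*
 * Illustration only (not part of the patch): save/clear/restore a
 * per-thread "suppress flushes" flag around an operation that must
 * flush unconditionally.  All names here are invented stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool flush_suppressed; /* analogue of this_cpu(iommu_dont_flush_iotlb) */

static void do_flush(void)
{
    if ( flush_suppressed )
        return;              /* the batching caller promises to flush later */
    printf("IOTLB flush\n");
}

static void remove_page(void)
{
    /* Mirrors guest_remove_page(): suspend the suppression, then restore it. */
    bool dont_flush = flush_suppressed;

    flush_suppressed = false;  /* the removal below must flush immediately */
    do_flush();                /* stands in for guest_physmap_remove_page() */
    flush_suppressed = dont_flush;
}

int main(void)
{
    flush_suppressed = true;   /* batched path, as in xenmem_add_to_physmap() */
    remove_page();             /* still flushes, despite the batching */
    flush_suppressed = false;
    return 0;
}

Restoring the saved value, instead of unconditionally clearing the flag, matters because the removal can be reached from inside the batched loop, which still relies on its flushes being deferred afterwards.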
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -51,9 +51,15 @@ static inline bool_t dfn_eq(dfn_t x, dfn_t y)
return dfn_x(x) == dfn_x(y);
}

-extern bool_t iommu_enable, iommu_enabled;
+extern bool_t iommu_enable;
extern bool force_iommu, iommu_quarantine, iommu_verbose;
+#ifdef CONFIG_HAS_PASSTHROUGH
+extern bool_t iommu_enabled;
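The iommu.h hunk is truncated at this point. As a hedged illustration of where such a guard usually leads (an assumed pattern, not the patch's actual remaining lines), headers in this style typically pair the guarded extern with a compile-time-false fallback, so that checks like is_iommu_enabled(d) in the hunks above fold away under !CONFIG_HAS_PASSTHROUGH without callers needing their own #ifdef:

/* Illustrative pattern only, assumed rather than quoted from the patch. */
#ifdef CONFIG_HAS_PASSTHROUGH
extern bool_t iommu_enabled;
#else
#define iommu_enabled false
#endif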