We need to refuse to populate any p2m entry that maps reserved device
memory, so that reserved device memory ranges can never be reached
through any non-IOMMU (!iommu) path.

Signed-off-by: Tiejun Chen <tiejun.c...@intel.com>
---
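For reference, below is a minimal, self-contained sketch of the
callback-plus-context pattern this patch relies on. It is not Xen code:
struct rdm_range, struct rdm_check_ctxt, check_rdm() and for_each_rdm()
are simplified, hypothetical stand-ins for the real
iommu_get_reserved_device_memory() plumbing and for
struct p2m_get_reserved_device_memory, and the sample range/SBDF values
are made up. The caller fills a context with the gfn (and device) being
checked, the iterator invokes the callback once per reserved range, and
a return value of 1 means "reserved, refuse to populate", mirroring the
rc == 1 handling added to guest_physmap_add_entry() below.

/* Standalone illustration only -- not Xen code. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t xen_pfn_t;

/* Hypothetical reserved-device-memory range, analogous to what the
 * IOMMU driver reports to the callback. */
struct rdm_range {
    xen_pfn_t start;   /* first reserved gfn */
    xen_pfn_t nr;      /* number of reserved pages */
    uint32_t  sbdf;    /* owning device (segment/bus/dev/fn) */
};

/* Context handed to the callback, in the spirit of
 * struct p2m_get_reserved_device_memory. */
struct rdm_check_ctxt {
    xen_pfn_t gfn;     /* gfn we are about to populate */
    uint32_t  sbdf;    /* device assigned to the domain (simplified) */
    int       force;   /* like pci_force: check against every range */
};

/* Callback: return 1 if the gfn lies inside this reserved range. */
static int check_rdm(xen_pfn_t start, xen_pfn_t nr, uint32_t sbdf, void *ctxt)
{
    struct rdm_check_ctxt *c = ctxt;
    xen_pfn_t end = start + nr;

    if ( !c->force && c->sbdf != sbdf )
        return 0;                       /* range belongs to another device */

    return c->gfn >= start && c->gfn < end;
}

/* Stand-in for the iterator: walk all ranges, stop on the first hit. */
static int for_each_rdm(const struct rdm_range *ranges, unsigned int n,
                        int (*fn)(xen_pfn_t, xen_pfn_t, uint32_t, void *),
                        void *ctxt)
{
    unsigned int i;

    for ( i = 0; i < n; i++ )
    {
        int rc = fn(ranges[i].start, ranges[i].nr, ranges[i].sbdf, ctxt);

        if ( rc )
            return rc;                  /* 1 == reserved, < 0 == error */
    }
    return 0;
}

int main(void)
{
    struct rdm_range ranges[] = {
        { 0xab000, 0x20, 0x0810 },      /* made-up RMRR-like range */
    };
    struct rdm_check_ctxt ctxt = { .gfn = 0xab010, .sbdf = 0x0810, .force = 0 };

    if ( for_each_rdm(ranges, 1, check_rdm, &ctxt) == 1 )
        printf("gfn %#lx is reserved -- refuse to populate it\n",
               (unsigned long)ctxt.gfn);
    return 0;
}
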
 xen/arch/x86/mm/p2m.c     | 59 +++++++++++++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/p2m.h |  9 ++++++++
 2 files changed, 66 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index efa49dd..607ecd0 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -556,6 +556,40 @@ guest_physmap_remove_page(struct domain *d, unsigned long gfn,
     gfn_unlock(p2m, gfn, page_order);
 }
 
+/* Check whether we are accessing reserved device memory (RDM). */
+int p2m_check_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
+                                     u32 id, void *ctxt)
+{
+    xen_pfn_t end = start + nr;
+    unsigned int i;
+    u32 sbdf;
+    struct p2m_get_reserved_device_memory *pgrdm = ctxt;
+    struct domain *d = pgrdm->domain;
+
+    if ( d->arch.hvm_domain.pci_force )
+    {
+        if ( pgrdm->gfn >= start && pgrdm->gfn < end )
+            return 1;
+    }
+    else
+    {
+        for ( i = 0; i < d->arch.hvm_domain.num_pcidevs; i++ )
+        {
+            sbdf = PCI_SBDF2(d->arch.hvm_domain.pcidevs[i].seg,
+                             d->arch.hvm_domain.pcidevs[i].bus,
+                             d->arch.hvm_domain.pcidevs[i].devfn);
+
+            if ( sbdf == id )
+            {
+                if ( pgrdm->gfn >= start && pgrdm->gfn < end )
+                    return 1;
+            }
+        }
+    }
+
+    return 0;
+}
+
 int
 guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                         unsigned long mfn, unsigned int page_order, 
@@ -568,6 +602,7 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
     mfn_t omfn;
     int pod_count = 0;
     int rc = 0;
+    struct p2m_get_reserved_device_memory pgrdm;
 
     if ( !paging_mode_translate(d) )
     {
@@ -686,8 +721,28 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
     /* Now, actually do the two-way mapping */
     if ( mfn_valid(_mfn(mfn)) ) 
     {
-        rc = p2m_set_entry(p2m, gfn, _mfn(mfn), page_order, t,
-                           p2m->default_access);
+        pgrdm.gfn = gfn;
+        pgrdm.domain = d;
+        if ( !is_hardware_domain(d) && iommu_use_hap_pt(d) )
+        {
+            rc = iommu_get_reserved_device_memory(p2m_check_reserved_device_memory,
+                                                  &pgrdm);
+            /* We always avoid populating reserved device memory. */
+            if ( rc == 1 )
+            {
+                rc = -EBUSY;
+                goto out;
+            }
+            else if ( rc < 0 )
+            {
+                printk(XENLOG_G_WARNING
+                       "Can't check reserved device memory for Dom%d.\n",
+                       d->domain_id);
+                goto out;
+            }
+        }
+
+        rc = p2m_set_entry(p2m, gfn, _mfn(mfn), page_order, t, p2m->default_access);
         if ( rc )
             goto out; /* Failed to update p2m, bail without updating m2p. */
 
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 5f7fe71..99f7fb7 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -709,6 +709,15 @@ static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt)
     return flags;
 }
 
+struct p2m_get_reserved_device_memory {
+    unsigned long gfn;
+    struct domain *domain;
+};
+
+/* Check whether we are accessing reserved device memory (RDM). */
+extern int p2m_check_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
+                                            u32 id, void *ctxt);
+
 #endif /* _XEN_P2M_H */
 
 /*
-- 
1.9.1

