The current implementation of PVH dom0 relies on vPCI to trap and handle
accesses to the MMCFG area.  The previous (v1) implementation of PVH dom0
didn't have vPCI and, like a classic PV dom0, relied on the MMCFG range
being mapped read-only.  As such, hvm_emulate_one_mmio() had to
special-case write accesses to the MMCFG area.

With PVH dom0 using vPCI, and MMCFG accesses being fully handled there,
hvm_emulate_one_mmio() should never see an access to the MMCFG area,
making that code path effectively unreachable.
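
As background, "fully handled there" means vPCI installs an MMIO intercept
over the ECAM (MMCFG) window and decodes the accesses itself.  Below is a
rough sketch of such an intercept; the names and the handler interface are
illustrative only, not Xen's actual vPCI code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative callback type; Xen's real vPCI interfaces differ. */
    typedef bool (*ecam_cfg_write_t)(unsigned int bdf, unsigned int reg,
                                     unsigned int size, uint32_t data);

    /*
     * ECAM dedicates one 4k page of config space per PCI function, so the
     * page offset into the window encodes the BDF and the low 12 bits the
     * register.  An intercept over the whole window can therefore route
     * every write to the vPCI layer, and the generic MMIO emulation path
     * never sees MMCFG at all.
     */
    static bool example_ecam_write(uint64_t addr, uint64_t ecam_base,
                                   unsigned int size, uint32_t data,
                                   ecam_cfg_write_t forward)
    {
        unsigned int bdf = (addr - ecam_base) >> 12;
        unsigned int reg = addr & 0xfff;

        return forward(bdf, reg, size, data);
    }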

Remove the dead handling and leave an ASSERT to make sure MMCFG accesses
never reach hvm_emulate_one_mmio().  With that user of
mmcfg_intercept_write() gone, the function can be moved into the
translation unit of its sole remaining user, allowing it to be made static
and built only when PV support is enabled.
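
For context on what the moved helper enforces: its
((bytes | offset) & (bytes - 1)) test rejects anything that isn't a
naturally-aligned 1-, 2- or 4-byte store.  A standalone demonstration of
that filter (written for this note, not part of the patch):

    #include <stdbool.h>
    #include <stdio.h>

    /* Accept only naturally-aligned accesses of 1, 2 or 4 bytes. */
    static bool aligned_ok(unsigned long offset, unsigned int bytes)
    {
        /*
         * bytes must be a power of two (bytes & (bytes - 1)) and offset a
         * multiple of bytes (offset & (bytes - 1)); OR-ing the operands
         * first performs both checks with a single AND.
         */
        return bytes && bytes <= 4 && !((bytes | offset) & (bytes - 1));
    }

    int main(void)
    {
        printf("%d\n", aligned_ok(0x1000, 4)); /* 1: aligned dword */
        printf("%d\n", aligned_ok(0x1002, 4)); /* 0: misaligned dword */
        printf("%d\n", aligned_ok(0x1001, 1)); /* 1: bytes can go anywhere */
        printf("%d\n", aligned_ok(0x1000, 3)); /* 0: not a power of two */
        return 0;
    }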

Signed-off-by: Roger Pau Monné <roger....@citrix.com>
---
 xen/arch/x86/hvm/emulate.c      | 22 ++++++++--------------
 xen/arch/x86/include/asm/mm.h   |  3 ---
 xen/arch/x86/mm.c               | 31 -------------------------------
 xen/arch/x86/pv/ro-page-fault.c | 31 +++++++++++++++++++++++++++++++
 4 files changed, 39 insertions(+), 48 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 04d07ccaf6a4..2c9fbacce7fb 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2858,12 +2858,6 @@ int hvm_emulate_one(
 
 int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
 {
-    static const struct x86_emulate_ops hvm_intercept_ops_mmcfg = {
-        .read       = x86emul_unhandleable_rw,
-        .insn_fetch = hvmemul_insn_fetch,
-        .write      = mmcfg_intercept_write,
-        .validate   = hvmemul_validate,
-    };
     static const struct x86_emulate_ops hvm_ro_emulate_ops_mmio = {
         .read       = x86emul_unhandleable_rw,
         .insn_fetch = hvmemul_insn_fetch,
@@ -2872,28 +2866,28 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
     };
     struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = gla, .mfn = _mfn(mfn) };
     struct hvm_emulate_ctxt ctxt;
-    const struct x86_emulate_ops *ops;
     unsigned int seg, bdf;
     int rc;
 
     if ( pci_ro_mmcfg_decode(mfn, &seg, &bdf) )
     {
-        mmio_ro_ctxt.seg = seg;
-        mmio_ro_ctxt.bdf = bdf;
-        ops = &hvm_intercept_ops_mmcfg;
+        /* Should always be handled by vPCI for PVH dom0. */
+        gdprintk(XENLOG_ERR, "unhandled MMCFG access for %pp\n",
+                 &PCI_SBDF(seg, bdf));
+        ASSERT_UNREACHABLE();
+        return X86EMUL_UNHANDLEABLE;
     }
-    else
-        ops = &hvm_ro_emulate_ops_mmio;
 
     hvm_emulate_init_once(&ctxt, x86_insn_is_mem_write,
                           guest_cpu_user_regs());
     ctxt.ctxt.data = &mmio_ro_ctxt;
 
-    switch ( rc = _hvm_emulate_one(&ctxt, ops, VIO_no_completion) )
+    switch ( rc = _hvm_emulate_one(&ctxt, &hvm_ro_emulate_ops_mmio,
+                                   VIO_no_completion) )
     {
     case X86EMUL_UNHANDLEABLE:
     case X86EMUL_UNIMPLEMENTED:
-        hvm_dump_emulation_state(XENLOG_G_WARNING, "MMCFG", &ctxt, rc);
+        hvm_dump_emulation_state(XENLOG_G_WARNING, "RO MMIO", &ctxt, rc);
         break;
     case X86EMUL_EXCEPTION:
         hvm_inject_event(&ctxt.ctxt.event);
diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
index 6c7e66ee21ab..a1bc8cc27451 100644
--- a/xen/arch/x86/include/asm/mm.h
+++ b/xen/arch/x86/include/asm/mm.h
@@ -553,9 +553,6 @@ struct mmio_ro_emulate_ctxt {
 int cf_check mmio_ro_emulated_write(
     enum x86_segment seg, unsigned long offset, void *p_data,
     unsigned int bytes, struct x86_emulate_ctxt *ctxt);
-int cf_check mmcfg_intercept_write(
-    enum x86_segment seg, unsigned long offset, void *p_data,
-    unsigned int bytes, struct x86_emulate_ctxt *ctxt);
 
 int audit_adjust_pgtables(struct domain *d, int dir, int noisy);
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index b294497a140d..4fecd37aeca0 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5200,37 +5200,6 @@ int cf_check mmio_ro_emulated_write(
     return X86EMUL_OKAY;
 }
 
-int cf_check mmcfg_intercept_write(
-    enum x86_segment seg,
-    unsigned long offset,
-    void *p_data,
-    unsigned int bytes,
-    struct x86_emulate_ctxt *ctxt)
-{
-    struct mmio_ro_emulate_ctxt *mmio_ctxt = ctxt->data;
-
-    /*
-     * Only allow naturally-aligned stores no wider than 4 bytes to the
-     * original %cr2 address.
-     */
-    if ( ((bytes | offset) & (bytes - 1)) || bytes > 4 || !bytes ||
-         offset != mmio_ctxt->cr2 )
-    {
-        gdprintk(XENLOG_WARNING, "bad write (cr2=%lx, addr=%lx, bytes=%u)\n",
-                mmio_ctxt->cr2, offset, bytes);
-        return X86EMUL_UNHANDLEABLE;
-    }
-
-    offset &= 0xfff;
-    if ( pci_conf_write_intercept(mmio_ctxt->seg, mmio_ctxt->bdf,
-                                  offset, bytes, p_data) >= 0 )
-        pci_mmcfg_write(mmio_ctxt->seg, PCI_BUS(mmio_ctxt->bdf),
-                        PCI_DEVFN(mmio_ctxt->bdf), offset, bytes,
-                        *(uint32_t *)p_data);
-
-    return X86EMUL_OKAY;
-}
-
 /*
  * For these PTE APIs, the caller must follow the alloc-map-unmap-free
  * lifecycle, which means explicitly mapping the PTE pages before accessing
diff --git a/xen/arch/x86/pv/ro-page-fault.c b/xen/arch/x86/pv/ro-page-fault.c
index 372187e9a096..11b01c479e43 100644
--- a/xen/arch/x86/pv/ro-page-fault.c
+++ b/xen/arch/x86/pv/ro-page-fault.c
@@ -298,6 +298,37 @@ static int ptwr_do_page_fault(struct x86_emulate_ctxt *ctxt,
  * fault handling for read-only MMIO pages
  */
 
+static int cf_check mmcfg_intercept_write(
+    enum x86_segment seg,
+    unsigned long offset,
+    void *p_data,
+    unsigned int bytes,
+    struct x86_emulate_ctxt *ctxt)
+{
+    struct mmio_ro_emulate_ctxt *mmio_ctxt = ctxt->data;
+
+    /*
+     * Only allow naturally-aligned stores no wider than 4 bytes to the
+     * original %cr2 address.
+     */
+    if ( ((bytes | offset) & (bytes - 1)) || bytes > 4 || !bytes ||
+         offset != mmio_ctxt->cr2 )
+    {
+        gdprintk(XENLOG_WARNING, "bad write (cr2=%lx, addr=%lx, bytes=%u)\n",
+                mmio_ctxt->cr2, offset, bytes);
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    offset &= 0xfff;
+    if ( pci_conf_write_intercept(mmio_ctxt->seg, mmio_ctxt->bdf,
+                                  offset, bytes, p_data) >= 0 )
+        pci_mmcfg_write(mmio_ctxt->seg, PCI_BUS(mmio_ctxt->bdf),
+                        PCI_DEVFN(mmio_ctxt->bdf), offset, bytes,
+                        *(uint32_t *)p_data);
+
+    return X86EMUL_OKAY;
+}
+
 static const struct x86_emulate_ops mmio_ro_emulate_ops = {
     .read       = x86emul_unhandleable_rw,
     .insn_fetch = ptwr_emulated_insn_fetch,
-- 
2.48.1

