+ if ( dabt.write )
+ return IO_HANDLED;
+
+ /*
+ * Sign extend if required.
+ * Note that we expect the read handler to have zeroed the bits
+ * outside the requested access size.
+ */
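+    /*
+     * For illustration (assuming size is the access width in bits, as set up
+     * by the caller): a byte read that returned 0x80 is widened to 0xff..ff80.
+     */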
+ if ( dabt.sign && (r & (1UL << (size - 1))) )
+ {
+ /*
+         * We are relying on register_t using the same size as
+         * an unsigned long in order to keep the 32-bit assembly
+         * code smaller.
+ */
+ BUILD_BUG_ON(sizeof(register_t) != sizeof(unsigned long));
+ r |= (~0UL) << size;
+ }
+
+ set_user_reg(regs, dabt.reg, r);
+
+ return IO_HANDLED;
+}
+
+enum io_state try_fwd_ioserv(struct cpu_user_regs *regs,
+ struct vcpu *v, mmio_info_t *info)
+{
+ struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
+ ioreq_t p = {
+ .type = IOREQ_TYPE_COPY,
+ .addr = info->gpa,
+ .size = 1 << info->dabt.size,
+ .count = 1,
+ .dir = !info->dabt.write,
+ /*
+         * On x86, df is used by the 'rep' instruction to tell the
+         * direction to iterate (forward or backward).
+         * On Arm, all accesses to an MMIO region are done as a single
+         * memory access. So, for now, we can safely always set it to 0.
+ */
+ .df = 0,
+ .data = get_user_reg(regs, info->dabt.reg),
+ .state = STATE_IOREQ_READY,
+ };
+ struct hvm_ioreq_server *s = NULL;
+ enum io_state rc;
+
+ switch ( vio->io_req.state )
+ {
+ case STATE_IOREQ_NONE:
+ break;
+
+ case STATE_IORESP_READY:
+ return IO_HANDLED;
+
+ default:
+ gdprintk(XENLOG_ERR, "wrong state %u\n", vio->io_req.state);
+ return IO_ABORT;
+ }
+
+ s = hvm_select_ioreq_server(v->domain, &p);
+ if ( !s )
+ return IO_UNHANDLED;
+
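+    /* Abort if the syndrome does not provide a valid decode of the access. */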
+ if ( !info->dabt.valid )
+ return IO_ABORT;
+
+ vio->io_req = p;
+
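+    /* Forward the request to the IOREQ server and work out how to resume. */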
+ rc = hvm_send_ioreq(s, &p, 0);
+ if ( rc != IO_RETRY || v->domain->is_shutting_down )
+ vio->io_req.state = STATE_IOREQ_NONE;
+ else if ( !hvm_ioreq_needs_completion(&vio->io_req) )
+ rc = IO_HANDLED;
+ else
+ vio->io_completion = HVMIO_mmio_completion;
+
+ return rc;
+}
+
+bool ioreq_handle_complete_mmio(void)
+{
+ struct vcpu *v = current;
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+ const union hsr hsr = { .bits = regs->hsr };
+ paddr_t addr = v->arch.hvm.hvm_io.io_req.addr;
+
+ if ( try_handle_mmio(regs, hsr, addr) == IO_HANDLED )
+ {
+ advance_pc(regs, hsr);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 8f40d0e..121942c 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -21,6 +21,7 @@
#include <xen/hypercall.h>
#include <xen/init.h>
#include <xen/iocap.h>
+#include <xen/ioreq.h>
#include <xen/irq.h>
#include <xen/lib.h>
#include <xen/mem_access.h>
@@ -1384,6 +1385,9 @@ static arm_hypercall_t arm_hypercall_table[] = {
#ifdef CONFIG_HYPFS
HYPERCALL(hypfs_op, 5),
#endif
+#ifdef CONFIG_IOREQ_SERVER
+ HYPERCALL(dm_op, 3),
+#endif
};
#ifndef NDEBUG
@@ -1955,9 +1959,14 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
case IO_HANDLED:
advance_pc(regs, hsr);
return;
+ case IO_RETRY:
+ /* finish later */
+ return;
case IO_UNHANDLED:
/* IO unhandled, try another way to handle it. */
break;
+ default:
+ ASSERT_UNREACHABLE();
}
}
@@ -2249,12 +2258,23 @@ static void check_for_pcpu_work(void)
* Process pending work for the vCPU. Any call should be fast or
* implement preemption.
*/
-static void check_for_vcpu_work(void)
+static bool check_for_vcpu_work(void)
{
struct vcpu *v = current;
+#ifdef CONFIG_IOREQ_SERVER
+ bool handled;
+
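+    /*
+     * Handle completion of any outstanding I/O request from the device
+     * model, with interrupts enabled.
+     */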
+ local_irq_enable();
+ handled = handle_hvm_io_completion(v);
+ local_irq_disable();
+
+ if ( !handled )
+ return true;
+#endif
+
if ( likely(!v->arch.need_flush_to_ram) )
- return;
+ return false;
/*
     * Give a chance for the pCPU to process work before handling the vCPU
@@ -2265,6 +2285,8 @@ static void check_for_vcpu_work(void)
local_irq_enable();
p2m_flush_vm(v);
local_irq_disable();
+
+ return false;
}
/*
@@ -2277,8 +2299,10 @@ void leave_hypervisor_to_guest(void)
{
local_irq_disable();
- check_for_vcpu_work();
- check_for_pcpu_work();
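+    /*
+     * Keep processing pCPU work until the vCPU has no outstanding work
+     * left (e.g. an I/O request that has not been completed yet).
+     */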
+ do
+ {
+ check_for_pcpu_work();
+ } while ( check_for_vcpu_work() );
vgic_sync_to_lrs();
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 6819a3b..d1c48d7 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -11,10 +11,27 @@
#include <asm/vgic.h>
#include <asm/vpl011.h>
#include <public/hvm/params.h>
+#include <public/hvm/dm_op.h>
+#include <public/hvm/ioreq.h>
+
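+/* Maximum number of IOREQ servers per domain (mirrors the x86 limit). */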
+#define MAX_NR_IOREQ_SERVERS 8
struct hvm_domain
{
uint64_t params[HVM_NR_PARAMS];
+
+ /* Guest page range used for non-default ioreq servers */
+ struct {
+ unsigned long base;
+ unsigned long mask;
+ unsigned long legacy_mask; /* indexed by HVM param number */
+ } ioreq_gfn;
+
+    /* Lock protects all other values in the sub-struct and the default */
+ struct {
+ spinlock_t lock;
+ struct hvm_ioreq_server *server[MAX_NR_IOREQ_SERVERS];
+ } ioreq_server;
};
#ifdef CONFIG_ARM_64
@@ -91,6 +108,28 @@ struct arch_domain
#endif
} __cacheline_aligned;
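+/* Kind of completion needed once an in-flight I/O request has finished. */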
+enum hvm_io_completion {
+ HVMIO_no_completion,
+ HVMIO_mmio_completion,
+ HVMIO_pio_completion
+};
+
+struct hvm_vcpu_io {
+ /* I/O request in flight to device model. */
+ enum hvm_io_completion io_completion;
+ ioreq_t io_req;
+
+ /*
+ * HVM emulation:
+     *  Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
+     *  The latter is known to be an MMIO frame (not RAM).
+     *  This translation is only valid for accesses as per @mmio_access.
+ */
+ struct npfec mmio_access;
+ unsigned long mmio_gla;
+ unsigned long mmio_gpfn;
+};
+