As requested by Paul Durrant <paul.durr...@citrix.com>, here is a
prototype of the QEMU change that uses a second shared page.
I picked adding HVM_PARAM_VMPORT_IOREQ_PFN as the simplest and
fastest way to keep QEMU building against older Xen versions that
do not provide it.
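
For background on why a second page is needed at all: a vmport access
is an ordinary x86 IN/OUT to port 0x5658, but its semantics depend on
five additional general-purpose registers, which a plain ioreq_t
cannot carry. A minimal, hypothetical guest-side sketch of such an
access (not part of this patch; the magic value and port number come
from the published VMware backdoor interface, and command 10 is the
get-version command):

#include <stdint.h>

#define VMPORT_MAGIC 0x564D5868u /* "VMXh" */
#define VMPORT_PORT  0x5658u     /* "VX" */
#define VMPORT_CMD_GETVERSION 10u

static inline uint32_t vmport_getversion(void)
{
    uint32_t eax = VMPORT_MAGIC;
    uint32_t ebx = ~VMPORT_MAGIC;
    uint32_t ecx = VMPORT_CMD_GETVERSION;
    uint32_t edx = VMPORT_PORT;

    /* This inl traps to Xen; with this series, Xen copies the guest
     * GPRs into the shared vmport page and sends QEMU an ioreq with
     * type == IOREQ_TYPE_VMWARE_PORT and data == EAX. */
    asm volatile ("inl %%dx, %%eax"
                  : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx));
    return eax;
}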
xen-hvm.c | 128 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 123 insertions(+), 5 deletions(-)
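
As a usage illustration, a hypothetical out-of-tree debug helper (not
part of this patch) could locate and dump the new page with the same
libxenctrl calls the patch uses. It assumes a Xen that carries the
companion hypervisor patch defining HVM_PARAM_VMPORT_IOREQ_PFN;
struct vmware_ioreq is redeclared here only to keep the sketch
self-contained:

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <xenctrl.h>
#include <xen/hvm/params.h> /* HVM_PARAM_VMPORT_IOREQ_PFN (companion patch) */

/* One slot per vcpu, matching the layout added by the companion patch. */
struct vmware_ioreq {
    uint32_t esi, edi, ebx, ecx, edx;
};

static int dump_vmport_page(xc_interface *xch, uint32_t domid, int nr_vcpus)
{
    struct vmware_ioreq *vreq;
    unsigned long pfn;
    void *page;
    int i;

    if (xc_get_hvm_param(xch, domid, HVM_PARAM_VMPORT_IOREQ_PFN, &pfn)) {
        return -1;
    }
    page = xc_map_foreign_range(xch, domid, XC_PAGE_SIZE, PROT_READ, pfn);
    if (page == NULL) {
        return -1;
    }
    vreq = page;
    for (i = 0; i < nr_vcpus; i++) {
        printf("vcpu%d: ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n",
               i, vreq[i].ebx, vreq[i].ecx, vreq[i].edx,
               vreq[i].esi, vreq[i].edi);
    }
    munmap(page, XC_PAGE_SIZE);
    return 0;
}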
diff --git a/xen-hvm.c b/xen-hvm.c
index 05e522c..5e80159 100644
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -41,6 +41,29 @@ static MemoryRegion *framebuffer;
static bool xen_in_migration;
/* Compatibility with older version */
+
+/* This allows QEMU to build on a system that has Xen 4.5 or earlier
+ * installed. This is here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h
+ * needs to be included before this block, and hw/xen/xen_common.h needs to
+ * be included before xen/hvm/ioreq.h.
+ */
+#ifndef IOREQ_TYPE_VMWARE_PORT
+#define IOREQ_TYPE_VMWARE_PORT 3
+struct vmware_ioreq {
+ uint32_t esi;
+ uint32_t edi;
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
+};
+typedef struct vmware_ioreq vmware_ioreq_t;
+
+struct shared_vmport_iopage {
+ struct vmware_ioreq vcpu_vmport_ioreq[1];
+};
+typedef struct shared_vmport_iopage shared_vmport_iopage_t;
+#endif
+
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
@@ -79,8 +102,10 @@ typedef struct XenPhysmap {
typedef struct XenIOState {
shared_iopage_t *shared_page;
+ shared_vmport_iopage_t *shared_vmport_page;
buffered_iopage_t *buffered_io_page;
QEMUTimer *buffered_io_timer;
+ CPUState **cpu_by_ioreq_id;
/* the evtchn port for polling the notification, */
evtchn_port_t *ioreq_local_port;
/* evtchn local port for buffered io */
@@ -101,6 +126,8 @@ typedef struct XenIOState {
Notifier wakeup;
} XenIOState;
+static void handle_ioreq(XenIOState *state, ioreq_t *req);
+
/* Xen specific function for piix pci */
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
@@ -610,6 +637,20 @@ static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
return req;
}
+/* get the vmport ioreq packets from shared memory */
+static vmware_ioreq_t *cpu_get_vmport_ioreq_from_shared_memory(
+ XenIOState *state, int vcpu)
+{
+ vmware_ioreq_t *vmport_req;
+
+ assert(state->shared_vmport_page);
+ vmport_req = &state->shared_vmport_page->vcpu_vmport_ioreq[vcpu];
+
+ xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
+
+ return vmport_req;
+}
+
/* use poll to get the port notification */
/* ioreq_vec--out,the */
/* retval--the number of ioreq packet */
@@ -773,7 +814,51 @@ static void cpu_ioreq_move(ioreq_t *req)
}
}
-static void handle_ioreq(ioreq_t *req)
+static void regs_to_cpu(XenIOState *state, vmware_ioreq_t *vmport_req,
+ ioreq_t *req)
+{
+ X86CPU *cpu;
+ CPUX86State *env;
+
+ current_cpu = state->cpu_by_ioreq_id[state->send_vcpu];
+ cpu = X86_CPU(current_cpu);
+ env = &cpu->env;
+ env->regs[R_EAX] = req->data;
+ env->regs[R_EBX] = vmport_req->ebx;
+ env->regs[R_ECX] = vmport_req->ecx;
+ env->regs[R_EDX] = vmport_req->edx;
+ env->regs[R_ESI] = vmport_req->esi;
+ env->regs[R_EDI] = vmport_req->edi;
+}
+
+static void regs_from_cpu(XenIOState *state, vmware_ioreq_t *vmport_req,
+ ioreq_t *req)
+{
+ X86CPU *cpu = X86_CPU(current_cpu);
+ CPUX86State *env = &cpu->env;
+
+ assert(sizeof(*vmport_req) <= sizeof(*req));
+
+ req->data = env->regs[R_EAX];
+ vmport_req->ebx = env->regs[R_EBX];
+ vmport_req->ecx = env->regs[R_ECX];
+ vmport_req->edx = env->regs[R_EDX];
+ vmport_req->esi = env->regs[R_ESI];
+ vmport_req->edi = env->regs[R_EDI];
+ current_cpu = NULL;
+}
+
+static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
+{
+ vmware_ioreq_t *vmport_req =
+ cpu_get_vmport_ioreq_from_shared_memory(state, state->send_vcpu);
+
+ regs_to_cpu(state, vmport_req, req);
+ cpu_ioreq_pio(req);
+ regs_from_cpu(state, vmport_req, req);
+}
+
+static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
(req->size < sizeof (target_ulong))) {
@@ -787,6 +872,9 @@ static void handle_ioreq(ioreq_t *req)
case IOREQ_TYPE_COPY:
cpu_ioreq_move(req);
break;
+ case IOREQ_TYPE_VMWARE_PORT:
+ handle_vmport_ioreq(state, req);
+ break;
case IOREQ_TYPE_TIMEOFFSET:
break;
case IOREQ_TYPE_INVALIDATE:
@@ -828,7 +916,7 @@ static int handle_buffered_iopage(XenIOState *state)
req.data |= ((uint64_t)buf_req->data) << 32;
}
- handle_ioreq(&req);
+ handle_ioreq(state, &req);
xen_mb();
state->buffered_io_page->read_pointer += qw ? 2 : 1;
@@ -857,14 +945,16 @@ static void cpu_handle_ioreq(void *opaque)
handle_buffered_iopage(state);
if (req) {
- handle_ioreq(req);
+ handle_ioreq(state, req);
if (req->state != STATE_IOREQ_INPROCESS) {
fprintf(stderr, "Badness in I/O request ... not in service?!: "
"%x, ptr: %x, port: %"PRIx64", "
- "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %"
FMT_ioreq_size "\n",
+ "data: %"PRIx64", count: %" FMT_ioreq_size
+ ", size: %" FMT_ioreq_size
+ ", type: %"FMT_ioreq_size"\n",
req->state, req->data_is_ptr, req->addr,
- req->data, req->count, req->size);
+ req->data, req->count, req->size, req->type);
destroy_hvm_domain(false);
return;
}
@@ -904,6 +994,14 @@ static void xen_main_loop_prepare(XenIOState *state)
state);
if (evtchn_fd != -1) {
+ CPUState *cpu_state;
+
+ fprintf(stderr, "%s: Init cpu_by_ioreq_id\n", __func__);
+ CPU_FOREACH(cpu_state) {
+ fprintf(stderr, "%s: cpu_by_ioreq_id[%d]=%p\n",
+ __func__, cpu_state->cpu_index, cpu_state);
+ state->cpu_by_ioreq_id[cpu_state->cpu_index] = cpu_state;
+ }
qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
}
}
@@ -987,6 +1085,9 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
unsigned long ioreq_pfn;
unsigned long bufioreq_evtchn;
XenIOState *state;
+#ifdef HVM_PARAM_VMPORT_IOREQ_PFN
+ unsigned long vmport_ioreq_pfn;
+#endif
state = g_malloc0(sizeof (XenIOState));
@@ -1020,6 +1121,20 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
errno, xen_xc);
}
+#ifdef HVM_PARAM_VMPORT_IOREQ_PFN
+ xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_VMPORT_IOREQ_PFN,
+ &vmport_ioreq_pfn);
+ DPRINTF("shared vmport page at pfn %lx\n", vmport_ioreq_pfn);
+ state->shared_vmport_page = xc_map_foreign_range(xen_xc, xen_domid,
+ XC_PAGE_SIZE,
+ PROT_READ|PROT_WRITE,
+ vmport_ioreq_pfn);
+ if (state->shared_vmport_page == NULL) {
+ hw_error("map shared vmport IO page returned error %d handle="
+ XC_INTERFACE_FMT, errno, xen_xc);
+ }
+#endif
+
xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid,
XC_PAGE_SIZE,
@@ -1028,6 +1143,9 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
hw_error("map buffered IO page returned error %d", errno);
}
+ /* Note: QEMU's CPU list is still empty at this point in init; the
+  * cpu_by_ioreq_id table is filled in later by xen_main_loop_prepare(). */
+ state->cpu_by_ioreq_id = g_malloc0(max_cpus * sizeof(CPUState *));
+
state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));
/* FIXME: how about if we overflow the page here? */
--
1.8.4