The current logic for handling long-running operations has two flaws:

 - hvm_io_pending is only used by the Intel (VMX) code, so the vPCI
   check placed there is never reached on AMD hardware. Fix this by
   moving the call to vpci_process_pending into
   handle_hvm_io_completion.
 - Raise a scheduler softirq when preemption is required. The
   do_softirq calls in the SVM/VMX guest entry points (see the sketch
   below) will make sure the guest vcpu is not restarted until all
   the pending work is finished.
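
For reference, the reason raising SCHEDULE_SOFTIRQ is sufficient is the
softirq check already performed on the guest entry paths. A rough C
sketch of that loop follows (the real code lives in the SVM/VMX entry
assembly; resume_guest below is an illustrative placeholder, not an
actual symbol):

    /*
     * Simplified sketch of the guest (re)entry loop.  If a softirq is
     * pending -- such as the SCHEDULE_SOFTIRQ raised by
     * vpci_process_pending -- do_softirq runs before the vcpu is
     * resumed, giving the scheduler a chance to deschedule it until
     * the pending vPCI work is done.
     */
    for ( ;; )
    {
        local_irq_disable();

        if ( softirq_pending(smp_processor_id()) )
        {
            local_irq_enable();
            do_softirq();
            continue;
        }

        resume_guest(); /* illustrative placeholder for VMRUN/VMRESUME */
    }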

Signed-off-by: Roger Pau Monné <roger....@citrix.com>
---
Cc: Paul Durrant <paul.durr...@citrix.com>
Cc: Jan Beulich <jbeul...@suse.com>
Cc: Andrew Cooper <andrew.coop...@citrix.com>
Cc: Wei Liu <wei.l...@citrix.com>
Cc: George Dunlap <george.dun...@eu.citrix.com>
Cc: Ian Jackson <ian.jack...@eu.citrix.com>
Cc: Julien Grall <julien.gr...@arm.com>
Cc: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
Cc: Stefano Stabellini <sstabell...@kernel.org>
Cc: Tim Deegan <t...@xen.org>
---
Changes since v3:
 - Don't use a tasklet.
---
 xen/arch/x86/hvm/ioreq.c  | 6 +++---
 xen/drivers/vpci/header.c | 4 ++++
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index e2e755a8a1..4c4b33e220 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -85,9 +85,6 @@ bool hvm_io_pending(struct vcpu *v)
     struct hvm_ioreq_server *s;
     unsigned int id;
 
-    if ( has_vpci(d) && vpci_process_pending(v) )
-        return true;
-
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
         struct hvm_ioreq_vcpu *sv;
@@ -186,6 +183,9 @@ bool handle_hvm_io_completion(struct vcpu *v)
     enum hvm_io_completion io_completion;
     unsigned int id;
 
+    if ( has_vpci(d) && vpci_process_pending(v) )
+        return false;
+
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
         struct hvm_ioreq_vcpu *sv;
diff --git a/xen/drivers/vpci/header.c b/xen/drivers/vpci/header.c
index 162d51f7e2..720dec07fa 100644
--- a/xen/drivers/vpci/header.c
+++ b/xen/drivers/vpci/header.c
@@ -127,7 +127,10 @@ bool vpci_process_pending(struct vcpu *v)
         int rc = rangeset_consume_ranges(v->vpci.mem, map_range, &data);
 
         if ( rc == -ERESTART )
+        {
+            raise_softirq(SCHEDULE_SOFTIRQ);
             return true;
+        }
 
         spin_lock(&v->vpci.pdev->vpci->lock);
         /* Disable memory decoding unconditionally on failure. */
@@ -182,6 +185,7 @@ static void defer_map(struct domain *d, struct pci_dev *pdev,
     curr->vpci.mem = mem;
     curr->vpci.cmd = cmd;
     curr->vpci.rom_only = rom_only;
+    raise_softirq(SCHEDULE_SOFTIRQ);
 }
 
 static int modify_bars(const struct pci_dev *pdev, uint16_t cmd, bool rom_only)
-- 
2.19.1

