Make sure we don't have any ongoing memory transaction when releasing the
BQL.  This will trigger an abort if we misuse the QEMU memory model, e.g.,
by calling run_on_cpu() during a memory commit.

Signed-off-by: Peter Xu <pet...@redhat.com>
---
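For illustration only (not part of the patch), below is a minimal sketch of
the kind of misuse the new assertion is meant to catch.  The helper name
buggy_device_update() and its arguments are hypothetical; the QEMU calls and
headers are assumed to match the current tree:

    #include "qemu/osdep.h"
    #include "exec/memory.h"
    #include "hw/core/cpu.h"

    /* Hypothetical helper, for illustration only. */
    static void buggy_device_update(MemoryRegion *mr, CPUState *cpu,
                                    run_on_cpu_func fn)
    {
        memory_region_transaction_begin();
        memory_region_set_enabled(mr, false);
        /*
         * BUG: run_on_cpu() may drop and re-take the BQL while waiting
         * for the target vCPU, but memory_region_transaction_depth is
         * still non-zero here.  With this patch, unlocking the BQL in
         * this state trips the new assert in
         * qemu_mutex_unlock_iothread_prepare() instead of silently
         * committing the memory topology at an unexpected point.
         */
        run_on_cpu(cpu, fn, RUN_ON_CPU_NULL);
        memory_region_transaction_commit();
    }
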
 include/exec/memory-internal.h | 1 +
 softmmu/cpus.c                 | 2 ++
 softmmu/memory.c               | 6 ++++++
 3 files changed, 9 insertions(+)

diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
index 9fcc2af25c..3124b91c4b 100644
--- a/include/exec/memory-internal.h
+++ b/include/exec/memory-internal.h
@@ -35,6 +35,7 @@ static inline AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
 
 FlatView *address_space_get_flatview(AddressSpace *as);
 void flatview_unref(FlatView *view);
+bool memory_region_has_pending_transaction(void);
 
 extern const MemoryRegionOps unassigned_mem_ops;
 
diff --git a/softmmu/cpus.c b/softmmu/cpus.c
index 6085f8edbe..14a50289f8 100644
--- a/softmmu/cpus.c
+++ b/softmmu/cpus.c
@@ -31,6 +31,7 @@
 #include "qapi/qapi-events-run-state.h"
 #include "qapi/qmp/qerror.h"
 #include "exec/gdbstub.h"
+#include "exec/memory-internal.h"
 #include "sysemu/hw_accel.h"
 #include "exec/exec-all.h"
 #include "qemu/thread.h"
@@ -68,6 +69,7 @@ static QemuMutex qemu_global_mutex;
 
 static void qemu_mutex_unlock_iothread_prepare(void)
 {
+    assert(!memory_region_has_pending_transaction());
 }
 
 bool cpu_is_stopped(CPUState *cpu)
diff --git a/softmmu/memory.c b/softmmu/memory.c
index dfce4a2bda..08327c22e2 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -176,6 +176,12 @@ static bool memory_region_has_pending_update(void)
     return memory_region_update_pending || ioeventfd_update_pending;
 }
 
+bool memory_region_has_pending_transaction(void)
+{
+    return memory_region_transaction_depth ||
+        memory_region_has_pending_update();
+}
+
 static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                            MemoryRegionIoeventfd *b)
 {
-- 
2.31.1
