Once address_space_translate is called outside the BQL, the MemoryRegion it returns may disappear as soon as the RCU read-side critical section ends. Avoid this by moving the critical section into the callers, so that each caller holds the RCU read lock for as long as it uses the returned region.
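
A minimal caller-side sketch of the resulting pattern (the function is hypothetical and for illustration only; the real call sites are in the diff below):

    /* Hypothetical caller: the RCU read-side critical section now brackets
     * the translation and every use of the returned MemoryRegion. */
    static void example_access(AddressSpace *as, hwaddr addr,
                               hwaddr len, bool is_write)
    {
        hwaddr xlat, l = len;
        MemoryRegion *mr;

        rcu_read_lock();
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        /* ... access mr/xlat here; mr may go away once the lock is dropped ... */
        rcu_read_unlock();
    }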
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 exec.c                | 33 +++++++++++++++++++++++++++++----
 hw/vfio/common.c      |  7 +++++--
 include/exec/memory.h |  3 ++-
 translate-all.c       |  3 +++
 4 files changed, 39 insertions(+), 7 deletions(-)

diff --git a/exec.c b/exec.c
index 3552192..b264e76 100644
--- a/exec.c
+++ b/exec.c
@@ -373,6 +373,7 @@ static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
     return false;
 }
 
+/* Called from RCU critical section */
 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                       hwaddr *xlat, hwaddr *plen,
                                       bool is_write)
@@ -382,7 +383,6 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
     MemoryRegion *mr;
     hwaddr len = *plen;
 
-    rcu_read_lock();
     for (;;) {
         AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
         section = address_space_translate_internal(d, addr, &addr, plen, true);
@@ -411,7 +411,6 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
 
     *plen = len;
     *xlat = addr;
-    rcu_read_unlock();
     return mr;
 }
 
@@ -2306,6 +2305,7 @@ bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
     MemoryRegion *mr;
     bool error = false;
 
+    rcu_read_lock();
     while (len > 0) {
         l = len;
         mr = address_space_translate(as, addr, &addr1, &l, is_write);
@@ -2384,6 +2384,7 @@ bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
         buf += l;
         addr += l;
     }
+    rcu_read_unlock();
 
     return error;
 }
@@ -2419,6 +2420,7 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
     hwaddr addr1;
     MemoryRegion *mr;
 
+    rcu_read_lock();
     while (len > 0) {
         l = len;
         mr = address_space_translate(as, addr, &addr1, &l, true);
@@ -2444,6 +2446,7 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
         buf += l;
         addr += l;
     }
+    rcu_read_unlock();
 }
 
 /* used for ROM loading : can write in RAM and ROM */
@@ -2552,6 +2555,7 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
     MemoryRegion *mr;
     hwaddr l, xlat;
 
+    rcu_read_lock();
     while (len > 0) {
         l = len;
         mr = address_space_translate(as, addr, &xlat, &l, is_write);
@@ -2565,6 +2569,7 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
         len -= l;
         addr += l;
     }
+    rcu_read_unlock();
     return true;
 }
 
@@ -2591,9 +2596,12 @@ void *address_space_map(AddressSpace *as,
     }
 
     l = len;
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &xlat, &l, is_write);
+
     if (!memory_access_is_direct(mr, is_write)) {
         if (atomic_xchg(&bounce.in_use, true)) {
+            rcu_read_unlock();
             return NULL;
         }
         /* Avoid unbounded allocations */
@@ -2608,6 +2616,7 @@ void *address_space_map(AddressSpace *as,
             address_space_read(as, addr, bounce.buffer, l);
         }
 
+        rcu_read_unlock();
         *plen = l;
         return bounce.buffer;
     }
@@ -2631,6 +2640,7 @@ void *address_space_map(AddressSpace *as,
     }
 
     memory_region_ref(mr);
+    rcu_read_unlock();
     *plen = done;
     return qemu_ram_ptr_length(raddr + base, plen);
 }
@@ -2690,6 +2700,7 @@ static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
     hwaddr l = 4;
     hwaddr addr1;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr1, &l, false);
     if (l < 4 || !memory_access_is_direct(mr, false)) {
         /* I/O case */
@@ -2720,6 +2731,7 @@ static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
             break;
         }
     }
+    rcu_read_unlock();
     return val;
 }
 
@@ -2748,6 +2760,7 @@ static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
     hwaddr l = 8;
     hwaddr addr1;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr1, &l,
                                  false);
     if (l < 8 || !memory_access_is_direct(mr, false)) {
@@ -2779,6 +2792,7 @@ static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
             break;
         }
     }
+    rcu_read_unlock();
     return val;
 }
 
@@ -2815,6 +2829,7 @@ static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
     hwaddr l = 2;
     hwaddr addr1;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr1, &l,
                                  false);
     if (l < 2 || !memory_access_is_direct(mr, false)) {
@@ -2846,6 +2861,7 @@ static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
             break;
         }
     }
+    rcu_read_unlock();
     return val;
 }
 
@@ -2874,6 +2890,7 @@ void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
     hwaddr l = 4;
     hwaddr addr1;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr1, &l,
                                  true);
     if (l < 4 || !memory_access_is_direct(mr, true)) {
@@ -2892,6 +2909,7 @@ void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
             }
         }
     }
+    rcu_read_unlock();
 }
 
 /* warning: addr must be aligned */
@@ -2904,6 +2922,7 @@ static inline void stl_phys_internal(AddressSpace *as,
     hwaddr l = 4;
     hwaddr addr1;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr1, &l,
                                  true);
     if (l < 4 || !memory_access_is_direct(mr, true)) {
@@ -2934,6 +2953,7 @@ static inline void stl_phys_internal(AddressSpace *as,
         }
         invalidate_and_set_dirty(addr1, 4);
     }
+    rcu_read_unlock();
 }
 
 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
@@ -2968,6 +2988,7 @@ static inline void stw_phys_internal(AddressSpace *as,
     hwaddr l = 2;
     hwaddr addr1;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr1, &l, true);
     if (l < 2 || !memory_access_is_direct(mr, true)) {
 #if defined(TARGET_WORDS_BIGENDIAN)
@@ -2997,6 +3018,7 @@ static inline void stw_phys_internal(AddressSpace *as,
         }
         invalidate_and_set_dirty(addr1, 2);
     }
+    rcu_read_unlock();
 }
 
 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
@@ -3083,12 +3105,15 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr)
 {
     MemoryRegion*mr;
     hwaddr l = 1;
+    bool res;
 
+    rcu_read_lock();
     mr = address_space_translate(&address_space_memory,
                                  phys_addr, &phys_addr, &l, false);
 
-    return !(memory_region_is_ram(mr) ||
-             memory_region_is_romd(mr));
+    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
+    rcu_read_unlock();
+    return res;
 }
 
 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index b012620..b1045da 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -270,13 +270,14 @@ static void vfio_iommu_map_notify(Notifier *n, void *data)
      * this IOMMU to its immediate target. We need to translate
      * it the rest of the way through to memory.
      */
+    rcu_read_lock();
     mr = address_space_translate(&address_space_memory,
                                  iotlb->translated_addr,
                                  &xlat, &len, iotlb->perm & IOMMU_WO);
     if (!memory_region_is_ram(mr)) {
         error_report("iommu map to non memory area %"HWADDR_PRIx"",
                      xlat);
-        return;
+        goto out;
     }
     /*
      * Translation truncates length to the IOMMU page size,
@@ -284,7 +285,7 @@ static void vfio_iommu_map_notify(Notifier *n, void *data)
      */
     if (len & iotlb->addr_mask) {
         error_report("iommu has granularity incompatible with target AS");
-        return;
+        goto out;
     }
 
     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
@@ -307,6 +308,8 @@ static void vfio_iommu_map_notify(Notifier *n, void *data)
                          iotlb->addr_mask + 1, ret);
         }
     }
+out:
+    rcu_read_unlock();
 }
 
 static void vfio_listener_region_add(MemoryListener *listener,
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 0ee2079..0644dc6 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -1118,7 +1118,8 @@ bool address_space_write(AddressSpace *as, hwaddr addr,
 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
 
 /* address_space_translate: translate an address range into an address space
- * into a MemoryRegion and an address range into that section
+ * into a MemoryRegion and an address range into that section. Add a reference
+ * to that region.
  *
  * @as: #AddressSpace to be accessed
 * @addr: address within that address space
diff --git a/translate-all.c b/translate-all.c
index 9f47ce7..9ee8aa6 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -1458,14 +1458,17 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
     MemoryRegion *mr;
     hwaddr l = 1;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr, &l, false);
     if (!(memory_region_is_ram(mr)
           || memory_region_is_romd(mr))) {
+        rcu_read_unlock();
         return;
     }
     ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
         + addr;
     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
+    rcu_read_unlock();
 }
 
 #endif /* !defined(CONFIG_USER_ONLY) */
--
2.3.0