On Tue, 27 Feb 2024, Vikram Garhwal wrote:
> From: Juergen Gross <jgr...@suse.com>
>
> In order to support mapping and unmapping guest memory dynamically to
> and from qemu during address_space_[un]map() operations add the map()
> and unmap() callbacks to MemoryRegionOps.
>
> Those will be used e.g. for Xen grant mappings when performing guest
> I/Os.
>
> Signed-off-by: Juergen Gross <jgr...@suse.com>
> Signed-off-by: Vikram Garhwal <vikram.garh...@amd.com>
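
For anyone following the series, here is roughly how a backend would plug
into the new hooks. This is only an illustrative sketch, not part of the
patch: the demo_* names and the backend_{map,unmap}_pages() helpers are
made up.

#include "qemu/osdep.h"
#include "exec/memory.h"

/* Hypothetical backend helpers, for illustration only (not QEMU API). */
static void *backend_map_pages(hwaddr addr, hwaddr *plen, bool is_write);
static void backend_unmap_pages(void *buffer, hwaddr len);

static void *demo_map(MemoryRegion **mr, hwaddr addr, hwaddr *plen,
                      bool is_write, MemTxAttrs attrs)
{
    /* Create a host mapping for [addr, addr + *plen); the callback may
     * shrink *plen to the length it actually managed to map. */
    return backend_map_pages(addr, plen, is_write);
}

static void demo_unmap(MemoryRegion *mr, void *buffer, ram_addr_t addr,
                       hwaddr len, bool is_write, hwaddr access_len)
{
    /* Tear down the mapping handed out by demo_map(). */
    backend_unmap_pages(buffer, len);
}

static const MemoryRegionOps demo_region_ops = {
    .endianness = DEVICE_LITTLE_ENDIAN,
    .map = demo_map,
    .unmap = demo_unmap,
};

With something like that registered on the region, address_space_map()
returns whatever .map() produced, and address_space_unmap() routes the
teardown to .unmap(), as the physmem.c hunks below show.
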
Reviewed-by: Stefano Stabellini <sstabell...@kernel.org>

> ---
>  include/exec/memory.h | 21 ++++++++++++++++++
>  system/physmem.c      | 50 +++++++++++++++++++++++++++++++++----------
>  2 files changed, 60 insertions(+), 11 deletions(-)
>
> diff --git a/include/exec/memory.h b/include/exec/memory.h
> index 8626a355b3..9f7dfe59c7 100644
> --- a/include/exec/memory.h
> +++ b/include/exec/memory.h
> @@ -282,6 +282,27 @@ struct MemoryRegionOps {
>                                      unsigned size,
>                                      MemTxAttrs attrs);
>
> +    /*
> +     * Dynamically create mapping. @addr is the guest address to map; @plen
> +     * is the pointer to the usable length of the buffer.
> +     * @mr contents can be changed in case a new memory region is created for
> +     * the mapping.
> +     * Returns the buffer address for accessing the data.
> +     */
> +    void *(*map)(MemoryRegion **mr,
> +                 hwaddr addr,
> +                 hwaddr *plen,
> +                 bool is_write,
> +                 MemTxAttrs attrs);
> +
> +    /* Unmap an area obtained via map() before. */
> +    void (*unmap)(MemoryRegion *mr,
> +                  void *buffer,
> +                  ram_addr_t addr,
> +                  hwaddr len,
> +                  bool is_write,
> +                  hwaddr access_len);
> +
>      enum device_endian endianness;
>      /* Guest-visible constraints: */
>      struct {
> diff --git a/system/physmem.c b/system/physmem.c
> index 949dcb20ba..d989e9fc1f 100644
> --- a/system/physmem.c
> +++ b/system/physmem.c
> @@ -3141,6 +3141,7 @@ void *address_space_map(AddressSpace *as,
>      hwaddr len = *plen;
>      hwaddr l, xlat;
>      MemoryRegion *mr;
> +    void *ptr = NULL;
>      FlatView *fv;
>
>      if (len == 0) {
> @@ -3174,12 +3175,20 @@ void *address_space_map(AddressSpace *as,
>          return bounce.buffer;
>      }
>
> -
>      memory_region_ref(mr);
> +
> +    if (mr->ops && mr->ops->map) {
> +        ptr = mr->ops->map(&mr, addr, plen, is_write, attrs);
> +    }
> +
>      *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
>                                          l, is_write, attrs);
>      fuzz_dma_read_cb(addr, *plen, mr);
> -    return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
> +    if (ptr == NULL) {
> +        ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
> +    }
> +
> +    return ptr;
>  }
>
>  /* Unmaps a memory region previously mapped by address_space_map().
> @@ -3195,11 +3204,16 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
>
>          mr = memory_region_from_host(buffer, &addr1);
>          assert(mr != NULL);
> -        if (is_write) {
> -            invalidate_and_set_dirty(mr, addr1, access_len);
> -        }
> -        if (xen_enabled()) {
> -            xen_invalidate_map_cache_entry(buffer);
> +
> +        if (mr->ops && mr->ops->unmap) {
> +            mr->ops->unmap(mr, buffer, addr1, len, is_write, access_len);
> +        } else {
> +            if (is_write) {
> +                invalidate_and_set_dirty(mr, addr1, access_len);
> +            }
> +            if (xen_enabled()) {
> +                xen_invalidate_map_cache_entry(buffer);
> +            }
>          }
>          memory_region_unref(mr);
>          return;
> @@ -3272,10 +3286,18 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
>           * doing this if we found actual RAM, which behaves the same
>           * regardless of attributes; so UNSPECIFIED is fine.
>           */
> +        if (mr->ops && mr->ops->map) {
> +            cache->ptr = mr->ops->map(&mr, addr, &l, is_write,
> +                                      MEMTXATTRS_UNSPECIFIED);
> +        }
> +
>          l = flatview_extend_translation(cache->fv, addr, len, mr,
>                                          cache->xlat, l, is_write,
>                                          MEMTXATTRS_UNSPECIFIED);
> -        cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
> +        if (!cache->ptr) {
> +            cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l,
> +                                             true);
> +        }
>      } else {
>          cache->ptr = NULL;
>      }
> @@ -3297,14 +3319,20 @@ void address_space_cache_invalidate(MemoryRegionCache *cache,
>
>  void address_space_cache_destroy(MemoryRegionCache *cache)
>  {
> -    if (!cache->mrs.mr) {
> +    MemoryRegion *mr = cache->mrs.mr;
> +
> +    if (!mr) {
>          return;
>      }
>
> -    if (xen_enabled()) {
> +    if (mr->ops && mr->ops->unmap) {
> +        mr->ops->unmap(mr, cache->ptr, cache->xlat, cache->len,
> +                       cache->is_write, cache->len);
> +    } else if (xen_enabled()) {
>          xen_invalidate_map_cache_entry(cache->ptr);
>      }
> -    memory_region_unref(cache->mrs.mr);
> +
> +    memory_region_unref(mr);
>      flatview_unref(cache->fv);
>      cache->mrs.mr = NULL;
>      cache->fv = NULL;
> --
> 2.17.1
>
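
One property worth pointing out: callers of the mapping API need no
changes, because the hooks are invoked inside address_space_[un]map()
itself. A minimal, purely hypothetical helper keeps working as before:

#include "qemu/osdep.h"
#include "exec/memory.h"

/* Hypothetical caller, only to show that the caller side is untouched. */
static bool copy_from_guest(AddressSpace *as, hwaddr gpa,
                            void *dst, hwaddr size)
{
    hwaddr plen = size;
    void *buf = address_space_map(as, gpa, &plen, false,
                                  MEMTXATTRS_UNSPECIFIED);

    if (!buf || plen < size) {
        if (buf) {
            address_space_unmap(as, buf, plen, false, 0);
        }
        return false;
    }

    /* For a region providing .map(), buf is what mr->ops->map() returned. */
    memcpy(dst, buf, size);

    /* ...and the matching mr->ops->unmap() runs in here, when present. */
    address_space_unmap(as, buf, plen, false, size);
    return true;
}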