In the common case where there is no combining or splitting of memory accesses, access_with_adjusted_size adds a lot of overhead. Call the MMIO ops directly in that case.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 memory.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 56 insertions(+), 12 deletions(-)

diff --git a/memory.c b/memory.c
index 56e54aa..1ade19c 100644
--- a/memory.c
+++ b/memory.c
@@ -443,6 +443,7 @@ static void memory_region_write_accessor(MemoryRegion *mr,
 static void access_with_adjusted_size(hwaddr addr,
                                       uint64_t *value,
                                       unsigned size,
+                                      unsigned access_size,
                                       void (*access)(MemoryRegion *mr,
                                                      hwaddr addr,
                                                      uint64_t *value,
@@ -452,11 +453,9 @@ static void access_with_adjusted_size(hwaddr addr,
                                       MemoryRegion *mr)
 {
     uint64_t access_mask;
-    unsigned access_size;
     unsigned i;
 
     /* FIXME: support unaligned access? */
-    access_size = MAX(MIN(size, mr->min_access_size), mr->min_access_size);
     access_mask = -1ULL >> (64 - access_size * 8);
     if (memory_region_big_endian(mr)) {
         for (i = 0; i < size; i += access_size) {
@@ -929,13 +928,32 @@ static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
                                              hwaddr addr,
                                              unsigned size)
 {
+    unsigned access_size;
     uint64_t data = 0;
 
+    if (size < mr->min_access_size) {
+        access_size = mr->min_access_size;
+        goto adjusted;
+    }
+    if (size > mr->max_access_size) {
+        access_size = mr->max_access_size;
+        goto adjusted;
+    }
+
+    if (mr->ops->read) {
+        data = mr->ops->read(mr->opaque, addr, size);
+    } else {
+        data = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
+    }
+    trace_memory_region_ops_read(mr, addr, data, size);
+    return data;
+
+adjusted:
     if (mr->ops->read) {
-        access_with_adjusted_size(addr, &data, size,
+        access_with_adjusted_size(addr, &data, size, access_size,
                                   memory_region_read_accessor, mr);
     } else {
-        access_with_adjusted_size(addr, &data, size,
+        access_with_adjusted_size(addr, &data, size, access_size,
                                   memory_region_oldmmio_read_accessor, mr);
     }
 
@@ -957,6 +975,39 @@ static bool memory_region_dispatch_read(MemoryRegion *mr,
     return false;
 }
 
+static void memory_region_dispatch_write1(MemoryRegion *mr,
+                                          hwaddr addr,
+                                          uint64_t data,
+                                          unsigned size)
+{
+    unsigned access_size;
+    if (size < mr->min_access_size) {
+        access_size = mr->min_access_size;
+        goto adjusted;
+    }
+    if (size > mr->max_access_size) {
+        access_size = mr->max_access_size;
+        goto adjusted;
+    }
+
+    trace_memory_region_ops_write(mr, addr, data, size);
+    if (mr->ops->write) {
+        mr->ops->write(mr->opaque, addr, data, size);
+    } else {
+        mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, data);
+    }
+    return;
+
+adjusted:
+    if (mr->ops->write) {
+        access_with_adjusted_size(addr, &data, size, access_size,
+                                  memory_region_write_accessor, mr);
+    } else {
+        access_with_adjusted_size(addr, &data, size, access_size,
+                                  memory_region_oldmmio_write_accessor, mr);
+    }
+}
+
 static bool memory_region_dispatch_write(MemoryRegion *mr,
                                          hwaddr addr,
                                          uint64_t data,
@@ -968,14 +1019,7 @@ static bool memory_region_dispatch_write(MemoryRegion *mr,
     }
 
     adjust_endianness(mr, &data, size);
-
-    if (mr->ops->write) {
-        access_with_adjusted_size(addr, &data, size,
-                                  memory_region_write_accessor, mr);
-    } else {
-        access_with_adjusted_size(addr, &data, size,
-                                  memory_region_oldmmio_write_accessor, mr);
-    }
+    memory_region_dispatch_write1(mr, addr, data, size);
     return false;
 }
 
-- 
1.8.4.2