The memory API currently manipulates address range start and size values as signed integers. Because memory ranges with size INT64_MAX are very common, we must be careful not to trigger integer overflows. I already fixed one such integer overflow bug in commit d2963631dd54ddf0f46c151b7e3013e39bb78d3b, but there are more.
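For example, here is the hazard in miniature (a minimal sketch with illustrative values, not taken from any real guest):

    #include <stdint.h>

    int64_t start = 0x10000;        /* any non-zero start address */
    int64_t size  = INT64_MAX;      /* typical "rest of address space" size */
    int64_t end   = start + size;   /* signed overflow: undefined behaviour in C */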
In particular, intermediate steps mean that ranges with size INT64_MAX and non-zero start are often constructed. This means that simply computing a range's end address, as in addrrange_end(), can trigger a signed integer overflow. Since the behaviour of signed integer overflow in C is undefined, *every* use of addrrange_end() is a bug. This patch therefore replaces every use of addrrange_end() with arithmetic constructed so as not to cause an overflow. This fixes real bugs that have bitten us with the upcoming PCI support for the pseries machine.

Signed-off-by: David Gibson <da...@gibson.dropbear.id.au>
---
 memory.c |   35 ++++++++++++++++++++---------------
 1 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/memory.c b/memory.c
index f46e626..6bf9ba5 100644
--- a/memory.c
+++ b/memory.c
@@ -26,6 +26,9 @@ typedef struct AddrRange AddrRange;
  * Note using signed integers limits us to physical addresses at most
  * 63 bits wide. They are needed for negative offsetting in aliases
  * (large MemoryRegion::alias_offset).
+ *
+ * BEWARE: ranges of size INT64_MAX are common, so any arithmetic on
+ * ranges *must* be careful to avoid integer overflow.
  */
 struct AddrRange {
     int64_t start;
@@ -42,11 +45,6 @@ static bool addrrange_equal(AddrRange r1, AddrRange r2)
     return r1.start == r2.start && r1.size == r2.size;
 }
 
-static int64_t addrrange_end(AddrRange r)
-{
-    return r.start + r.size;
-}
-
 static AddrRange addrrange_shift(AddrRange range, int64_t delta)
 {
     range.start += delta;
@@ -61,10 +59,13 @@ static bool addrrange_intersects(AddrRange r1, AddrRange r2)
 
 static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
 {
-    int64_t start = MAX(r1.start, r2.start);
-    /* off-by-one arithmetic to prevent overflow */
-    int64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
-    return addrrange_make(start, end - start + 1);
+    if (r1.start <= r2.start) {
+        return addrrange_make(r2.start,
+                              MIN(r2.size, r1.size - (r2.start - r1.start)));
+    } else {
+        return addrrange_make(r1.start,
+                              MIN(r1.size, r2.size - (r1.start - r2.start)));
+    }
 }
 
 struct CoalescedMemoryRange {
@@ -201,7 +202,8 @@ static void flatview_destroy(FlatView *view)
 
 static bool can_merge(FlatRange *r1, FlatRange *r2)
 {
-    return addrrange_end(r1->addr) == r2->addr.start
+    assert(r1->addr.start < r2->addr.start);
+    return ((r2->addr.start - r1->addr.start) == r1->addr.size)
         && r1->mr == r2->mr
         && r1->offset_in_region + r1->addr.size == r2->offset_in_region
         && r1->dirty_log_mask == r2->dirty_log_mask
@@ -535,11 +537,14 @@ static void render_memory_region(FlatView *view,
 
     /* Render the region itself into any gaps left by the current view. */
     for (i = 0; i < view->nr && remain; ++i) {
-        if (base >= addrrange_end(view->ranges[i].addr)) {
+        AddrRange *vrange = &view->ranges[i].addr;
+
+        if ((base > vrange->start)
+            && ((base - vrange->start) >= vrange->size)) {
             continue;
         }
-        if (base < view->ranges[i].addr.start) {
-            now = MIN(remain, view->ranges[i].addr.start - base);
+        if (base < vrange->start) {
+            now = MIN(remain, vrange->start - base);
             fr.mr = mr;
             fr.offset_in_region = offset_in_region;
             fr.addr = addrrange_make(base, now);
@@ -552,8 +557,8 @@ static void render_memory_region(FlatView *view,
             offset_in_region += now;
             remain -= now;
         }
-        if (base == view->ranges[i].addr.start) {
-            now = MIN(remain, view->ranges[i].addr.size);
+        if (base == vrange->start) {
+            now = MIN(remain, vrange->size);
             base += now;
             offset_in_region += now;
             remain -= now;
-- 
1.7.6.3
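The same idea generalises to an overflow-safe membership test. The sketch below is illustrative only: addrrange_contains is a hypothetical helper, not part of this patch, and it assumes non-negative addresses, so once addr >= r.start holds, (addr - r.start) cannot overflow and r.start + r.size is never evaluated:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper, for illustration only.  It compares the
     * offset of addr within r against r.size instead of computing the
     * range's end address, so no intermediate value can exceed
     * INT64_MAX even when r.size == INT64_MAX. */
    static bool addrrange_contains(AddrRange r, int64_t addr)
    {
        return addr >= r.start && (addr - r.start) < r.size;
    }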