On Mon, Dec 14, 2015 at 10:35 AM, Paolo Bonzini <pbonz...@redhat.com> wrote:
>
>
> On 14/12/2015 09:41, Alvise Rigo wrote:
>> +static inline void excl_history_put_addr(CPUState *cpu, hwaddr addr)
>> +{
>> +    /* Avoid some overhead if the address we are about to put is equal to
>> +     * the last one */
>> +    if (cpu->excl_protected_addr[cpu->excl_protected_last] !=
>> +        (addr & TARGET_PAGE_MASK)) {
>> +        cpu->excl_protected_last = (cpu->excl_protected_last + 1) %
>> +                                   EXCLUSIVE_HISTORY_LEN;
>
> Either use "&" here...
>
>> +        /* Unset EXCL bit of the oldest entry */
>> +        if (cpu->excl_protected_addr[cpu->excl_protected_last] !=
>> +            EXCLUSIVE_RESET_ADDR) {
>> +            cpu_physical_memory_unset_excl(
>> +                cpu->excl_protected_addr[cpu->excl_protected_last],
>> +                cpu->cpu_index);
>> +        }
>> +
>> +        /* Add a new address, overwriting the oldest one */
>> +        cpu->excl_protected_addr[cpu->excl_protected_last] =
>> +            addr & TARGET_PAGE_MASK;
>> +    }
>> +}
>> +
>>  #define MMUSUFFIX _mmu
>>
>>  /* Generates LoadLink/StoreConditional helpers in softmmu_template.h */
>> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
>> index 9e409ce..5f65ebf 100644
>> --- a/include/qom/cpu.h
>> +++ b/include/qom/cpu.h
>> @@ -217,6 +217,7 @@ struct kvm_run;
>>
>>  /* Atomic insn translation TLB support. */
>>  #define EXCLUSIVE_RESET_ADDR ULLONG_MAX
>> +#define EXCLUSIVE_HISTORY_LEN 8
>>
>>  /**
>>   * CPUState:
>> @@ -343,6 +344,8 @@ struct CPUState {
>>      * The address is set to EXCLUSIVE_RESET_ADDR if the vCPU is not.
>>      * in the middle of a LL/SC. */
>>     struct Range excl_protected_range;
>> +    hwaddr excl_protected_addr[EXCLUSIVE_HISTORY_LEN];
>> +    int excl_protected_last;
>
> ... or make this an "unsigned int". Otherwise the code will contain an
> actual (and slow) modulo operation.
Absolutely true.

Thank you,
alvise

>
> Paolo
>
>>      /* Used to carry the SC result but also to flag a normal (legacy)
>>       * store access made by a stcond (see softmmu_template.h). */
>>      int excl_succeeded;
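
[Editor's note] For readers following the review: Paolo's point is that EXCLUSIVE_HISTORY_LEN is a power of two (8), so the wrap-around can be written as a bitwise AND, and that with a signed index the compiler must also handle negative operands and cannot reduce "%" to a plain mask. Below is a minimal standalone sketch of the masking approach; the ExclHistory type and history_put() helper are simplified stand-ins for illustration only, not the actual QEMU structures from the patch.

#include <stdio.h>

/* Illustrative stand-in for the patch's ring-buffer fields. */
#define EXCLUSIVE_HISTORY_LEN 8        /* must stay a power of two for the mask trick */

typedef struct {
    unsigned long long addr[EXCLUSIVE_HISTORY_LEN];
    unsigned int last;                 /* unsigned: even "%" would optimize to a mask */
} ExclHistory;

static inline void history_put(ExclHistory *h, unsigned long long addr)
{
    /* Advance the ring index with a mask instead of a modulo:
     * (x + 1) & (LEN - 1) == (x + 1) % LEN when LEN is a power of two. */
    h->last = (h->last + 1) & (EXCLUSIVE_HISTORY_LEN - 1);
    h->addr[h->last] = addr;           /* overwrite the oldest entry */
}

int main(void)
{
    ExclHistory h = { .last = 0 };

    /* Insert more entries than the history holds to show the wrap-around. */
    for (unsigned long long a = 0; a < 10; a++) {
        history_put(&h, a << 12);
    }
    printf("index after 10 inserts: %u\n", h.last);  /* stays within 0..7 */
    return 0;
}

Either fix (the mask, or keeping "%" but making excl_protected_last unsigned) avoids the hardware division that a signed modulo would otherwise require on the hot path.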