From: Yazen Ghannam <yazen.ghan...@amd.com>

The block address is saved after the block is initialized when
threshold_init_device() is called.

Use the saved block address, if available, rather than trying to
rediscover it. This will avoid a call trace, when resuming from suspend,
due to the rdmsr_safe_on_cpu() call in get_block_address(). The
rdmsr_safe_on_cpu() call issues an IPI but we're running with interrupts
disabled. This triggers:

 WARNING: CPU: 0 PID: 11523 at kernel/smp.c:291 smp_call_function_single+0xdc/0xe0

Cc: <sta...@vger.kernel.org> # 4.14.x
Signed-off-by: Yazen Ghannam <yazen.ghan...@amd.com>
---
Link: https://lkml.kernel.org/r/20180201184813.82253-3-yazen.ghan...@amd.com

v1->v2:
* Give more detail on call trace issue.

 arch/x86/kernel/cpu/mcheck/mce_amd.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index d8ba9d0c3f01..12bc2863a4d6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -436,6 +436,21 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi
 {
 	u32 addr = 0, offset = 0;
 
+	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
+		return addr;
+
+	/* Get address from already initialized block. */
+	if (per_cpu(threshold_banks, cpu)) {
+		struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank];
+
+		if (bankp && bankp->blocks) {
+			struct threshold_block *blockp = &bankp->blocks[block];
+
+			if (blockp)
+				return blockp->address;
+		}
+	}
+
 	if (mce_flags.smca) {
 		if (smca_get_bank_type(bank) == SMCA_RESERVED)
 			return addr;
-- 
2.14.1
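(Not part of the patch.) For readers outside the MCE code, below is a minimal
userspace C sketch of the pattern the hunk relies on: discover the block
address once at init time, save it in the per-bank/per-block structures, and
on later calls return the saved value instead of re-probing the hardware from
a context where an IPI-based MSR read would be unsafe. The types and names
here (threshold_bank, threshold_block, rediscover_address) are simplified
stand-ins, not the kernel implementation.

	#include <stdio.h>
	#include <stdlib.h>

	#define NR_BANKS   4
	#define NR_BLOCKS  5

	/* Simplified stand-in for the kernel's struct threshold_block. */
	struct threshold_block {
		unsigned int address;	/* address saved at init time */
	};

	/* Simplified stand-in for the kernel's struct threshold_bank. */
	struct threshold_bank {
		struct threshold_block *blocks;
	};

	/* One "CPU" worth of banks, standing in for the per-CPU array. */
	static struct threshold_bank *threshold_banks[NR_BANKS];

	/* Stands in for the unsafe hardware probe (rdmsr_safe_on_cpu()). */
	static unsigned int rediscover_address(unsigned int bank, unsigned int block)
	{
		printf("probing hardware for bank %u block %u\n", bank, block);
		return 0xC0002000u + bank * 0x10u + block;
	}

	static unsigned int get_block_address(unsigned int bank, unsigned int block)
	{
		/* Bounds check before indexing, as the patch's first hunk lines do. */
		if (bank >= NR_BANKS || block >= NR_BLOCKS)
			return 0;

		/* Prefer the address saved when the block was initialized. */
		if (threshold_banks[bank] && threshold_banks[bank]->blocks)
			return threshold_banks[bank]->blocks[block].address;

		/* Fall back to rediscovery only when nothing was saved. */
		return rediscover_address(bank, block);
	}

	int main(void)
	{
		/* "Init time": probe once and cache the results. */
		threshold_banks[0] = calloc(1, sizeof(*threshold_banks[0]));
		threshold_banks[0]->blocks = calloc(NR_BLOCKS, sizeof(struct threshold_block));
		for (unsigned int b = 0; b < NR_BLOCKS; b++)
			threshold_banks[0]->blocks[b].address = rediscover_address(0, b);

		/* "Resume time": reuse the saved address, no probe needed. */
		printf("bank 0 block 2 -> %#x\n", get_block_address(0, 2));
		return 0;
	}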