On Fri, 20 Dec 2024 15:59:27 +0800 Yicong Yang <yangyic...@huawei.com> wrote:
> On 2024/12/20 15:53, Yicong Yang wrote:
> > From: Yicong Yang <yangyic...@hisilicon.com>
> >
> > Currently if architectures want to support HOTPLUG_SMT they need to
> > provide a topology_is_primary_thread() telling the framework which
> > thread in the SMT cannot be offlined. However, arm64 doesn't have a
> > restriction on which thread in the SMT cannot be offlined; the simplest
> > choice is to just make the 1st thread the "primary" thread. So
> > make this the default implementation in the framework and let
> > architectures like x86 that have a special primary thread
> > override this function (which they've already done).
> >
> > There's no need to provide a stub function if !CONFIG_SMP or
> > !CONFIG_HOTPLUG_SMT. In such a case the CPU being tested is already
> > the 1st CPU in the SMT, so it's always the primary thread.
> >
> > Signed-off-by: Yicong Yang <yangyic...@hisilicon.com>
> > ---
> > Since it was questioned in v9 [1] whether this works on architectures not
> > using CONFIG_GENERIC_ARCH_TOPOLOGY, I hacked on a LoongArch VM and it also
> > works there. Architectures should adopt this according to their own situation.
> > [1]
> > https://lore.kernel.org/linux-arm-kernel/427bd639-33c3-47e4-9e83-68c428eb1...@arm.com/
> >
> > [root@localhost smt]# uname -m
> > loongarch64
> > [root@localhost smt]# pwd
> > /sys/devices/system/cpu/smt
> > [root@localhost smt]# cat ../possible
> > 0-3
> > [root@localhost smt]# cat ../online
> > 0-3
> > [root@localhost smt]# cat control
> > on
> > [root@localhost smt]# echo off > control
> > [root@localhost smt]# cat control
> > off
> > [root@localhost smt]# cat ../online
> > 0,2
> > [root@localhost smt]# echo on > control
> > [root@localhost smt]# cat control
> > on
> > [root@localhost smt]# cat ../online
> > 0-3
>
> Tested with the code below using the topology_is_primary_thread() introduced
> in this patch. Tested on an ACPI-based QEMU VM emulating SMT2.

Nice bit of testing. Given it all seems fine, FWIW

Reviewed-by: Jonathan Cameron <jonathan.came...@huawei.com>

(for the original patch, not the LoongArch one!)

>
> Subject: [PATCH] LoongArch: Support HOTPLUG_SMT on ACPI-based system
>
> Support HOTPLUG_SMT on ACPI-based system using generic
> topology_is_primary_thread().
>
> Signed-off-by: Yicong Yang <yangyic...@hisilicon.com>
> ---
>  arch/loongarch/Kconfig       |  1 +
>  arch/loongarch/kernel/acpi.c | 26 ++++++++++++++++++++++++--
>  2 files changed, 25 insertions(+), 2 deletions(-)
>
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index dae3a9104ca6..bed1b0640b97 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -172,6 +172,7 @@ config LOONGARCH
>  	select HAVE_SYSCALL_TRACEPOINTS
>  	select HAVE_TIF_NOHZ
>  	select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
> +	select HOTPLUG_SMT if HOTPLUG_CPU
>  	select IRQ_FORCED_THREADING
>  	select IRQ_LOONGARCH_CPU
>  	select LOCK_MM_AND_FIND_VMA
> diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
> index 382a09a7152c..e642b0de57e7 100644
> --- a/arch/loongarch/kernel/acpi.c
> +++ b/arch/loongarch/kernel/acpi.c
> @@ -15,9 +15,11 @@
>  #include <linux/memblock.h>
>  #include <linux/of_fdt.h>
>  #include <linux/serial_core.h>
> +#include <linux/xarray.h>
>  #include <asm/io.h>
>  #include <asm/numa.h>
>  #include <asm/loongson.h>
> +#include <linux/cpu_smt.h>
>
>  int acpi_disabled;
>  EXPORT_SYMBOL(acpi_disabled);
> @@ -175,8 +177,12 @@ int pptt_enabled;
>
>  int __init parse_acpi_topology(void)
>  {
> +	int thread_num, max_smt_thread_num = 1;
> +	struct xarray core_threads;
>  	int cpu, topology_id;
> +	void *entry;
>
> +	xa_init(&core_threads);
>  	for_each_possible_cpu(cpu) {
>  		topology_id = find_acpi_cpu_topology(cpu, 0);
>  		if (topology_id < 0) {
> @@ -184,19 +190,35 @@ int __init parse_acpi_topology(void)
>  			return -ENOENT;
>  		}
>
> -		if (acpi_pptt_cpu_is_thread(cpu) <= 0)
> +		if (acpi_pptt_cpu_is_thread(cpu) <= 0) {
>  			cpu_data[cpu].core = topology_id;
> -		else {
> +		} else {
>  			topology_id = find_acpi_cpu_topology(cpu, 1);
>  			if (topology_id < 0)
>  				return -ENOENT;
>
>  			cpu_data[cpu].core = topology_id;
> +
> +			entry = xa_load(&core_threads, topology_id);
> +			if (!entry) {
> +				xa_store(&core_threads, topology_id,
> +					 xa_mk_value(1), GFP_KERNEL);
> +			} else {
> +				thread_num = xa_to_value(entry);
> +				thread_num++;
> +				xa_store(&core_threads, topology_id,
> +					 xa_mk_value(thread_num), GFP_KERNEL);
> +
> +				if (thread_num > max_smt_thread_num)
> +					max_smt_thread_num = thread_num;
> +			}
>  		}
>  	}
>
>  	pptt_enabled = 1;
>
> +	cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
> +	xa_destroy(&core_threads);
>  	return 0;
>  }
>
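[Editor's note] For readers following along: the default behaviour described in the
quoted commit message is simply "the first thread of each SMT core is the primary
thread". A minimal sketch of that idea, assuming the architecture populates
topology_sibling_cpumask() for each online CPU (the exact upstream helper and its
comments may differ), could look like this:

#include <linux/cpumask.h>
#include <linux/topology.h>

/*
 * Illustrative sketch only, not necessarily the exact code merged upstream:
 * with no architecture-specific restriction, treat the first CPU in the SMT
 * sibling mask as the primary thread that stays online when SMT is disabled.
 * Architectures with a special primary thread (e.g. x86) override this.
 */
static inline bool topology_is_primary_thread(unsigned int cpu)
{
	return cpu == cpumask_first(topology_sibling_cpumask(cpu));
}

On the LoongArch side, the remaining piece is telling the core code how many
threads a core may have: that is what the xarray counting in the quoted
parse_acpi_topology() computes and passes to cpu_smt_set_num_threads(), so the
smt/control interface knows the maximum SMT thread count to expose.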