From: Fenghua Yu <fenghua...@intel.com>

Hook the x86 scheduler code to update the closid based on whether the
current task is assigned a specific closid or is running on a CPU
assigned a specific closid.
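The selection order the hook implements: a non-zero per-task closid wins
over the closid of the CPU the task runs on. As a minimal standalone
sketch of just that fallback logic (illustrative only, not part of the
patch; task_closid and cpu_closid stand in for current->closid and the
per-cpu cpu_closid variable in the patch below):

/*
 * Illustrative sketch of the closid selection order implemented
 * by intel_rdt_sched_in() below. Closid 0 means "no per-task
 * assignment", so fall back to the closid of the current CPU.
 */
static int resolve_closid(int task_closid, int cpu_closid)
{
	if (task_closid != 0)
		return task_closid;
	return cpu_closid;
}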
Signed-off-by: Fenghua Yu <fenghua...@intel.com>
Signed-off-by: Tony Luck <tony.l...@intel.com>
---
 arch/x86/include/asm/intel_rdt.h         | 41 ++++++++++++++++++++++++++++++++
 arch/x86/kernel/cpu/intel_rdt.c          |  1 -
 arch/x86/kernel/cpu/intel_rdt_rdtgroup.c |  3 +++
 arch/x86/kernel/process_32.c             |  4 ++++
 arch/x86/kernel/process_64.c             |  4 ++++
 5 files changed, 52 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
index c052652..aefa3a6 100644
--- a/arch/x86/include/asm/intel_rdt.h
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -1,6 +1,9 @@
 #ifndef _ASM_X86_INTEL_RDT_H
 #define _ASM_X86_INTEL_RDT_H
 
+#ifdef CONFIG_INTEL_RDT
+#include <asm/intel_rdt_common.h>
+
 /**
  * struct rdtgroup - store rdtgroup's data in resctrl file system.
  * @kn:			kernfs node
@@ -161,4 +164,42 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 				char *buf, size_t nbytes, loff_t off);
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
 			   struct seq_file *s, void *v);
+
+/*
+ * intel_rdt_sched_in() - Writes the task's CLOSid to the IA32_PQR_ASSOC MSR
+ *
+ * The following considerations are made so that this has minimal impact
+ * on the scheduler hot path:
+ * - This will stay a no-op unless we are running on an Intel SKU
+ *   that supports resource control and the resctrl file system has
+ *   been mounted.
+ * - Caches the per-cpu CLOSid values and does the MSR write only
+ *   when a task with a different CLOSid is scheduled in.
+ */
+static inline void intel_rdt_sched_in(void)
+{
+	if (static_branch_likely(&rdt_enable_key)) {
+		struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+		int closid;
+
+		/*
+		 * If this task has a closid assigned, use it.
+		 * Else use the closid assigned to this cpu.
+		 */
+		closid = current->closid;
+		if (closid == 0)
+			closid = this_cpu_read(cpu_closid);
+
+		if (closid != state->closid) {
+			state->closid = closid;
+			wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
+		}
+	}
+}
+
+#else
+
+static inline void intel_rdt_sched_in(void) {}
+
+#endif /* CONFIG_INTEL_RDT */
 #endif /* _ASM_X86_INTEL_RDT_H */
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 0b076d0..16a982e7 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -29,7 +29,6 @@
 #include <linux/err.h>
 #include <linux/cpuhotplug.h>
 
-#include <asm/intel_rdt_common.h>
 #include <asm/intel-family.h>
 #include <asm/intel_rdt.h>
 
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 4740b7e..b8c6e6e 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -282,6 +282,9 @@ static void move_myself(struct callback_head *head)
 		kfree(rdtgrp);
 	}
 
+	/* Update the PQR_ASSOC MSR so the new resource group takes effect. */
+	intel_rdt_sched_in();
+
 	kfree(callback);
 }
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index d86be29..5495527 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -54,6 +54,7 @@
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
 #include <asm/vm86.h>
+#include <asm/intel_rdt.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
@@ -314,5 +315,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	this_cpu_write(current_task, next_p);
 
+	/* Load the Intel cache allocation PQR MSR. */
+	intel_rdt_sched_in();
+
 	return prev_p;
 }
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 63236d8..cdea7e3 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
 #include <asm/xen/hypervisor.h>
+#include <asm/intel_rdt.h>
 
 asmlinkage extern void ret_from_fork(void);
 
@@ -472,6 +473,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		loadsegment(ss, __KERNEL_DS);
 	}
 
+	/* Load the Intel cache allocation PQR MSR. */
+	intel_rdt_sched_in();
+
 	return prev_p;
 }
-- 
2.5.0
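A note on the wrmsr() call in intel_rdt_sched_in() above: wrmsr(msr, lo, hi)
writes EDX:EAX, and IA32_PQR_ASSOC carries the RMID in its low 32 bits and
the CLOSID in bits 32-63, which is why state->rmid is passed as the low word
and closid as the high word. A sketch of the resulting 64-bit value
(illustrative helper, not part of the patch; field widths elided):

#include <stdint.h>

/*
 * Illustrative helper: composes the 64-bit IA32_PQR_ASSOC value that
 * wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid) ends up writing -- RMID in
 * the low half, CLOSID in the high half.
 */
static inline uint64_t pqr_assoc_val(uint32_t rmid, uint32_t closid)
{
	return ((uint64_t)closid << 32) | rmid;
}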