All calls through hvm_funcs are fully altcall'd. Harden all function pointer targets.
This optimises away 106 targets.

Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
---
CC: Jan Beulich <jbeul...@suse.com>
CC: Roger Pau Monné <roger....@citrix.com>
CC: Wei Liu <w...@xen.org>
---
 xen/arch/x86/hvm/hvm.c     | 2 +-
 xen/arch/x86/hvm/svm/svm.c | 2 +-
 xen/arch/x86/hvm/vmx/vmx.c | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index cdd1529014f2..709a4191efe8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -88,7 +88,7 @@ unsigned int opt_hvm_debug_level __read_mostly;
 integer_param("hvm_debug", opt_hvm_debug_level);
 #endif
 
-struct hvm_function_table hvm_funcs __read_mostly;
+struct hvm_function_table __ro_after_init hvm_funcs;
 
 /*
  * The I/O permission bitmap is globally shared by all HVM guests except
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 63535a74b504..b80d4af6cb90 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2513,7 +2513,7 @@ static void cf_check svm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
     }
 }
 
-static struct hvm_function_table __initdata svm_function_table = {
+static struct hvm_function_table __initdata_cf_clobber svm_function_table = {
     .name = "SVM",
     .cpu_up_prepare = svm_cpu_up_prepare,
     .cpu_dead = svm_cpu_dead,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 41db538a9e3d..758df3321884 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2473,7 +2473,7 @@ static void cf_check vmx_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
     vmx_vmcs_exit(v);
 }
 
-static struct hvm_function_table __initdata vmx_function_table = {
+static struct hvm_function_table __initdata_cf_clobber vmx_function_table = {
     .name = "VMX",
     .cpu_up_prepare = vmx_cpu_up_prepare,
     .cpu_dead = vmx_cpu_dead,
-- 
2.11.0
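
For anyone not familiar with the annotations involved, below is a minimal,
self-contained C sketch of the pattern this patch relies on.  The macro
definitions, section names, and the EXAMPLE table are assumptions for
illustration only, not the exact Xen headers or vendor code; the real
definitions live in the Xen tree.

/*
 * Illustrative sketch only -- assumed approximations of the Xen macros,
 * not their exact definitions.
 */
#define __section(s)           __attribute__((__section__(s)))
#define __initdata_cf_clobber  __section(".init.data.cf_clobber") /* assumed name */
#define __ro_after_init        __section(".data.ro_after_init")   /* assumed name */

struct hvm_function_table {
    const char *name;
    int  (*cpu_up_prepare)(unsigned int cpu);
    void (*cpu_dead)(unsigned int cpu);
    /* ... further hooks elided ... */
};

/*
 * Hypervisor-wide table: written once during start-of-day setup, then
 * treated as read-only for the rest of runtime.
 */
struct hvm_function_table __ro_after_init hvm_funcs;

static int  example_cpu_up_prepare(unsigned int cpu) { (void)cpu; return 0; }
static void example_cpu_dead(unsigned int cpu)       { (void)cpu; }

/*
 * Vendor table: only read at boot to populate hvm_funcs.  Placing it in the
 * cf_clobber init-data section tells the altcall pass that, once every
 * hvm_funcs call has been converted to a direct call, the ENDBR64
 * instructions at the pointed-to functions may be overwritten, removing
 * them as viable indirect-branch targets.
 */
static struct hvm_function_table __initdata_cf_clobber example_function_table = {
    .name           = "EXAMPLE",
    .cpu_up_prepare = example_cpu_up_prepare,
    .cpu_dead       = example_cpu_dead,
};

The design point the sketch is meant to show: __ro_after_init protects the
live table from runtime modification, while __initdata_cf_clobber marks the
boot-time vendor tables so their function pointer targets can be hardened
once the altcall rewrite has happened.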