Add a return code to __paravirt_set_sched_clock() so that the kernel can
reject attempts to use a PV sched_clock without breaking the caller.  E.g.
when running as a CoCo VM with a secure TSC, using a PV clock is generally
undesirable.

Note, kvmclock is the only PV clock that does anything "extra" beyond
simply registering itself as sched_clock, i.e. is the only caller that
needs to check the new return value.
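For illustration only (not part of this patch), a sketch of how a follow-up
could use the new return code to refuse PV sched_clock registration; the
secure-TSC predicate below is a hypothetical placeholder, not an existing
kernel API:

  int __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
					void (*save)(void), void (*restore)(void))
  {
	/*
	 * Hypothetical check: a CoCo guest with a secure TSC keeps the
	 * native sched_clock and rejects the PV override.
	 */
	if (guest_has_secure_tsc())
		return -EPERM;

	if (!stable)
		clear_sched_clock_stable();

	static_call_update(pv_sched_clock, func);
	x86_platform.save_sched_clock_state = save;
	x86_platform.restore_sched_clock_state = restore;
	return 0;
  }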

Signed-off-by: Sean Christopherson <sea...@google.com>
---
 arch/x86/include/asm/paravirt.h | 6 +++---
 arch/x86/kernel/kvmclock.c      | 7 +++++--
 arch/x86/kernel/paravirt.c      | 5 +++--
 3 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index e6d5e77753c4..5de31b22aa5f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -28,14 +28,14 @@ u64 dummy_sched_clock(void);
 DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
 DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);
 
-void __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
-                                      void (*save)(void), void (*restore)(void));
+int __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
+                                     void (*save)(void), void (*restore)(void));
 
 static __always_inline void paravirt_set_sched_clock(u64 (*func)(void),
                                                     void (*save)(void),
                                                     void (*restore)(void))
 {
-       __paravirt_set_sched_clock(func, true, save, restore);
+       (void)__paravirt_set_sched_clock(func, true, save, restore);
 }
 
 static __always_inline u64 paravirt_sched_clock(void)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 76884dfc77f4..1dbe12ecb26e 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -337,9 +337,12 @@ static int kvmclock_setup_percpu(unsigned int cpu)
 
 static void __init kvm_sched_clock_init(bool stable)
 {
+       if (__paravirt_set_sched_clock(kvm_sched_clock_read, stable,
+                                      kvm_save_sched_clock_state,
+                                      kvm_restore_sched_clock_state))
+               return;
+
        kvm_sched_clock_offset = kvm_clock_read();
-       __paravirt_set_sched_clock(kvm_sched_clock_read, stable,
-                                  kvm_save_sched_clock_state, kvm_restore_sched_clock_state);
        kvmclock_is_sched_clock = true;
 
        /*
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 92bf831a63b1..a3a1359cfc26 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -86,8 +86,8 @@ static u64 native_steal_clock(int cpu)
 DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
 DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);
 
-void __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
-                                      void (*save)(void), void (*restore)(void))
+int __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
+                                     void (*save)(void), void (*restore)(void))
 {
        if (!stable)
                clear_sched_clock_stable();
@@ -95,6 +95,7 @@ void __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
        static_call_update(pv_sched_clock, func);
        x86_platform.save_sched_clock_state = save;
        x86_platform.restore_sched_clock_state = restore;
+       return 0;
 }
 
 /* These are in entry.S */
-- 
2.48.1.711.g2feabab25a-goog