On 25/02/2020 at 18:35, Nicholas Piggin wrote:
Implement the bulk of interrupt return logic in C. The asm return code
must handle a few cases: restoring full GPRs, and emulating stack store.

+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
+{
+       unsigned long *ti_flagsp = &current_thread_info()->flags;
+       unsigned long flags;
+
+       if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI)))
+               unrecoverable_exception(regs);
+       BUG_ON(regs->msr & MSR_PR);
+       BUG_ON(!FULL_REGS(regs));
+
+       local_irq_save(flags);
+
+       if (regs->softe == IRQS_ENABLED) {
+               /* Returning to a kernel context with local irqs enabled. */
+               WARN_ON_ONCE(!(regs->msr & MSR_EE));
+again:
+               if (IS_ENABLED(CONFIG_PREEMPT)) {
+                       /* Return to preemptible kernel context */
+                       if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) {
+                               if (preempt_count() == 0)
+                                       preempt_schedule_irq();
+                       }
+               }
+
+               trace_hardirqs_on();
+               __hard_EE_RI_disable();
+               if (unlikely(lazy_irq_pending())) {
+                       __hard_RI_enable();
+                       irq_soft_mask_set(IRQS_ALL_DISABLED);
+                       trace_hardirqs_off();
+                       local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+                       /*
+                        * Can't local_irq_enable in case we are in interrupt
+                        * context. Must replay directly.
+                        */
+                       replay_soft_interrupts();
+                       irq_soft_mask_set(flags);
+                       /* Took an interrupt, may have more exit work to do. */
+                       goto again;
+               }
+               local_paca->irq_happened = 0;
+               irq_soft_mask_set(IRQS_ENABLED);
+       } else {
+               /* Returning to a kernel context with local irqs disabled. */
+               trace_hardirqs_on();
+               __hard_EE_RI_disable();
+               if (regs->msr & MSR_EE)
+                       local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+       }
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       local_paca->tm_scratch = regs->msr;
+#endif
+
+       /*
+        * We don't need to restore AMR on the way back to userspace for KUAP.
+        * The value of AMR only matters while we're in the kernel.
+        */
+       kuap_restore_amr(regs);

Is it correct to restore the KUAP state here? Shouldn't it be done at a
lower level, in assembly?

Isn't there a risk that someone manages, one way or another, to call interrupt_exit_kernel_prepare() (or branch into the end of it) and thereby get the previous KUAP state restored?

Also, it looks a bit strange to have kuap_save_amr_and_lock() done at the lowest level, in assembly, while kuap_restore_amr() is done at a higher level, in C. That looks unbalanced.
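
For reference, my understanding is that on Radix this restore boils down
to a single conditional mtspr. Roughly (sketched from my reading of
arch/powerpc/include/asm/book3s/64/kup-radix.h in this series, so the
details may differ):

static inline void kuap_restore_amr(struct pt_regs *regs)
{
	/* Write the saved KUAP state back into the AMR SPR */
	if (mmu_has_feature(MMU_FTR_RADIX_KUAP))
		mtspr(SPRN_AMR, regs->kuap);
}

So anyone who manages to get here with a crafted regs->kuap has that
value written straight into the AMR, which is why the placement matters.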

Christophe

+
+       if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
+               clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
+               return 1;
+       }
+       return 0;
+}
+#endif
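
For anyone following along: as I read it, returning 1 here tells the asm
tail that _TIF_EMULATE_STACK_STORE was set, i.e. the alignment handler
emulated a stdu/stwu updating r1 but could not perform the store half
while the exception frame was still live, so the asm has to do that store
just before returning. A simplified sketch of the consumer (from my
reading of the asm side of this series, not the exact code; the real
version also has to preserve the scratch register, e.g. via the PACA):

	bl	interrupt_exit_kernel_prepare
	cmpdi	r3,0			/* returned 1 -> emulate stack store */
	beq	1f
	addi	r9,r1,INT_FRAME_SIZE	/* original r1, the value to store */
	REST_GPR(1, r1)			/* switch to the updated r1 from regs->gpr[1] */
	std	r9,0(r1)		/* perform the store component of the stdu */
	RFI_TO_KERNEL
1:
	REST_GPR(1, r1)
	RFI_TO_KERNEL
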
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 25c14a0981bf..d20c5e79e03c 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -134,7 +134,7 @@ _GLOBAL(load_up_vsx)
        /* enable use of VSX after return */
        oris    r12,r12,MSR_VSX@h
        std     r12,_MSR(r1)
-       b       fast_exception_return
+       b       fast_interrupt_return
#endif /* CONFIG_VSX */
