Add generic IRQ entry support for powerpc via the generic irqentry
infrastructure. There may still be duplicate calls and missing
callbacks that require further work.

Signed-off-by: Luming Yu <luming...@shingroup.cn>
---
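Note for reviewers: below is a rough sketch of the exit-to-user flow these
hooks plug into, simplified from the generic entry code in
kernel/entry/common.c. The placement of arch_exit_to_user_mode_work() inside
the work loop is an assumption of this sketch (and of this series), not a
verbatim quote of mainline; the other helpers and TIF flags are from the
existing generic entry framework.

	/*
	 * Simplified sketch (not mainline verbatim): how the generic entry
	 * exit path is expected to drive the hooks added by this patch.
	 */
	static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
						    unsigned long ti_work)
	{
		while (ti_work & EXIT_TO_USER_MODE_WORK) {
			local_irq_enable();

			if (ti_work & _TIF_NEED_RESCHED)
				schedule();

			if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
				arch_do_signal_or_restart(regs);	/* signal.c below */

			if (ti_work & _TIF_NOTIFY_RESUME)
				resume_user_mode_work(regs);

			/* Arch-specific work: FP/VEC/VSX or TM restore on powerpc. */
			arch_exit_to_user_mode_work(regs, ti_work);	/* entry-common.h below */

			local_irq_disable();
			ti_work = read_thread_flags();
		}

		return ti_work;
	}
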
 arch/powerpc/include/asm/entry-common.h | 32 ++++++++++++++++
 arch/powerpc/kernel/interrupt.c         | 51 +++++--------------------
 arch/powerpc/kernel/signal.c            |  7 ++++
 arch/powerpc/kernel/syscall.c           |  2 -
 4 files changed, 49 insertions(+), 43 deletions(-)

diff --git a/arch/powerpc/include/asm/entry-common.h b/arch/powerpc/include/asm/entry-common.h
index 51f1eb767696..faa829e15b5d 100644
--- a/arch/powerpc/include/asm/entry-common.h
+++ b/arch/powerpc/include/asm/entry-common.h
@@ -3,6 +3,7 @@
 #define ARCH_POWERPC_ENTRY_COMMON_H
 
 #include <linux/user-return-notifier.h>
+#include <asm/switch_to.h>
 
 static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
                                                  unsigned long ti_work)
@@ -13,4 +14,35 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 
 #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
 
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+                                               unsigned long ti_work)
+{
+
+       if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
+               if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+                               unlikely((ti_work & _TIF_RESTORE_TM))) {
+                       restore_tm_state(regs);
+               } else {
+                       unsigned long mathflags = MSR_FP;
+
+                       if (cpu_has_feature(CPU_FTR_VSX))
+                               mathflags |= MSR_VEC | MSR_VSX;
+                       else if (cpu_has_feature(CPU_FTR_ALTIVEC))
+                               mathflags |= MSR_VEC;
+
+                       /*
+                        * If userspace MSR has all available FP bits set,
+                        * then they are live and no need to restore. If not,
+                        * it means the regs were given up and restore_math
+                        * may decide to restore them (to avoid taking an FP
+                        * fault).
+                        */
+                       if ((regs->msr & mathflags) != mathflags)
+                               restore_math(regs);
+               }
+       }
+}
+
+#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work
+
 #endif
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 609ba48034de..42af9217136d 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -3,6 +3,7 @@
 #include <linux/context_tracking.h>
 #include <linux/err.h>
 #include <linux/compat.h>
+#include <linux/entry-common.h>
 #include <linux/sched/debug.h> /* for show_regs */
 
 #include <asm/kup.h>
@@ -183,47 +184,11 @@ interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
 
 again:
        ti_flags = read_thread_flags();
-       while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
-               local_irq_enable();
-               if (ti_flags & _TIF_NEED_RESCHED) {
-                       schedule();
-               } else {
-                       /*
-                        * SIGPENDING must restore signal handler function
-                        * argument GPRs, and some non-volatiles (e.g., r1).
-                        * Restore all for now. This could be made lighter.
-                        */
-                       if (ti_flags & _TIF_SIGPENDING)
-                               ret |= _TIF_RESTOREALL;
-                       do_notify_resume(regs, ti_flags);
-               }
-               local_irq_disable();
-               ti_flags = read_thread_flags();
-       }
 
-       if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
-               if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
-                               unlikely((ti_flags & _TIF_RESTORE_TM))) {
-                       restore_tm_state(regs);
-               } else {
-                       unsigned long mathflags = MSR_FP;
-
-                       if (cpu_has_feature(CPU_FTR_VSX))
-                               mathflags |= MSR_VEC | MSR_VSX;
-                       else if (cpu_has_feature(CPU_FTR_ALTIVEC))
-                               mathflags |= MSR_VEC;
-
-                       /*
-                        * If userspace MSR has all available FP bits set,
-                        * then they are live and no need to restore. If not,
-                        * it means the regs were given up and restore_math
-                        * may decide to restore them (to avoid taking an FP
-                        * fault).
-                        */
-                       if ((regs->msr & mathflags) != mathflags)
-                               restore_math(regs);
-               }
-       }
+       if (ti_flags & _TIF_SIGPENDING)
+               ret |= _TIF_RESTOREALL;
+       if (unlikely(ti_flags & EXIT_TO_USER_MODE_WORK))
+               ti_flags = exit_to_user_mode_loop(regs, ti_flags);
 
        check_return_regs_valid(regs);
 
@@ -297,11 +262,15 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
        }
 
        local_irq_disable();
-       ret = interrupt_exit_user_prepare_main(ret, regs);
+       if (ti_flags & _TIF_RESTOREALL)
+               ret |= _TIF_RESTOREALL;
 
+       if (ti_flags & _TIF_SIGPENDING)
+               ret |= _TIF_RESTOREALL;
 #ifdef CONFIG_PPC64
        regs->exit_result = ret;
 #endif
+       syscall_exit_to_user_mode(regs);
 
        return ret;
 }
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index aa17e62f3754..da21e7fef46a 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -11,6 +11,7 @@
 #include <linux/uprobes.h>
 #include <linux/key.h>
 #include <linux/context_tracking.h>
+#include <linux/entry-common.h>
 #include <linux/livepatch.h>
 #include <linux/syscalls.h>
 #include <asm/hw_breakpoint.h>
@@ -368,3 +369,9 @@ void signal_fault(struct task_struct *tsk, struct pt_regs *regs,
                printk_ratelimited(regs->msr & MSR_64BIT ? fm64 : fm32, tsk->comm,
                                   task_pid_nr(tsk), where, ptr, regs->nip, regs->link);
 }
+
+void arch_do_signal_or_restart(struct pt_regs *regs)
+{
+       BUG_ON(regs != current->thread.regs);
+       do_signal(current);
+}
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
index e0338bd8d383..97f158d13944 100644
--- a/arch/powerpc/kernel/syscall.c
+++ b/arch/powerpc/kernel/syscall.c
@@ -185,8 +185,6 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
         * So the resulting 6 or 7 bits of entropy is seen in SP[9:4] or SP[9:3].
         */
        choose_random_kstack_offset(mftb());
-       /*common entry*/
-       syscall_exit_to_user_mode(regs);
 
        return ret;
 }
-- 
2.42.0.windows.2

