Rename arch_irq_disabled_regs() to regs_irqs_disabled() so the helper can
be shared between the generic entry/exit framework and powerpc arch code.

Signed-off-by: Mukesh Kumar Chaurasiya <mchau...@linux.ibm.com>
---
 arch/powerpc/include/asm/hw_irq.h    |  4 ++--
 arch/powerpc/include/asm/interrupt.h | 12 ++++++------
 arch/powerpc/kernel/interrupt.c      |  4 ++--
 arch/powerpc/kernel/syscall.c        |  2 +-
 arch/powerpc/kernel/traps.c          |  2 +-
 arch/powerpc/kernel/watchdog.c       |  2 +-
 arch/powerpc/perf/core-book3s.c      |  2 +-
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/include/asm/hw_irq.h 
b/arch/powerpc/include/asm/hw_irq.h
index 569ac1165b069..2b9cf0380e0e9 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -393,7 +393,7 @@ static inline void do_hard_irq_enable(void)
        __hard_irq_enable();
 }
 
-static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+static inline bool regs_irqs_disabled(struct pt_regs *regs)
 {
        return (regs->softe & IRQS_DISABLED);
 }
@@ -466,7 +466,7 @@ static inline bool arch_irqs_disabled(void)
 
 #define hard_irq_disable()             arch_local_irq_disable()
 
-static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+static inline bool regs_irqs_disabled(struct pt_regs *regs)
 {
        return !(regs->msr & MSR_EE);
 }
diff --git a/arch/powerpc/include/asm/interrupt.h 
b/arch/powerpc/include/asm/interrupt.h
index 23638d4e73ac0..56bc8113b8cde 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -172,7 +172,7 @@ static inline void interrupt_enter_prepare(struct pt_regs 
*regs)
        /* Enable MSR[RI] early, to support kernel SLB and hash faults */
 #endif
 
-       if (!arch_irq_disabled_regs(regs))
+       if (!regs_irqs_disabled(regs))
                trace_hardirqs_off();
 
        if (user_mode(regs)) {
@@ -192,10 +192,10 @@ static inline void interrupt_enter_prepare(struct pt_regs 
*regs)
                        CT_WARN_ON(ct_state() != CT_STATE_KERNEL &&
                                   ct_state() != CT_STATE_IDLE);
                INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
-               INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
+               INT_SOFT_MASK_BUG_ON(regs, regs_irqs_disabled(regs) &&
                                           
search_kernel_restart_table(regs->nip));
        }
-       INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
+       INT_SOFT_MASK_BUG_ON(regs, !regs_irqs_disabled(regs) &&
                                   !(regs->msr & MSR_EE));
 
        booke_restore_dbcr0();
@@ -298,7 +298,7 @@ static inline void interrupt_nmi_enter_prepare(struct 
pt_regs *regs, struct inte
                 * Adjust regs->softe to be soft-masked if it had not been
                 * reconcied (e.g., interrupt entry with MSR[EE]=0 but softe
                 * not yet set disabled), or if it was in an implicit soft
-                * masked state. This makes arch_irq_disabled_regs(regs)
+                * masked state. This makes regs_irqs_disabled(regs)
                 * behave as expected.
                 */
                regs->softe = IRQS_ALL_DISABLED;
@@ -372,7 +372,7 @@ static inline void interrupt_nmi_exit_prepare(struct 
pt_regs *regs, struct inter
 
 #ifdef CONFIG_PPC64
 #ifdef CONFIG_PPC_BOOK3S
-       if (arch_irq_disabled_regs(regs)) {
+       if (regs_irqs_disabled(regs)) {
                unsigned long rst = search_kernel_restart_table(regs->nip);
                if (rst)
                        regs_set_return_ip(regs, rst);
@@ -661,7 +661,7 @@ void replay_soft_interrupts(void);
 
 static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
 {
-       if (!arch_irq_disabled_regs(regs))
+       if (!regs_irqs_disabled(regs))
                local_irq_enable();
 }
 
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 8f4acc55407b0..f656192f075fb 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -343,7 +343,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct 
pt_regs *regs)
        unsigned long ret;
 
        BUG_ON(regs_is_unrecoverable(regs));
-       BUG_ON(arch_irq_disabled_regs(regs));
+       BUG_ON(regs_irqs_disabled(regs));
        CT_WARN_ON(ct_state() == CT_STATE_USER);
 
        /*
@@ -392,7 +392,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct 
pt_regs *regs)
 
        local_irq_disable();
 
-       if (!arch_irq_disabled_regs(regs)) {
+       if (!regs_irqs_disabled(regs)) {
                /* Returning to a kernel context with local irqs enabled. */
                WARN_ON_ONCE(!(regs->msr & MSR_EE));
 again:
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
index be159ad4b77bd..9f03a6263fb41 100644
--- a/arch/powerpc/kernel/syscall.c
+++ b/arch/powerpc/kernel/syscall.c
@@ -32,7 +32,7 @@ notrace long system_call_exception(struct pt_regs *regs, 
unsigned long r0)
 
        BUG_ON(regs_is_unrecoverable(regs));
        BUG_ON(!user_mode(regs));
-       BUG_ON(arch_irq_disabled_regs(regs));
+       BUG_ON(regs_irqs_disabled(regs));
 
 #ifdef CONFIG_PPC_PKEY
        if (mmu_has_feature(MMU_FTR_PKEY)) {
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index cb8e9357383e9..629f2a2d4780e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1956,7 +1956,7 @@ 
DEFINE_INTERRUPT_HANDLER_RAW(performance_monitor_exception)
         * prevent hash faults on user addresses when reading callchains (and
         * looks better from an irq tracing perspective).
         */
-       if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
+       if (IS_ENABLED(CONFIG_PPC64) && unlikely(regs_irqs_disabled(regs)))
                performance_monitor_exception_nmi(regs);
        else
                performance_monitor_exception_async(regs);
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 2429cb1c7baa7..6111cbbde069d 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -373,7 +373,7 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
        u64 tb;
 
        /* should only arrive from kernel, with irqs disabled */
-       WARN_ON_ONCE(!arch_irq_disabled_regs(regs));
+       WARN_ON_ONCE(!regs_irqs_disabled(regs));
 
        if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
                return 0;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index b906d28f74fd4..35f5f33f5777e 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2483,7 +2483,7 @@ static void __perf_event_interrupt(struct pt_regs *regs)
         * will trigger a PMI after waking up from idle. Since counter values 
are _not_
         * saved/restored in idle path, can lead to below "Can't find PMC" 
message.
         */
-       if (unlikely(!found) && !arch_irq_disabled_regs(regs))
+       if (unlikely(!found) && !regs_irqs_disabled(regs))
                printk_ratelimited(KERN_WARNING "Can't find PMC that caused 
IRQ\n");
 
        /*
-- 
2.49.0


Reply via email to