mtmsr() does the right thing on 32bit and 64bit, so use it everywhere.

Signed-off-by: Anton Blanchard <anton@samba.org>
---
 arch/powerpc/include/asm/reg.h          | 3 +--
 arch/powerpc/oprofile/op_model_power4.c | 4 ++--
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index af56b5c..bb27588 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1193,8 +1193,7 @@
 #ifdef CONFIG_PPC_BOOK3S_64
 #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
                                     : : "r" (v) : "memory")
-#define mtmsrd(v)      __mtmsrd((v), 0)
-#define mtmsr(v)       mtmsrd(v)
+#define mtmsr(v)       __mtmsrd((v), 0)
 #else
 #define mtmsr(v)       asm volatile("mtmsr %0" : \
                                     : "r" ((unsigned long)(v)) \
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 962fe7b..4b32e94 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -207,7 +207,7 @@ static int power4_start(struct op_counter_config *ctr)
        unsigned int mmcr0;
 
        /* set the PMM bit (see comment below) */
-       mtmsrd(mfmsr() | MSR_PMM);
+       mtmsr(mfmsr() | MSR_PMM);
 
        for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
                if (ctr[i].enabled) {
@@ -377,7 +377,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
        is_kernel = get_kernel(pc, mmcra);
 
        /* set the PMM bit (see comment below) */
-       mtmsrd(mfmsr() | MSR_PMM);
+       mtmsr(mfmsr() | MSR_PMM);
 
        /* Check that the SIAR  valid bit in MMCRA is set to 1. */
        if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK)
-- 
2.1.4

_______________________________________________
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Reply via email to