> ---
> Since the previous version of this patchset (named KHRAP) there
> have been
> several changes, some of which include:
>
> - macro naming, suggested by Nick
> - builds should be fixed outside of 64s
> - no longer unlock heading out to userspace
> - removal of unnecessary isyncs
> - more config option testing
> - removal of save/restore
> - use pr_crit() and reword message on fault
>
> arch/powerpc/include/asm/exception-64e.h | 3 ++
> arch/powerpc/include/asm/exception-64s.h | 19 +++++++-
> arch/powerpc/include/asm/mmu.h | 7 +++
> arch/powerpc/include/asm/paca.h | 3 ++
> arch/powerpc/include/asm/reg.h | 1 +
> arch/powerpc/include/asm/uaccess.h | 57
> ++++++++++++++++++++----
> arch/powerpc/kernel/asm-offsets.c | 1 +
> arch/powerpc/kernel/dt_cpu_ftrs.c | 4 ++
> arch/powerpc/kernel/entry_64.S | 17 ++++++-
> arch/powerpc/mm/fault.c | 12 +++++
> arch/powerpc/mm/pgtable-radix.c | 2 +
> arch/powerpc/mm/pkeys.c | 7 ++-
> arch/powerpc/platforms/Kconfig.cputype | 15 +++++++
> 13 files changed, 135 insertions(+), 13 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/exception-64e.h
> b/arch/powerpc/include/asm/exception-64e.h
> index 555e22d5e07f..bf25015834ee 100644
> --- a/arch/powerpc/include/asm/exception-64e.h
> +++ b/arch/powerpc/include/asm/exception-64e.h
> @@ -215,5 +215,8 @@ exc_##label##_book3e:
> #define RFI_TO_USER
> \
> rfi
>
> +#define UNLOCK_USER_ACCESS(reg)
> +#define LOCK_USER_ACCESS(reg)
> +
> #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
>
> diff --git a/arch/powerpc/include/asm/exception-64s.h
> b/arch/powerpc/include/asm/exception-64s.h
> index 3b4767ed3ec5..0cac5bd380ca 100644
> --- a/arch/powerpc/include/asm/exception-64s.h
> +++ b/arch/powerpc/include/asm/exception-64s.h
> @@ -264,6 +264,19 @@ BEGIN_FTR_SECTION_NESTED(943)
> \
> std ra,offset(r13); \
> END_FTR_SECTION_NESTED(ftr,ftr,943)
>
> +#define LOCK_USER_ACCESS(reg)
> \
> +BEGIN_MMU_FTR_SECTION_NESTED(944)
> \
> + LOAD_REG_IMMEDIATE(reg,AMR_LOCKED); \
> + mtspr SPRN_AMR,reg;
> \
> END_MMU_FTR_SECTION_NESTED(MMU_FTR_RADIX_GUAP,MMU_FTR_RADIX_GUAP,944)
> +
> +#define UNLOCK_USER_ACCESS(reg)
> \
> +BEGIN_MMU_FTR_SECTION_NESTED(945)
> \
> + li reg,0; \
> + mtspr SPRN_AMR,reg;
> \
> + isync
> \
> END_MMU_FTR_SECTION_NESTED(MMU_FTR_RADIX_GUAP,MMU_FTR_RADIX_GUAP,945)
> +
> #define EXCEPTION_PROLOG_0(area)
> \
> GET_PACA(r13);
> \
> std r9,area+EX_R9(r13); /* save r9 */ \
> @@ -500,7 +513,11 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
> beq 4f; /* if from kernel mode */
> \
> ACCOUNT_CPU_USER_ENTRY(r13, r9, r10);
> \
> SAVE_PPR(area, r9);
> \
> -4: EXCEPTION_PROLOG_COMMON_2(area)
> \
> +4: lbz r9,PACA_USER_ACCESS_ALLOWED(r13);
> \
> + cmpwi cr1,r9,0;
> \
> + beq 5f;
> \
> + LOCK_USER_ACCESS(r9);
> \
> +5: EXCEPTION_PROLOG_COMMON_2(area)
> \
> EXCEPTION_PROLOG_COMMON_3(n)
> \
> ACCOUNT_STOLEN_TIME
>
> diff --git a/arch/powerpc/include/asm/mmu.h
> b/arch/powerpc/include/asm/mmu.h
> index eb20eb3b8fb0..3b31ed702785 100644
> --- a/arch/powerpc/include/asm/mmu.h
> +++ b/arch/powerpc/include/asm/mmu.h
> @@ -107,6 +107,10 @@
> */
> #define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000)
>
> +/* Supports GUAP (key 0 controlling userspace addresses) on radix
> + */
> +#define MMU_FTR_RADIX_GUAP ASM_CONST(0x80000000)
> +
> /* MMU feature bit sets for various CPUs */
> #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \
> MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
> @@ -143,6 +147,9 @@ enum {
> MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
> #ifdef CONFIG_PPC_RADIX_MMU
> MMU_FTR_TYPE_RADIX |
> +#endif
> +#ifdef CONFIG_PPC_RADIX_GUAP
> + MMU_FTR_RADIX_GUAP |
Can this exist without MMU_FTR_TYPE_RADIX?
> #endif
> 0,
> };
> diff --git a/arch/powerpc/include/asm/paca.h
> b/arch/powerpc/include/asm/paca.h
> index e843bc5d1a0f..e905f09b2d38 100644
> --- a/arch/powerpc/include/asm/paca.h
> +++ b/arch/powerpc/include/asm/paca.h
> @@ -169,6 +169,9 @@ struct paca_struct {
> u64 saved_r1; /* r1 save for RTAS calls
> or PM or EE=0 */
> u64 saved_msr; /* MSR saved here by
> enter_rtas */
> u16 trap_save; /* Used when bad stack is
> encountered */
> +#ifdef CONFIG_PPC_RADIX_GUAP
> + u8 user_access_allowed; /* set when AMR allows user
> accesses */
> +#endif
> u8 irq_soft_mask; /* mask for irq soft masking */
> u8 irq_happened; /* irq happened while soft-disabled
> */
> u8 io_sync; /* writel() needs spin_unlock sync
> */
> diff --git a/arch/powerpc/include/asm/reg.h
> b/arch/powerpc/include/asm/reg.h
> index 640a4d818772..b994099a906b 100644
> --- a/arch/powerpc/include/asm/reg.h
> +++ b/arch/powerpc/include/asm/reg.h
> @@ -246,6 +246,7 @@
> #define SPRN_DSCR 0x11
> #define SPRN_CFAR 0x1c /* Come From Address Register */
> #define SPRN_AMR 0x1d /* Authority Mask Register */
> +#define AMR_LOCKED 0xC000000000000000ULL /* Read & Write
> disabled */
Why ULL? mtspr() takes an unsigned long arg.
> #define SPRN_UAMOR 0x9d /* User Authority Mask Override
> Register */
> #define SPRN_AMOR 0x15d /* Authority Mask Override Register
> */
> #define SPRN_ACOP 0x1F /* Available Coprocessor Register
> */
> diff --git a/arch/powerpc/include/asm/uaccess.h
> b/arch/powerpc/include/asm/uaccess.h
> index 15bea9a0f260..209bfc47c340 100644
> --- a/arch/powerpc/include/asm/uaccess.h
> +++ b/arch/powerpc/include/asm/uaccess.h
> @@ -62,6 +62,27 @@ static inline int __access_ok(unsigned long
> addr,
> unsigned long size,
>
> #endif
>
> +static inline void unlock_user_access(void)
> +{
> +#ifdef CONFIG_PPC_RADIX_GUAP
> + if (mmu_has_feature(MMU_FTR_RADIX_GUAP)) {
You need to include the header which provides mmu_has_feature().
I think uaccess.h should only provide the empty stub for when
CONFIG_PPC_GUAP is not defined; the radix GUAP functions should go in
a radix header file.
> + mtspr(SPRN_AMR, 0);
> + isync();
> + get_paca()->user_access_allowed = 1;
> + }
> +#endif
> +}
> +
> +static inline void lock_user_access(void)
> +{
> +#ifdef CONFIG_PPC_RADIX_GUAP
> + if (mmu_has_feature(MMU_FTR_RADIX_GUAP)) {
> + mtspr(SPRN_AMR, AMR_LOCKED);
> + get_paca()->user_access_allowed = 0;
> + }
> +#endif
> +}
> +
> #define access_ok(type, addr, size) \
> (__chk_user_ptr(addr), \
> __access_ok((__force unsigned long)(addr), (size), get_fs()))
> @@ -141,6 +162,7 @@ extern long __put_user_bad(void);
> #define __put_user_size(x, ptr, size, retval)
> \
> do {
> \
> retval = 0; \
> + unlock_user_access(); \
> switch (size) { \
> case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
> case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
> @@ -148,6 +170,7 @@ do {
> \
> case 8: __put_user_asm2(x, ptr, retval); break; \
> default: __put_user_bad(); \
> } \
> + lock_user_access(); \
> } while (0)
>
> #define __put_user_nocheck(x, ptr, size) \
> @@ -240,6 +263,7 @@ do {
> \
> __chk_user_ptr(ptr); \
> if (size > sizeof(x)) \
> (x) = __get_user_bad(); \
> + unlock_user_access(); \
> switch (size) { \
> case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
> case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
> @@ -247,6 +271,7 @@ do {
> \
> case 8: __get_user_asm2(x, ptr, retval); break; \
> default: (x) = __get_user_bad(); \
> } \
> + lock_user_access(); \
> } while (0)
>
> /*
> @@ -306,15 +331,20 @@ extern unsigned long
> __copy_tofrom_user(void
> __user *to,
> static inline unsigned long
> raw_copy_in_user(void __user *to, const void __user *from,
> unsigned long n)
> {
> - return __copy_tofrom_user(to, from, n);
> + unsigned long ret;
> + unlock_user_access(); \
> + ret = __copy_tofrom_user(to, from, n); \
> + lock_user_access(); \
> + return ret; \
> }
> #endif /* __powerpc64__ */
>
> static inline unsigned long raw_copy_from_user(void *to,
> const void __user *from, unsigned long n)
> {
> + unsigned long ret;
> if (__builtin_constant_p(n) && (n <= 8)) {
> - unsigned long ret = 1;
> + ret = 1;
>
> switch (n) {
> case 1:
> @@ -339,14 +369,18 @@ static inline unsigned long
> raw_copy_from_user(void *to,
> }
>
> barrier_nospec();
> - return __copy_tofrom_user((__force void __user *)to, from, n);
> + unlock_user_access();
> + ret = __copy_tofrom_user((__force void __user *)to, from, n);
> + lock_user_access();
> + return ret;
> }
>
> static inline unsigned long raw_copy_to_user(void __user *to,
> const void *from, unsigned long n)
> {
> + unsigned long ret;
> if (__builtin_constant_p(n) && (n <= 8)) {
> - unsigned long ret = 1;
> + ret = 1;
>
> switch (n) {
> case 1:
> @@ -366,17 +400,24 @@ static inline unsigned long
> raw_copy_to_user(void __user *to,
> return 0;
> }
>
> - return __copy_tofrom_user(to, (__force const void __user
> *)from, n);
> + unlock_user_access();
> + ret = __copy_tofrom_user(to, (__force const void __user *)from,
> n);
> + lock_user_access();
> + return ret;
> }
>
> extern unsigned long __clear_user(void __user *addr, unsigned long
> size);
>
> static inline unsigned long clear_user(void __user *addr,
> unsigned
> long size)
> {
> + unsigned long ret = size;
> might_fault();
> - if (likely(access_ok(VERIFY_WRITE, addr, size)))
> - return __clear_user(addr, size);
> - return size;
> + if (likely(access_ok(VERIFY_WRITE, addr, size))) {
> + unlock_user_access();
> + ret = __clear_user(addr, size);
> + lock_user_access();
> + }
> + return ret;
> }
>
> extern long strncpy_from_user(char *dst, const char __user *src,
> long count);
> diff --git a/arch/powerpc/kernel/asm-offsets.c
> b/arch/powerpc/kernel/asm-offsets.c
> index 10ef2e4db2fd..5050f15ad2f5 100644
> --- a/arch/powerpc/kernel/asm-offsets.c
> +++ b/arch/powerpc/kernel/asm-offsets.c
> @@ -260,6 +260,7 @@ int main(void)
> OFFSET(ACCOUNT_STARTTIME_USER, paca_struct,
> accounting.starttime_user);
> OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime);
> OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime);
> + OFFSET(PACA_USER_ACCESS_ALLOWED, paca_struct,
> user_access_allowed);
> OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
> OFFSET(PACA_NAPSTATELOST, paca_struct, nap_state_lost);
> OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
> diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c
> b/arch/powerpc/kernel/dt_cpu_ftrs.c
> index f432054234a4..df4716624840 100644
> --- a/arch/powerpc/kernel/dt_cpu_ftrs.c
> +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
> @@ -337,6 +337,10 @@ static int __init
> feat_enable_mmu_radix(struct
> dt_cpu_feature *f)
> cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
> cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
>
> +#ifdef CONFIG_PPC_RADIX_GUAP
> + cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_GUAP;
> +#endif
> +
> return 1;
> #endif
> return 0;
> diff --git a/arch/powerpc/kernel/entry_64.S
> b/arch/powerpc/kernel/entry_64.S
> index 7b1693adff2a..23f0944185d3 100644
> --- a/arch/powerpc/kernel/entry_64.S
> +++ b/arch/powerpc/kernel/entry_64.S
> @@ -297,7 +297,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
> b . /* prevent speculative execution */
>
> /* exit to kernel */
> -1: ld r2,GPR2(r1)
> +1: /* if the AMR was unlocked before, unlock it again */
> + lbz r2,PACA_USER_ACCESS_ALLOWED(r13)
> + cmpwi cr1,0
> + bne 2f
> + UNLOCK_USER_ACCESS(r2)
> +2: ld r2,GPR2(r1)
> ld r1,GPR1(r1)
> mtlr r4
> mtcr r5
> @@ -965,6 +970,7 @@ BEGIN_FTR_SECTION
> ld r2,_PPR(r1)
> mtspr SPRN_PPR,r2
> END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
> +
> ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
> REST_GPR(13, r1)
>
> @@ -983,7 +989,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
> RFI_TO_USER
> b . /* prevent speculative execution */
>
> -1: mtspr SPRN_SRR1,r3
> +1: /* exit to kernel */
> + /* if the AMR was unlocked before, unlock it again */
> + lbz r2,PACA_USER_ACCESS_ALLOWED(r13)
> + cmpwi cr1,0
> + bne 2f
> + UNLOCK_USER_ACCESS(r2)
> +
> +2: mtspr SPRN_SRR1,r3
>
> ld r2,_CCR(r1)
> mtcrf 0xFF,r2
> diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
> index d51cf5f4e45e..17fd8c6b055b 100644
> --- a/arch/powerpc/mm/fault.c
> +++ b/arch/powerpc/mm/fault.c
> @@ -462,6 +462,18 @@ static int __do_page_fault(struct pt_regs
> *regs, unsigned long address,
> return bad_key_fault_exception(regs, address,
> get_mm_addr_key(mm,
> address));
>
> +#ifdef CONFIG_PPC_RADIX_SMAP
SMAP? CONFIG_PPC_RADIX_SMAP is not defined anywhere in this patch —
should this be CONFIG_PPC_RADIX_GUAP?