On 15/05/2019 at 03:30, Russell Currey wrote:
Strict module RWX is just like strict kernel RWX, but for modules - so
loadable modules aren't marked both writable and executable at the same
time.  This is handled by the generic code in kernel/module.c, and
simply requires the architecture to implement the set_memory() set of
functions, declared with ARCH_HAS_SET_MEMORY.
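For context, this is roughly how the generic code ends up driving these hooks once ARCH_HAS_SET_MEMORY is selected. This is only a simplified sketch, not the actual kernel/module.c helpers (module_enable_ro() and friends are more involved and work on the module's section layout):

/*
 * Simplified sketch only: shows how the set_memory_*() hooks are
 * used conceptually under STRICT_MODULE_RWX.  Assumes the prototypes
 * from <linux/set_memory.h>.
 */
static void sketch_protect_module(unsigned long text, int text_pages,
				  unsigned long data, int data_pages)
{
	/* module text becomes read-only and executable */
	set_memory_ro(text, text_pages);
	set_memory_x(text, text_pages);

	/* module data stays writable but is never executable */
	set_memory_nx(data, data_pages);
}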

The set_memory() family of functions is implemented for the book3s64
MMUs (both Hash and Radix); they could likely be adapted to work for
other platforms as well and made more generic.  I did it this way
since these are the platforms I understand best and am able to test.

Based on this patch, I have drafted a generic implementation. Please comment and test it; I'll test it on my side on PPC32.

Christophe


There's nothing other than these functions required to turn
ARCH_HAS_STRICT_MODULE_RWX on, so turn that on too.

With STRICT_MODULE_RWX enabled there are no W+X pages at runtime, the
same as with CONFIG_MODULES=n, and in my testing it works well on both
Hash and Radix.

There's a TODO in the code for also applying the page permission changes
to the backing pages in the linear mapping: this is pretty simple for
Radix and (seemingly) a lot harder for Hash, so I've left it for now
since there's still a notable security benefit for the patch as-is.
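For reference, the rough shape of the Radix side of that TODO might be something like the sketch below, reusing the page_change_data the patch introduces. change_linear_page() is a made-up placeholder, and on both MMUs the linear mapping would generally need its huge mappings split first, so this is only the idea, not working code:

/*
 * Hypothetical sketch, not part of the patch: find the linear-map
 * alias of each page backing the vmalloc'ed module range and apply
 * the same protection change there.  change_linear_page() is a
 * placeholder; a real version must also handle splitting huge
 * linear-map mappings before per-page changes are possible.
 */
static int change_linear_aliases(unsigned long addr, int numpages,
				 struct page_change_data *data)
{
	int i;

	for (i = 0; i < numpages; i++) {
		struct page *page;
		unsigned long alias;

		page = vmalloc_to_page((void *)(addr + i * PAGE_SIZE));
		alias = (unsigned long)page_address(page);

		change_linear_page(alias, data);	/* placeholder */
	}

	return 0;
}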

Technically this can be enabled without STRICT_KERNEL_RWX, but I don't
think that gets you much, so I think we should leave it off by default
until we can get STRICT_KERNEL_RWX to the point where it's enabled by
default.

Signed-off-by: Russell Currey <rus...@russell.cc>
---
  arch/powerpc/Kconfig                  |   2 +
  arch/powerpc/include/asm/set_memory.h |  12 +++
  arch/powerpc/mm/book3s64/Makefile     |   2 +-
  arch/powerpc/mm/book3s64/pageattr.c   | 106 ++++++++++++++++++++++++++
  4 files changed, 121 insertions(+), 1 deletion(-)
  create mode 100644 arch/powerpc/include/asm/set_memory.h
  create mode 100644 arch/powerpc/mm/book3s64/pageattr.c

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d7996cfaceca..9e1bfa81bc5a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -131,7 +131,9 @@ config PPC
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_MEMBARRIER_CALLBACKS
        select ARCH_HAS_SCALED_CPUTIME          if VIRT_CPU_ACCOUNTING_NATIVE && PPC64
+       select ARCH_HAS_SET_MEMORY              if PPC_BOOK3S_64
        select ARCH_HAS_STRICT_KERNEL_RWX       if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION)
+       select ARCH_HAS_STRICT_MODULE_RWX       if PPC_BOOK3S_64
        select ARCH_HAS_TICK_BROADCAST          if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAS_UACCESS_FLUSHCACHE      if PPC64
        select ARCH_HAS_UBSAN_SANITIZE_ALL
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
new file mode 100644
index 000000000000..5323a8b06f98
--- /dev/null
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _ASM_POWERPC_SET_MEMORY_H
+#define _ASM_POWERPC_SET_MEMORY_H
+
+#ifdef CONFIG_PPC_BOOK3S_64
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+#endif
+
+#endif
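
As a usage note (my own illustration, not part of the patch): callers operate on a page-aligned, page-multiple range, typically vmalloc'ed memory, e.g. something like:

	/* Illustration only: mark one vmalloc'ed page read-only, then
	 * writable again.  Assumes <linux/set_memory.h>, which pulls in
	 * this header when ARCH_HAS_SET_MEMORY is selected. */
	void *buf = vmalloc(PAGE_SIZE);

	if (buf) {
		set_memory_ro((unsigned long)buf, 1);
		/* ... buf is now read-only ... */
		set_memory_rw((unsigned long)buf, 1);
		vfree(buf);
	}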
diff --git a/arch/powerpc/mm/book3s64/Makefile b/arch/powerpc/mm/book3s64/Makefile
index 974b4fc19f4f..09c5afadf235 100644
--- a/arch/powerpc/mm/book3s64/Makefile
+++ b/arch/powerpc/mm/book3s64/Makefile
@@ -5,7 +5,7 @@ ccflags-y       := $(NO_MINIMAL_TOC)
  CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
  obj-y += hash_pgtable.o hash_utils.o slb.o \
-                                  mmu_context.o pgtable.o hash_tlb.o
+                                  mmu_context.o pgtable.o hash_tlb.o pageattr.o
  obj-$(CONFIG_PPC_NATIVE)      += hash_native.o
  obj-$(CONFIG_PPC_RADIX_MMU)   += radix_pgtable.o radix_tlb.o
  obj-$(CONFIG_PPC_4K_PAGES)    += hash_4k.o
diff --git a/arch/powerpc/mm/book3s64/pageattr.c b/arch/powerpc/mm/book3s64/pageattr.c
new file mode 100644
index 000000000000..d6afa89fb407
--- /dev/null
+++ b/arch/powerpc/mm/book3s64/pageattr.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Page attribute and set_memory routines for Radix and Hash MMUs
+ *
+ * Derived from the arm64 implementation.
+ *
+ * Author: Russell Currey <rus...@russell.cc>
+ *
+ * Copyright 2019, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.

The licence text above should be removed, as it is redundant with the SPDX-License-Identifier.

+ */
+
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+// we need this to have a single pointer to pass into apply_to_page_range()
+struct page_change_data {
+       pgprot_t set_mask;
+       pgprot_t clear_mask;
+};
+
+static pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
+{
+       return __pte(pte_val(pte) & ~pgprot_val(prot));
+}
+
+static pte_t set_pte_bit(pte_t pte, pgprot_t prot)
+{
+       return __pte(pte_val(pte) | pgprot_val(prot));
+}
+
+static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+                            void *data)
+{
+       struct page_change_data *cdata = data;
+       pte_t pte = READ_ONCE(*ptep);
+
+       pte = clear_pte_bit(pte, cdata->clear_mask);
+       pte = set_pte_bit(pte, cdata->set_mask);
+
+       set_pte_at(&init_mm, addr, ptep, pte);
+       return 0;
+}
+
+static int change_memory(unsigned long addr, int numpages, pgprot_t set_mask,
+                        pgprot_t clear_mask)
+{
+       unsigned long size = numpages * PAGE_SIZE;
+       unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
+       unsigned long end = PAGE_ALIGN(start + size);

start is already aligned down to PAGE_SIZE and size is a multiple of PAGE_SIZE, so start + size is automatically page aligned and the PAGE_ALIGN() is redundant.
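
For instance, the three lines could be reduced to something like this (untested suggestion):

	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long size = numpages * PAGE_SIZE;
	unsigned long end = start + size;	/* already page aligned */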

Christophe

+       struct page_change_data data;
+       struct vm_struct *area;
+       int ret;
+
+       if (!numpages)
+               return 0;
+
+       // only operate on VM areas for now
+       area = find_vm_area((void *)addr);
+       if (!area || end > (unsigned long)area->addr + area->size ||
+           !(area->flags & VM_ALLOC))
+               return -EINVAL;
+
+       // TODO: also apply change to the backing pages in the linear mapping
+       data.set_mask = set_mask;
+       data.clear_mask = clear_mask;
+
+       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+                                 &data);
+
+       flush_tlb_kernel_range(start, end);
+       return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+       return change_memory(addr, numpages,
+                            __pgprot(0), __pgprot(_PAGE_WRITE));
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+       return change_memory(addr, numpages,
+                            __pgprot(_PAGE_RW), __pgprot(0));
+}
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+       return change_memory(addr, numpages,
+                            __pgprot(0), __pgprot(_PAGE_EXEC));
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+       return change_memory(addr, numpages,
+                            __pgprot(_PAGE_EXEC), __pgprot(0));
+}
