Linus,

Please pull the latest x86/urgent git tree from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86-urgent-2020-07-25

   # HEAD: d181d2da0141371bbc360eaea78719203e165e1c x86/dumpstack: Dump user space code correctly again

Misc fixes:

 - Fix a section end page alignment assumption that was causing crashes
   (see the sketch after this list)

 - Fix ORC unwinding on freshly forked tasks which haven't executed yet
   and which have empty user task stacks

 - Fix the debug.exception-trace=1 sysctl dumping of user stacks, which
   was broken by recent maccess changes
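
To make the first fix concrete, here is a minimal sketch of the broken
assumption (illustrative only; the variable names are made up and this code
is not part of the patches below): an object in .bss..page_aligned starts on
a page boundary, but nothing page-aligned the end of that section, so an
unrelated .bss object could share its last page and be hit by any
page-granular protection change applied to that page:

   #include <linux/linkage.h>	/* __page_aligned_bss */

   /*
    * Illustrative sketch only.  early_table starts page-aligned, but
    * 2048 bytes do not fill a page, so without the trailing
    * ALIGN(PAGE_SIZE) added by these patches the .bss..page_aligned
    * output section could end mid-page and unrelated_counter could land
    * in the same page.  If that page is later mapped e.g. read-only,
    * writes to unrelated_counter fault.
    */
   static char early_table[2048] __page_aligned_bss;
   static unsigned long unrelated_counter;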


  out-of-topic modifications in x86-urgent-2020-07-25:
  ------------------------------------------------------
  include/asm-generic/vmlinux.lds.h  # de2b41be8fcc: x86, vmlinux.lds: Page-align

 Thanks,

        Ingo

------------------>
Joerg Roedel (1):
      x86, vmlinux.lds: Page-align end of ..page_aligned sections

Josh Poimboeuf (2):
      x86/unwind/orc: Fix ORC for newly forked tasks
      x86/stacktrace: Fix reliable check for empty user task stacks

Thomas Gleixner (1):
      x86/dumpstack: Dump user space code correctly again


 arch/x86/kernel/dumpstack.c       | 27 +++++++++++++++++----------
 arch/x86/kernel/stacktrace.c      |  5 -----
 arch/x86/kernel/unwind_orc.c      |  8 ++++++--
 arch/x86/kernel/vmlinux.lds.S     |  1 +
 include/asm-generic/vmlinux.lds.h |  5 ++++-
 5 files changed, 28 insertions(+), 18 deletions(-)

diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index b037cfa7c0c5..7401cc12c3cc 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -71,6 +71,22 @@ static void printk_stack_address(unsigned long address, int reliable,
        printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
 }
 
+static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
+                    unsigned int nbytes)
+{
+       if (!user_mode(regs))
+               return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);
+
+       /*
+        * Make sure userspace isn't trying to trick us into dumping kernel
+        * memory by pointing the userspace instruction pointer at it.
+        */
+       if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
+               return -EINVAL;
+
+       return copy_from_user_nmi(buf, (void __user *)src, nbytes);
+}
+
 /*
  * There are a couple of reasons for the 2/3rd prologue, courtesy of Linus:
  *
@@ -97,17 +113,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
 #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
        u8 opcodes[OPCODE_BUFSIZE];
        unsigned long prologue = regs->ip - PROLOGUE_SIZE;
-       bool bad_ip;
-
-       /*
-        * Make sure userspace isn't trying to trick us into dumping kernel
-        * memory by pointing the userspace instruction pointer at it.
-        */
-       bad_ip = user_mode(regs) &&
-               __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);
 
-       if (bad_ip || copy_from_kernel_nofault(opcodes, (u8 *)prologue,
-                                       OPCODE_BUFSIZE)) {
+       if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
                printk("%sCode: Bad RIP value.\n", loglvl);
        } else {
                printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 6ad43fc44556..2fd698e28e4d 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
                         * or a page fault), which can make frame pointers
                         * unreliable.
                         */
-
                        if (IS_ENABLED(CONFIG_FRAME_POINTER))
                                return -EINVAL;
                }
@@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
        if (unwind_error(&state))
                return -EINVAL;
 
-       /* Success path for non-user tasks, i.e. kthreads and idle tasks */
-       if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
-               return -EINVAL;
-
        return 0;
 }
 
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 7f969b2d240f..ec88bbe08a32 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -440,8 +440,11 @@ bool unwind_next_frame(struct unwind_state *state)
        /*
         * Find the orc_entry associated with the text address.
         *
-        * Decrement call return addresses by one so they work for sibling
-        * calls and calls to noreturn functions.
+        * For a call frame (as opposed to a signal frame), state->ip points to
+        * the instruction after the call.  That instruction's stack layout
+        * could be different from the call instruction's layout, for example
+        * if the call was to a noreturn function.  So get the ORC data for the
+        * call instruction itself.
         */
        orc = orc_find(state->signal ? state->ip : state->ip - 1);
        if (!orc) {
@@ -662,6 +665,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
                state->sp = task->thread.sp;
                state->bp = READ_ONCE_NOCHECK(frame->bp);
                state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
+               state->signal = (void *)state->ip == ret_from_fork;
        }
 
        if (get_stack_info((unsigned long *)state->sp, state->task,
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 3bfc8dd8a43d..9a03e5b23135 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -358,6 +358,7 @@ SECTIONS
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
                __bss_start = .;
                *(.bss..page_aligned)
+               . = ALIGN(PAGE_SIZE);
                *(BSS_MAIN)
                BSS_DECRYPTED
                . = ALIGN(PAGE_SIZE);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index db600ef218d7..052e0f05a984 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -341,7 +341,8 @@
 
 #define PAGE_ALIGNED_DATA(page_align)                                  \
        . = ALIGN(page_align);                                          \
-       *(.data..page_aligned)
+       *(.data..page_aligned)                                          \
+       . = ALIGN(page_align);
 
 #define READ_MOSTLY_DATA(align)                                         \
        . = ALIGN(align);                                               \
@@ -737,7 +738,9 @@
        . = ALIGN(bss_align);                                           \
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {                           \
                BSS_FIRST_SECTIONS                                      \
+               . = ALIGN(PAGE_SIZE);                                   \
                *(.bss..page_aligned)                                   \
+               . = ALIGN(PAGE_SIZE);                                   \
                *(.dynbss)                                              \
                *(BSS_MAIN)                                             \
                *(COMMON)                                               \

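For reference, the dumpstack fix can be exercised with the
debug.exception-trace=1 sysctl and any faulting user task.  A minimal
reproducer sketch (illustrative only, not part of the patches; the file
name is arbitrary):

   /* segv.c -- build with "gcc -o segv segv.c" and run with
    * debug.exception-trace=1 set.  The resulting segfault message in the
    * kernel log should again contain a real "Code: ..." opcode dump of
    * the user task instead of "Code: Bad RIP value."
    */
   int main(void)
   {
           volatile int *p = (volatile int *)0;

           return *p;      /* faults; the kernel logs the exception trace */
   }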