On 2018/10/06 19:47, tip-bot for Lianbo Jiang wrote:
> Commit-ID:  992b649a3f013465d8128da02e5449def662a4c3
> Gitweb:     https://git.kernel.org/tip/992b649a3f013465d8128da02e5449def662a4c3
> Author:     Lianbo Jiang <liji...@redhat.com>
> AuthorDate: Sun, 30 Sep 2018 16:37:41 +0800
> Committer:  Borislav Petkov <b...@suse.de>
> CommitDate: Sat, 6 Oct 2018 12:09:26 +0200
> 
> kdump, proc/vmcore: Enable kdumping encrypted memory with SME enabled
> 
> In the kdump kernel, the memory of the first kernel needs to be dumped
> into the vmcore file.
> 
> If SME is enabled in the first kernel, the old memory has to be remapped
> with the memory encryption mask in order to access it properly.
> 
> Split copy_oldmem_page() functionality to handle encrypted memory
> properly.
> 
>  [ bp: Heavily massage everything. ]
> 
> Signed-off-by: Lianbo Jiang <liji...@redhat.com>
> Signed-off-by: Borislav Petkov <b...@suse.de>
> Cc: ke...@lists.infradead.org
> Cc: t...@linutronix.de
> Cc: mi...@redhat.com
> Cc: h...@zytor.com
> Cc: a...@linux-foundation.org
> Cc: dan.j.willi...@intel.com
> Cc: bhelg...@google.com
> Cc: baiyao...@cmss.chinamobile.com
> Cc: ti...@suse.de
> Cc: brijesh.si...@amd.com
> Cc: dyo...@redhat.com
> Cc: b...@redhat.com
> Cc: jroe...@suse.de
> Link:       https://lkml.kernel.org/r/be7b47f9-6be6-e0d1-2c2a-9125bc74b...@redhat.com
> ---
>  arch/x86/kernel/crash_dump_64.c | 60 ++++++++++++++++++++++++++++-------------
>  fs/proc/vmcore.c                | 24 ++++++++++++-----
>  include/linux/crash_dump.h      |  4 +++
>  3 files changed, 63 insertions(+), 25 deletions(-)
> 
> diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
> index 4f2e0778feac..eb8ab3915268 100644
> --- a/arch/x86/kernel/crash_dump_64.c
> +++ b/arch/x86/kernel/crash_dump_64.c
> @@ -11,40 +11,62 @@
>  #include <linux/uaccess.h>
>  #include <linux/io.h>
>  
> -/**
> - * copy_oldmem_page - copy one page from "oldmem"
> - * @pfn: page frame number to be copied
> - * @buf: target memory address for the copy; this can be in kernel address
> - *   space or user address space (see @userbuf)
> - * @csize: number of bytes to copy
> - * @offset: offset in bytes into the page (based on pfn) to begin the copy
> - * @userbuf: if set, @buf is in user address space, use copy_to_user(),
> - *   otherwise @buf is in kernel address space, use memcpy().
> - *
> - * Copy a page from "oldmem". For this page, there is no pte mapped
> - * in the current kernel. We stitch up a pte, similar to kmap_atomic.
> - */
> -ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
> -             size_t csize, unsigned long offset, int userbuf)
> +static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
> +                               unsigned long offset, int userbuf,
> +                               bool encrypted)
>  {
>       void  *vaddr;
>  
>       if (!csize)
>               return 0;
>  
> -     vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
> +     if (encrypted)
> +             vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
> +     else
> +             vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
> +
>       if (!vaddr)
>               return -ENOMEM;
>  
>       if (userbuf) {
> -             if (copy_to_user(buf, vaddr + offset, csize)) {
> -                     iounmap(vaddr);
> +             if (copy_to_user((void __user *)buf, vaddr + offset, csize)) {
> +                     iounmap((void __iomem *)vaddr);
>                       return -EFAULT;
>               }
>       } else
>               memcpy(buf, vaddr + offset, csize);
>  
>       set_iounmap_nonlazy();
> -     iounmap(vaddr);
> +     iounmap((void __iomem *)vaddr);
>       return csize;
>  }
> +
> +/**
> + * copy_oldmem_page - copy one page of memory
> + * @pfn: page frame number to be copied
> + * @buf: target memory address for the copy; this can be in kernel address
> + *   space or user address space (see @userbuf)
> + * @csize: number of bytes to copy
> + * @offset: offset in bytes into the page (based on pfn) to begin the copy
> + * @userbuf: if set, @buf is in user address space, use copy_to_user(),
> + *   otherwise @buf is in kernel address space, use memcpy().
> + *
> + * Copy a page from the old kernel's memory. For this page, there is no pte
> + * mapped in the current kernel. We stitch up a pte, similar to kmap_atomic.
> + */
> +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
> +                      unsigned long offset, int userbuf)
> +{
> +     return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false);
> +}
> +
> +/**
> + * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
> + * memory with the encryption mask set to accommodate kdump on SME-enabled
> + * machines.
> + */
> +ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
> +                                unsigned long offset, int userbuf)
> +{
> +     return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
> +}
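
For reference, ioremap_encrypted() above comes from a companion x86 ioremap
patch in this series; a rough sketch of its interface (an assumption for
illustration, not the verbatim kernel code):

/*
 * Like ioremap_cache(), but keeps the SME encryption mask (C-bit) set in
 * the new mapping, so reads through it see the old kernel's encrypted
 * pages correctly decrypted.
 */
void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size);
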
> diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
> index cbde728f8ac6..42c32d06f7da 100644
> --- a/fs/proc/vmcore.c
> +++ b/fs/proc/vmcore.c
> @@ -24,6 +24,8 @@
>  #include <linux/vmalloc.h>
>  #include <linux/pagemap.h>
>  #include <linux/uaccess.h>
> +#include <linux/mem_encrypt.h>
> +#include <asm/pgtable.h>
>  #include <asm/io.h>
>  #include "internal.h"
>  
> @@ -98,7 +100,8 @@ static int pfn_is_ram(unsigned long pfn)
>  
>  /* Reads a page from the oldmem device from given offset. */
>  static ssize_t read_from_oldmem(char *buf, size_t count,
> -                             u64 *ppos, int userbuf)
> +                             u64 *ppos, int userbuf,
> +                             bool encrypted)
>  {
>       unsigned long pfn, offset;
>       size_t nr_bytes;
> @@ -120,8 +123,15 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
>               if (pfn_is_ram(pfn) == 0)
>                       memset(buf, 0, nr_bytes);
>               else {
> -                     tmp = copy_oldmem_page(pfn, buf, nr_bytes,
> -                                             offset, userbuf);
> +                     if (encrypted)
> +                             tmp = copy_oldmem_page_encrypted(pfn, buf,
> +                                                              nr_bytes,
> +                                                              offset,
> +                                                              userbuf);
> +                     else
> +                             tmp = copy_oldmem_page(pfn, buf, nr_bytes,
> +                                                    offset, userbuf);
> +
>                       if (tmp < 0)
>                               return tmp;
>               }
> @@ -155,7 +165,7 @@ void __weak elfcorehdr_free(unsigned long long addr)
>   */
>  ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
>  {
> -     return read_from_oldmem(buf, count, ppos, 0);
> +     return read_from_oldmem(buf, count, ppos, 0, false);
>  }
>  
>  /*
> @@ -163,7 +173,7 @@ ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
>   */
>  ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
>  {
> -     return read_from_oldmem(buf, count, ppos, 0);
> +     return read_from_oldmem(buf, count, ppos, 0, sme_active());
>  }
>  
>  /*
> @@ -173,6 +183,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
>                                 unsigned long from, unsigned long pfn,
>                                 unsigned long size, pgprot_t prot)
>  {
> +     prot = pgprot_encrypted(prot);
>       return remap_pfn_range(vma, from, pfn, size, prot);
>  }
>  
> @@ -351,7 +362,8 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
>                                           m->offset + m->size - *fpos,
>                                           buflen);
>                       start = m->paddr + *fpos - m->offset;
> -                     tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
> +                     tmp = read_from_oldmem(buffer, tsz, &start,
> +                                            userbuf, sme_active());
>                       if (tmp < 0)
>                               return tmp;
>                       buflen -= tsz;
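
For context, sme_active() (via <linux/mem_encrypt.h>) reports at runtime
whether SME is active, so the same vmcore code handles both encrypted and
unencrypted old kernels. A rough sketch of the x86 implementation (from
memory, treat as an assumption):

/*
 * SME is active when the hardware encryption mask is set and we are not
 * running as an SEV guest (SEV encrypts guest memory with its own key).
 */
bool sme_active(void)
{
        return sme_me_mask && !sev_enabled;
}
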
> diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
> index 3e4ba9d753c8..f774c5eb9e3c 100644
> --- a/include/linux/crash_dump.h
> +++ b/include/linux/crash_dump.h
> @@ -26,6 +26,10 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
>  
>  extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
>                                               unsigned long, int);
> +extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
> +                                       size_t csize, unsigned long offset,
> +                                       int userbuf);
> +

Here, it may cause a compile error.
Link: https://lore.kernel.org/patchwork/patch/993337/
(kbuild test robot, Sept. 29, 2018, 6:25 p.m. UTC, #1)

The correct patch is the one below; you may also refer to "Re: [PATCH v9 4/4]
kdump/vmcore: support encrypted old memory with SME enabled" at this link:
Link: https://lore.kernel.org/patchwork/patch/993538/#1177439
(lijiang, Sept. 30, 2018, 8:37 a.m. UTC, #2)

diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 3e4ba9d753c8..84d8ddcb818e 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -26,6 +26,19 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
 
 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
                                                unsigned long, int);
+#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_X86_64)
+extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
+                                         size_t csize, unsigned long offset,
+                                         int userbuf);
+#else
+static inline
+ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
+                                  unsigned long offset, int userbuf)
+{
+       return 0;
+}
+#endif
+
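
With the stub in place, generic code can call the encrypted variant
unconditionally. A minimal sketch of the kind of caller this enables
(hypothetical helper, mirroring the read_from_oldmem() dispatch in the
patch above):

#include <linux/crash_dump.h>

/*
 * Hypothetical dispatch helper: on architectures that never provide
 * copy_oldmem_page_encrypted(), the static inline stub above resolves
 * the call, so this compiles and links everywhere.
 */
static ssize_t copy_oldmem_one(unsigned long pfn, char *buf, size_t csize,
                               unsigned long offset, int userbuf,
                               bool encrypted)
{
        if (encrypted)
                return copy_oldmem_page_encrypted(pfn, buf, csize,
                                                  offset, userbuf);

        return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}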


Thanks.
Lianbo

>  void vmcore_cleanup(void);
>  
>  /* Architecture code defines this if there are other possible ELF
> 
