Reviewed-by: Sinclair Yeh <s...@vmware.com>

On Wed, Sep 07, 2016 at 11:08:45AM -0700, Kees Cook wrote:
> A custom allocator without __GFP_COMP that copies to userspace has been
> found in vmw_execbuf_process[1], so this disables the page-span checker
> by placing it behind a CONFIG for future work where such things can be
> tracked down later.
> 
> [1] https://bugzilla.redhat.com/show_bug.cgi?id=1373326
> 
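
For anyone reading along, the pattern that trips the page-span check looks
roughly like the sketch below (illustrative only, not lifted from
vmw_execbuf_process; "ubuf" and the sizes are made up):

	/* order-2 allocation: 4 contiguous pages, but no __GFP_COMP,
	 * so the tail pages do not point back to a compound head.
	 */
	void *buf = (void *)__get_free_pages(GFP_KERNEL, 2);

	/* The copy crosses page boundaries and the pages are neither
	 * Reserved nor CMA, so hardened usercopy rejects it with
	 * "<spans multiple pages>".
	 */
	if (copy_to_user(ubuf, buf, 3 * PAGE_SIZE))
		return -EFAULT;
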
> Reported-by: Vinson Lee <v...@freedesktop.org>
> Fixes: f5509cc18daa ("mm: Hardened usercopy")
> Signed-off-by: Kees Cook <keesc...@chromium.org>
> ---
> v2:
> - split logic into separate function entirely, torvalds
> ---
>  mm/usercopy.c    | 61 ++++++++++++++++++++++++++++++++------------------------
>  security/Kconfig | 11 ++++++++++
>  2 files changed, 46 insertions(+), 26 deletions(-)
> 
> diff --git a/mm/usercopy.c b/mm/usercopy.c
> index a3cc3052f830..089328f2b920 100644
> --- a/mm/usercopy.c
> +++ b/mm/usercopy.c
> @@ -134,31 +134,16 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
>       return NULL;
>  }
>  
> -static inline const char *check_heap_object(const void *ptr, unsigned long n,
> -                                         bool to_user)
> +/* Checks for allocs that are marked in some way as spanning multiple pages. */
> +static inline const char *check_page_span(const void *ptr, unsigned long n,
> +                                       struct page *page, bool to_user)
>  {
> -     struct page *page, *endpage;
> +#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
>       const void *end = ptr + n - 1;
> +     struct page *endpage;
>       bool is_reserved, is_cma;
>  
>       /*
> -      * Some architectures (arm64) return true for virt_addr_valid() on
> -      * vmalloced addresses. Work around this by checking for vmalloc
> -      * first.
> -      */
> -     if (is_vmalloc_addr(ptr))
> -             return NULL;
> -
> -     if (!virt_addr_valid(ptr))
> -             return NULL;
> -
> -     page = virt_to_head_page(ptr);
> -
> -     /* Check slab allocator for flags and size. */
> -     if (PageSlab(page))
> -             return __check_heap_object(ptr, n, page);
> -
> -     /*
>        * Sometimes the kernel data regions are not marked Reserved (see
>        * check below). And sometimes [_sdata,_edata) does not cover
>        * rodata and/or bss, so check each range explicitly.
> @@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
>                  ((unsigned long)end & (unsigned long)PAGE_MASK)))
>               return NULL;
>  
> -     /* Allow if start and end are inside the same compound page. */
> +     /* Allow if fully inside the same compound (__GFP_COMP) page. */
>       endpage = virt_to_head_page(end);
>       if (likely(endpage == page))
>               return NULL;
> @@ -199,20 +184,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
>       is_reserved = PageReserved(page);
>       is_cma = is_migrate_cma_page(page);
>       if (!is_reserved && !is_cma)
> -             goto reject;
> +             return "<spans multiple pages>";
>  
>       for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
>               page = virt_to_head_page(ptr);
>               if (is_reserved && !PageReserved(page))
> -                     goto reject;
> +                     return "<spans Reserved and non-Reserved pages>";
>               if (is_cma && !is_migrate_cma_page(page))
> -                     goto reject;
> +                     return "<spans CMA and non-CMA pages>";
>       }
> +#endif
>  
>       return NULL;
> +}
> +
> +static inline const char *check_heap_object(const void *ptr, unsigned long n,
> +                                         bool to_user)
> +{
> +     struct page *page;
> +
> +     /*
> +      * Some architectures (arm64) return true for virt_addr_valid() on
> +      * vmalloced addresses. Work around this by checking for vmalloc
> +      * first.
> +      */
> +     if (is_vmalloc_addr(ptr))
> +             return NULL;
> +
> +     if (!virt_addr_valid(ptr))
> +             return NULL;
> +
> +     page = virt_to_head_page(ptr);
> +
> +     /* Check slab allocator for flags and size. */
> +     if (PageSlab(page))
> +             return __check_heap_object(ptr, n, page);
>  
> -reject:
> -     return "<spans multiple pages>";
> +     /* Verify object does not incorrectly span multiple pages. */
> +     return check_page_span(ptr, n, page, to_user);
>  }
>  
>  /*
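
The usual way out for an allocator that genuinely needs a multi-page buffer
copied to/from userspace is to add __GFP_COMP, so that start and end resolve
to the same head page and check_page_span() allows the copy. A minimal
sketch (names are illustrative, not from the vmwgfx code):

	/* order-2 allocation marked as a compound page */
	void *buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP, 2);
	if (!buf)
		return -ENOMEM;

	/* virt_to_head_page() on any byte of the buffer now returns the
	 * same head page, so a copy that crosses page boundaries passes
	 * the "same compound page" test above.
	 */
	ret = copy_to_user(ubuf, buf, len) ? -EFAULT : 0;

	free_pages((unsigned long)buf, 2);
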
> diff --git a/security/Kconfig b/security/Kconfig
> index da10d9b573a4..2dfc0ce4083e 100644
> --- a/security/Kconfig
> +++ b/security/Kconfig
> @@ -147,6 +147,17 @@ config HARDENED_USERCOPY
>         or are part of the kernel text. This kills entire classes
>         of heap overflow exploits and similar kernel memory exposures.
>  
> +config HARDENED_USERCOPY_PAGESPAN
> +     bool "Refuse to copy allocations that span multiple pages"
> +     depends on HARDENED_USERCOPY
> +     depends on !COMPILE_TEST
> +     help
> +       When a multi-page allocation is done without __GFP_COMP,
> +       hardened usercopy will reject attempts to copy it. There are,
> +       however, several cases of this in the kernel that have not all
> +       been removed. This config is intended to be used only while
> +       trying to find such users.
> +
>  source security/selinux/Kconfig
>  source security/smack/Kconfig
>  source security/tomoyo/Kconfig
> -- 
> 2.7.4
> 
> 
> -- 
> Kees Cook
> Nexus Security
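
For anyone who keeps hunting for these allocators, the new option only
appears once HARDENED_USERCOPY is set; a debug build would carry both
symbols in its .config (fragment below, assuming such a debug config):

	CONFIG_HARDENED_USERCOPY=y
	CONFIG_HARDENED_USERCOPY_PAGESPAN=y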
