On Mon 09-07-18 14:19:55, Marek Szyprowski wrote:
> cma_alloc() function doesn't really support gfp flags other than
> __GFP_NOWARN, so convert gfp_mask parameter to boolean no_warn parameter.
> 
> This will help to avoid giving a false impression that this function supports
> standard gfp flags and that callers can pass __GFP_ZERO to get a zeroed buffer,
> which has already been an issue: see commit dd65a941f6ba ("arm64:
> dma-mapping: clear buffers allocated with FORCE_CONTIGUOUS flag").
> 
> Signed-off-by: Marek Szyprowski <m.szyprow...@samsung.com>

Thanks! This makes perfect sense to me. If there is a real need for the
gfp_mask then we should start by defining the semantics first.

Acked-by: Michal Hocko <mho...@suse.com>

> ---
>  arch/powerpc/kvm/book3s_hv_builtin.c       | 2 +-
>  drivers/s390/char/vmcp.c                   | 2 +-
>  drivers/staging/android/ion/ion_cma_heap.c | 2 +-
>  include/linux/cma.h                        | 2 +-
>  kernel/dma/contiguous.c                    | 3 ++-
>  mm/cma.c                                   | 8 ++++----
>  mm/cma_debug.c                             | 2 +-
>  7 files changed, 11 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c 
> b/arch/powerpc/kvm/book3s_hv_builtin.c
> index d4a3f4da409b..fc6bb9630a9c 100644
> --- a/arch/powerpc/kvm/book3s_hv_builtin.c
> +++ b/arch/powerpc/kvm/book3s_hv_builtin.c
> @@ -77,7 +77,7 @@ struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
>       VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
>  
>       return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
> -                      GFP_KERNEL);
> +                      false);
>  }
>  EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
>  
> diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
> index 948ce82a7725..0fa1b6b1491a 100644
> --- a/drivers/s390/char/vmcp.c
> +++ b/drivers/s390/char/vmcp.c
> @@ -68,7 +68,7 @@ static void vmcp_response_alloc(struct vmcp_session 
> *session)
>        * anymore the system won't work anyway.
>        */
>       if (order > 2)
> -             page = cma_alloc(vmcp_cma, nr_pages, 0, GFP_KERNEL);
> +             page = cma_alloc(vmcp_cma, nr_pages, 0, false);
>       if (page) {
>               session->response = (char *)page_to_phys(page);
>               session->cma_alloc = 1;
> diff --git a/drivers/staging/android/ion/ion_cma_heap.c 
> b/drivers/staging/android/ion/ion_cma_heap.c
> index 49718c96bf9e..3fafd013d80a 100644
> --- a/drivers/staging/android/ion/ion_cma_heap.c
> +++ b/drivers/staging/android/ion/ion_cma_heap.c
> @@ -39,7 +39,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct 
> ion_buffer *buffer,
>       if (align > CONFIG_CMA_ALIGNMENT)
>               align = CONFIG_CMA_ALIGNMENT;
>  
> -     pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
> +     pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
>       if (!pages)
>               return -ENOMEM;
>  
> diff --git a/include/linux/cma.h b/include/linux/cma.h
> index bf90f0bb42bd..190184b5ff32 100644
> --- a/include/linux/cma.h
> +++ b/include/linux/cma.h
> @@ -33,7 +33,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, 
> phys_addr_t size,
>                                       const char *name,
>                                       struct cma **res_cma);
>  extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int 
> align,
> -                           gfp_t gfp_mask);
> +                           bool no_warn);
>  extern bool cma_release(struct cma *cma, const struct page *pages, unsigned 
> int count);
>  
>  extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void 
> *data);
> diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
> index d987dcd1bd56..19ea5d70150c 100644
> --- a/kernel/dma/contiguous.c
> +++ b/kernel/dma/contiguous.c
> @@ -191,7 +191,8 @@ struct page *dma_alloc_from_contiguous(struct device 
> *dev, size_t count,
>       if (align > CONFIG_CMA_ALIGNMENT)
>               align = CONFIG_CMA_ALIGNMENT;
>  
> -     return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask);
> +     return cma_alloc(dev_get_cma_area(dev), count, align,
> +                      gfp_mask & __GFP_NOWARN);
>  }
>  
>  /**
> diff --git a/mm/cma.c b/mm/cma.c
> index 5809bbe360d7..4cb76121a3ab 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -395,13 +395,13 @@ static inline void cma_debug_show_areas(struct cma 
> *cma) { }
>   * @cma:   Contiguous memory region for which the allocation is performed.
>   * @count: Requested number of pages.
>   * @align: Requested alignment of pages (in PAGE_SIZE order).
> - * @gfp_mask:  GFP mask to use during compaction
> + * @no_warn: Avoid printing message about failed allocation
>   *
>   * This function allocates part of contiguous memory on specific
>   * contiguous memory area.
>   */
>  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
> -                    gfp_t gfp_mask)
> +                    bool no_warn)
>  {
>       unsigned long mask, offset;
>       unsigned long pfn = -1;
> @@ -447,7 +447,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, 
> unsigned int align,
>               pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
>               mutex_lock(&cma_mutex);
>               ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
> -                                      gfp_mask);
> +                                  GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
>               mutex_unlock(&cma_mutex);
>               if (ret == 0) {
>                       page = pfn_to_page(pfn);
> @@ -466,7 +466,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, 
> unsigned int align,
>  
>       trace_cma_alloc(pfn, page, count, align);
> 
> -     if (ret && !(gfp_mask & __GFP_NOWARN)) {
> +     if (ret && !no_warn) {
>               pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
>                       __func__, count, ret);
>               cma_debug_show_areas(cma);
> diff --git a/mm/cma_debug.c b/mm/cma_debug.c
> index f23467291cfb..ad6723e9d110 100644
> --- a/mm/cma_debug.c
> +++ b/mm/cma_debug.c
> @@ -139,7 +139,7 @@ static int cma_alloc_mem(struct cma *cma, int count)
>       if (!mem)
>               return -ENOMEM;
>  
> -     p = cma_alloc(cma, count, 0, GFP_KERNEL);
> +     p = cma_alloc(cma, count, 0, false);
>       if (!p) {
>               kfree(mem);
>               return -ENOMEM;
> -- 
> 2.17.1
> 

-- 
Michal Hocko
SUSE Labs

Reply via email to