On 3/29/19 10:30 AM, Benjamin Gaignard wrote:
> On Fri, Mar 29, 2019 at 4:19 PM Andrew F. Davis <a...@ti.com> wrote:
>>
>> On 3/29/19 9:44 AM, Benjamin Gaignard wrote:
>>> On Fri, Mar 29, 2019 at 1:16 AM John Stultz <john.stu...@linaro.org> wrote:
>>>>
>>>> This adds a CMA heap, which allows userspace to allocate
>>>> a dma-buf of contiguous memory out of a CMA region.
>>>>
>>>> This code is an evolution of the Android ION implementation, so
>>>> thanks to its original author and maintainers:
>>>> Benjamin Gaignard, Laura Abbott, and others!
>>>>
>>>> Cc: Laura Abbott <labb...@redhat.com>
>>>> Cc: Benjamin Gaignard <benjamin.gaign...@linaro.org>
>>>> Cc: Sumit Semwal <sumit.sem...@linaro.org>
>>>> Cc: Liam Mark <lm...@codeaurora.org>
>>>> Cc: Pratik Patel <prat...@codeaurora.org>
>>>> Cc: Brian Starkey <brian.star...@arm.com>
>>>> Cc: Vincent Donnefort <vincent.donnef...@arm.com>
>>>> Cc: Sudipto Paul <sudipto.p...@arm.com>
>>>> Cc: Andrew F. Davis <a...@ti.com>
>>>> Cc: Xu YiPing <xuyip...@hisilicon.com>
>>>> Cc: "Chenfeng (puck)" <puck.c...@hisilicon.com>
>>>> Cc: butao <bu...@hisilicon.com>
>>>> Cc: "Xiaqing (A)" <saberlily....@hisilicon.com>
>>>> Cc: Yudongbin <yudong...@hisilicon.com>
>>>> Cc: Christoph Hellwig <h...@infradead.org>
>>>> Cc: Chenbo Feng <fe...@google.com>
>>>> Cc: Alistair Strachan <astrac...@google.com>
>>>> Cc: dri-devel@lists.freedesktop.org
>>>> Signed-off-by: John Stultz <john.stu...@linaro.org>
>>>> ---
>>>> v2:
>>>> * Switch allocate to return dmabuf fd
>>>> * Simplify init code
>>>> * Checkpatch fixups
>>>> v3:
>>>> * Switch to inline function for to_cma_heap()
>>>> * Minor cleanups suggested by Brian
>>>> * Fold in new registration style from Andrew
>>>> * Folded in changes from Andrew to use simplified page list
>>>>   from the heap helpers
>>>> ---
>>>>  drivers/dma-buf/heaps/Kconfig    |   8 ++
>>>>  drivers/dma-buf/heaps/Makefile   |   1 +
>>>>  drivers/dma-buf/heaps/cma_heap.c | 170 +++++++++++++++++++++++++++++++++++++++
>>>>  3 files changed, 179 insertions(+)
>>>>  create mode 100644 drivers/dma-buf/heaps/cma_heap.c
>>>>
>>>> diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
>>>> index 2050527..a5eef06 100644
>>>> --- a/drivers/dma-buf/heaps/Kconfig
>>>> +++ b/drivers/dma-buf/heaps/Kconfig
>>>> @@ -4,3 +4,11 @@ config DMABUF_HEAPS_SYSTEM
>>>>          help
>>>>            Choose this option to enable the system dmabuf heap. The system heap
>>>>            is backed by pages from the buddy allocator. If in doubt, say Y.
>>>> +
>>>> +config DMABUF_HEAPS_CMA
>>>> +       bool "DMA-BUF CMA Heap"
>>>> +       depends on DMABUF_HEAPS && DMA_CMA
>>>> +       help
>>>> +         Choose this option to enable dma-buf CMA heap. This heap is backed
>>>> +         by the Contiguous Memory Allocator (CMA). If your system has these
>>>> +         regions, you should say Y here.
>>>> diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
>>>> index d1808ec..6e54cde 100644
>>>> --- a/drivers/dma-buf/heaps/Makefile
>>>> +++ b/drivers/dma-buf/heaps/Makefile
>>>> @@ -1,3 +1,4 @@
>>>>  # SPDX-License-Identifier: GPL-2.0
>>>>  obj-y += heap-helpers.o
>>>>  obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
>>>> +obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
>>>> diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
>>>> new file mode 100644
>>>> index 0000000..f4485c60
>>>> --- /dev/null
>>>> +++ b/drivers/dma-buf/heaps/cma_heap.c
>>>> @@ -0,0 +1,170 @@
>>>> +// SPDX-License-Identifier: GPL-2.0
>>>> +/*
>>>> + * DMABUF CMA heap exporter
>>>> + *
>>>> + * Copyright (C) 2012, 2019 Linaro Ltd.
>>>> + * Author: <benjamin.gaign...@linaro.org> for ST-Ericsson.
>>>> + */
>>>> +
>>>> +#include <linux/device.h>
>>>> +#include <linux/dma-buf.h>
>>>> +#include <linux/dma-heap.h>
>>>> +#include <linux/slab.h>
>>>> +#include <linux/errno.h>
>>>> +#include <linux/err.h>
>>>> +#include <linux/cma.h>
>>>> +#include <linux/scatterlist.h>
>>>> +#include <linux/highmem.h>
>>>> +
>>>> +#include "heap-helpers.h"
>>>> +
>>>> +struct cma_heap {
>>>> +       struct dma_heap *heap;
>>>> +       struct cma *cma;
>>>> +};
>>>> +
>>>> +static void cma_heap_free(struct heap_helper_buffer *buffer)
>>>> +{
>>>> +       struct cma_heap *cma_heap = dma_heap_get_data(buffer->heap_buffer.heap);
>>>> +       struct page *pages = buffer->priv_virt;
>>>> +       unsigned long nr_pages;
>>>> +
>>>> +       nr_pages = buffer->heap_buffer.size >> PAGE_SHIFT;
>>>> +
>>>> +       /* free page list */
>>>> +       kfree(buffer->pages);
>>>> +       /* release memory */
>>>> +       cma_release(cma_heap->cma, pages, nr_pages);
>>>> +       kfree(buffer);
>>>> +}
>>>> +
>>>> +/* dmabuf heap CMA operations functions */
>>>> +static int cma_heap_allocate(struct dma_heap *heap,
>>>> +                            unsigned long len,
>>>> +                            unsigned long flags)
>>>> +{
>>>> +       struct cma_heap *cma_heap = dma_heap_get_data(heap);
>>>> +       struct heap_helper_buffer *helper_buffer;
>>>> +       struct page *pages;
>>>> +       size_t size = PAGE_ALIGN(len);
>>>> +       unsigned long nr_pages = size >> PAGE_SHIFT;
>>>> +       unsigned long align = get_order(size);
>>>> +       DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
>>>> +       struct dma_buf *dmabuf;
>>>> +       int ret = -ENOMEM;
>>>> +       pgoff_t pg;
>>>> +
>>>> +       if (align > CONFIG_CMA_ALIGNMENT)
>>>> +               align = CONFIG_CMA_ALIGNMENT;
>>>> +
>>>> +       helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
>>>> +       if (!helper_buffer)
>>>> +               return -ENOMEM;
>>>> +
>>>> +       INIT_HEAP_HELPER_BUFFER(helper_buffer, cma_heap_free);
>>>> +       helper_buffer->heap_buffer.flags = flags;
>>>> +       helper_buffer->heap_buffer.heap = heap;
>>>> +       helper_buffer->heap_buffer.size = len;
>>>> +
>>>> +       pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
>>>> +       if (!pages)
>>>> +               goto free_buf;
>>>> +
>>>> +       if (PageHighMem(pages)) {
>>>> +               unsigned long nr_clear_pages = nr_pages;
>>>> +               struct page *page = pages;
>>>> +
>>>> +               while (nr_clear_pages > 0) {
>>>> +                       void *vaddr = kmap_atomic(page);
>>>> +
>>>> +                       memset(vaddr, 0, PAGE_SIZE);
>>>> +                       kunmap_atomic(vaddr);
>>>> +                       page++;
>>>> +                       nr_clear_pages--;
>>>> +               }
>>>> +       } else {
>>>> +               memset(page_address(pages), 0, size);
>>>> +       }
>>>> +
>>>> +       helper_buffer->pagecount = nr_pages;
>>>> +       helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
>>>> +                                            sizeof(*helper_buffer->pages),
>>>> +                                            GFP_KERNEL);
>>>> +       if (!helper_buffer->pages) {
>>>> +               ret = -ENOMEM;
>>>> +               goto free_cma;
>>>> +       }
>>>> +
>>>> +       for (pg = 0; pg < helper_buffer->pagecount; pg++) {
>>>> +               helper_buffer->pages[pg] = &pages[pg];
>>>> +               if (!helper_buffer->pages[pg])
>>>> +                       goto free_pages;
>>>> +       }
>>>> +
>>>> +       /* create the dmabuf */
>>>> +       exp_info.ops = &heap_helper_ops;
>>>> +       exp_info.size = len;
>>>> +       exp_info.flags = O_RDWR;
>>>
>>> I think that the flags should be provided when requesting the allocation,
>>> like it is done in DRM or V4L2.
>>> For me DMA_HEAP_VALID_FLAGS = (O_CLOEXEC | O_ACCMODE).
>>>
>>
>> So something like what is done in udmabuf?
>>
>> https://elixir.bootlin.com/linux/v5.1-rc1/source/include/uapi/linux/udmabuf.h#L8
>>
>> I think that can be arranged.
>
> I have in mind what is done by DRM:
> https://elixir.bootlin.com/linux/v5.1-rc1/source/include/uapi/drm/drm.h#L707
>
> V4L2 does the same but without redefining the flags (which is better).
>
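For illustration only, here is a rough sketch of what allocation-time file
flags could look like. The struct layout, names, and helper below are
assumptions modeled on the DRM and udmabuf interfaces linked above; nothing
here is defined by this patch:

#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/types.h>

/* Only O_CLOEXEC and the access mode bits would be meaningful. */
#define DMA_HEAP_VALID_FLAGS   (O_CLOEXEC | O_ACCMODE)

/* Hypothetical ioctl argument: userspace passes the dma-buf file flags
 * (fd_flags) together with the allocation request, DRM/V4L2 style. */
struct dma_heap_allocation_data {
        __u64 len;        /* in: size of the allocation in bytes */
        __u32 fd;         /* out: fd of the exported dma-buf */
        __u32 fd_flags;   /* in: O_CLOEXEC, O_RDWR, ... */
        __u64 heap_flags; /* in: heap-specific flags */
};

/* Core-side validation before the heap's allocate op is called. */
static inline int dma_heap_check_fd_flags(unsigned int fd_flags)
{
        if (fd_flags & ~DMA_HEAP_VALID_FLAGS)
                return -EINVAL;
        return 0;
}

With something along these lines, the O_RDWR/O_CLOEXEC handling could live
in the core rather than in each heap.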
That would mean we would need a whole flag word passed in just for file
flags, when I only ever see one bit being useful (O_CLOEXEC or not). Do
you see any situation where we need more than the default O_RDWR and an
optional O_CLOEXEC for a dma-buf file?

Andrew

> Benjamin
>
>>
>> John,
>>
>> This might be a good reason to move the dma_buf_fd() call below out of
>> the heap and back into the core. That way the file exporting flags can
>> be common and core-handled. I don't think it would complicate the error
>> handling any, as we only need to dma_buf_put(dmabuf) in the core on
>> failure and the heap-specific cleanup will happen for us.
>>
>> Andrew
>>
>>> Benjamin
>>>
>>>> +       exp_info.priv = &helper_buffer->heap_buffer;
>>>> +       dmabuf = dma_buf_export(&exp_info);
>>>> +       if (IS_ERR(dmabuf)) {
>>>> +               ret = PTR_ERR(dmabuf);
>>>> +               goto free_pages;
>>>> +       }
>>>> +
>>>> +       helper_buffer->heap_buffer.dmabuf = dmabuf;
>>>> +       helper_buffer->priv_virt = pages;
>>>> +
>>>> +       ret = dma_buf_fd(dmabuf, O_CLOEXEC);
>>>> +       if (ret < 0) {
>>>> +               dma_buf_put(dmabuf);
>>>> +               /* just return, as put will call release and that will free */
>>>> +               return ret;
>>>> +       }
>>>> +
>>>> +       return ret;
>>>> +
>>>> +free_pages:
>>>> +       kfree(helper_buffer->pages);
>>>> +free_cma:
>>>> +       cma_release(cma_heap->cma, pages, nr_pages);
>>>> +free_buf:
>>>> +       kfree(helper_buffer);
>>>> +       return ret;
>>>> +}
>>>> +
>>>> +static struct dma_heap_ops cma_heap_ops = {
>>>> +       .allocate = cma_heap_allocate,
>>>> +};
>>>> +
>>>> +static int __add_cma_heap(struct cma *cma, void *data)
>>>> +{
>>>> +       struct cma_heap *cma_heap;
>>>> +       struct dma_heap_export_info exp_info;
>>>> +
>>>> +       cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
>>>> +       if (!cma_heap)
>>>> +               return -ENOMEM;
>>>> +       cma_heap->cma = cma;
>>>> +
>>>> +       exp_info.name = cma_get_name(cma);
>>>> +       exp_info.ops = &cma_heap_ops;
>>>> +       exp_info.priv = cma_heap;
>>>> +
>>>> +       cma_heap->heap = dma_heap_add(&exp_info);
>>>> +       if (IS_ERR(cma_heap->heap)) {
>>>> +               int ret = PTR_ERR(cma_heap->heap);
>>>> +
>>>> +               kfree(cma_heap);
>>>> +               return ret;
>>>> +       }
>>>> +
>>>> +       return 0;
>>>> +}
>>>> +
>>>> +static int add_cma_heaps(void)
>>>> +{
>>>> +       cma_for_each_area(__add_cma_heap, NULL);
>>>> +       return 0;
>>>> +}
>>>> +device_initcall(add_cma_heaps);
>>>> --
>>>> 2.7.4
>>>>
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
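Following up on the suggestion above to move the dma_buf_fd() call into the
core: a minimal sketch of how that could look, assuming the allocate op is
reworked to hand back the exported struct dma_buf rather than an fd. The
function name and signatures below are assumptions for illustration, not
something defined by this series:

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/types.h>

/*
 * Sketch only: common fd creation in the dma-heap core. Assumes this lives
 * where the heap's ops are visible and that ops->allocate returns the
 * exported dma_buf.
 */
static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
                                 unsigned int fd_flags,
                                 unsigned int heap_flags)
{
        struct dma_buf *dmabuf;
        int fd;

        dmabuf = heap->ops->allocate(heap, len, heap_flags);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        fd = dma_buf_fd(dmabuf, fd_flags);
        if (fd < 0) {
                /* the dma-buf release path frees the heap-specific buffer */
                dma_buf_put(dmabuf);
        }

        return fd;
}

That would keep the O_CLOEXEC/O_RDWR policy and the fd error handling in one
place, with each heap only responsible for exporting its buffer.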