On Fri, Jul 19, 2019 at 11:21:53AM +0200, Daniel Vetter wrote:
> On Wed, Jul 17, 2019 at 02:15:37PM -0700, Rob Clark wrote:
> > From: Rob Clark <robdcl...@chromium.org>
> > 
> > drm_clflush_pages() is a no-op on arm/arm64, but we can use the
> > dma_sync API instead.
> > 
> > Fixes failures w/ vgem_test.
> > 
> > Signed-off-by: Rob Clark <robdcl...@chromium.org>
> > ---
> > An alternative approach to the series[1] I sent yesterday
> > 
> > On the plus side, it keeps the WC buffers and avoids any drm core
> > changes.  On the minus side, I don't think it will work (at least
> > on arm64) prior to v5.0[2], so the fix can't be backported very
> > far.
> 
> Yeah seems a lot more reasonable.
> 
> Acked-by: Daniel Vetter <daniel.vet...@ffwll.ch>

Applied to drm-misc-fixes, thanks!

Sean
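
For anyone skimming the diff below: drm_clflush_pages() has a real
implementation only on x86 (CLFLUSH) and powerpc, so on arm/arm64 the
cache maintenance has to go through the streaming DMA API instead.
The core of the fix is the usual ownership hand-off, sketched here
with a hypothetical dev/sgt pair and assuming the scatterlist has
already been mapped with dma_map_sg() (the two calls themselves are
the standard kernel DMA API, not new helpers):

	/* CPU writes are done; write back caches so the device
	 * (or a dma-buf importer) sees current data. */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents,
			       DMA_BIDIRECTIONAL);

	/* Device access is done; invalidate so subsequent CPU
	 * reads see what the device wrote. */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents,
			    DMA_BIDIRECTIONAL);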

> > 
> > [1] https://patchwork.freedesktop.org/series/63771/
> > [2] depends on 356da6d0cde3323236977fce54c1f9612a742036
> > 
> >  drivers/gpu/drm/vgem/vgem_drv.c | 130 ++++++++++++++++++++------------
> >  1 file changed, 83 insertions(+), 47 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
> > index 76d95b5e289c..6c9b5e20b3d4 100644
> > --- a/drivers/gpu/drm/vgem/vgem_drv.c
> > +++ b/drivers/gpu/drm/vgem/vgem_drv.c
> > @@ -47,10 +47,16 @@ static struct vgem_device {
> >     struct platform_device *platform;
> >  } *vgem_device;
> >  
> > +static void sync_and_unpin(struct drm_vgem_gem_object *bo);
> > +static struct page **pin_and_sync(struct drm_vgem_gem_object *bo);
> > +
> >  static void vgem_gem_free_object(struct drm_gem_object *obj)
> >  {
> >     struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
> >  
> > +   if (!obj->import_attach)
> > +           sync_and_unpin(vgem_obj);
> > +
> >     kvfree(vgem_obj->pages);
> >     mutex_destroy(&vgem_obj->pages_lock);
> >  
> > @@ -78,40 +84,15 @@ static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
> >             return VM_FAULT_SIGBUS;
> >  
> >     mutex_lock(&obj->pages_lock);
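> > +   /* Lazily pin and sync on first fault; the pages are
> > +    * released in the free path via sync_and_unpin(). */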
> > +   if (!obj->pages)
> > +           pin_and_sync(obj);
> >     if (obj->pages) {
> >             get_page(obj->pages[page_offset]);
> >             vmf->page = obj->pages[page_offset];
> >             ret = 0;
> >     }
> >     mutex_unlock(&obj->pages_lock);
> > -   if (ret) {
> > -           struct page *page;
> > -
> > -           page = shmem_read_mapping_page(
> > -                                   file_inode(obj->base.filp)->i_mapping,
> > -                                   page_offset);
> > -           if (!IS_ERR(page)) {
> > -                   vmf->page = page;
> > -                   ret = 0;
> > -           } else switch (PTR_ERR(page)) {
> > -                   case -ENOSPC:
> > -                   case -ENOMEM:
> > -                           ret = VM_FAULT_OOM;
> > -                           break;
> > -                   case -EBUSY:
> > -                           ret = VM_FAULT_RETRY;
> > -                           break;
> > -                   case -EFAULT:
> > -                   case -EINVAL:
> > -                           ret = VM_FAULT_SIGBUS;
> > -                           break;
> > -                   default:
> > -                           WARN_ON(PTR_ERR(page));
> > -                           ret = VM_FAULT_SIGBUS;
> > -                           break;
> > -           }
> >  
> > -   }
> >     return ret;
> >  }
> >  
> > @@ -277,32 +258,93 @@ static const struct file_operations vgem_driver_fops = {
> >     .release        = drm_release,
> >  };
> >  
> > -static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
> > +/* Called under pages_lock, except in free path (where it can't race): */
> > +static void sync_and_unpin(struct drm_vgem_gem_object *bo)
> >  {
> > -   mutex_lock(&bo->pages_lock);
> > -   if (bo->pages_pin_count++ == 0) {
> > -           struct page **pages;
> > +   struct drm_device *dev = bo->base.dev;
> > +
> > +   if (bo->table) {
> > +           dma_sync_sg_for_cpu(dev->dev, bo->table->sgl,
> > +                           bo->table->nents, DMA_BIDIRECTIONAL);
> > +           sg_free_table(bo->table);
> > +           kfree(bo->table);
> > +           bo->table = NULL;
> > +   }
> > +
> > +   if (bo->pages) {
> > +           drm_gem_put_pages(&bo->base, bo->pages, true, true);
> > +           bo->pages = NULL;
> > +   }
> > +}
> > +
> > +static struct page **pin_and_sync(struct drm_vgem_gem_object *bo)
> > +{
> > +   struct drm_device *dev = bo->base.dev;
> > +   int npages = bo->base.size >> PAGE_SHIFT;
> > +   struct page **pages;
> > +   struct sg_table *sgt;
> > +
> > +   WARN_ON(!mutex_is_locked(&bo->pages_lock));
> > +
> > +   pages = drm_gem_get_pages(&bo->base);
> > +   if (IS_ERR(pages))
> > +           return pages;
> >  
> > -           pages = drm_gem_get_pages(&bo->base);
> > -           if (IS_ERR(pages)) {
> > -                   bo->pages_pin_count--;
> > -                   mutex_unlock(&bo->pages_lock);
> > -                   return pages;
> > -           }
> > +   sgt = drm_prime_pages_to_sg(pages, npages);
> > +   if (IS_ERR(sgt)) {
> > +           dev_err(dev->dev,
> > +                   "failed to allocate sgt: %ld\n",
> > +                   PTR_ERR(sgt));
> > +           drm_gem_put_pages(&bo->base, pages, false, false);
> > +           return ERR_CAST(sgt);
> > +   }
> > +
> > +   /*
> > +    * Flush the object from the CPU cache so that importers
> > +    * can rely on coherent indirect access via the exported
> > +    * dma-address.
> > +    */
> > +   dma_sync_sg_for_device(dev->dev, sgt->sgl,
> > +                   sgt->nents, DMA_BIDIRECTIONAL);
> > +
> > +   bo->pages = pages;
> > +   bo->table = sgt;
> > +
> > +   return pages;
> > +}
> > +
> > +static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
> > +{
> > +   struct page **pages;
> >  
> > -           bo->pages = pages;
> > +   mutex_lock(&bo->pages_lock);
> > +   if (bo->pages_pin_count++ == 0 && !bo->pages) {
> > +           pages = pin_and_sync(bo);
> > +           if (IS_ERR(pages))
> > +                   bo->pages_pin_count--;
> > +   } else {
> > +           WARN_ON(!bo->pages);
> > +           pages = bo->pages;
> >     }
> >     mutex_unlock(&bo->pages_lock);
> >  
> > -   return bo->pages;
> > +   return pages;
> >  }
> >  
> >  static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
> >  {
> > +   /*
> > +    * We shouldn't hit this for imported bo's; in the import
> > +    * case we don't own the scatter-table.
> > +    */
> > +   WARN_ON(bo->base.import_attach);
> > +
> >     mutex_lock(&bo->pages_lock);
> >     if (--bo->pages_pin_count == 0) {
> > -           drm_gem_put_pages(&bo->base, bo->pages, true, true);
> > -           bo->pages = NULL;
> > +           WARN_ON(!bo->table);
> > +           sync_and_unpin(bo);
> >     }
> >     mutex_unlock(&bo->pages_lock);
> >  }
> > @@ -310,18 +352,12 @@ static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
> >  static int vgem_prime_pin(struct drm_gem_object *obj)
> >  {
> >     struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
> > -   long n_pages = obj->size >> PAGE_SHIFT;
> >     struct page **pages;
> >  
> >     pages = vgem_pin_pages(bo);
> >     if (IS_ERR(pages))
> >             return PTR_ERR(pages);
> >  
> > -   /* Flush the object from the CPU cache so that importers can rely
> > -    * on coherent indirect access via the exported dma-address.
> > -    */
> > -   drm_clflush_pages(pages, n_pages);
> > -
> >     return 0;
> >  }
> >  
> > -- 
> > 2.21.0
> > 
> 
> -- 
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel

-- 
Sean Paul, Software Engineer, Google / Chromium OS
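
For reference, a self-contained sketch of the pin/sync lifecycle the
patch implements, with hypothetical example_* names and minimal error
handling; unlike the patch it also maps the scatterlist, which is
what gives the sync calls something to operate on in the general
case:

	#include <linux/dma-mapping.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <drm/drm_prime.h>

	/* Build an sg_table for pinned pages, map it for DMA, and
	 * flush CPU caches so the device sees current data. */
	static struct sg_table *example_map_and_flush(struct device *dev,
						      struct page **pages,
						      unsigned int npages)
	{
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(pages, npages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sg(dev, sgt->sgl, sgt->orig_nents,
			       DMA_BIDIRECTIONAL) == 0) {
			sg_free_table(sgt);
			kfree(sgt);
			return ERR_PTR(-ENOMEM);
		}

		/* CPU -> device hand-off: write back dirty lines. */
		dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents,
				       DMA_BIDIRECTIONAL);
		return sgt;
	}

	/* The inverse: give the pages back to the CPU and tear the
	 * mapping down (cf. sync_and_unpin() in the patch). */
	static void example_unmap_for_cpu(struct device *dev,
					  struct sg_table *sgt)
	{
		dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents,
				    DMA_BIDIRECTIONAL);
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents,
			     DMA_BIDIRECTIONAL);
		sg_free_table(sgt);
		kfree(sgt);
	}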
