On Mon, Sep 12, 2011 at 2:21 PM, Rob Clark <rob.clark at linaro.org> wrote:
> From: Rob Clark <rob at ti.com>
>
> This factors out common code from psb_gtt_attach_pages()/
> i915_gem_object_get_pages_gtt() and psb_gtt_detach_pages()/
> i915_gem_object_put_pages_gtt().
>
> Signed-off-by: Rob Clark <rob at ti.com>
> ---
>  drivers/gpu/drm/drm_gem.c |   68 +++++++++++++++++++++++++++++++++++++++++++++
>  include/drm/drmP.h        |    3 ++
>  2 files changed, 71 insertions(+), 0 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> index 396e60c..05113c3 100644
> --- a/drivers/gpu/drm/drm_gem.c
> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -285,6 +285,74 @@ again:
>  }
> ?EXPORT_SYMBOL(drm_gem_handle_create);
>
> +/**
> + * drm_gem_get_pages - helper to allocate backing pages for a GEM object
> + * @obj: obj in question
> + * @gfpmask: gfp mask of requested pages
> + */
> +struct page ** drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
> +{
> +        struct inode *inode;
> +        struct address_space *mapping;
> +        struct page *p, **pages;
> +        int i, npages;
> +
> +        /* This is the shared memory object that backs the GEM resource */
> +        inode = obj->filp->f_path.dentry->d_inode;
> +        mapping = inode->i_mapping;
> +
> +        npages = obj->size >> PAGE_SHIFT;
> +
> +        pages = drm_malloc_ab(npages, sizeof(struct page *));
> +        if (pages == NULL)
> +                return ERR_PTR(-ENOMEM);
> +
> +        gfpmask |= mapping_gfp_mask(mapping);
> +
> +        for (i = 0; i < npages; i++) {
> +                p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);

Note: I'll send an updated version of this patch with a

  BUG_ON((gfpmask & __GFP_DMA32) && (page_to_pfn(p) >= 0x00100000UL));

or something roughly like this, to catch cases where
shmem_read_mapping_page_gfp() doesn't actually give us a page in the
low 4GB.

It is only a theoretical issue currently, as (AFAIK) no devices w/ 4GB
restriction currently have enough memory to hit this problem.  But it
would be good to have some error checking in case
shmem_read_mapping_page_gfp() isn't fixed by the time we have devices
that would have this problem.

BR,
-R

> +                if (IS_ERR(p))
> +                        goto fail;
> +                pages[i] = p;
> +        }
> +
> +        return pages;
> +
> +fail:
> +        while (i--) {
> +                page_cache_release(pages[i]);
> +        }
> +        drm_free_large(pages);
> +        return ERR_PTR(PTR_ERR(p));
> +}
> +EXPORT_SYMBOL(drm_gem_get_pages);
> +
> +/**
> + * drm_gem_put_pages - helper to free backing pages for a GEM object
> + * @obj: obj in question
> + * @pages: pages to free
> + */
> +void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
> +                bool dirty, bool accessed)
> +{
> +        int i, npages;
> +
> +        npages = obj->size >> PAGE_SHIFT;
> +
> +        for (i = 0; i < npages; i++) {
> +                if (dirty)
> +                        set_page_dirty(pages[i]);
> +
> +                if (accessed)
> +                        mark_page_accessed(pages[i]);
> +
> +                /* Undo the reference we took when populating the table */
> +                page_cache_release(pages[i]);
> +        }
> +
> +        drm_free_large(pages);
> +}
> +EXPORT_SYMBOL(drm_gem_put_pages);
>
>  /**
>  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
> diff --git a/include/drm/drmP.h b/include/drm/drmP.h
> index 43538b6..a62d8fe 100644
> --- a/include/drm/drmP.h
> +++ b/include/drm/drmP.h
> @@ -1624,6 +1624,9 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
>         drm_gem_object_unreference_unlocked(obj);
>  }
>
> +struct page ** drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
> +void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
> +                bool dirty, bool accessed);
>  void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
>  int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
>
> --
> 1.7.5.4
>
>

Reply via email to