This speeds up pwrite and pread from ~120 µs to ~100 µs for
reading/writing 1mb on my snb (if the backing storage pages
are already pinned, of course).

v2: Chris Wilson pointed out a glaring page reference bug - I've
unconditionally dropped the reference. With that fixed (and the
associated reduction of dirt in dmesg) it's now even a notch faster.

v3: Unconditionally grab a page reference when dropping
dev->struct_mutex to simplify the code-flow.

Signed-off-by: Daniel Vetter <daniel.vet...@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_gem.c |   42 +++++++++++++++++++++++++++-----------
 1 files changed, 30 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0446c4c..0487889 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -321,6 +321,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int hit_slowpath = 0;
        int needs_clflush = 0;
+       int release_page;
 
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
@@ -355,10 +356,16 @@ i915_gem_shmem_pread(struct drm_device *dev,
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
 
-               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-               if (IS_ERR(page)) {
-                       ret = PTR_ERR(page);
-                       goto out;
+               if (obj->pages) {
+                       page = obj->pages[offset >> PAGE_SHIFT];
+                       release_page = 0;
+               } else {
+                       page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+                       if (IS_ERR(page)) {
+                               ret = PTR_ERR(page);
+                               goto out;
+                       }
+                       release_page = 1;
                }
 
                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
@@ -378,7 +385,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
                }
 
                hit_slowpath = 1;
-
+               page_cache_get(page);
                mutex_unlock(&dev->struct_mutex);
 
                vaddr = kmap(page);
@@ -397,9 +404,11 @@ i915_gem_shmem_pread(struct drm_device *dev,
                kunmap(page);
 
                mutex_lock(&dev->struct_mutex);
+               page_cache_release(page);
 next_page:
                mark_page_accessed(page);
-               page_cache_release(page);
+               if (release_page)
+                       page_cache_release(page);
 
                if (ret) {
                        ret = -EFAULT;
@@ -680,6 +689,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
        int shmem_page_offset, page_length, ret;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int hit_slowpath = 0;
+       int release_page;
 
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
@@ -704,10 +714,16 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
 
-               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-               if (IS_ERR(page)) {
-                       ret = PTR_ERR(page);
-                       goto out;
+               if (obj->pages) {
+                       page = obj->pages[offset >> PAGE_SHIFT];
+                       release_page = 0;
+               } else {
+                       page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+                       if (IS_ERR(page)) {
+                               ret = PTR_ERR(page);
+                               goto out;
+                       }
+                       release_page = 1;
                }
 
                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
@@ -725,7 +741,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                }
 
                hit_slowpath = 1;
-
+               page_cache_get(page);
                mutex_unlock(&dev->struct_mutex);
 
                vaddr = kmap(page);
@@ -740,10 +756,12 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                kunmap(page);
 
                mutex_lock(&dev->struct_mutex);
+               page_cache_release(page);
 next_page:
                set_page_dirty(page);
                mark_page_accessed(page);
-               page_cache_release(page);
+               if (release_page)
+                       page_cache_release(page);
 
                if (ret) {
                        ret = -EFAULT;
-- 
1.7.7.5

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to