Continuing the decluttering of i915_gem.c

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/Makefile                 |   2 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |   8 +
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   2 +
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     | 482 ++++++++++++
 drivers/gpu/drm/i915/gem/i915_gem_phys.c      | 212 ++++++
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c     |  22 +
 .../drm/i915/gem/selftests/i915_gem_phys.c    |  80 ++
 drivers/gpu/drm/i915/i915_drv.h               |   2 -
 drivers/gpu/drm/i915/i915_gem.c               | 685 ------------------
 .../gpu/drm/i915/selftests/i915_gem_object.c  |  54 --
 .../drm/i915/selftests/i915_mock_selftests.h  |   1 +
 11 files changed, 809 insertions(+), 741 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_pages.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_phys.c
 create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 54418ce5faac..8e6ef54f2497 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -72,6 +72,8 @@ i915-$(CONFIG_DRM_I915_WERROR) += \
 # GEM code
 i915-y += \
          gem/i915_gem_object.o \
+         gem/i915_gem_pages.o \
+         gem/i915_gem_phys.o \
          gem/i915_gem_shmem.o \
          i915_active.o \
          i915_cmd_parser.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 05bbb3f33904..ebab3505e51d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -33,11 +33,17 @@ void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                     struct sg_table *pages,
                                     bool needs_clflush);
 
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
+
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
 
 void i915_gem_flush_free_objects(struct drm_i915_private *i915);
 
+struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
+void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+
 /**
  * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
  * @filp: DRM file private date
@@ -231,6 +237,8 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes);
+
+int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index e4b50944f553..da6a33e2395f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -52,6 +52,8 @@ struct drm_i915_gem_object_ops {
        int (*get_pages)(struct drm_i915_gem_object *obj);
        void (*put_pages)(struct drm_i915_gem_object *obj,
                          struct sg_table *pages);
+       void (*truncate)(struct drm_i915_gem_object *obj);
+       void (*invalidate)(struct drm_i915_gem_object *obj);
 
        int (*pwrite)(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *arg);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
new file mode 100644
index 000000000000..a594f48db28e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -0,0 +1,482 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include "i915_gem_object.h"
+
+#include "../i915_drv.h"
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+                                struct sg_table *pages,
+                                unsigned int sg_page_sizes)
+{
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       unsigned long supported = INTEL_INFO(i915)->page_sizes;
+       int i;
+
+       lockdep_assert_held(&obj->mm.lock);
+
+       obj->mm.get_page.sg_pos = pages->sgl;
+       obj->mm.get_page.sg_idx = 0;
+
+       obj->mm.pages = pages;
+
+       if (i915_gem_object_is_tiled(obj) &&
+           i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+               GEM_BUG_ON(obj->mm.quirked);
+               __i915_gem_object_pin_pages(obj);
+               obj->mm.quirked = true;
+       }
+
+       GEM_BUG_ON(!sg_page_sizes);
+       obj->mm.page_sizes.phys = sg_page_sizes;
+
+       /*
+        * Calculate the supported page-sizes which fit into the given
+        * sg_page_sizes. This will give us the page-sizes which we may be able
+        * to use opportunistically when later inserting into the GTT. For
+        * example if phys=2G, then in theory we should be able to use 1G, 2M,
+        * 64K or 4K pages, although in practice this will depend on a number of
+        * other factors.
+        */
+       obj->mm.page_sizes.sg = 0;
+       for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+               if (obj->mm.page_sizes.phys & ~0u << i)
+                       obj->mm.page_sizes.sg |= BIT(i);
+       }
+       GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
+
+       spin_lock(&i915->mm.obj_lock);
+       list_add(&obj->mm.link, &i915->mm.unbound_list);
+       spin_unlock(&i915->mm.obj_lock);
+}
+
+int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+       int err;
+
+       if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
+               DRM_DEBUG("Attempting to obtain a purgeable object\n");
+               return -EFAULT;
+       }
+
+       err = obj->ops->get_pages(obj);
+       GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
+
+       return err;
+}
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_pin_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_unpin_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+       int err;
+
+       err = mutex_lock_interruptible(&obj->mm.lock);
+       if (err)
+               return err;
+
+       if (unlikely(!i915_gem_object_has_pages(obj))) {
+               GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
+               err = ____i915_gem_object_get_pages(obj);
+               if (err)
+                       goto unlock;
+
+               smp_mb__before_atomic();
+       }
+       atomic_inc(&obj->mm.pages_pin_count);
+
+unlock:
+       mutex_unlock(&obj->mm.lock);
+       return err;
+}
+
+/* Immediately discard the backing storage */
+void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+{
+       drm_gem_free_mmap_offset(&obj->base);
+       if (obj->ops->truncate)
+               obj->ops->truncate(obj);
+}
+
+/* Try to discard unwanted pages */
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
+{
+       lockdep_assert_held(&obj->mm.lock);
+       GEM_BUG_ON(i915_gem_object_has_pages(obj));
+
+       switch (obj->mm.madv) {
+       case I915_MADV_DONTNEED:
+               i915_gem_object_truncate(obj);
+       case __I915_MADV_PURGED:
+               return;
+       }
+
+       if (obj->ops->invalidate)
+               obj->ops->invalidate(obj);
+}
+
+static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
+{
+       struct radix_tree_iter iter;
+       void __rcu **slot;
+
+       rcu_read_lock();
+       radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+               radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+       rcu_read_unlock();
+}
+
+struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       struct sg_table *pages;
+
+       pages = fetch_and_zero(&obj->mm.pages);
+       if (IS_ERR_OR_NULL(pages))
+               return pages;
+
+       spin_lock(&i915->mm.obj_lock);
+       list_del(&obj->mm.link);
+       spin_unlock(&i915->mm.obj_lock);
+
+       if (obj->mm.mapping) {
+               void *ptr;
+
+               ptr = page_mask_bits(obj->mm.mapping);
+               if (is_vmalloc_addr(ptr))
+                       vunmap(ptr);
+               else
+                       kunmap(kmap_to_page(ptr));
+
+               obj->mm.mapping = NULL;
+       }
+
+       __i915_gem_object_reset_page_iter(obj);
+       obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
+       return pages;
+}
+
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+                               enum i915_mm_subclass subclass)
+{
+       struct sg_table *pages;
+       int err;
+
+       if (i915_gem_object_has_pinned_pages(obj))
+               return -EBUSY;
+
+       GEM_BUG_ON(obj->bind_count);
+
+       /* May be called by shrinker from within get_pages() (on another bo) */
+       mutex_lock_nested(&obj->mm.lock, subclass);
+       if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
+               err = -EBUSY;
+               goto unlock;
+       }
+
+       /*
+        * ->put_pages might need to allocate memory for the bit17 swizzle
+        * array, hence protect them from being reaped by removing them from gtt
+        * lists early.
+        */
+       pages = __i915_gem_object_unset_pages(obj);
+
+       /*
+        * XXX Temporary hijinx to avoid updating all backends to handle
+        * NULL pages. In the future, when we have more asynchronous
+        * get_pages backends we should be better able to handle the
+        * cancellation of the async task in a more uniform manner.
+        */
+       if (!pages && !i915_gem_object_needs_async_cancel(obj))
+               pages = ERR_PTR(-EINVAL);
+
+       if (!IS_ERR(pages))
+               obj->ops->put_pages(obj, pages);
+
+       err = 0;
+unlock:
+       mutex_unlock(&obj->mm.lock);
+
+       return err;
+}
+
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+                                enum i915_map_type type)
+{
+       unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+       struct sg_table *sgt = obj->mm.pages;
+       struct sgt_iter sgt_iter;
+       struct page *page;
+       struct page *stack_pages[32];
+       struct page **pages = stack_pages;
+       unsigned long i = 0;
+       pgprot_t pgprot;
+       void *addr;
+
+       /* A single page can always be kmapped */
+       if (n_pages == 1 && type == I915_MAP_WB)
+               return kmap(sg_page(sgt->sgl));
+
+       if (n_pages > ARRAY_SIZE(stack_pages)) {
+               /* Too big for stack -- allocate temporary array instead */
+               pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+               if (!pages)
+                       return NULL;
+       }
+
+       for_each_sgt_page(page, sgt_iter, sgt)
+               pages[i++] = page;
+
+       /* Check that we have the expected number of pages */
+       GEM_BUG_ON(i != n_pages);
+
+       switch (type) {
+       default:
+               MISSING_CASE(type);
+               /* fallthrough to use PAGE_KERNEL anyway */
+       case I915_MAP_WB:
+               pgprot = PAGE_KERNEL;
+               break;
+       case I915_MAP_WC:
+               pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
+               break;
+       }
+       addr = vmap(pages, n_pages, 0, pgprot);
+
+       if (pages != stack_pages)
+               kvfree(pages);
+
+       return addr;
+}
+
+/* get, pin, and map the pages of the object into kernel space */
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+                             enum i915_map_type type)
+{
+       enum i915_map_type has_type;
+       bool pinned;
+       void *ptr;
+       int err;
+
+       if (unlikely(!i915_gem_object_has_struct_page(obj)))
+               return ERR_PTR(-ENXIO);
+
+       err = mutex_lock_interruptible(&obj->mm.lock);
+       if (err)
+               return ERR_PTR(err);
+
+       pinned = !(type & I915_MAP_OVERRIDE);
+       type &= ~I915_MAP_OVERRIDE;
+
+       if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+               if (unlikely(!i915_gem_object_has_pages(obj))) {
+                       GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
+                       err = ____i915_gem_object_get_pages(obj);
+                       if (err)
+                               goto err_unlock;
+
+                       smp_mb__before_atomic();
+               }
+               atomic_inc(&obj->mm.pages_pin_count);
+               pinned = false;
+       }
+       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+
+       ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+       if (ptr && has_type != type) {
+               if (pinned) {
+                       err = -EBUSY;
+                       goto err_unpin;
+               }
+
+               if (is_vmalloc_addr(ptr))
+                       vunmap(ptr);
+               else
+                       kunmap(kmap_to_page(ptr));
+
+               ptr = obj->mm.mapping = NULL;
+       }
+
+       if (!ptr) {
+               ptr = i915_gem_object_map(obj, type);
+               if (!ptr) {
+                       err = -ENOMEM;
+                       goto err_unpin;
+               }
+
+               obj->mm.mapping = page_pack_bits(ptr, type);
+       }
+
+out_unlock:
+       mutex_unlock(&obj->mm.lock);
+       return ptr;
+
+err_unpin:
+       atomic_dec(&obj->mm.pages_pin_count);
+err_unlock:
+       ptr = ERR_PTR(err);
+       goto out_unlock;
+}
+
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+                      unsigned int n,
+                      unsigned int *offset)
+{
+       struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+       struct scatterlist *sg;
+       unsigned int idx, count;
+
+       might_sleep();
+       GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
+       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+       /* As we iterate forward through the sg, we record each entry in a
+        * radixtree for quick repeated (backwards) lookups. If we have seen
+        * this index previously, we will have an entry for it.
+        *
+        * Initial lookup is O(N), but this is amortized to O(1) for
+        * sequential page access (where each new request is consecutive
+        * to the previous one). Repeated lookups are O(lg(obj->base.size)),
+        * i.e. O(1) with a large constant!
+        */
+       if (n < READ_ONCE(iter->sg_idx))
+               goto lookup;
+
+       mutex_lock(&iter->lock);
+
+       /* We prefer to reuse the last sg so that repeated lookup of this
+        * (or the subsequent) sg are fast - comparing against the last
+        * sg is faster than going through the radixtree.
+        */
+
+       sg = iter->sg_pos;
+       idx = iter->sg_idx;
+       count = __sg_page_count(sg);
+
+       while (idx + count <= n) {
+               void *entry;
+               unsigned long i;
+               int ret;
+
+               /* If we cannot allocate and insert this entry, or the
+                * individual pages from this range, cancel updating the
+                * sg_idx so that on this lookup we are forced to linearly
+                * scan onwards, but on future lookups we will try the
+                * insertion again (in which case we need to be careful of
+                * the error return reporting that we have already inserted
+                * this index).
+                */
+               ret = radix_tree_insert(&iter->radix, idx, sg);
+               if (ret && ret != -EEXIST)
+                       goto scan;
+
+               entry = xa_mk_value(idx);
+               for (i = 1; i < count; i++) {
+                       ret = radix_tree_insert(&iter->radix, idx + i, entry);
+                       if (ret && ret != -EEXIST)
+                               goto scan;
+               }
+
+               idx += count;
+               sg = ____sg_next(sg);
+               count = __sg_page_count(sg);
+       }
+
+scan:
+       iter->sg_pos = sg;
+       iter->sg_idx = idx;
+
+       mutex_unlock(&iter->lock);
+
+       if (unlikely(n < idx)) /* insertion completed by another thread */
+               goto lookup;
+
+       /* In case we failed to insert the entry into the radixtree, we need
+        * to look beyond the current sg.
+        */
+       while (idx + count <= n) {
+               idx += count;
+               sg = ____sg_next(sg);
+               count = __sg_page_count(sg);
+       }
+
+       *offset = n - idx;
+       return sg;
+
+lookup:
+       rcu_read_lock();
+
+       sg = radix_tree_lookup(&iter->radix, n);
+       GEM_BUG_ON(!sg);
+
+       /* If this index is in the middle of multi-page sg entry,
+        * the radix tree will contain a value entry that points
+        * to the start of that range. We will return the pointer to
+        * the base page and the offset of this page within the
+        * sg entry's range.
+        */
+       *offset = 0;
+       if (unlikely(xa_is_value(sg))) {
+               unsigned long base = xa_to_value(sg);
+
+               sg = radix_tree_lookup(&iter->radix, base);
+               GEM_BUG_ON(!sg);
+
+               *offset = n - base;
+       }
+
+       rcu_read_unlock();
+
+       return sg;
+}
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+{
+       struct scatterlist *sg;
+       unsigned int offset;
+
+       GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+
+       sg = i915_gem_object_get_sg(obj, n, &offset);
+       return nth_page(sg_page(sg), offset);
+}
+
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+                              unsigned int n)
+{
+       struct page *page;
+
+       page = i915_gem_object_get_page(obj, n);
+       if (!obj->mm.dirty)
+               set_page_dirty(page);
+
+       return page;
+}
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+                               unsigned long n)
+{
+       struct scatterlist *sg;
+       unsigned int offset;
+
+       sg = i915_gem_object_get_sg(obj, n, &offset);
+       return sg_dma_address(sg) + (offset << PAGE_SHIFT);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
new file mode 100644
index 000000000000..1bf3e0afcba2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -0,0 +1,212 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include <linux/highmem.h>
+#include <linux/shmem_fs.h>
+#include <linux/swap.h>
+
+#include <drm/drm.h> /* for drm_legacy.h! */
+#include <drm/drm_cache.h>
+#include <drm/drm_legacy.h> /* for drm_pci.h! */
+#include <drm/drm_pci.h>
+
+#include "i915_gem_object.h"
+
+#include "../i915_drv.h"
+
+static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+{
+       struct address_space *mapping = obj->base.filp->f_mapping;
+       struct drm_dma_handle *phys;
+       struct sg_table *st;
+       struct scatterlist *sg;
+       char *vaddr;
+       int i;
+       int err;
+
+       if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+               return -EINVAL;
+
+       /* Always aligning to the object size, allows a single allocation
+        * to handle all possible callers, and given typical object sizes,
+        * the alignment of the buddy allocation will naturally match.
+        */
+       phys = drm_pci_alloc(obj->base.dev,
+                            roundup_pow_of_two(obj->base.size),
+                            roundup_pow_of_two(obj->base.size));
+       if (!phys)
+               return -ENOMEM;
+
+       vaddr = phys->vaddr;
+       for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+               struct page *page;
+               char *src;
+
+               page = shmem_read_mapping_page(mapping, i);
+               if (IS_ERR(page)) {
+                       err = PTR_ERR(page);
+                       goto err_phys;
+               }
+
+               src = kmap_atomic(page);
+               memcpy(vaddr, src, PAGE_SIZE);
+               drm_clflush_virt_range(vaddr, PAGE_SIZE);
+               kunmap_atomic(src);
+
+               put_page(page);
+               vaddr += PAGE_SIZE;
+       }
+
+       i915_gem_chipset_flush(to_i915(obj->base.dev));
+
+       st = kmalloc(sizeof(*st), GFP_KERNEL);
+       if (!st) {
+               err = -ENOMEM;
+               goto err_phys;
+       }
+
+       if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+               kfree(st);
+               err = -ENOMEM;
+               goto err_phys;
+       }
+
+       sg = st->sgl;
+       sg->offset = 0;
+       sg->length = obj->base.size;
+
+       sg_dma_address(sg) = phys->busaddr;
+       sg_dma_len(sg) = obj->base.size;
+
+       obj->phys_handle = phys;
+
+       __i915_gem_object_set_pages(obj, st, sg->length);
+
+       return 0;
+
+err_phys:
+       drm_pci_free(obj->base.dev, phys);
+
+       return err;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+                              struct sg_table *pages)
+{
+       __i915_gem_object_release_shmem(obj, pages, false);
+
+       if (obj->mm.dirty) {
+               struct address_space *mapping = obj->base.filp->f_mapping;
+               char *vaddr = obj->phys_handle->vaddr;
+               int i;
+
+               for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+                       struct page *page;
+                       char *dst;
+
+                       page = shmem_read_mapping_page(mapping, i);
+                       if (IS_ERR(page))
+                               continue;
+
+                       dst = kmap_atomic(page);
+                       drm_clflush_virt_range(vaddr, PAGE_SIZE);
+                       memcpy(dst, vaddr, PAGE_SIZE);
+                       kunmap_atomic(dst);
+
+                       set_page_dirty(page);
+                       if (obj->mm.madv == I915_MADV_WILLNEED)
+                               mark_page_accessed(page);
+                       put_page(page);
+                       vaddr += PAGE_SIZE;
+               }
+               obj->mm.dirty = false;
+       }
+
+       sg_free_table(pages);
+       kfree(pages);
+
+       drm_pci_free(obj->base.dev, obj->phys_handle);
+}
+
+static void
+i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+{
+       i915_gem_object_unpin_pages(obj);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+       .get_pages = i915_gem_object_get_pages_phys,
+       .put_pages = i915_gem_object_put_pages_phys,
+       .release = i915_gem_object_release_phys,
+};
+
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
+{
+       struct sg_table *pages;
+       int err;
+
+       if (align > obj->base.size)
+               return -EINVAL;
+
+       if (obj->ops == &i915_gem_phys_ops)
+               return 0;
+
+       if (obj->ops != &i915_gem_shmem_ops)
+               return -EINVAL;
+
+       err = i915_gem_object_unbind(obj);
+       if (err)
+               return err;
+
+       mutex_lock(&obj->mm.lock);
+
+       if (obj->mm.madv != I915_MADV_WILLNEED) {
+               err = -EFAULT;
+               goto err_unlock;
+       }
+
+       if (obj->mm.quirked) {
+               err = -EFAULT;
+               goto err_unlock;
+       }
+
+       if (obj->mm.mapping) {
+               err = -EBUSY;
+               goto err_unlock;
+       }
+
+       pages = __i915_gem_object_unset_pages(obj);
+
+       obj->ops = &i915_gem_phys_ops;
+
+       err = ____i915_gem_object_get_pages(obj);
+       if (err)
+               goto err_xfer;
+
+       /* Perma-pin (until release) the physical set of pages */
+       __i915_gem_object_pin_pages(obj);
+
+       if (!IS_ERR_OR_NULL(pages))
+               i915_gem_shmem_ops.put_pages(obj, pages);
+       mutex_unlock(&obj->mm.lock);
+       return 0;
+
+err_xfer:
+       obj->ops = &i915_gem_shmem_ops;
+       if (!IS_ERR_OR_NULL(pages)) {
+               unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+               __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+       }
+err_unlock:
+       mutex_unlock(&obj->mm.lock);
+       return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_phys.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index bccc0944d177..9a08aba6005e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -214,6 +214,26 @@ static int shmem_get_pages_gtt(struct drm_i915_gem_object *obj)
        return ret;
 }
 
+static void
+shmem_truncate(struct drm_i915_gem_object *obj)
+{
+       /*
+        * Our goal here is to return as much of the memory as
+        * is possible back to the system as we are called from OOM.
+        * To do this we must instruct the shmfs to drop all of its
+        * backing pages, *now*.
+        */
+       shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
+       obj->mm.madv = __I915_MADV_PURGED;
+       obj->mm.pages = ERR_PTR(-EFAULT);
+}
+
+static void
+shmem_invalidate(struct drm_i915_gem_object *obj)
+{
+       invalidate_mapping_pages(obj->base.filp->f_mapping, 0, (loff_t)-1);
+}
+
 void
 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                struct sg_table *pages,
@@ -345,6 +365,8 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
 
        .get_pages = shmem_get_pages_gtt,
        .put_pages = shmem_put_pages_gtt,
+       .truncate = shmem_truncate,
+       .invalidate = shmem_invalidate,
 
        .pwrite = shmem_pwrite_gtt,
 };
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
new file mode 100644
index 000000000000..b76b503b3999
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include "../../i915_selftest.h"
+
+#include "../../selftests/mock_gem_device.h"
+
+static int mock_phys_object(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct drm_i915_gem_object *obj;
+       int err;
+
+       /* Create an object and bind it to a contiguous set of physical pages,
+        * i.e. exercise the i915_gem_object_phys API.
+        */
+
+       obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+       if (IS_ERR(obj)) {
+               err = PTR_ERR(obj);
+               pr_err("i915_gem_object_create failed, err=%d\n", err);
+               goto out;
+       }
+
+       mutex_lock(&i915->drm.struct_mutex);
+       err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
+       mutex_unlock(&i915->drm.struct_mutex);
+       if (err) {
+               pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
+               goto out_obj;
+       }
+
+       if (obj->ops != &i915_gem_phys_ops) {
+               pr_err("i915_gem_object_attach_phys did not create a phys object\n");
+               err = -EINVAL;
+               goto out_obj;
+       }
+
+       if (!atomic_read(&obj->mm.pages_pin_count)) {
+               pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
+               err = -EINVAL;
+               goto out_obj;
+       }
+
+       /* Make the object dirty so that put_pages must do copy back the data */
+       mutex_lock(&i915->drm.struct_mutex);
+       err = i915_gem_object_set_to_gtt_domain(obj, true);
+       mutex_unlock(&i915->drm.struct_mutex);
+       if (err) {
+               pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
+                      err);
+               goto out_obj;
+       }
+
+out_obj:
+       i915_gem_object_put(obj);
+out:
+       return err;
+}
+
+int i915_gem_phys_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(mock_phys_object),
+       };
+       struct drm_i915_private *i915;
+       int err;
+
+       i915 = mock_gem_device();
+       if (!i915)
+               return -ENOMEM;
+
+       err = i915_subtests(tests, i915);
+
+       drm_dev_put(&i915->drm);
+       return err;
+}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ffbb8a10a714..2d9aaa45f950 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2886,8 +2886,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     const struct i915_ggtt_view *view,
                                     unsigned int flags);
 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
-int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
-                               int align);
 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5816e32fe30b..69a1f3ce19f5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,7 +26,6 @@
  */
 
 #include <drm/drm_vma_manager.h>
-#include <drm/drm_pci.h>
 #include <drm/i915_drm.h>
 #include <linux/dma-fence-array.h>
 #include <linux/kthread.h>
@@ -194,133 +193,6 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
-{
-       struct address_space *mapping = obj->base.filp->f_mapping;
-       drm_dma_handle_t *phys;
-       struct sg_table *st;
-       struct scatterlist *sg;
-       char *vaddr;
-       int i;
-       int err;
-
-       if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
-               return -EINVAL;
-
-       /* Always aligning to the object size, allows a single allocation
-        * to handle all possible callers, and given typical object sizes,
-        * the alignment of the buddy allocation will naturally match.
-        */
-       phys = drm_pci_alloc(obj->base.dev,
-                            roundup_pow_of_two(obj->base.size),
-                            roundup_pow_of_two(obj->base.size));
-       if (!phys)
-               return -ENOMEM;
-
-       vaddr = phys->vaddr;
-       for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
-               struct page *page;
-               char *src;
-
-               page = shmem_read_mapping_page(mapping, i);
-               if (IS_ERR(page)) {
-                       err = PTR_ERR(page);
-                       goto err_phys;
-               }
-
-               src = kmap_atomic(page);
-               memcpy(vaddr, src, PAGE_SIZE);
-               drm_clflush_virt_range(vaddr, PAGE_SIZE);
-               kunmap_atomic(src);
-
-               put_page(page);
-               vaddr += PAGE_SIZE;
-       }
-
-       i915_gem_chipset_flush(to_i915(obj->base.dev));
-
-       st = kmalloc(sizeof(*st), GFP_KERNEL);
-       if (!st) {
-               err = -ENOMEM;
-               goto err_phys;
-       }
-
-       if (sg_alloc_table(st, 1, GFP_KERNEL)) {
-               kfree(st);
-               err = -ENOMEM;
-               goto err_phys;
-       }
-
-       sg = st->sgl;
-       sg->offset = 0;
-       sg->length = obj->base.size;
-
-       sg_dma_address(sg) = phys->busaddr;
-       sg_dma_len(sg) = obj->base.size;
-
-       obj->phys_handle = phys;
-
-       __i915_gem_object_set_pages(obj, st, sg->length);
-
-       return 0;
-
-err_phys:
-       drm_pci_free(obj->base.dev, phys);
-
-       return err;
-}
-
-static void
-i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
-                              struct sg_table *pages)
-{
-       __i915_gem_object_release_shmem(obj, pages, false);
-
-       if (obj->mm.dirty) {
-               struct address_space *mapping = obj->base.filp->f_mapping;
-               char *vaddr = obj->phys_handle->vaddr;
-               int i;
-
-               for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
-                       struct page *page;
-                       char *dst;
-
-                       page = shmem_read_mapping_page(mapping, i);
-                       if (IS_ERR(page))
-                               continue;
-
-                       dst = kmap_atomic(page);
-                       drm_clflush_virt_range(vaddr, PAGE_SIZE);
-                       memcpy(dst, vaddr, PAGE_SIZE);
-                       kunmap_atomic(dst);
-
-                       set_page_dirty(page);
-                       if (obj->mm.madv == I915_MADV_WILLNEED)
-                               mark_page_accessed(page);
-                       put_page(page);
-                       vaddr += PAGE_SIZE;
-               }
-               obj->mm.dirty = false;
-       }
-
-       sg_free_table(pages);
-       kfree(pages);
-
-       drm_pci_free(obj->base.dev, obj->phys_handle);
-}
-
-static void
-i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
-{
-       i915_gem_object_unpin_pages(obj);
-}
-
-static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
-       .get_pages = i915_gem_object_get_pages_phys,
-       .put_pages = i915_gem_object_put_pages_phys,
-       .release = i915_gem_object_release_phys,
-};
-
 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
@@ -2020,11 +1892,6 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
        return err;
 }
 
-static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
-{
-       drm_gem_free_mmap_offset(&obj->base);
-}
-
 int
 i915_gem_mmap_gtt(struct drm_file *file,
                  struct drm_device *dev,
@@ -2070,134 +1937,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
        return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
-/* Immediately discard the backing storage */
-static void
-i915_gem_object_truncate(struct drm_i915_gem_object *obj)
-{
-       i915_gem_object_free_mmap_offset(obj);
-
-       if (obj->base.filp == NULL)
-               return;
-
-       /* Our goal here is to return as much of the memory as
-        * is possible back to the system as we are called from OOM.
-        * To do this we must instruct the shmfs to drop all of its
-        * backing pages, *now*.
-        */
-       shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
-       obj->mm.madv = __I915_MADV_PURGED;
-       obj->mm.pages = ERR_PTR(-EFAULT);
-}
-
-/* Try to discard unwanted pages */
-void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
-{
-       struct address_space *mapping;
-
-       lockdep_assert_held(&obj->mm.lock);
-       GEM_BUG_ON(i915_gem_object_has_pages(obj));
-
-       switch (obj->mm.madv) {
-       case I915_MADV_DONTNEED:
-               i915_gem_object_truncate(obj);
-       case __I915_MADV_PURGED:
-               return;
-       }
-
-       if (obj->base.filp == NULL)
-               return;
-
-       mapping = obj->base.filp->f_mapping,
-       invalidate_mapping_pages(mapping, 0, (loff_t)-1);
-}
-
-static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
-{
-       struct radix_tree_iter iter;
-       void __rcu **slot;
-
-       rcu_read_lock();
-       radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
-               radix_tree_delete(&obj->mm.get_page.radix, iter.index);
-       rcu_read_unlock();
-}
-
-static struct sg_table *
-__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
-{
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct sg_table *pages;
-
-       pages = fetch_and_zero(&obj->mm.pages);
-       if (IS_ERR_OR_NULL(pages))
-               return pages;
-
-       spin_lock(&i915->mm.obj_lock);
-       list_del(&obj->mm.link);
-       spin_unlock(&i915->mm.obj_lock);
-
-       if (obj->mm.mapping) {
-               void *ptr;
-
-               ptr = page_mask_bits(obj->mm.mapping);
-               if (is_vmalloc_addr(ptr))
-                       vunmap(ptr);
-               else
-                       kunmap(kmap_to_page(ptr));
-
-               obj->mm.mapping = NULL;
-       }
-
-       __i915_gem_object_reset_page_iter(obj);
-       obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
-
-       return pages;
-}
-
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-                               enum i915_mm_subclass subclass)
-{
-       struct sg_table *pages;
-       int ret;
-
-       if (i915_gem_object_has_pinned_pages(obj))
-               return -EBUSY;
-
-       GEM_BUG_ON(obj->bind_count);
-
-       /* May be called by shrinker from within get_pages() (on another bo) */
-       mutex_lock_nested(&obj->mm.lock, subclass);
-       if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
-               ret = -EBUSY;
-               goto unlock;
-       }
-
-       /*
-        * ->put_pages might need to allocate memory for the bit17 swizzle
-        * array, hence protect them from being reaped by removing them from gtt
-        * lists early.
-        */
-       pages = __i915_gem_object_unset_pages(obj);
-
-       /*
-        * XXX Temporary hijinx to avoid updating all backends to handle
-        * NULL pages. In the future, when we have more asynchronous
-        * get_pages backends we should be better able to handle the
-        * cancellation of the async task in a more uniform manner.
-        */
-       if (!pages && !i915_gem_object_needs_async_cancel(obj))
-               pages = ERR_PTR(-EINVAL);
-
-       if (!IS_ERR(pages))
-               obj->ops->put_pages(obj, pages);
-
-       ret = 0;
-unlock:
-       mutex_unlock(&obj->mm.lock);
-
-       return ret;
-}
-
 bool i915_sg_trim(struct sg_table *orig_st)
 {
        struct sg_table new_st;
@@ -2226,217 +1965,6 @@ bool i915_sg_trim(struct sg_table *orig_st)
        return true;
 }
 
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-                                struct sg_table *pages,
-                                unsigned int sg_page_sizes)
-{
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       unsigned long supported = INTEL_INFO(i915)->page_sizes;
-       int i;
-
-       lockdep_assert_held(&obj->mm.lock);
-
-       obj->mm.get_page.sg_pos = pages->sgl;
-       obj->mm.get_page.sg_idx = 0;
-
-       obj->mm.pages = pages;
-
-       if (i915_gem_object_is_tiled(obj) &&
-           i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
-               GEM_BUG_ON(obj->mm.quirked);
-               __i915_gem_object_pin_pages(obj);
-               obj->mm.quirked = true;
-       }
-
-       GEM_BUG_ON(!sg_page_sizes);
-       obj->mm.page_sizes.phys = sg_page_sizes;
-
-       /*
-        * Calculate the supported page-sizes which fit into the given
-        * sg_page_sizes. This will give us the page-sizes which we may be able
-        * to use opportunistically when later inserting into the GTT. For
-        * example if phys=2G, then in theory we should be able to use 1G, 2M,
-        * 64K or 4K pages, although in practice this will depend on a number of
-        * other factors.
-        */
-       obj->mm.page_sizes.sg = 0;
-       for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
-               if (obj->mm.page_sizes.phys & ~0u << i)
-                       obj->mm.page_sizes.sg |= BIT(i);
-       }
-       GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
-
-       spin_lock(&i915->mm.obj_lock);
-       list_add(&obj->mm.link, &i915->mm.unbound_list);
-       spin_unlock(&i915->mm.obj_lock);
-}
-
-static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
-{
-       int err;
-
-       if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
-               DRM_DEBUG("Attempting to obtain a purgeable object\n");
-               return -EFAULT;
-       }
-
-       err = obj->ops->get_pages(obj);
-       GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
-
-       return err;
-}
-
-/* Ensure that the associated pages are gathered from the backing storage
- * and pinned into our object. i915_gem_object_pin_pages() may be called
- * multiple times before they are released by a single call to
- * i915_gem_object_unpin_pages() - once the pages are no longer referenced
- * either as a result of memory pressure (reaping pages under the shrinker)
- * or as the object is itself released.
- */
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
-{
-       int err;
-
-       err = mutex_lock_interruptible(&obj->mm.lock);
-       if (err)
-               return err;
-
-       if (unlikely(!i915_gem_object_has_pages(obj))) {
-               GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
-
-               err = ____i915_gem_object_get_pages(obj);
-               if (err)
-                       goto unlock;
-
-               smp_mb__before_atomic();
-       }
-       atomic_inc(&obj->mm.pages_pin_count);
-
-unlock:
-       mutex_unlock(&obj->mm.lock);
-       return err;
-}
-
-/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
-                                enum i915_map_type type)
-{
-       unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
-       struct sg_table *sgt = obj->mm.pages;
-       struct sgt_iter sgt_iter;
-       struct page *page;
-       struct page *stack_pages[32];
-       struct page **pages = stack_pages;
-       unsigned long i = 0;
-       pgprot_t pgprot;
-       void *addr;
-
-       /* A single page can always be kmapped */
-       if (n_pages == 1 && type == I915_MAP_WB)
-               return kmap(sg_page(sgt->sgl));
-
-       if (n_pages > ARRAY_SIZE(stack_pages)) {
-               /* Too big for stack -- allocate temporary array instead */
-               pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
-               if (!pages)
-                       return NULL;
-       }
-
-       for_each_sgt_page(page, sgt_iter, sgt)
-               pages[i++] = page;
-
-       /* Check that we have the expected number of pages */
-       GEM_BUG_ON(i != n_pages);
-
-       switch (type) {
-       default:
-               MISSING_CASE(type);
-               /* fallthrough to use PAGE_KERNEL anyway */
-       case I915_MAP_WB:
-               pgprot = PAGE_KERNEL;
-               break;
-       case I915_MAP_WC:
-               pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
-               break;
-       }
-       addr = vmap(pages, n_pages, 0, pgprot);
-
-       if (pages != stack_pages)
-               kvfree(pages);
-
-       return addr;
-}
-
-/* get, pin, and map the pages of the object into kernel space */
-void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
-                             enum i915_map_type type)
-{
-       enum i915_map_type has_type;
-       bool pinned;
-       void *ptr;
-       int ret;
-
-       if (unlikely(!i915_gem_object_has_struct_page(obj)))
-               return ERR_PTR(-ENXIO);
-
-       ret = mutex_lock_interruptible(&obj->mm.lock);
-       if (ret)
-               return ERR_PTR(ret);
-
-       pinned = !(type & I915_MAP_OVERRIDE);
-       type &= ~I915_MAP_OVERRIDE;
-
-       if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-               if (unlikely(!i915_gem_object_has_pages(obj))) {
-                       GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
-
-                       ret = ____i915_gem_object_get_pages(obj);
-                       if (ret)
-                               goto err_unlock;
-
-                       smp_mb__before_atomic();
-               }
-               atomic_inc(&obj->mm.pages_pin_count);
-               pinned = false;
-       }
-       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-
-       ptr = page_unpack_bits(obj->mm.mapping, &has_type);
-       if (ptr && has_type != type) {
-               if (pinned) {
-                       ret = -EBUSY;
-                       goto err_unpin;
-               }
-
-               if (is_vmalloc_addr(ptr))
-                       vunmap(ptr);
-               else
-                       kunmap(kmap_to_page(ptr));
-
-               ptr = obj->mm.mapping = NULL;
-       }
-
-       if (!ptr) {
-               ptr = i915_gem_object_map(obj, type);
-               if (!ptr) {
-                       ret = -ENOMEM;
-                       goto err_unpin;
-               }
-
-               obj->mm.mapping = page_pack_bits(ptr, type);
-       }
-
-out_unlock:
-       mutex_unlock(&obj->mm.lock);
-       return ptr;
-
-err_unpin:
-       atomic_dec(&obj->mm.pages_pin_count);
-err_unlock:
-       ptr = ERR_PTR(ret);
-       goto out_unlock;
-}
-
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
@@ -4449,219 +3977,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
        }
 }
 
-struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
-                      unsigned int n,
-                      unsigned int *offset)
-{
-       struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
-       struct scatterlist *sg;
-       unsigned int idx, count;
-
-       might_sleep();
-       GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
-       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
-       /* As we iterate forward through the sg, we record each entry in a
-        * radixtree for quick repeated (backwards) lookups. If we have seen
-        * this index previously, we will have an entry for it.
-        *
-        * Initial lookup is O(N), but this is amortized to O(1) for
-        * sequential page access (where each new request is consecutive
-        * to the previous one). Repeated lookups are O(lg(obj->base.size)),
-        * i.e. O(1) with a large constant!
-        */
-       if (n < READ_ONCE(iter->sg_idx))
-               goto lookup;
-
-       mutex_lock(&iter->lock);
-
-       /* We prefer to reuse the last sg so that repeated lookup of this
-        * (or the subsequent) sg are fast - comparing against the last
-        * sg is faster than going through the radixtree.
-        */
-
-       sg = iter->sg_pos;
-       idx = iter->sg_idx;
-       count = __sg_page_count(sg);
-
-       while (idx + count <= n) {
-               void *entry;
-               unsigned long i;
-               int ret;
-
-               /* If we cannot allocate and insert this entry, or the
-                * individual pages from this range, cancel updating the
-                * sg_idx so that on this lookup we are forced to linearly
-                * scan onwards, but on future lookups we will try the
-                * insertion again (in which case we need to be careful of
-                * the error return reporting that we have already inserted
-                * this index).
-                */
-               ret = radix_tree_insert(&iter->radix, idx, sg);
-               if (ret && ret != -EEXIST)
-                       goto scan;
-
-               entry = xa_mk_value(idx);
-               for (i = 1; i < count; i++) {
-                       ret = radix_tree_insert(&iter->radix, idx + i, entry);
-                       if (ret && ret != -EEXIST)
-                               goto scan;
-               }
-
-               idx += count;
-               sg = ____sg_next(sg);
-               count = __sg_page_count(sg);
-       }
-
-scan:
-       iter->sg_pos = sg;
-       iter->sg_idx = idx;
-
-       mutex_unlock(&iter->lock);
-
-       if (unlikely(n < idx)) /* insertion completed by another thread */
-               goto lookup;
-
-       /* In case we failed to insert the entry into the radixtree, we need
-        * to look beyond the current sg.
-        */
-       while (idx + count <= n) {
-               idx += count;
-               sg = ____sg_next(sg);
-               count = __sg_page_count(sg);
-       }
-
-       *offset = n - idx;
-       return sg;
-
-lookup:
-       rcu_read_lock();
-
-       sg = radix_tree_lookup(&iter->radix, n);
-       GEM_BUG_ON(!sg);
-
-       /* If this index is in the middle of multi-page sg entry,
-        * the radix tree will contain a value entry that points
-        * to the start of that range. We will return the pointer to
-        * the base page and the offset of this page within the
-        * sg entry's range.
-        */
-       *offset = 0;
-       if (unlikely(xa_is_value(sg))) {
-               unsigned long base = xa_to_value(sg);
-
-               sg = radix_tree_lookup(&iter->radix, base);
-               GEM_BUG_ON(!sg);
-
-               *offset = n - base;
-       }
-
-       rcu_read_unlock();
-
-       return sg;
-}
-
-struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
-{
-       struct scatterlist *sg;
-       unsigned int offset;
-
-       GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
-
-       sg = i915_gem_object_get_sg(obj, n, &offset);
-       return nth_page(sg_page(sg), offset);
-}
-
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
-                              unsigned int n)
-{
-       struct page *page;
-
-       page = i915_gem_object_get_page(obj, n);
-       if (!obj->mm.dirty)
-               set_page_dirty(page);
-
-       return page;
-}
-
-dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
-                               unsigned long n)
-{
-       struct scatterlist *sg;
-       unsigned int offset;
-
-       sg = i915_gem_object_get_sg(obj, n, &offset);
-       return sg_dma_address(sg) + (offset << PAGE_SHIFT);
-}
-
-int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
-{
-       struct sg_table *pages;
-       int err;
-
-       if (align > obj->base.size)
-               return -EINVAL;
-
-       if (obj->ops == &i915_gem_phys_ops)
-               return 0;
-
-       if (obj->ops != &i915_gem_shmem_ops)
-               return -EINVAL;
-
-       err = i915_gem_object_unbind(obj);
-       if (err)
-               return err;
-
-       mutex_lock(&obj->mm.lock);
-
-       if (obj->mm.madv != I915_MADV_WILLNEED) {
-               err = -EFAULT;
-               goto err_unlock;
-       }
-
-       if (obj->mm.quirked) {
-               err = -EFAULT;
-               goto err_unlock;
-       }
-
-       if (obj->mm.mapping) {
-               err = -EBUSY;
-               goto err_unlock;
-       }
-
-       pages = __i915_gem_object_unset_pages(obj);
-
-       obj->ops = &i915_gem_phys_ops;
-
-       err = ____i915_gem_object_get_pages(obj);
-       if (err)
-               goto err_xfer;
-
-       /* Perma-pin (until release) the physical set of pages */
-       __i915_gem_object_pin_pages(obj);
-
-       if (!IS_ERR_OR_NULL(pages))
-               i915_gem_shmem_ops.put_pages(obj, pages);
-       mutex_unlock(&obj->mm.lock);
-       return 0;
-
-err_xfer:
-       obj->ops = &i915_gem_shmem_ops;
-       if (!IS_ERR_OR_NULL(pages)) {
-               unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
-
-               __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
-       }
-err_unlock:
-       mutex_unlock(&obj->mm.lock);
-       return err;
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/scatterlist.c"
 #include "selftests/mock_gem_device.c"
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index d5bd33c867c0..ab6ae9ecf03b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -48,59 +48,6 @@ static int igt_gem_object(void *arg)
        return err;
 }
 
-static int igt_phys_object(void *arg)
-{
-       struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_object *obj;
-       int err;
-
-       /* Create an object and bind it to a contiguous set of physical pages,
-        * i.e. exercise the i915_gem_object_phys API.
-        */
-
-       obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
-       if (IS_ERR(obj)) {
-               err = PTR_ERR(obj);
-               pr_err("i915_gem_object_create failed, err=%d\n", err);
-               goto out;
-       }
-
-       mutex_lock(&i915->drm.struct_mutex);
-       err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
-       mutex_unlock(&i915->drm.struct_mutex);
-       if (err) {
-               pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
-               goto out_obj;
-       }
-
-       if (obj->ops != &i915_gem_phys_ops) {
-               pr_err("i915_gem_object_attach_phys did not create a phys object\n");
-               err = -EINVAL;
-               goto out_obj;
-       }
-
-       if (!atomic_read(&obj->mm.pages_pin_count)) {
-               pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
-               err = -EINVAL;
-               goto out_obj;
-       }
-
-       /* Make the object dirty so that put_pages must do copy back the data */
-       mutex_lock(&i915->drm.struct_mutex);
-       err = i915_gem_object_set_to_gtt_domain(obj, true);
-       mutex_unlock(&i915->drm.struct_mutex);
-       if (err) {
-               pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
-                      err);
-               goto out_obj;
-       }
-
-out_obj:
-       i915_gem_object_put(obj);
-out:
-       return err;
-}
-
 static int igt_gem_huge(void *arg)
 {
        const unsigned int nreal = 509; /* just to be awkward */
@@ -632,7 +579,6 @@ int i915_gem_object_mock_selftests(void)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_gem_object),
-               SUBTEST(igt_phys_object),
        };
        struct drm_i915_private *i915;
        int err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 88e5ab586337..510eb176bb2c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -18,6 +18,7 @@ selftest(engine, intel_engine_cs_mock_selftests)
 selftest(timelines, i915_timeline_mock_selftests)
 selftest(requests, i915_request_mock_selftests)
 selftest(objects, i915_gem_object_mock_selftests)
+selftest(phys, i915_gem_phys_mock_selftests)
 selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
 selftest(vma, i915_vma_mock_selftests)
 selftest(evict, i915_gem_evict_mock_selftests)
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to