From: Rob Clark <robdcl...@chromium.org>

It is standing in the way of drm_gpuvm / VM_BIND support.  Not to
mention it is frequently broken and rarely tested.  And I think it is
only needed for a 10-year-old, not-quite-upstream SoC (msm8974).

Maybe we can add support back in later, but I'm doubtful.

Signed-off-by: Rob Clark <robdcl...@chromium.org>
---
 drivers/gpu/drm/msm/msm_drv.c        | 117 +-----------------------
 drivers/gpu/drm/msm/msm_drv.h        |  11 ---
 drivers/gpu/drm/msm/msm_gem.c        | 131 +++------------------------
 drivers/gpu/drm/msm/msm_gem.h        |   5 -
 drivers/gpu/drm/msm/msm_gem_submit.c |   5 -
 5 files changed, 13 insertions(+), 256 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 88cd1ed59d48..a5a95a53d2c8 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -46,12 +46,6 @@
 #define MSM_VERSION_MINOR      12
 #define MSM_VERSION_PATCHLEVEL 0
 
-static void msm_deinit_vram(struct drm_device *ddev);
-
-static char *vram = "16m";
-MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
-module_param(vram, charp, 0);
-
 bool dumpstate;
 MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
 module_param(dumpstate, bool, 0600);
@@ -97,8 +91,6 @@ static int msm_drm_uninit(struct device *dev)
        if (priv->kms)
                msm_drm_kms_uninit(dev);
 
-       msm_deinit_vram(ddev);
-
        component_unbind_all(dev, ddev);
 
        ddev->dev_private = NULL;
@@ -109,107 +101,6 @@ static int msm_drm_uninit(struct device *dev)
        return 0;
 }
 
-bool msm_use_mmu(struct drm_device *dev)
-{
-       struct msm_drm_private *priv = dev->dev_private;
-
-       /*
-        * a2xx comes with its own MMU
-        * On other platforms IOMMU can be declared specified either for the
-        * MDP/DPU device or for its parent, MDSS device.
-        */
-       return priv->is_a2xx ||
-               device_iommu_mapped(dev->dev) ||
-               device_iommu_mapped(dev->dev->parent);
-}
-
-static int msm_init_vram(struct drm_device *dev)
-{
-       struct msm_drm_private *priv = dev->dev_private;
-       struct device_node *node;
-       unsigned long size = 0;
-       int ret = 0;
-
-       /* In the device-tree world, we could have a 'memory-region'
-        * phandle, which gives us a link to our "vram".  Allocating
-        * is all nicely abstracted behind the dma api, but we need
-        * to know the entire size to allocate it all in one go. There
-        * are two cases:
-        *  1) device with no IOMMU, in which case we need exclusive
-        *     access to a VRAM carveout big enough for all gpu
-        *     buffers
-        *  2) device with IOMMU, but where the bootloader puts up
-        *     a splash screen.  In this case, the VRAM carveout
-        *     need only be large enough for fbdev fb.  But we need
-        *     exclusive access to the buffer to avoid the kernel
-        *     using those pages for other purposes (which appears
-        *     as corruption on screen before we have a chance to
-        *     load and do initial modeset)
-        */
-
-       node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
-       if (node) {
-               struct resource r;
-               ret = of_address_to_resource(node, 0, &r);
-               of_node_put(node);
-               if (ret)
-                       return ret;
-               size = r.end - r.start + 1;
-               DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
-
-               /* if we have no IOMMU, then we need to use carveout allocator.
-                * Grab the entire DMA chunk carved out in early startup in
-                * mach-msm:
-                */
-       } else if (!msm_use_mmu(dev)) {
-               DRM_INFO("using %s VRAM carveout\n", vram);
-               size = memparse(vram, NULL);
-       }
-
-       if (size) {
-               unsigned long attrs = 0;
-               void *p;
-
-               priv->vram.size = size;
-
-               drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
-               spin_lock_init(&priv->vram.lock);
-
-               attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
-               attrs |= DMA_ATTR_WRITE_COMBINE;
-
-               /* note that for no-kernel-mapping, the vaddr returned
-                * is bogus, but non-null if allocation succeeded:
-                */
-               p = dma_alloc_attrs(dev->dev, size,
-                               &priv->vram.paddr, GFP_KERNEL, attrs);
-               if (!p) {
-                       DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
-                       priv->vram.paddr = 0;
-                       return -ENOMEM;
-               }
-
-               DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
-                               (uint32_t)priv->vram.paddr,
-                               (uint32_t)(priv->vram.paddr + size));
-       }
-
-       return ret;
-}
-
-static void msm_deinit_vram(struct drm_device *ddev)
-{
-       struct msm_drm_private *priv = ddev->dev_private;
-       unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
-
-       if (!priv->vram.paddr)
-               return;
-
-       drm_mm_takedown(&priv->vram.mm);
-       dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
-                       attrs);
-}
-
 static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 {
        struct msm_drm_private *priv = dev_get_drvdata(dev);
@@ -256,16 +147,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
                        goto err_destroy_wq;
        }
 
-       ret = msm_init_vram(ddev);
-       if (ret)
-               goto err_destroy_wq;
-
        dma_set_max_seg_size(dev, UINT_MAX);
 
        /* Bind all our sub-components: */
        ret = component_bind_all(dev, ddev);
        if (ret)
-               goto err_deinit_vram;
+               goto err_destroy_wq;
 
 //     ret = msm_gem_shrinker_init(ddev);
        if (ret)
@@ -302,8 +189,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 
        return ret;
 
-err_deinit_vram:
-       msm_deinit_vram(ddev);
 err_destroy_wq:
        destroy_workqueue(priv->wq);
 err_put_dev:
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index ce1ef981a309..20a0f8f23490 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -183,17 +183,6 @@ struct msm_drm_private {
 
        struct msm_drm_thread event_thread[MAX_CRTCS];
 
-       /* VRAM carveout, used when no IOMMU: */
-       struct {
-               unsigned long size;
-               dma_addr_t paddr;
-               /* NOTE: mm managed at the page level, size is in # of pages
-                * and position mm_node->start is in # of pages:
-                */
-               struct drm_mm mm;
-               spinlock_t lock; /* Protects drm_mm node allocation/removal */
-       } vram;
-
        struct notifier_block vmap_notifier;
        struct shrinker *shrinker;
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index c29367239283..f42bfa70502a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -17,24 +17,8 @@
 #include <trace/events/gpu_mem.h>
 
 #include "msm_drv.h"
-#include "msm_fence.h"
 #include "msm_gem.h"
 #include "msm_gpu.h"
-#include "msm_mmu.h"
-
-static dma_addr_t physaddr(struct drm_gem_object *obj)
-{
-       struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       struct msm_drm_private *priv = obj->dev->dev_private;
-       return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
-                       priv->vram.paddr;
-}
-
-static bool use_pages(struct drm_gem_object *obj)
-{
-       struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       return !msm_obj->vram_node;
-}
 
 static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
 {
@@ -135,36 +119,6 @@ static void update_lru(struct drm_gem_object *obj)
        mutex_unlock(&priv->lru.lock);
 }
 
-/* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
-{
-       struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       struct msm_drm_private *priv = obj->dev->dev_private;
-       dma_addr_t paddr;
-       struct page **p;
-       int ret, i;
-
-       p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-       if (!p)
-               return ERR_PTR(-ENOMEM);
-
-       spin_lock(&priv->vram.lock);
-       ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
-       spin_unlock(&priv->vram.lock);
-       if (ret) {
-               kvfree(p);
-               return ERR_PTR(ret);
-       }
-
-       paddr = physaddr(obj);
-       for (i = 0; i < npages; i++) {
-               p[i] = pfn_to_page(__phys_to_pfn(paddr));
-               paddr += PAGE_SIZE;
-       }
-
-       return p;
-}
-
 static struct page **get_pages(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -176,10 +130,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
                struct page **p;
                int npages = obj->size >> PAGE_SHIFT;
 
-               if (use_pages(obj))
-                       p = drm_gem_get_pages(obj);
-               else
-                       p = get_pages_vram(obj, npages);
+               p = drm_gem_get_pages(obj);
 
                if (IS_ERR(p)) {
                        DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
@@ -212,18 +163,6 @@ static struct page **get_pages(struct drm_gem_object *obj)
        return msm_obj->pages;
 }
 
-static void put_pages_vram(struct drm_gem_object *obj)
-{
-       struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       struct msm_drm_private *priv = obj->dev->dev_private;
-
-       spin_lock(&priv->vram.lock);
-       drm_mm_remove_node(msm_obj->vram_node);
-       spin_unlock(&priv->vram.lock);
-
-       kvfree(msm_obj->pages);
-}
-
 static void put_pages(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -244,10 +183,7 @@ static void put_pages(struct drm_gem_object *obj)
 
                update_device_mem(obj->dev->dev_private, -obj->size);
 
-               if (use_pages(obj))
-                       drm_gem_put_pages(obj, msm_obj->pages, true, false);
-               else
-                       put_pages_vram(obj);
+               drm_gem_put_pages(obj, msm_obj->pages, true, false);
 
                msm_obj->pages = NULL;
                update_lru(obj);
@@ -1207,19 +1143,10 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
        struct drm_gem_object *obj = NULL;
-       bool use_vram = false;
        int ret;
 
        size = PAGE_ALIGN(size);
 
-       if (!msm_use_mmu(dev))
-               use_vram = true;
-       else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
-               use_vram = true;
-
-       if (GEM_WARN_ON(use_vram && !priv->vram.size))
-               return ERR_PTR(-EINVAL);
-
        /* Disallow zero sized objects as they make the underlying
         * infrastructure grumpy
         */
@@ -1232,44 +1159,16 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
 
        msm_obj = to_msm_bo(obj);
 
-       if (use_vram) {
-               struct msm_gem_vma *vma;
-               struct page **pages;
-
-               drm_gem_private_object_init(dev, obj, size);
-
-               msm_gem_lock(obj);
-
-               vma = add_vma(obj, NULL);
-               msm_gem_unlock(obj);
-               if (IS_ERR(vma)) {
-                       ret = PTR_ERR(vma);
-                       goto fail;
-               }
-
-               to_msm_bo(obj)->vram_node = &vma->node;
-
-               msm_gem_lock(obj);
-               pages = get_pages(obj);
-               msm_gem_unlock(obj);
-               if (IS_ERR(pages)) {
-                       ret = PTR_ERR(pages);
-                       goto fail;
-               }
-
-               vma->iova = physaddr(obj);
-       } else {
-               ret = drm_gem_object_init(dev, obj, size);
-               if (ret)
-                       goto fail;
-               /*
-                * Our buffers are kept pinned, so allocating them from the
-                * MOVABLE zone is a really bad idea, and conflicts with CMA.
-                * See comments above new_inode() why this is required _and_
-                * expected if you're going to pin these pages.
-                */
-               mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
-       }
+       ret = drm_gem_object_init(dev, obj, size);
+       if (ret)
+               goto fail;
+       /*
+        * Our buffers are kept pinned, so allocating them from the
+        * MOVABLE zone is a really bad idea, and conflicts with CMA.
+        * See comments above new_inode() why this is required _and_
+        * expected if you're going to pin these pages.
+        */
+       mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
 
        drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
 
@@ -1297,12 +1196,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
        uint32_t size;
        int ret, npages;
 
-       /* if we don't have IOMMU, don't bother pretending we can import: */
-       if (!msm_use_mmu(dev)) {
-               DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
-               return ERR_PTR(-EINVAL);
-       }
-
        size = PAGE_ALIGN(dmabuf->size);
 
        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index d2f39a371373..c16b11182831 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -102,11 +102,6 @@ struct msm_gem_object {
 
        struct list_head vmas;    /* list of msm_gem_vma */
 
-       /* For physically contiguous buffers.  Used when we don't have
-        * an IOMMU.  Also used for stolen/splashscreen buffer.
-        */
-       struct drm_mm_node *vram_node;
-
        char name[32]; /* Identifier to print for the debugfs files */
 
        /* userspace metadata backchannel */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 30a281aa1353..235ad4be7fd0 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -659,11 +659,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        if (args->pad)
                return -EINVAL;
 
-       if (unlikely(!ctx->vm) && !capable(CAP_SYS_RAWIO)) {
-       DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
-               return -EPERM;
-       }
-
        /* for now, we just have 3d pipe.. eventually this would need to
         * be more clever to dispatch to appropriate gpu module:
         */
-- 
2.47.1

Reply via email to