Fix a couple of comments that had become (partially) obsolete or incorrect
with the gpuvm conversion.

Signed-off-by: Rob Clark <robin.cl...@oss.qualcomm.com>
---
 drivers/gpu/drm/msm/msm_gem.h     | 2 +-
 drivers/gpu/drm/msm/msm_gem_vma.c | 5 +----
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 88239da1cd72..751c3b4965bc 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -100,7 +100,7 @@ struct msm_gem_vm {
         *
         * Only used for kernel managed VMs, unused for user managed VMs.
         *
-        * Protected by @mm_lock.
+        * Protected by vm lock.  See msm_gem_lock_vm_and_obj(), for ex.
         */
        struct drm_mm mm;
 
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index dc54c693b28d..d1f5bb2e0a16 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -319,13 +319,10 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
                mutex_lock(&vm->mmu_lock);
 
        /*
-        * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
+        * NOTE: if not using pgtable preallocation, we cannot hold
         * a lock across map/unmap which is also used in the job_run()
         * path, as this can cause deadlock in job_run() vs shrinker/
         * reclaim.
-        *
-        * Revisit this if we can come up with a scheme to pre-alloc pages
-        * for the pgtable in map/unmap ops.
         */
        ret = vm_map_op(vm, &(struct msm_vm_map_op){
                .iova = vma->va.addr,
-- 
2.50.1
