From: Rob Clark <robdcl...@chromium.org>

Move the msm_context member documentation inline and add the missing '@'
prefixes so kernel-doc picks the members up properly.  No functional change.

Signed-off-by: Rob Clark <robdcl...@chromium.org>
---
 drivers/gpu/drm/msm/msm_gpu.h | 44 +++++++++++++++++++++++------------
 1 file changed, 29 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 957d6fb3469d..c699ce0c557b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -348,25 +348,39 @@ struct msm_gpu_perfcntr {
 
 /**
  * struct msm_context - per-drm_file context
- *
- * @queuelock:    synchronizes access to submitqueues list
- * @submitqueues: list of &msm_gpu_submitqueue created by userspace
- * @queueid:      counter incremented each time a submitqueue is created,
- *                used to assign &msm_gpu_submitqueue.id
- * @aspace:       the per-process GPU address-space
- * @ref:          reference count
- * @seqno:        unique per process seqno
  */
 struct msm_context {
+       /** @queuelock: synchronizes access to submitqueues list */
        rwlock_t queuelock;
+
+       /** @submitqueues: list of &msm_gpu_submitqueue created by userspace */
        struct list_head submitqueues;
+
+       /**
+        * @queueid:
+        *
+        * Counter incremented each time a submitqueue is created, used to
+        * assign &msm_gpu_submitqueue.id
+        */
        int queueid;
+
+       /** @aspace: the per-process GPU address-space */
        struct msm_gem_address_space *aspace;
+
+       /** @ref: the reference count */
        struct kref ref;
+
+       /**
+        * @seqno:
+        *
+        * A unique per-process sequence number.  Used to detect context
+        * switches without relying on keeping a potentially dangling
+        * pointer to the previous context.
+        */
        int seqno;
 
        /**
-        * sysprof:
+        * @sysprof:
         *
         * The value of MSM_PARAM_SYSPROF set by userspace.  This is
         * intended to be used by system profiling tools like Mesa's
@@ -384,21 +398,21 @@ struct msm_context {
        int sysprof;
 
        /**
-        * comm: Overridden task comm, see MSM_PARAM_COMM
+        * @comm: Overridden task comm, see MSM_PARAM_COMM
         *
         * Accessed under msm_gpu::lock
         */
        char *comm;
 
        /**
-        * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
+        * @cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
         *
         * Accessed under msm_gpu::lock
         */
        char *cmdline;
 
        /**
-        * elapsed:
+        * @elapsed:
         *
         * The total (cumulative) elapsed time GPU was busy with rendering
         * from this context in ns.
@@ -406,7 +420,7 @@ struct msm_context {
        uint64_t elapsed_ns;
 
        /**
-        * cycles:
+        * @cycles:
         *
         * The total (cumulative) GPU cycles elapsed attributed to this
         * context.
@@ -414,7 +428,7 @@ struct msm_context {
        uint64_t cycles;
 
        /**
-        * entities:
+        * @entities:
         *
         * Table of per-priority-level sched entities used by submitqueues
         * associated with this &drm_file.  Because some userspace apps
@@ -427,7 +441,7 @@ struct msm_context {
        struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
 
        /**
-        * ctx_mem:
+        * @ctx_mem:
         *
         * Total amount of memory of GEM buffers with handles attached for
         * this context.
-- 
2.48.1
