On Thu, Oct 30, 2014 at 04:03:23PM -0700, armin.c.re...@intel.com wrote:
> From: Armin Reese <armin.c.re...@intel.com>
> 
> The new 'i915_context_dump' file generates a hex dump of the
> entire logical context DRM object.  It is useful for
> validating the contents of the default context set up by
> the golden state batch buffer.

This patch needs a changelog briefly describing what has changed in
each new version you send.

> 
> Signed-off-by: Armin Reese <armin.c.re...@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_debugfs.c | 92 
> ++++++++++++++++++++++++++++++-------
>  1 file changed, 76 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c 
> b/drivers/gpu/drm/i915/i915_debugfs.c
> index a79f83c..7807c14 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -46,6 +46,11 @@ enum {
>       PINNED_LIST,
>  };
>  
> +enum {
> +     LRC_CONTEXT_DUMP,

I think this should be named something like REG_STATE_CONTEXT_DUMP.
That better describes what we're actually dumping in this case.

> +     FULL_CONTEXT_DUMP,
> +};
> +
>  static const char *yesno(int v)
>  {
>       return v ? "yes" : "no";
> @@ -120,6 +125,52 @@ static inline const char *get_global_flag(struct 
> drm_i915_gem_object *obj)
>  }
>  
>  static void
> +dump_32_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
> +{
> +     struct page *page;
> +     size_t size; /* In bytes */
> +     size_t start_dword, end_dword, end_row_dword; /* In uint32_t offsets */
> +     int i, num_pages;
> +     uint32_t *obj_ptr;
> +
> +     if (i915_gem_object_get_pages(obj))
> +             return;
> +
> +     size = obj->base.size; /* In bytes */
> +     num_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
> +
> +     for (i = 0; i < num_pages; i++) {
> +             page = i915_gem_object_get_page(obj, i);

I believe the preferred way to walk the list of pages is:

        struct sg_page_iter sg_iter;
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
                page = sg_page_iter_page(&sg_iter);

If we make this change, num_pages is no longer used.

> +             drm_clflush_pages(&page, 1);
> +
> +             start_dword = (i * PAGE_SIZE) / sizeof(uint32_t);
> +             end_dword = start_dword + (PAGE_SIZE / sizeof(uint32_t));
> +             if ((end_dword * sizeof(uint32_t)) > size)
> +                     end_dword = size / sizeof(uint32_t);

The size of a GEM object is always a multiple of the page size, so you
can assume that it's safe to just dump the entirety of each page in the
object's page list. I think you can also use this fact to simplify the
looping below. And you might consider doing obj_ptr++ instead of the
bitwise 'and'.

Brad

> +
> +             obj_ptr = (uint32_t *)kmap_atomic(page);
> +
> +             while (start_dword < end_dword) {
> +                     end_row_dword = start_dword + 8;
> +                     if (end_row_dword > end_dword)
> +                             end_row_dword = end_dword;
> +                     seq_printf(m, "0x%08lx:  ",
> +                                     start_dword * sizeof(uint32_t));
> +                     while (start_dword < end_row_dword) {
> +                             seq_printf(m, "0x%08x ",
> +                                             *(obj_ptr +
> +                                             (start_dword &
> +                                              (PAGE_SIZE - 1))));
> +                             start_dword++;
> +                     }
> +                     seq_puts(m, "\n");
> +             }
> +
> +             kunmap_atomic(obj_ptr);
> +     }
> +}
> +
> +static void
>  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>  {
>       struct i915_vma *vma;
> @@ -1773,9 +1824,10 @@ static int i915_context_status(struct seq_file *m, 
> void *unused)
>       return 0;
>  }
>  
> -static int i915_dump_lrc(struct seq_file *m, void *unused)
> +static int i915_dump_lrc(struct seq_file *m, void *data)
>  {
>       struct drm_info_node *node = (struct drm_info_node *) m->private;
> +     uintptr_t dump_flag = (uintptr_t) node->info_ent->data;
>       struct drm_device *dev = node->minor->dev;
>       struct drm_i915_private *dev_priv = dev->dev_private;
>       struct intel_engine_cs *ring;
> @@ -1795,24 +1847,31 @@ static int i915_dump_lrc(struct seq_file *m, void 
> *unused)
>               for_each_ring(ring, dev_priv, i) {
>                       struct drm_i915_gem_object *ctx_obj = 
> ctx->engine[i].state;
>  
> -                     if (ring->default_context == ctx)
> +                     if ((ring->default_context == ctx) &&
> +                             (dump_flag == LRC_CONTEXT_DUMP))
>                               continue;
>  
>                       if (ctx_obj) {
> -                             struct page *page = 
> i915_gem_object_get_page(ctx_obj, 1);
> -                             uint32_t *reg_state = kmap_atomic(page);
> -                             int j;
> -
> -                             seq_printf(m, "CONTEXT: %s %u\n", ring->name,
> -                                             
> intel_execlists_ctx_id(ctx_obj));
> -
> -                             for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 
> 4) {
> -                                     seq_printf(m, "\t[0x%08lx] 0x%08x 
> 0x%08x 0x%08x 0x%08x\n",
> -                                     i915_gem_obj_ggtt_offset(ctx_obj) + 
> 4096 + (j * 4),
> -                                     reg_state[j], reg_state[j + 1],
> -                                     reg_state[j + 2], reg_state[j + 3]);
> +                             if (dump_flag == LRC_CONTEXT_DUMP) {
> +                                     struct page *page = 
> i915_gem_object_get_page(ctx_obj, 1);
> +                                     uint32_t *reg_state = kmap_atomic(page);
> +                                     int j;
> +
> +                                     seq_printf(m, "CONTEXT: %s %u\n", 
> ring->name,
> +                                                     
> intel_execlists_ctx_id(ctx_obj));
> +
> +                                     for (j = 0; j < 0x600 / sizeof(u32) / 
> 4; j += 4) {
> +                                             seq_printf(m, "\t[0x%08lx] 
> 0x%08x 0x%08x 0x%08x 0x%08x\n",
> +                                             
> i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
> +                                             reg_state[j], reg_state[j + 1],
> +                                             reg_state[j + 2], reg_state[j + 
> 3]);
> +                                     }
> +                                     kunmap_atomic(reg_state);
> +                             } else if (dump_flag == FULL_CONTEXT_DUMP) {
> +                                     seq_printf(m, "Full Context Dump: %s 
> %u\n", ring->name,
> +                                                     
> intel_execlists_ctx_id(ctx_obj));
> +                                     dump_32_obj(m, ctx_obj);
>                               }
> -                             kunmap_atomic(reg_state);
>  
>                               seq_putc(m, '\n');
>                       }
> @@ -4187,7 +4246,8 @@ static const struct drm_info_list i915_debugfs_list[] = 
> {
>       {"i915_opregion", i915_opregion, 0},
>       {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
>       {"i915_context_status", i915_context_status, 0},
> -     {"i915_dump_lrc", i915_dump_lrc, 0},
> +     {"i915_context_dump", i915_dump_lrc, 0, (void *)FULL_CONTEXT_DUMP},
> +     {"i915_dump_lrc", i915_dump_lrc, 0, (void *)LRC_CONTEXT_DUMP},
>       {"i915_execlists", i915_execlists, 0},
>       {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
>       {"i915_swizzle_info", i915_swizzle_info, 0},
> -- 
> 1.9.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to