tree:   git://anongit.freedesktop.org/drm-intel for-linux-next
head:   faf654864b25f4ca4efd416145d37b794c0b805f
commit: faf654864b25f4ca4efd416145d37b794c0b805f [2/2] drm/i915: Unify uC variable types to avoid flooding checkpatch.pl
config: i386-randconfig-x003-201740 (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
        git checkout faf654864b25f4ca4efd416145d37b794c0b805f
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors (new ones prefixed by >>):

   drivers/gpu//drm/i915/i915_guc_submission.c: In function 'guc_stage_desc_init':
>> drivers/gpu//drm/i915/i915_guc_submission.c:391:25: error: cast from pointer to integer of different size [-Werror=pointer-to-int-cast]
     desc->db_trigger_cpu = (u64)__get_doorbell(client);
                            ^
   drivers/gpu//drm/i915/i915_guc_submission.c:397:23: error: cast from pointer to integer of different size [-Werror=pointer-to-int-cast]
     desc->desc_private = (u64)client;
                          ^
   cc1: all warnings being treated as errors
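
For reference, the cast itself is what trips up this 32-bit build: on i386 a pointer is 4 bytes, so converting it straight to u64 changes width and gcc reports -Werror=pointer-to-int-cast. A minimal standalone sketch (illustration only, not code from the patch; the helper name is made up) of the usual idiom of going through uintptr_t first:

        /* Illustration only -- not part of the i915 patch. */
        #include <stdint.h>

        typedef uint64_t u64;

        static u64 example_ptr_to_u64(void *p)
        {
                /*
                 * Convert at native pointer width first, then widen to u64;
                 * this avoids -Wpointer-to-int-cast on 32-bit targets.
                 */
                return (u64)(uintptr_t)p;
        }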

vim +391 drivers/gpu//drm/i915/i915_guc_submission.c

   313  
   314  /*
   315   * Initialise/clear the stage descriptor shared with the GuC firmware.
   316   *
   317   * This descriptor tells the GuC where (in GGTT space) to find the important
   318   * data structures relating to this client (doorbell, process descriptor,
   319   * write queue, etc).
   320   */
   321  static void guc_stage_desc_init(struct intel_guc *guc,
   322                                  struct i915_guc_client *client)
   323  {
   324          struct drm_i915_private *dev_priv = guc_to_i915(guc);
   325          struct intel_engine_cs *engine;
   326          struct i915_gem_context *ctx = client->owner;
   327          struct guc_stage_desc *desc;
   328          unsigned int tmp;
   329          u32 gfx_addr;
   330  
   331          desc = __get_stage_desc(client);
   332          memset(desc, 0, sizeof(*desc));
   333  
   334          desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | GUC_STAGE_DESC_ATTR_KERNEL;
   335          desc->stage_id = client->stage_id;
   336          desc->priority = client->priority;
   337          desc->db_id = client->doorbell_id;
   338  
   339          for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
   340                  struct intel_context *ce = &ctx->engine[engine->id];
   341                  u32 guc_engine_id = engine->guc_id;
   342                  struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
   343  
   344                  /* TODO: We have a design issue to be solved here. Only when we
   345                   * receive the first batch, we know which engine is used by the
   346                   * user. But here GuC expects the lrc and ring to be pinned. It
   347                   * is not an issue for default context, which is the only one
   348                   * for now who owns a GuC client. But for future owner of GuC
   349                   * client, need to make sure lrc is pinned prior to enter here.
   350                   */
   351                  if (!ce->state)
   352                          break;  /* XXX: continue? */
   353  
   354                  /*
   355                   * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
   356                   * submission or, in other words, not using a direct submission
   357                   * model) the KMD's LRCA is not used for any work submission.
   358                   * Instead, the GuC uses the LRCA of the user mode context (see
   359                   * guc_wq_item_append below).
   360                   */
   361                  lrc->context_desc = lower_32_bits(ce->lrc_desc);
   362  
   363                  /* The state page is after PPHWSP */
   364                  lrc->ring_lrca =
   365                          guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
   366  
   367                  /* XXX: In direct submission, the GuC wants the HW context id
   368                   * here. In proxy submission, it wants the stage id */
   369                  lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
   370                                  (guc_engine_id << GUC_ELC_ENGINE_OFFSET);
   371  
   372                  lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
   373                  lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
   374                  lrc->ring_next_free_location = lrc->ring_begin;
   375                  lrc->ring_current_tail_pointer_value = 0;
   376  
   377                  desc->engines_used |= (1 << guc_engine_id);
   378          }
   379  
   380          DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
   381                          client->engines, desc->engines_used);
   382          WARN_ON(desc->engines_used == 0);
   383  
   384          /*
   385           * The doorbell, process descriptor, and workqueue are all parts
   386           * of the client object, which the GuC will reference via the GGTT
   387           */
   388          gfx_addr = guc_ggtt_offset(client->vma);
   389          desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
   390                                  client->doorbell_offset;
 > 391          desc->db_trigger_cpu = (u64)__get_doorbell(client);
   392          desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
   393          desc->process_desc = gfx_addr + client->proc_desc_offset;
   394          desc->wq_addr = gfx_addr + GUC_DB_SIZE;
   395          desc->wq_size = GUC_WQ_SIZE;
   396  
   397          desc->desc_private = (u64)client;
   398  }
   399  
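
One possible way to address the two flagged assignments (a sketch only, assuming the u64 type of db_trigger_cpu and desc_private in struct guc_stage_desc is intentional; the fix actually taken upstream may differ) is to cast through uintptr_t so the pointer is converted at its native width and then widened:

        /* Sketch of a possible fix, not the committed change. */
        desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client);
        ...
        desc->desc_private = (uintptr_t)client;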

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
