Tvrtko Ursulin <tvrtko.ursu...@linux.intel.com> wrote on Wed [2022-Jan-12
08:54:19 +0000]:
> 
> On 11/01/2022 16:30, Matthew Brost wrote:
> > Move the multi-lrc guc_id from the lower allocation partition (0 to
> > number of multi-lrc guc_ids) to upper allocation partition (number of
> > single-lrc to max guc_ids).
> 
> Just a reminder that best practice for commit messages is to include the
> "why" as well.
> 
> Regards,
> 
> Tvrtko
> 

In my opinion this patch is a good step forward.
Lazy allocation of the MLRC guc_id bitmap and moving the MLRC pool to the end
of the guc_id space will make development of contexts for SR-IOV easier
(rough sketch of the resulting layout below).
The introduction of the two new helpers (new_mlrc_guc_id and new_slrc_guc_id)
also cleans up the code.
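
For reference, here is my rough picture of the guc_id space after this change
(N is num_guc_ids; the 1/16 ratio comes from NUMBER_MULTI_LRC_GUC_ID):

	/*
	 * guc_id layout after this patch (N = num_guc_ids):
	 *
	 *   [0 .......... N - N/16 - 1]  single-LRC ids, handed out by the
	 *                                ida in new_slrc_guc_id()
	 *   [N - N/16 .......... N - 1]  multi-LRC ids, handed out by the
	 *                                lazily allocated bitmap in
	 *                                new_mlrc_guc_id(), offset by
	 *                                NUMBER_SINGLE_LRC_GUC_ID(guc)
	 */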

 I agree with Tvrtko's comment that you should expand your commit
 message.

One thing I personally don't like is the NUMBER_SINGLE_LRC_GUC_ID definition
(and the same applies to the MLRC one): in my opinion it should be an inline
function, with the 1/16 ratio defined as a named constant.
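
Something along these lines is what I had in mind; the constant name, the
helper names and the exact types below are purely illustrative (not taken
from the driver), just to show the shape of it:

	#define MLRC_GUC_ID_RATIO	16

	static inline u32 number_mlrc_guc_id(struct intel_guc *guc)
	{
		return guc->submission_state.num_guc_ids / MLRC_GUC_ID_RATIO;
	}

	static inline u32 number_slrc_guc_id(struct intel_guc *guc)
	{
		return guc->submission_state.num_guc_ids - number_mlrc_guc_id(guc);
	}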

- Piotr
> > Signed-off-by: Matthew Brost <matthew.br...@intel.com>
> > ---
> >   .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++++++++++++++-----
> >   1 file changed, 42 insertions(+), 15 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > index 9989d121127df..1bacc9621cea8 100644
> > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > @@ -147,6 +147,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
> >    */
> >   #define NUMBER_MULTI_LRC_GUC_ID(guc)      \
> >     ((guc)->submission_state.num_guc_ids / 16)
> > +#define NUMBER_SINGLE_LRC_GUC_ID(guc)      \
> > +   ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
> >   /*
> >    * Below is a set of functions which control the GuC scheduling state which
> > @@ -1776,11 +1778,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
> >     INIT_WORK(&guc->submission_state.destroyed_worker,
> >               destroyed_worker_func);
> > -   guc->submission_state.guc_ids_bitmap =
> > -           bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > -   if (!guc->submission_state.guc_ids_bitmap)
> > -           return -ENOMEM;
> > -
> >     spin_lock_init(&guc->timestamp.lock);
> >     INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
> >     guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> > @@ -1796,7 +1793,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
> >     guc_flush_destroyed_contexts(guc);
> >     guc_lrc_desc_pool_destroy(guc);
> >     i915_sched_engine_put(guc->sched_engine);
> > -   bitmap_free(guc->submission_state.guc_ids_bitmap);
> > +   if (guc->submission_state.guc_ids_bitmap)
> > +           bitmap_free(guc->submission_state.guc_ids_bitmap);
> >   }
> >   static inline void queue_request(struct i915_sched_engine *sched_engine,
> > @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
> >     spin_unlock_irqrestore(&sched_engine->lock, flags);
> >   }
> > +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > +{
> > +   int ret;
> > +
> > +   GEM_BUG_ON(!intel_context_is_parent(ce));
> > +   GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
> > +
> > +   ret =  bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > +                                  NUMBER_MULTI_LRC_GUC_ID(guc),
> > +                                  order_base_2(ce->parallel.number_children
> > +                                               + 1));
> > +   if (likely(!(ret < 0)))
> > +           ret += NUMBER_SINGLE_LRC_GUC_ID(guc);
> > +
> > +   return ret;
> > +}
> > +
> > +static int new_slrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > +{
> > +   GEM_BUG_ON(intel_context_is_parent(ce));
> > +
> > +   return ida_simple_get(&guc->submission_state.guc_ids,
> > +                         0, NUMBER_SINGLE_LRC_GUC_ID(guc),
> > +                         GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > +                         __GFP_NOWARN);
> > +}
> > +
> >   static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> >   {
> >     int ret;
> > @@ -1870,16 +1895,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> >     GEM_BUG_ON(intel_context_is_child(ce));
> >     if (intel_context_is_parent(ce))
> > -           ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > -                                         NUMBER_MULTI_LRC_GUC_ID(guc),
> > -                                         order_base_2(ce->parallel.number_children
> > -                                                      + 1));
> > +           ret = new_mlrc_guc_id(guc, ce);
> >     else
> > -           ret = ida_simple_get(&guc->submission_state.guc_ids,
> > -                                NUMBER_MULTI_LRC_GUC_ID(guc),
> > -                                guc->submission_state.num_guc_ids,
> > -                                GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > -                                __GFP_NOWARN);
> > +           ret = new_slrc_guc_id(guc, ce);
> > +
> >     if (unlikely(ret < 0))
> >             return ret;
> > @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
> >     GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
> > +   if (unlikely(intel_context_is_parent(ce) &&
> > +                !guc->submission_state.guc_ids_bitmap)) {
> > +           guc->submission_state.guc_ids_bitmap =
> > +                   bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > +           if (!guc->submission_state.guc_ids_bitmap)
> > +                   return -ENOMEM;
> > +   }
> > +
> >   try_again:
> >     spin_lock_irqsave(&guc->submission_state.lock, flags);
> > 
