Hi Chris,

[...]

> +static int s_many(void *arg)
> +{
> +     struct perf_series *ps = arg;

why do we need to go through a void * argument? All of these functions
take a perf_series structure, so the parameter could be typed directly.

> +     IGT_TIMEOUT(end_time);
> +     unsigned int idx = 0;
> +

[...]

> +             for (idx = 0; idx < nengines; idx++) {
> +                     struct perf_stats *p =
> +                             memset(&stats[idx], 0, sizeof(stats[idx]));
> +                     struct intel_context *ce = ps->ce[idx];
> +
> +                     p->engine = ps->ce[idx]->engine;
> +                     intel_engine_pm_get(p->engine);
> +
> +                     if (intel_engine_supports_stats(p->engine) &&
> +                         !intel_enable_engine_stats(p->engine))
> +                             p->busy = intel_engine_get_busy_time(p->engine) 
> + 1;
> +                     p->runtime = -intel_context_get_total_runtime_ns(ce);
> +                     p->time = ktime_get();
> +             }
> +
> +             err = (*fn)(ps);
> +             if (igt_live_test_end(&t))
> +                     err = -EIO;
> +
> +             for (idx = 0; idx < nengines; idx++) {
> +                     struct perf_stats *p = &stats[idx];
> +                     struct intel_context *ce = ps->ce[idx];
> +                     int integer, decimal;
> +                     u64 busy, dt;
> +
> +                     p->time = ktime_sub(ktime_get(), p->time);
> +                     if (p->busy) {
> +                             p->busy = 
> ktime_sub(intel_engine_get_busy_time(p->engine),
> +                                                 p->busy - 1);
> +                             intel_disable_engine_stats(p->engine);
> +                     }
> +
> +                     err = switch_to_kernel_sync(ce, err);

what about the incoming err here? If an earlier iteration already set an
error, switch_to_kernel_sync() might overwrite it — is that intended?

> +                     p->runtime += intel_context_get_total_runtime_ns(ce);

assigning a negative value to an unsigned variable so that you can add
to it later? looks nice but odd :)

It's easier to understand if we do

p->runtime = intel_context_get_total_runtime_ns(ce) - p->runtime;

if you like it, no need to change, though.

[...]

> +static int p_many(void *arg)
> +{
> +     struct perf_stats *p = arg;
> +     struct intel_engine_cs *engine = p->engine;
> +     struct intel_context *ce;
> +     IGT_TIMEOUT(end_time);
> +     unsigned long count;
> +     int err = 0;
> +     bool busy;
> +
> +     ce = intel_context_create(engine);
> +     if (IS_ERR(ce))
> +             return PTR_ERR(ce);
> +
> +     err = intel_context_pin(ce);
> +     if (err) {
> +             intel_context_put(ce);
> +             return err;
> +     }
> +
> +     busy = false;
> +     if (intel_engine_supports_stats(engine) &&
> +         !intel_enable_engine_stats(engine)) {
> +             p->busy = intel_engine_get_busy_time(engine);
> +             busy = true;
> +     }
> +
> +     count = 0;
> +     p->time = ktime_get();
> +     do {
> +             struct i915_request *rq;
> +
> +             rq = i915_request_create(ce);
> +             if (IS_ERR(rq)) {
> +                     err = PTR_ERR(rq);
> +                     break;
> +             }
> +
> +             i915_request_add(rq);

do we need an i915_request_put() as well, to drop the reference taken by
i915_request_create()?

> +             count++;
> +     } while (!__igt_timeout(end_time, NULL));
> +     p->time = ktime_sub(ktime_get(), p->time);
> +
> +     if (busy) {
> +             p->busy = ktime_sub(intel_engine_get_busy_time(engine),
> +                                 p->busy);
> +             intel_disable_engine_stats(engine);
> +     }
> +
> +     err = switch_to_kernel_sync(ce, err);
> +     p->runtime = intel_context_get_total_runtime_ns(ce);
> +     p->count = count;
> +
> +     intel_context_unpin(ce);
> +     intel_context_put(ce);
> +     return err;
> +}

[...]

> +             for_each_uabi_engine(engine, i915) {
> +                     int status;
> +
> +                     if (IS_ERR(engines[idx].tsk))
> +                             break;

if we break here, aren't we missing the intel_engine_pm_put() and
put_task_struct() calls for the engines/threads that were already set up
in the earlier iterations?

Andi

> +
> +                     status = kthread_stop(engines[idx].tsk);
> +                     if (status && !err)
> +                             err = status;
> +
> +                     intel_engine_pm_put(engine);
> +                     put_task_struct(engines[idx++].tsk);
> +             }
> +
> +             if (igt_live_test_end(&t))
> +                     err = -EIO;
> +             if (err)
> +                     break;
> +
> +             idx = 0;
> +             for_each_uabi_engine(engine, i915) {
> +                     struct perf_stats *p = &engines[idx].p;
> +                     u64 busy = 100 * ktime_to_ns(p->busy);
> +                     u64 dt = ktime_to_ns(p->time);
> +                     int integer, decimal;
> +
> +                     if (dt) {
> +                             integer = div64_u64(busy, dt);
> +                             busy -= integer * dt;
> +                             decimal = div64_u64(100 * busy, dt);
> +                     } else {
> +                             integer = 0;
> +                             decimal = 0;
> +                     }
> +
> +                     GEM_BUG_ON(engine != p->engine);
> +                     pr_info("%s %5s: { count:%lu, busy:%d.%02d%%, 
> runtime:%lldms, walltime:%lldms }\n",
> +                             name, engine->name, p->count, integer, decimal,
> +                             div_u64(p->runtime, 1000 * 1000),
> +                             div_u64(ktime_to_ns(p->time), 1000 * 1000));
> +                     idx++;
> +             }
> +     }
> +
> +     cpu_latency_qos_remove_request(&qos);
> +     kfree(engines);
> +     return err;
> +}
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to