Add a job scheduler for CAMSS offline ISP drivers which serialises
job execution, tracks which context is currently running on hardware,
and provides cancel/suspend/resume operations. Jobs carry optional
ready/run/abort callbacks via camss_isp_job_ops, allowing the
scheduler to gate submission on hardware and buffer availability.

Signed-off-by: Loic Poulain <[email protected]>
---
 drivers/media/platform/qcom/camss/Makefile         |   3 +-
 .../media/platform/qcom/camss/camss-isp-sched.c    | 223 +++++++++++++++++++++
 .../media/platform/qcom/camss/camss-isp-sched.h    | 174 ++++++++++++++++
 3 files changed, 399 insertions(+), 1 deletion(-)

diff --git a/drivers/media/platform/qcom/camss/Makefile 
b/drivers/media/platform/qcom/camss/Makefile
index 
bfc05db0eada1d801839ceb8a3b157baae613053..f13c9f326cf81962bd165dc8dd2bb60207cd54a7
 100644
--- a/drivers/media/platform/qcom/camss/Makefile
+++ b/drivers/media/platform/qcom/camss/Makefile
@@ -30,7 +30,8 @@ qcom-camss-objs += \
 
 obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom-camss.o
 
-qcom-camss-isp-objs := camss-isp-bufq.o
+qcom-camss-isp-objs := camss-isp-bufq.o \
+               camss-isp-sched.o
 
 obj-$(CONFIG_VIDEO_QCOM_CAMSS_ISP) += qcom-camss-isp.o
 
diff --git a/drivers/media/platform/qcom/camss/camss-isp-sched.c 
b/drivers/media/platform/qcom/camss/camss-isp-sched.c
new file mode 100644
index 
0000000000000000000000000000000000000000..6940087f94d00570a82666e882ffc8b38891736b
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-isp-sched.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * CAMSS ISP scheduler helper — ISP job scheduling
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "camss-isp-sched.h"
+
+/* Job state flags */
+#define ISP_JOB_QUEUED         BIT(0)
+#define ISP_JOB_RUNNING                BIT(1)
+#define ISP_JOB_ABORT          BIT(2)
+
+/* Scheduler flags */
+#define ISP_SCHED_PAUSED       BIT(0)
+
+/* -------- Internal helpers -------- */
+
+/*
+ * Start the first pending job if the hardware is idle and the scheduler
+ * is not paused.  Safe to call from any context that may sleep through
+ * the job's run callback.
+ */
+static void isp_sched_try_run(struct camss_isp_sched *sched)
+{
+       void (*run_fn)(void *priv, bool ctx_changed);
+       struct camss_isp_job *job;
+       unsigned long flags;
+       bool ctx_changed;
+       void *priv;
+
+       spin_lock_irqsave(&sched->lock, flags);
+
+       /* Nothing to do if busy, paused, or no job is pending */
+       if (sched->curr_job || list_empty(&sched->pending) ||
+           (sched->flags & ISP_SCHED_PAUSED)) {
+               spin_unlock_irqrestore(&sched->lock, flags);
+               return;
+       }
+
+       /* The job stays on the pending list until job_finish() unlinks it */
+       job = list_first_entry(&sched->pending, struct camss_isp_job, queue);
+       job->flags |= ISP_JOB_RUNNING;
+       sched->curr_job = job;
+       run_fn = job->ops ? job->ops->run : NULL;
+       priv   = job->priv;
+       ctx_changed = (sched->prev_job != job);
+       /*
+        * Record the context switch while still holding the lock: the job
+        * can complete (and the next try_run can start) as soon as the lock
+        * is dropped, so updating prev_job after run_fn() returns would race
+        * with the next ctx_changed computation.
+        */
+       sched->prev_job = job;
+
+       spin_unlock_irqrestore(&sched->lock, flags);
+
+       /* run is documented as required, but don't crash if it is missing */
+       if (run_fn)
+               run_fn(priv, ctx_changed);
+}
+
+/* Deferred-submission path: retry starting the next pending job */
+static void isp_sched_work(struct work_struct *work)
+{
+       struct camss_isp_sched *sched;
+
+       sched = container_of(work, struct camss_isp_sched, work);
+       isp_sched_try_run(sched);
+}
+
+/* -------- Public API -------- */
+
+/* Prepare a scheduler for use; must be called before any other API */
+void camss_isp_sched_init(struct camss_isp_sched *sched)
+{
+       spin_lock_init(&sched->lock);
+       INIT_LIST_HEAD(&sched->pending);
+       INIT_WORK(&sched->work, isp_sched_work);
+       sched->curr_job = NULL;
+       sched->prev_job = NULL;
+       sched->flags = 0;
+}
+EXPORT_SYMBOL_GPL(camss_isp_sched_init);
+
+void camss_isp_sched_destroy(struct camss_isp_sched *sched)
+{
+       /*
+        * Only the deferred-submission work item is flushed here; this does
+        * NOT wait for a job already running on hardware.  Callers are
+        * expected to cancel outstanding jobs first via
+        * camss_isp_sched_cancel().
+        */
+       cancel_work_sync(&sched->work);
+}
+EXPORT_SYMBOL_GPL(camss_isp_sched_destroy);
+
+/* Set up per-context job state; @ops and @priv are stored, not copied */
+void camss_isp_job_init(struct camss_isp_job *job,
+                      const struct camss_isp_job_ops *ops,
+                      void *priv)
+{
+       job->priv = priv;
+       job->ops = ops;
+       job->flags = 0;
+       INIT_LIST_HEAD(&job->queue);
+       init_waitqueue_head(&job->finished);
+}
+EXPORT_SYMBOL_GPL(camss_isp_job_init);
+
+/* Enqueue @job if it is ready and not already queued/running/aborting,
+ * then try to start the scheduler immediately.
+ */
+void camss_isp_sched_try_run(struct camss_isp_sched *sched,
+                            struct camss_isp_job *job)
+{
+       unsigned long flags;
+       bool enqueued = false;
+
+       /* Gate submission on the driver's readiness check, if provided */
+       if (job->ops && job->ops->ready && !job->ops->ready(job->priv))
+               return;
+
+       spin_lock_irqsave(&sched->lock, flags);
+       /* Ignore jobs being aborted or already queued/running */
+       if (!(job->flags & (ISP_JOB_ABORT | ISP_JOB_QUEUED | ISP_JOB_RUNNING))) {
+               job->flags |= ISP_JOB_QUEUED;
+               list_add_tail(&job->queue, &sched->pending);
+               enqueued = true;
+       }
+       spin_unlock_irqrestore(&sched->lock, flags);
+
+       /*
+        * NOTE(review): this starts the job synchronously in the caller's
+        * context (it does not go through the workqueue) — confirm the run
+        * callback is safe in every context this is called from.
+        */
+       if (enqueued)
+               isp_sched_try_run(sched);
+}
+EXPORT_SYMBOL_GPL(camss_isp_sched_try_run);
+
+void camss_isp_sched_job_finish(struct camss_isp_sched *sched,
+                               struct camss_isp_job *job,
+                               bool requeue)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&sched->lock, flags);
+
+       if (sched->curr_job != job) {
+               /*
+                * curr_job may have been cleared by a racing cancel/streamoff.
+                * If this job is still marked RUNNING, clear it and wake any
+                * waiter in camss_isp_sched_cancel() so it can unblock.
+                */
+               if (job->flags & ISP_JOB_RUNNING) {
+                       job->flags &= ~(ISP_JOB_QUEUED | ISP_JOB_RUNNING);
+                       wake_up(&job->finished);
+               }
+               spin_unlock_irqrestore(&sched->lock, flags);
+               return;
+       }
+
+       /*
+        * The running job is still on the pending list
+        * (isp_sched_try_run() does not unlink it), so remove it here.
+        */
+       list_del(&job->queue);
+       job->flags &= ~(ISP_JOB_QUEUED | ISP_JOB_RUNNING);
+       wake_up(&job->finished);
+       sched->curr_job = NULL;
+
+       /*
+        * Re-enqueue unless the job is being aborted.
+        * NOTE(review): list_add() inserts at the head, giving this job
+        * priority over other pending jobs — confirm head insertion
+        * (rather than tail) is the intended fairness policy.
+        */
+       if (requeue && !(job->flags & ISP_JOB_ABORT)) {
+               job->flags |= ISP_JOB_QUEUED;
+               list_add(&job->queue, &sched->pending);
+       }
+
+       spin_unlock_irqrestore(&sched->lock, flags);
+
+       /* Start the next pending job from non-atomic (workqueue) context */
+       schedule_work(&sched->work);
+}
+EXPORT_SYMBOL_GPL(camss_isp_sched_job_finish);
+
+void camss_isp_sched_cancel(struct camss_isp_sched *sched,
+                           struct camss_isp_job *job)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&sched->lock, flags);
+       /* Blocks re-queueing: job_finish() skips requeue while ABORT is set */
+       job->flags |= ISP_JOB_ABORT;
+
+       if (job->flags & ISP_JOB_RUNNING) {
+               /*
+                * Abort outside the lock (the callback may sleep), then wait
+                * for camss_isp_sched_job_finish() to clear RUNNING and wake
+                * the @finished waitqueue.
+                * NOTE(review): the job may finish between the unlock and the
+                * abort callback; ops->abort must tolerate being invoked on a
+                * job that has already completed — confirm with the drivers.
+                */
+               spin_unlock_irqrestore(&sched->lock, flags);
+               if (job->ops && job->ops->abort)
+                       job->ops->abort(job->priv);
+               wait_event(job->finished, !(job->flags & ISP_JOB_RUNNING));
+       } else if (job->flags & ISP_JOB_QUEUED) {
+               /* Not started yet: just unlink it from the pending list */
+               list_del(&job->queue);
+               job->flags &= ~(ISP_JOB_QUEUED | ISP_JOB_RUNNING);
+               spin_unlock_irqrestore(&sched->lock, flags);
+       } else {
+               spin_unlock_irqrestore(&sched->lock, flags);
+       }
+
+       /* Clear abort flag so the job can be reused after cancel */
+       spin_lock_irqsave(&sched->lock, flags);
+       job->flags &= ~ISP_JOB_ABORT;
+       spin_unlock_irqrestore(&sched->lock, flags);
+}
+EXPORT_SYMBOL_GPL(camss_isp_sched_cancel);
+
+/* Stop new submissions, then wait out the in-flight job, if any */
+void camss_isp_sched_suspend(struct camss_isp_sched *sched)
+{
+       struct camss_isp_job *running;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sched->lock, flags);
+       sched->flags |= ISP_SCHED_PAUSED;
+       running = sched->curr_job;
+       spin_unlock_irqrestore(&sched->lock, flags);
+
+       if (!running)
+               return;
+
+       /* job_finish() clears RUNNING and wakes @finished */
+       wait_event(running->finished, !(running->flags & ISP_JOB_RUNNING));
+}
+EXPORT_SYMBOL_GPL(camss_isp_sched_suspend);
+
+/* Lift the pause, then kick the scheduler to drain pending jobs */
+void camss_isp_sched_resume(struct camss_isp_sched *sched)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&sched->lock, flags);
+       sched->flags &= ~ISP_SCHED_PAUSED;
+       spin_unlock_irqrestore(&sched->lock, flags);
+
+       isp_sched_try_run(sched);
+}
+EXPORT_SYMBOL_GPL(camss_isp_sched_resume);
+
+/* Report whether @job is the one currently on hardware.  The answer is a
+ * snapshot taken under the lock and may be stale by the time it returns.
+ */
+bool camss_isp_sched_is_running(struct camss_isp_sched *sched,
+                               struct camss_isp_job *job)
+{
+       unsigned long flags;
+       bool is_curr;
+
+       spin_lock_irqsave(&sched->lock, flags);
+       is_curr = sched->curr_job == job;
+       spin_unlock_irqrestore(&sched->lock, flags);
+
+       return is_curr;
+}
+EXPORT_SYMBOL_GPL(camss_isp_sched_is_running);
+
+MODULE_DESCRIPTION("CAMSS ISP job scheduler");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/qcom/camss/camss-isp-sched.h 
b/drivers/media/platform/qcom/camss/camss-isp-sched.h
new file mode 100644
index 
0000000000000000000000000000000000000000..5b6034976de65be57581ccaa92d1f15d7cb4a688
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-isp-sched.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CAMSS ISP scheduler helper — ISP job scheduling
+ *
+ * Tracks which context is currently running on the hardware and
+ * serialises job execution. This is a pure helper: it has no knowledge
+ * of buffers, vb2 queues, or the uAPI.  Drivers call these functions
+ * explicitly from their own code paths.
+ *
+ * Usage pattern:
+ *   - Embed struct camss_isp_sched in the driver's device struct.
+ *   - Call camss_isp_sched_init() at probe time.
+ *   - Call camss_isp_job_init() with ready_fn/run_fn/abort_fn/priv.
+ *   - Call camss_isp_sched_try_run() from buf_queue / streamon to start jobs.
+ *   - Call camss_isp_sched_job_finish() from the IRQ handler when done.
+ *   - Call camss_isp_sched_cancel() from streamoff / release.
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _CAMSS_ISP_SCHED_H
+#define _CAMSS_ISP_SCHED_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct camss_isp_job_ops - per-job operation callbacks
+ *
+ * @ready:  Optional; return %true if the job can be submitted to hardware.
+ *          Called outside the scheduler spinlock.  May be NULL (always ready).
+ * @run:    Required; start the hardware for this job.  Called from workqueue
+ *          context, or directly in the caller's context when the job is
+ *          started via camss_isp_sched_try_run().
+ *          @ctx_changed is %true when this job differs from the previously
+ *          run job (i.e. first run ever, or a different context took over).
+ * @abort:  Optional; abort a running job (e.g. trigger a HW reset).
+ *          Called from process context during camss_isp_sched_cancel().
+ *          May be NULL.
+ */
+struct camss_isp_job_ops {
+       bool    (*ready)(void *priv);
+       void    (*run)(void *priv, bool ctx_changed);
+       void    (*abort)(void *priv);
+};
+
+/**
+ * struct camss_isp_job - per-context scheduler state
+ *
+ * Embed one of these in the driver's per-context struct.
+ * Initialise with camss_isp_job_init().
+ *
+ * @queue:     Entry in the scheduler's pending-job list.
+ * @flags:     Internal state flags (ISP_JOB_*).
+ * @finished:  Wait queue signalled when the running job completes.
+ * @ops:       Job operation callbacks (ready/run/abort).
+ * @priv:      Opaque pointer passed to all callbacks.
+ */
+struct camss_isp_job {
+       struct list_head        queue;
+       unsigned long           flags;
+       wait_queue_head_t       finished;
+       const struct camss_isp_job_ops *ops;
+       void                    *priv;
+};
+
+/**
+ * struct camss_isp_sched - ISP job scheduler
+ *
+ * Embed one of these in the driver's device struct.
+ * Initialise with camss_isp_sched_init().
+ *
+ * @curr_job:    Job currently running on hardware (NULL if idle).  The
+ *               running job also remains on @pending until it finishes.
+ * @prev_job:    Job that ran most recently (never dereferenced, only compared
+ *               against the next job to detect a context switch).
+ * @pending:     List of jobs waiting to run.
+ * @lock:        Protects @curr_job, @pending, and @flags.
+ * @work:        Work item used to run jobs from non-atomic context.
+ * @flags:       Scheduler-level flags (ISP_SCHED_PAUSED).
+ */
+struct camss_isp_sched {
+       struct camss_isp_job    *curr_job;
+       struct camss_isp_job    *prev_job;
+       struct list_head        pending;
+       spinlock_t              lock;
+       struct work_struct      work;
+       unsigned long           flags;
+};
+
+/**
+ * camss_isp_sched_init() - initialise a scheduler
+ * @sched: scheduler to initialise
+ */
+void camss_isp_sched_init(struct camss_isp_sched *sched);
+
+/**
+ * camss_isp_sched_destroy() - destroy a scheduler
+ * @sched: scheduler to destroy
+ *
+ * Flushes the deferred-submission work item; does not wait for a job
+ * already running on hardware.  Cancel outstanding jobs first.
+ */
+void camss_isp_sched_destroy(struct camss_isp_sched *sched);
+
+/**
+ * camss_isp_job_init() - initialise per-context job state
+ * @job:  job to initialise
+ * @ops:  operation callbacks (run is required; ready and abort may be NULL)
+ * @priv: opaque pointer passed to all callbacks
+ */
+void camss_isp_job_init(struct camss_isp_job *job,
+                      const struct camss_isp_job_ops *ops,
+                      void *priv);
+
+/**
+ * camss_isp_sched_try_run() - enqueue a job and try to start it
+ * @sched: scheduler
+ * @job:   job to enqueue; callbacks and @priv are taken from the job.
+ *
+ * Calls @job->ops->ready (if set); returns immediately if it returns %false.
+ * Otherwise enqueues the job and starts it if the hardware is idle.
+ * Safe to call from atomic context.
+ */
+void camss_isp_sched_try_run(struct camss_isp_sched *sched,
+                            struct camss_isp_job *job);
+
+/**
+ * camss_isp_sched_job_finish() - signal that the current job has completed
+ * @sched: scheduler
+ * @job:   job that just finished (must be the currently running job)
+ * @requeue: if %true and the job is not being cancelled, immediately
+ *           re-enqueue it so the next frame starts as soon as the
+ *           workqueue runs.
+ *
+ * Clears the running state, wakes any waiter in camss_isp_sched_cancel(),
+ * and schedules the next pending job via the work queue.
+ * Safe to call from atomic/IRQ context.
+ */
+void camss_isp_sched_job_finish(struct camss_isp_sched *sched,
+                               struct camss_isp_job *job,
+                               bool requeue);
+
+/**
+ * camss_isp_sched_cancel() - cancel a pending or running job and wait
+ * @sched: scheduler
+ * @job:   job to cancel; @job->ops->abort is called if the job is running.
+ *
+ * If the job is queued but not yet running, it is simply removed.
+ * If the job is running, @job->ops->abort is called (if set) and the
+ * function blocks until camss_isp_sched_job_finish() is called.
+ * Must be called from process context (may sleep).
+ */
+void camss_isp_sched_cancel(struct camss_isp_sched *sched,
+                           struct camss_isp_job *job);
+
+/**
+ * camss_isp_sched_suspend() - pause the scheduler and wait for current job
+ * @sched: scheduler
+ *
+ * No new jobs will be started until camss_isp_sched_resume() is called.
+ * Blocks until any currently running job finishes.
+ */
+void camss_isp_sched_suspend(struct camss_isp_sched *sched);
+
+/**
+ * camss_isp_sched_resume() - resume the scheduler
+ * @sched: scheduler
+ */
+void camss_isp_sched_resume(struct camss_isp_sched *sched);
+
+/**
+ * camss_isp_sched_is_running() - check if a job is currently running
+ * @sched: scheduler
+ * @job:   job to check
+ */
+bool camss_isp_sched_is_running(struct camss_isp_sched *sched,
+                               struct camss_isp_job *job);
+
+#endif /* _CAMSS_ISP_SCHED_H */

-- 
2.34.1


Reply via email to