Hi Mark,

On Fri, 14 Jan 2022 at 04:05, Mark Kettenis <kette...@openbsd.org> wrote:
>
> The NVMe storage controller integrated on Apple SoCs deviates
> from the NVMe standard in two aspects. It uses a "linear"
> submission queue and it integrates an NVMMU that needs to be
> programmed for each NVMe command. Introduce driver ops such
> that we can set up the linear submission queue and program the
> NVMMU in the driver for this strange beast.
>
> Signed-off-by: Mark Kettenis <kette...@openbsd.org>
> ---
>  drivers/nvme/nvme.c | 45 ++++++++++++++++++---------------------------
>  drivers/nvme/nvme.h | 33 +++++++++++++++++++++++++++++++++
>  2 files changed, 51 insertions(+), 27 deletions(-)
Reviewed-by: Simon Glass <s...@chromium.org>
Tested on: MacBook Air M1
Tested-by: Simon Glass <s...@chromium.org>

But please see below.

>
> diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c
> index be518ec20b..e2d0f9c668 100644
> --- a/drivers/nvme/nvme.c
> +++ b/drivers/nvme/nvme.c
> @@ -27,33 +27,6 @@
>  #define IO_TIMEOUT 30
>  #define MAX_PRP_POOL 512
>
> -enum nvme_queue_id {
> -	NVME_ADMIN_Q,
> -	NVME_IO_Q,
> -	NVME_Q_NUM,
> -};
> -
> -/*
> - * An NVM Express queue. Each device has at least two (one for admin
> - * commands and one for I/O commands).
> - */
> -struct nvme_queue {
> -	struct nvme_dev *dev;
> -	struct nvme_command *sq_cmds;
> -	struct nvme_completion *cqes;
> -	wait_queue_head_t sq_full;
> -	u32 __iomem *q_db;
> -	u16 q_depth;
> -	s16 cq_vector;
> -	u16 sq_head;
> -	u16 sq_tail;
> -	u16 cq_head;
> -	u16 qid;
> -	u8 cq_phase;
> -	u8 cqe_seen;
> -	unsigned long cmdid_data[];
> -};
> -
>  static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
>  {
>  	u32 bit = enabled ? NVME_CSTS_RDY : 0;
> @@ -167,12 +140,19 @@ static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
>   */
>  static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
>  {
> +	struct nvme_ops *ops;
>  	u16 tail = nvmeq->sq_tail;
>
>  	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
>  	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
>  			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));
>
> +	ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
> +	if (ops && ops->submit_cmd) {
> +		ops->submit_cmd(nvmeq, cmd);
> +		return;
> +	}
> +
>  	if (++tail == nvmeq->q_depth)
>  		tail = 0;
>  	writel(tail, nvmeq->q_db);
> @@ -183,6 +163,7 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
>  				struct nvme_command *cmd,
>  				u32 *result, unsigned timeout)
>  {
> +	struct nvme_ops *ops;
>  	u16 head = nvmeq->cq_head;
>  	u16 phase = nvmeq->cq_phase;
>  	u16 status;
> @@ -203,6 +184,10 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
>  		return -ETIMEDOUT;
>  	}
>
> +	ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
> +	if (ops && ops->complete_cmd)
> +		ops->complete_cmd(nvmeq, cmd);
> +
>  	status >>= 1;
>  	if (status) {
>  		printf("ERROR: status = %x, phase = %d, head = %d\n",
> @@ -243,6 +228,7 @@ static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
>  static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
>  					   int qid, int depth)
>  {
> +	struct nvme_ops *ops;
>  	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
>  	if (!nvmeq)
>  		return NULL;
> @@ -268,6 +254,10 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
>  	dev->queue_count++;
>  	dev->queues[qid] = nvmeq;
>
> +	ops = (struct nvme_ops *)dev->udev->driver->ops;
> +	if (ops && ops->alloc_queue)
> +		ops->alloc_queue(nvmeq);
> +
>  	return nvmeq;
>
>  free_queue:
> @@ -821,6 +811,7 @@ int nvme_init(struct udevice *udev)
>  	struct nvme_id_ns *id;
>  	int ret;
>
> +	ndev->udev = udev;
>  	INIT_LIST_HEAD(&ndev->namespaces);
>  	if (readl(&ndev->bar->csts) == -1) {
>  		ret = -ENODEV;
> diff --git a/drivers/nvme/nvme.h b/drivers/nvme/nvme.h
> index 8e9ae3c7f6..57803b43fd 100644
> --- a/drivers/nvme/nvme.h
> +++ b/drivers/nvme/nvme.h
> @@ -596,6 +596,7 @@ enum {
>
>  /* Represents an NVM Express device. Each nvme_dev is a PCI function. */
>  struct nvme_dev {
> +	struct udevice *udev;
>  	struct list_head node;
>  	struct nvme_queue **queues;
>  	u32 __iomem *dbs;
> @@ -622,6 +623,32 @@ struct nvme_dev {
>  	u32 nn;
>  };
>
> +enum nvme_queue_id {

This needs a comment.

> +	NVME_ADMIN_Q,
> +	NVME_IO_Q,
> +	NVME_Q_NUM,
> +};
> +
> +/*
> + * An NVM Express queue. Each device has at least two (one for admin
> + * commands and one for I/O commands).
> + */
> +struct nvme_queue {
> +	struct nvme_dev *dev;
> +	struct nvme_command *sq_cmds;
> +	struct nvme_completion *cqes;
> +	u32 __iomem *q_db;
> +	u16 q_depth;
> +	s16 cq_vector;
> +	u16 sq_head;
> +	u16 sq_tail;
> +	u16 cq_head;
> +	u16 qid;
> +	u8 cq_phase;
> +	u8 cqe_seen;
> +	unsigned long cmdid_data[];
> +};
> +
>  /*
>   * An NVM Express namespace is equivalent to a SCSI LUN.
>   * Each namespace is operated as an independent "device".
> @@ -636,6 +663,12 @@ struct nvme_ns {
>  	u8 flbas;
>  };
>
> +struct nvme_ops {
> +	int (*alloc_queue)(struct nvme_queue *);
> +	void (*submit_cmd)(struct nvme_queue *, struct nvme_command *);
> +	void (*complete_cmd)(struct nvme_queue *, struct nvme_command *);

Each of these needs a comment.

> +};
> +
>  int nvme_init(struct udevice *udev);

(For later: this should be dev, not udev.)

>
>  #endif /* __DRIVER_NVME_H__ */
> --
> 2.34.1
>

Regards,
Simon
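P.S. For anyone picking this up from the archive: with these hooks, a
driver for a non-standard controller implements only the ops it needs
and points its driver's .ops at the table; drivers that leave .ops unset
fall through to the standard tail-doorbell path, so ordinary controllers
are unaffected. A minimal sketch of what such a driver could look like
is below. The apple_nvme_* names and the doorbell convention are my own
illustrative assumptions here, not taken from the real Apple driver:

#include <dm.h>
#include <asm/io.h>
#include "nvme.h"

/* Hypothetical sketch only; names and register behaviour are assumed */
static int apple_nvme_alloc_queue(struct nvme_queue *nvmeq)
{
	/*
	 * Hook point to tell the NVMMU about this queue's command
	 * slots so it can translate addresses for each command.
	 */
	return 0;
}

static void apple_nvme_submit_cmd(struct nvme_queue *nvmeq,
				  struct nvme_command *cmd)
{
	u16 slot = nvmeq->sq_tail;

	/*
	 * nvme_submit_cmd() has already copied the command into
	 * sq_cmds[sq_tail] and flushed the cache, and it returns
	 * early when this hook is set, so advance the tail here.
	 */
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;

	/* A "linear" queue takes the slot number in the doorbell */
	writel(slot, nvmeq->q_db);
}

static void apple_nvme_complete_cmd(struct nvme_queue *nvmeq,
				    struct nvme_command *cmd)
{
	/* Tear down any per-command NVMMU state here */
}

static struct nvme_ops apple_nvme_ops = {
	.alloc_queue	= apple_nvme_alloc_queue,
	.submit_cmd	= apple_nvme_submit_cmd,
	.complete_cmd	= apple_nvme_complete_cmd,
};

Since nvme_submit_cmd() fetches the table through
nvmeq->dev->udev->driver->ops on every call, hooking in is just a matter
of setting .ops = &apple_nvme_ops in the U_BOOT_DRIVER() declaration.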