The current design only supports a single source range.
We receive a request with REQ_OP_COPY_DST.
Parse this request, which consists of a dst (1st) and a src (2nd) bio.
Form a copy command (TP 4065).

Add trace event support for nvme_copy_cmd.
Set the device copy limits to the queue limits.

Signed-off-by: Kanchan Joshi <josh...@samsung.com>
Signed-off-by: Nitesh Shetty <nj.she...@samsung.com>
Signed-off-by: Javier González <javier.g...@samsung.com>
Signed-off-by: Anuj Gupta <anuj2...@samsung.com>
---
 drivers/nvme/host/constants.c |  1 +
 drivers/nvme/host/core.c      | 79 +++++++++++++++++++++++++++++++++++
 drivers/nvme/host/trace.c     | 19 +++++++++
 include/linux/nvme.h          | 43 +++++++++++++++++--
 4 files changed, 139 insertions(+), 3 deletions(-)

diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 5e4f8848dce0..311ad67e9cf3 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -19,6 +19,7 @@ static const char * const nvme_ops[] = {
        [nvme_cmd_resv_report] = "Reservation Report",
        [nvme_cmd_resv_acquire] = "Reservation Acquire",
        [nvme_cmd_resv_release] = "Reservation Release",
+       [nvme_cmd_copy] = "Copy Offload",
        [nvme_cmd_zone_mgmt_send] = "Zone Management Send",
        [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
        [nvme_cmd_zone_append] = "Zone Append",
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 98bfb3d9c22a..d4063e981492 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -763,6 +763,60 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
        cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
 }
 
+static inline blk_status_t nvme_setup_copy_write(struct nvme_ns *ns,
+              struct request *req, struct nvme_command *cmnd)
+{
+       struct nvme_copy_range *range = NULL;
+       struct bio *bio;
+       u64 dst_lba, src_lba, n_lba;
+       u16 nr_range = 1, control = 0;
+
+       if (blk_rq_nr_phys_segments(req) != 2)
+               return BLK_STS_IOERR;
+
+       /* +1 shift as dst+src length is added in request merging, we send copy
+        * for half the length.
+        */
+       n_lba = blk_rq_bytes(req) >> (ns->lba_shift + 1);
+       if (WARN_ON(!n_lba))
+               return BLK_STS_NOTSUPP;
+
+       dst_lba = nvme_sect_to_lba(ns, blk_rq_pos(req));
+       __rq_for_each_bio(bio, req) {
+               src_lba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+               if (n_lba != bio->bi_iter.bi_size >> ns->lba_shift)
+                       return BLK_STS_IOERR;
+       }
+
+       if (req->cmd_flags & REQ_FUA)
+               control |= NVME_RW_FUA;
+
+       if (req->cmd_flags & REQ_FAILFAST_DEV)
+               control |= NVME_RW_LR;
+
+       memset(cmnd, 0, sizeof(*cmnd));
+       cmnd->copy.opcode = nvme_cmd_copy;
+       cmnd->copy.nsid = cpu_to_le32(ns->head->ns_id);
+       cmnd->copy.control = cpu_to_le16(control);
+       cmnd->copy.sdlba = cpu_to_le64(dst_lba);
+       cmnd->copy.nr_range = 0;
+
+       range = kmalloc_array(nr_range, sizeof(*range),
+                       GFP_ATOMIC | __GFP_NOWARN);
+       if (!range)
+               return BLK_STS_RESOURCE;
+
+       range[0].slba = cpu_to_le64(src_lba);
+       range[0].nlb = cpu_to_le16(n_lba - 1);
+
+       req->special_vec.bv_page = virt_to_page(range);
+       req->special_vec.bv_offset = offset_in_page(range);
+       req->special_vec.bv_len = sizeof(*range) * nr_range;
+       req->rq_flags |= RQF_SPECIAL_PAYLOAD;
+
+       return BLK_STS_OK;
+}
+
 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmnd)
 {
@@ -1005,6 +1059,9 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
        case REQ_OP_ZONE_APPEND:
                ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
                break;
+       case REQ_OP_COPY_DST:
+               ret = nvme_setup_copy_write(ns, req, cmd);
+               break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
@@ -1742,6 +1799,26 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
                blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
 }
 
+static void nvme_config_copy(struct gendisk *disk, struct nvme_ns *ns,
+                                      struct nvme_id_ns *id)
+{
+       struct nvme_ctrl *ctrl = ns->ctrl;
+       struct request_queue *q = disk->queue;
+
+       if (!(ctrl->oncs & NVME_CTRL_ONCS_COPY)) {
+               blk_queue_max_copy_sectors_hw(q, 0);
+               blk_queue_flag_clear(QUEUE_FLAG_COPY, q);
+               return;
+       }
+
+       /* setting copy limits */
+       if (blk_queue_flag_test_and_set(QUEUE_FLAG_COPY, q))
+               return;
+
+       blk_queue_max_copy_sectors_hw(q,
+               nvme_lba_to_sect(ns, le16_to_cpu(id->mssrl)));
+}
+
 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
 {
        return uuid_equal(&a->uuid, &b->uuid) &&
@@ -1941,6 +2018,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
        set_capacity_and_notify(disk, capacity);
 
        nvme_config_discard(disk, ns);
+       nvme_config_copy(disk, ns, id);
        blk_queue_max_write_zeroes_sectors(disk->queue,
                                           ns->ctrl->max_zeroes_sectors);
 }
@@ -4600,6 +4678,7 @@ static inline void _nvme_check_size(void)
        BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_copy_command) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 1c36fcedea20..da4a7494e5a7 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -150,6 +150,23 @@ static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
        return ret;
 }
 
+static const char *nvme_trace_copy(struct trace_seq *p, u8 *cdw10)
+{
+       const char *ret = trace_seq_buffer_ptr(p);
+       u64 slba = get_unaligned_le64(cdw10);
+       u8 nr_range = get_unaligned_le16(cdw10 + 8);
+       u16 control = get_unaligned_le16(cdw10 + 10);
+       u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
+       u32 reftag = get_unaligned_le32(cdw10 +  16);
+
+       trace_seq_printf(p,
+                        "slba=%llu, nr_range=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
+                        slba, nr_range, control, dsmgmt, reftag);
+       trace_seq_putc(p, 0);
+
+       return ret;
+}
+
 static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
 {
        const char *ret = trace_seq_buffer_ptr(p);
@@ -243,6 +260,8 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
                return nvme_trace_zone_mgmt_send(p, cdw10);
        case nvme_cmd_zone_mgmt_recv:
                return nvme_trace_zone_mgmt_recv(p, cdw10);
+       case nvme_cmd_copy:
+               return nvme_trace_copy(p, cdw10);
        default:
                return nvme_trace_common(p, cdw10);
        }
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 182b6d614eb1..bbd877111b57 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -337,7 +337,7 @@ struct nvme_id_ctrl {
        __u8                    nvscc;
        __u8                    nwpc;
        __le16                  acwu;
-       __u8                    rsvd534[2];
+       __le16                  ocfs;
        __le32                  sgls;
        __le32                  mnan;
        __u8                    rsvd544[224];
@@ -365,6 +365,7 @@ enum {
        NVME_CTRL_ONCS_WRITE_ZEROES             = 1 << 3,
        NVME_CTRL_ONCS_RESERVATIONS             = 1 << 5,
        NVME_CTRL_ONCS_TIMESTAMP                = 1 << 6,
+       NVME_CTRL_ONCS_COPY                     = 1 << 8,
        NVME_CTRL_VWC_PRESENT                   = 1 << 0,
        NVME_CTRL_OACS_SEC_SUPP                 = 1 << 0,
        NVME_CTRL_OACS_NS_MNGT_SUPP             = 1 << 3,
@@ -414,7 +415,10 @@ struct nvme_id_ns {
        __le16                  npdg;
        __le16                  npda;
        __le16                  nows;
-       __u8                    rsvd74[18];
+       __le16                  mssrl;
+       __le32                  mcl;
+       __u8                    msrc;
+       __u8                    rsvd91[11];
        __le32                  anagrpid;
        __u8                    rsvd96[3];
        __u8                    nsattr;
@@ -831,6 +835,7 @@ enum nvme_opcode {
        nvme_cmd_resv_report    = 0x0e,
        nvme_cmd_resv_acquire   = 0x11,
        nvme_cmd_resv_release   = 0x15,
+       nvme_cmd_copy           = 0x19,
        nvme_cmd_zone_mgmt_send = 0x79,
        nvme_cmd_zone_mgmt_recv = 0x7a,
        nvme_cmd_zone_append    = 0x7d,
@@ -854,7 +859,8 @@ enum nvme_opcode {
                nvme_opcode_name(nvme_cmd_resv_release),        \
                nvme_opcode_name(nvme_cmd_zone_mgmt_send),      \
                nvme_opcode_name(nvme_cmd_zone_mgmt_recv),      \
-               nvme_opcode_name(nvme_cmd_zone_append))
+               nvme_opcode_name(nvme_cmd_zone_append),         \
+               nvme_opcode_name(nvme_cmd_copy))
 
 
 
@@ -1031,6 +1037,36 @@ struct nvme_dsm_range {
        __le64                  slba;
 };
 
+struct nvme_copy_command {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u64                   rsvd2;
+       __le64                  metadata;
+       union nvme_data_ptr     dptr;
+       __le64                  sdlba;
+       __u8                    nr_range;
+       __u8                    rsvd12;
+       __le16                  control;
+       __le16                  rsvd13;
+       __le16                  dspec;
+       __le32                  ilbrt;
+       __le16                  lbat;
+       __le16                  lbatm;
+};
+
+struct nvme_copy_range {
+       __le64                  rsvd0;
+       __le64                  slba;
+       __le16                  nlb;
+       __le16                  rsvd18;
+       __le32                  rsvd20;
+       __le32                  eilbrt;
+       __le16                  elbat;
+       __le16                  elbatm;
+};
+
 struct nvme_write_zeroes_cmd {
        __u8                    opcode;
        __u8                    flags;
@@ -1792,6 +1828,7 @@ struct nvme_command {
                struct nvme_download_firmware dlfw;
                struct nvme_format_cmd format;
                struct nvme_dsm_cmd dsm;
+               struct nvme_copy_command copy;
                struct nvme_write_zeroes_cmd write_zeroes;
                struct nvme_zone_mgmt_send_cmd zms;
                struct nvme_zone_mgmt_recv_cmd zmr;
-- 
2.35.1.500.gb896f729e2

--
dm-devel mailing list
dm-devel@redhat.com
https://listman.redhat.com/mailman/listinfo/dm-devel

Reply via email to