On 2/11/21 2:10 PM, Boris Pismenny wrote:
>  
> +static int nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
> +                              u16 command_id,
> +                              struct request *rq)
> +{
> +     struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +     struct net_device *netdev = queue->ctrl->offloading_netdev;
> +     int ret;
> +
> +     if (unlikely(!netdev)) {
> +             dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");

again, unnecessary. you only get here because the request is marked
offloaded, and that only happens if the netdev exists and supports DDP.
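
i.e. something like this (untested; identical to the quoted code, just
with the check dropped):

static int nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
                                 u16 command_id,
                                 struct request *rq)
{
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        struct net_device *netdev = queue->ctrl->offloading_netdev;
        int ret;

        /* no !netdev check: a request is only marked offloaded when
         * the netdev exists and supports DDP
         */
        ret = netdev->tcp_ddp_ops->tcp_ddp_teardown(netdev, queue->sock->sk,
                                                    &req->ddp, rq);
        sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
        return ret;
}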

> +             return -EINVAL;
> +     }
> +
> +     ret = netdev->tcp_ddp_ops->tcp_ddp_teardown(netdev, queue->sock->sk,
> +                                                 &req->ddp, rq);
> +     sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
> +     return ret;
> +}
> +
> +static void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
> +{
> +     struct request *rq = ddp_ctx;
> +     struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> +     if (!nvme_try_complete_req(rq, req->status, req->result))
> +             nvme_complete_rq(rq);
> +}
> +
> +static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
> +                           u16 command_id,
> +                           struct request *rq)
> +{
> +     struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +     struct net_device *netdev = queue->ctrl->offloading_netdev;
> +     int ret;
> +
> +     if (unlikely(!netdev)) {
> +             dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");

similarly here. you can't get here if netdev is null.

> +             return -EINVAL;
> +     }
> +
> +     req->ddp.command_id = command_id;
> +     ret = nvme_tcp_req_map_sg(req, rq);
> +     if (ret)
> +             return -ENOMEM;
> +
> +     ret = netdev->tcp_ddp_ops->tcp_ddp_setup(netdev,
> +                                              queue->sock->sk,
> +                                              &req->ddp);
> +     if (!ret)
> +             req->offloaded = true;
> +     return ret;
> +}
> +
>  static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
>  {
>       struct net_device *netdev = queue->ctrl->offloading_netdev;
> @@ -343,7 +417,7 @@ static void nvme_tcp_resync_response(struct nvme_tcp_queue *queue,
>               return;
>  
>       if (unlikely(!netdev)) {
> -             pr_info_ratelimited("%s: netdev not found\n", __func__);
> +             dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");

and per comment on the last patch, this is not needed.

> @@ -849,10 +953,39 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
>  
>  static inline void nvme_tcp_end_request(struct request *rq, u16 status)
>  {
> +     struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +     struct nvme_tcp_queue *queue = req->queue;
> +     struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
>       union nvme_result res = {};
>  
> -     if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
> -             nvme_complete_rq(rq);
> +     nvme_tcp_complete_request(rq, cpu_to_le16(status << 1), res, pdu->command_id);
> +}
> +
> +
> +static int nvme_tcp_consume_skb(struct nvme_tcp_queue *queue, struct sk_buff *skb,
> +                             unsigned int *offset, struct iov_iter *iter, int recv_len)
> +{
> +     int ret;
> +
> +#ifdef CONFIG_TCP_DDP
> +     if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags)) {
> +             if (queue->data_digest)
> +                     ret = skb_ddp_copy_and_hash_datagram_iter(skb, *offset, iter, recv_len,
> +                                     queue->rcv_hash);
> +             else
> +                     ret = skb_ddp_copy_datagram_iter(skb, *offset, iter, recv_len);
> +     } else {
> +#endif

why not make that a helper defined in the CONFIG_TCP_DDP section, with an
inline stub for the unset case? That keeps this code from being polluted
with ifdef checks.

> +             if (queue->data_digest)
> +                     ret = skb_copy_and_hash_datagram_iter(skb, *offset, iter, recv_len,
> +                                     queue->rcv_hash);
> +             else
> +                     ret = skb_copy_datagram_iter(skb, *offset, iter, recv_len);
> +#ifdef CONFIG_TCP_DDP
> +     }
> +#endif
> +
> +     return ret;
>  }
>  
>  static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
> @@ -899,12 +1032,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
>               recv_len = min_t(size_t, recv_len,
>                               iov_iter_count(&req->iter));
>  
> -             if (queue->data_digest)
> -                     ret = skb_copy_and_hash_datagram_iter(skb, *offset,
> -                             &req->iter, recv_len, queue->rcv_hash);
> -             else
> -                     ret = skb_copy_datagram_iter(skb, *offset,
> -                                     &req->iter, recv_len);
> +             ret = nvme_tcp_consume_skb(queue, skb, offset, &req->iter, recv_len);
>               if (ret) {
>                       dev_err(queue->ctrl->ctrl.device,
>                               "queue %d failed to copy request %#x data",
> @@ -1128,6 +1256,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
>       bool inline_data = nvme_tcp_has_inline_data(req);
>       u8 hdgst = nvme_tcp_hdgst_len(queue);
>       int len = sizeof(*pdu) + hdgst - req->offset;
> +     struct request *rq = blk_mq_rq_from_pdu(req);
>       int flags = MSG_DONTWAIT;
>       int ret;
>  
> @@ -1136,6 +1265,10 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
>       else
>               flags |= MSG_EOR;
>  
> +     if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags) &&
> +         blk_rq_nr_phys_segments(rq) && rq_data_dir(rq) == READ)
> +             nvme_tcp_setup_ddp(queue, pdu->cmd.common.command_id, rq);
> +

For consistency, shouldn't this be wrapped in the CONFIG_TCP_DDP check too?
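
Or, matching the helper pattern suggested above, an inline stub for the
unset case would keep this call site ifdef-free (untested sketch; again
assumes the queue flag is never set without the config):

#ifndef CONFIG_TCP_DDP
static inline int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
                                     u16 command_id, struct request *rq)
{
        /* never reached: NVME_TCP_Q_OFF_DDP requires CONFIG_TCP_DDP */
        return -EOPNOTSUPP;
}
#endif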

>       if (queue->hdr_digest && !req->offset)
>               nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
>  
