Add a count of the available ioat ring space and check it before enqueueing; if the ring does not have enough space for the next async vhost packet enqueue, return early to prevent an enqueue failure. Also add a failure handler for rte_ioat_completed_ops().
Signed-off-by: Cheng Jiang <cheng1.ji...@intel.com> --- examples/vhost/ioat.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c index 71d8a1f1f..679d1e2f5 100644 --- a/examples/vhost/ioat.c +++ b/examples/vhost/ioat.c @@ -17,6 +17,7 @@ struct packet_tracker { unsigned short next_read; unsigned short next_write; unsigned short last_remain; + unsigned short ioat_space; }; struct packet_tracker cb_tracker[MAX_VHOST_DEVICE]; @@ -113,7 +114,7 @@ open_ioat(const char *value) goto out; } rte_rawdev_start(dev_id); - + cb_tracker[dev_id].ioat_space = IOAT_RING_SIZE; dma_info->nr++; i++; } @@ -140,13 +141,9 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id, src = descs[i_desc].src; dst = descs[i_desc].dst; i_seg = 0; + if (cb_tracker[dev_id].ioat_space < src->nr_segs) + break; while (i_seg < src->nr_segs) { - /* - * TODO: Assuming that the ring space of the - * IOAT device is large enough, so there is no - * error here, and the actual error handling - * will be added later. 
- */ rte_ioat_enqueue_copy(dev_id, (uintptr_t)(src->iov[i_seg].iov_base) + src->offset, @@ -158,7 +155,8 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id, i_seg++; } write &= mask; - cb_tracker[dev_id].size_track[write] = i_seg; + cb_tracker[dev_id].size_track[write] = src->nr_segs; + cb_tracker[dev_id].ioat_space -= src->nr_segs; write++; } } else { @@ -178,17 +176,21 @@ ioat_check_completed_copies_cb(int vid, uint16_t queue_id, { if (!opaque_data) { uintptr_t dump[255]; - unsigned short n_seg; + int n_seg; unsigned short read, write; unsigned short nb_packet = 0; unsigned short mask = MAX_ENQUEUED_SIZE - 1; unsigned short i; + int dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id; n_seg = rte_ioat_completed_ops(dev_id, 255, dump, dump); - n_seg += cb_tracker[dev_id].last_remain; if (!n_seg) return 0; + + cb_tracker[dev_id].ioat_space += n_seg; + n_seg += cb_tracker[dev_id].last_remain; + read = cb_tracker[dev_id].next_read; write = cb_tracker[dev_id].next_write; for (i = 0; i < max_packets; i++) { -- 2.29.2