On Fri, Mar 24, 2017 at 04:53:03PM +0000, Harry van Haaren wrote: > From: Bruce Richardson <bruce.richard...@intel.com> > > Signed-off-by: Bruce Richardson <bruce.richard...@intel.com> > Signed-off-by: Harry van Haaren <harry.van.haa...@intel.com> > --- > drivers/event/sw/sw_evdev.c | 81 > +++++++++++++++++++++++++++++++++++++++++++++ > 1 file changed, 81 insertions(+) > > diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c > index 4b8370d..82ac3bd 100644 > --- a/drivers/event/sw/sw_evdev.c > +++ b/drivers/event/sw/sw_evdev.c > @@ -36,6 +36,7 @@ > #include <rte_memzone.h> > #include <rte_kvargs.h> > #include <rte_ring.h> > +#include <rte_errno.h> > > #include "sw_evdev.h" > #include "iq_ring.h" > @@ -50,6 +51,84 @@ static void > sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info); > > static int > +sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[], > + const uint8_t priorities[], uint16_t num) > +{ > + struct sw_port *p = (void *)port;
The (void *) typecast is not required. > + struct sw_evdev *sw = sw_pmd_priv(dev); > + int i; > + > + RTE_SET_USED(priorities); > + for (i = 0; i < num; i++) { > + struct sw_qid *q = &sw->qids[queues[i]]; > + > + /* check for qid map overflow */ > + if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) > + break; > + > + if (p->is_directed && p->num_qids_mapped > 0) Do we need to set rte_errno = -EDQUOT here too? > + break; > + > + if (q->type == SW_SCHED_TYPE_DIRECT) { > + /* check directed qids only map to one port */ > + if (p->num_qids_mapped > 0) { > + rte_errno = -EDQUOT; > + break; > + } > + /* check port only takes a directed flow */ > + if (num > 1) { > + rte_errno = -EDQUOT; > + break; > + } > + > + p->is_directed = 1; > + p->num_qids_mapped = 1; > + } else if (q->type == RTE_SCHED_TYPE_ORDERED) { Will this "else if" have a similar issue to the one shared in http://dpdk.org/ml/archives/dev/2017-March/061497.html? > + p->num_ordered_qids++; > + p->num_qids_mapped++; > + } else if (q->type == RTE_SCHED_TYPE_ATOMIC) { > + p->num_qids_mapped++; > + } > + > + q->cq_map[q->cq_num_mapped_cqs] = p->id; > + rte_smp_wmb(); > + q->cq_num_mapped_cqs++; > + } > + return i; > +} > + > +static int > +sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[], > + uint16_t nb_unlinks) > +{ > + struct sw_port *p = (void *)port; The (void *) typecast is not required. 
> + struct sw_evdev *sw = sw_pmd_priv(dev); > + unsigned int i, j; > + > + int unlinked = 0; > + for (i = 0; i < nb_unlinks; i++) { > + struct sw_qid *q = &sw->qids[queues[i]]; > + for (j = 0; j < q->cq_num_mapped_cqs; j++) { > + if (q->cq_map[j] == p->id) { > + q->cq_map[j] = > + q->cq_map[q->cq_num_mapped_cqs - 1]; > + rte_smp_wmb(); > + q->cq_num_mapped_cqs--; > + unlinked++; > + > + p->num_qids_mapped--; > + > + if (q->type == RTE_SCHED_TYPE_ORDERED) > + p->num_ordered_qids--; > + > + continue; > + } > + } > + } > + return unlinked; > +} > + With the above suggested changes, Acked-by: Jerin Jacob <jerin.ja...@caviumnetworks.com>