> diff --git a/lib/librte_stack/rte_stack_lf_stubs.h b/lib/librte_stack/rte_stack_lf_stubs.h
> new file mode 100644
> index 0000000..d924bc6
> --- /dev/null
> +++ b/lib/librte_stack/rte_stack_lf_stubs.h
> @@ -0,0 +1,59 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2019 Arm Limited
> + */
> +
> +#ifndef _RTE_STACK_LF_STUBS_H_
> +#define _RTE_STACK_LF_STUBS_H_
> +
> +#include <rte_common.h>
> +#include <rte_atomic.h>
> +
> +static __rte_always_inline unsigned int
> +__rte_stack_lf_count(struct rte_stack *s)
> +{
> +	/* stack_lf_push() and stack_lf_pop() do not update the list's contents
> +	 * and stack_lf->len atomically, which can cause the list to appear
> +	 * shorter than it actually is if this function is called while other
> +	 * threads are modifying the list.
> +	 *
> +	 * However, given the inherently approximate nature of the get_count
> +	 * callback -- even if the list and its size were updated atomically,
> +	 * the size could change between when get_count executes and when the
> +	 * value is returned to the caller -- this is acceptable.
> +	 *
> +	 * The stack_lf->len updates are placed such that the list may appear to
> +	 * have fewer elements than it does, but will never appear to have more
> +	 * elements. If the mempool is near-empty to the point that this is a
> +	 * concern, the user should consider increasing the mempool size.
> +	 */
> +	return (unsigned int)rte_atomic64_read((rte_atomic64_t *)
> +			&s->stack_lf.used.len);
> +}
Since the stub functions are not functional, these comments should be removed and the function should simply return 0. The rte_atomic.h include can be removed as well.
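
Something along these lines is what I have in mind (a sketch only, assuming RTE_SET_USED from rte_common.h is used to silence the unused-parameter warning; the exact form is up to you):

static __rte_always_inline unsigned int
__rte_stack_lf_count(struct rte_stack *s)
{
	/* Lock-free stack is not supported in this build, so the stub
	 * reports an empty stack.
	 */
	RTE_SET_USED(s);

	return 0;
}

With the rte_atomic64_read() call gone, the rte_atomic.h include becomes unnecessary, which is why it can be dropped along with the comment block.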