On Wed, Mar 25, 2015 at 06:00:34PM +0100, Olivier Matz wrote:
> Add a new priv_size field in the mbuf structure that should
> be initialized at mbuf pool creation. This field contains the
> size of the application private data in mbufs.
> 
> Introduce new static inline functions rte_mbuf_from_baddr()
> and rte_mbuf_to_baddr() to replace the existing macros; these
> functions take the private size into account when attaching
> and detaching mbufs.
> 
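Just to check I'm reading the description right -- would the new helpers
look roughly like the sketch below? (Signatures and bodies are only my
guess from the commit message, assuming the data buffer sits immediately
after the mbuf header plus priv_size bytes of private data; the actual
patch may differ.)

	/* sketch only: map between an mbuf and its buffer address,
	 * accounting for the per-mbuf application private area */
	static inline char *
	rte_mbuf_to_baddr(struct rte_mbuf *md)
	{
		return (char *)md + sizeof(*md) + md->priv_size;
	}

	static inline struct rte_mbuf *
	rte_mbuf_from_baddr(char *buffer_addr, uint16_t priv_size)
	{
		return (struct rte_mbuf *)
			(buffer_addr - priv_size - sizeof(struct rte_mbuf));
	}
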
> Signed-off-by: Olivier Matz <olivier.matz at 6wind.com>
> ---
>  app/test-pmd/testpmd.c     |  1 +
>  examples/vhost/main.c      |  6 ++----
>  lib/librte_mbuf/rte_mbuf.c |  1 +
>  lib/librte_mbuf/rte_mbuf.h | 44 +++++++++++++++++++++++++++++++++++---------
>  4 files changed, 39 insertions(+), 13 deletions(-)
> 
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> index 3057791..c5a195a 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -425,6 +425,7 @@ testpmd_mbuf_ctor(struct rte_mempool *mp,
>       mb->tx_offload   = 0;
>       mb->vlan_tci     = 0;
>       mb->hash.rss     = 0;
> +     mb->priv_size    = 0;
>  }
>  
>  static void
> diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> index c3fcb80..050f3ac 100644
> --- a/examples/vhost/main.c
> +++ b/examples/vhost/main.c
> @@ -139,8 +139,6 @@
>  /* Number of descriptors per cacheline. */
>  #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
>  
> -#define MBUF_EXT_MEM(mb)   (RTE_MBUF_FROM_BADDR((mb)->buf_addr) != (mb))
> -
>  /* mask of enabled ports */
>  static uint32_t enabled_port_mask = 0;
>  
> @@ -1590,7 +1588,7 @@ txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool)
>  
>       for (index = 0; index < mbuf_count; index++) {
>               mbuf = __rte_mbuf_raw_alloc(vpool->pool);
> -             if (likely(MBUF_EXT_MEM(mbuf)))
> +             if (likely(RTE_MBUF_INDIRECT(mbuf)))
>                       pktmbuf_detach_zcp(mbuf);
>               rte_ring_sp_enqueue(vpool->ring, mbuf);
>  
> @@ -1653,7 +1651,7 @@ static void mbuf_destroy_zcp(struct vpool *vpool)
>       for (index = 0; index < mbuf_count; index++) {
>               mbuf = __rte_mbuf_raw_alloc(vpool->pool);
>               if (likely(mbuf != NULL)) {
> -                     if (likely(MBUF_EXT_MEM(mbuf)))
> +                     if (likely(RTE_MBUF_INDIRECT(mbuf)))
>                               pktmbuf_detach_zcp(mbuf);
>                       rte_ring_sp_enqueue(vpool->ring, (void *)mbuf);
>               }
> diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
> index 526b18d..e095999 100644
> --- a/lib/librte_mbuf/rte_mbuf.c
> +++ b/lib/librte_mbuf/rte_mbuf.c
> @@ -125,6 +125,7 @@ rte_pktmbuf_init(struct rte_mempool *mp,
>       m->pool = mp;
>       m->nb_segs = 1;
>       m->port = 0xff;
> +     m->priv_size = 0;
>  }
>  
>  /* do some sanity checks on a mbuf: panic if it fails */
> diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
> index 17ba791..4ced6d3 100644
> --- a/lib/librte_mbuf/rte_mbuf.h
> +++ b/lib/librte_mbuf/rte_mbuf.h
> @@ -268,7 +268,7 @@ struct rte_mbuf {
>       uint16_t data_len;        /**< Amount of data in segment buffer. */
>       uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
>       uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU order) */
> -     uint16_t reserved;
> +     uint16_t priv_size;       /**< size of the application private data */
>       union {
>               uint32_t rss;     /**< RSS hash result if RSS enabled */
>               struct {
> @@ -320,15 +320,38 @@ struct rte_mbuf {

Why does this new field need to go in cache line zero? New fields should go
by default in cache line 1, where there is more space, unless there is a
compelling reason not to. Space in cache line 0 is at a premium.
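
If the field does move to the second cache line, a compile-time guard
along these lines (just a sketch -- assumes RTE_CACHE_LINE_SIZE is the
usual 64 bytes, offsetof from <stddef.h> and RTE_BUILD_BUG_ON from
rte_common.h, dropped into any function that sees struct rte_mbuf) would
catch it drifting back into cache line 0:

	/* sketch: fail the build if the field lands in the first
	 * cache line of struct rte_mbuf */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, priv_size) <
			 RTE_CACHE_LINE_SIZE);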

/Bruce
