Fix checkpatch warnings and errors in lib/librte_mempool. checkpatch
is run as follows

scripts/checkpatch.pl --no-tree --file <file_name>

The following warning is treated as a false positive

1. WARNING: quoted string split across lines

Signed-off-by: Ravi Kerur <rkerur@gmail.com>
---
 lib/librte_mempool/rte_dom0_mempool.c | 41 +++++++++++-----------
 lib/librte_mempool/rte_mempool.c      | 64 +++++++++++++++++++++--------------
 lib/librte_mempool/rte_mempool.h      | 58 ++++++++++++++++++-------------
 3 files changed, 94 insertions(+), 69 deletions(-)

diff --git a/lib/librte_mempool/rte_dom0_mempool.c b/lib/librte_mempool/rte_dom0_mempool.c
index 9ec68fb..1545436 100644
--- a/lib/librte_mempool/rte_dom0_mempool.c
+++ b/lib/librte_mempool/rte_dom0_mempool.c
@@ -62,30 +62,31 @@

 static void
 get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num,
-            uint32_t pg_sz, uint32_t memseg_id)
+               uint32_t pg_sz, uint32_t memseg_id)
 {
-    uint32_t i;
-    uint64_t virt_addr, mfn_id;
-    struct rte_mem_config *mcfg;
-    uint32_t page_size = getpagesize();
-
-    /* get pointer to global configuration */
-    mcfg = rte_eal_get_configuration()->mem_config;
-    virt_addr =(uintptr_t) mcfg->memseg[memseg_id].addr;
-
-    for (i = 0; i != pg_num; i++) {
-        mfn_id = ((uintptr_t)va + i * pg_sz - virt_addr) / RTE_PGSIZE_2M;
-        pa[i] = mcfg->memseg[memseg_id].mfn[mfn_id] * page_size;
-    }
+       uint32_t i;
+       uint64_t virt_addr, mfn_id;
+       struct rte_mem_config *mcfg;
+       uint32_t page_size = getpagesize();
+
+       /* get pointer to global configuration */
+       mcfg = rte_eal_get_configuration()->mem_config;
+       virt_addr = (uintptr_t) mcfg->memseg[memseg_id].addr;
+
+       for (i = 0; i != pg_num; i++) {
+               mfn_id =
+               ((uintptr_t)va + i * pg_sz - virt_addr) / RTE_PGSIZE_2M;
+               pa[i] = mcfg->memseg[memseg_id].mfn[mfn_id] * page_size;
+       }
 }

 /* create the mempool for supporting Dom0 */
 struct rte_mempool *
 rte_dom0_mempool_create(const char *name, unsigned elt_num, unsigned elt_size,
-           unsigned cache_size, unsigned private_data_size,
-           rte_mempool_ctor_t *mp_init, void *mp_init_arg,
-           rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
-           int socket_id, unsigned flags)
+               unsigned cache_size, unsigned private_data_size,
+               rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+               rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+               int socket_id, unsigned flags)
 {
        struct rte_mempool *mp = NULL;
        phys_addr_t *pa;
@@ -107,7 +108,7 @@ rte_dom0_mempool_create(const char *name, unsigned elt_num, unsigned elt_size,
        pg_num = sz >> pg_shift;

        /* extract physical mappings of the allocated memory. */
-       pa = calloc(pg_num, sizeof (*pa));
+       pa = calloc(pg_num, sizeof(*pa));
        if (pa == NULL)
                return mp;

@@ -130,5 +131,5 @@ rte_dom0_mempool_create(const char *name, unsigned elt_num, unsigned elt_size,

        free(pa);

-       return (mp);
+       return mp;
 }
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 4cf6c25..e1374c4 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -196,7 +196,7 @@ rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t elt_sz, size_t align,
                }
        }

-       return (i);
+       return i;
 }

 /*
@@ -280,18 +280,20 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
         */
        if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
                unsigned new_size;
+
                new_size = optimize_object_size(sz->header_size + sz->elt_size +
                        sz->trailer_size);
                sz->trailer_size = new_size - sz->header_size - sz->elt_size;
        }

-       if (! rte_eal_has_hugepages()) {
+       if (!rte_eal_has_hugepages()) {
                /*
                 * compute trailer size so that pool elements fit exactly in
                 * a standard page
                 */
                int page_size = getpagesize();
                int new_size = page_size - sz->header_size - sz->elt_size;
+
                if (new_size < 0 || (unsigned int)new_size < sz->trailer_size) {
                        printf("When hugepages are disabled, pool objects "
                               "can't exceed PAGE_SIZE: %d + %d + %d > %d\n",
@@ -305,7 +307,7 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
        /* this is the size of an object, including header and trailer */
        sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;

-       return (sz->total_size);
+       return sz->total_size;
 }


@@ -319,14 +321,16 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz, uint32_t pg_shift)

        pg_sz = (size_t)1 << pg_shift;

-       if ((n = pg_sz / elt_sz) > 0) {
+       n = pg_sz / elt_sz;
+
+       if (n > 0) {
                pg_num = (elt_num + n - 1) / n;
                sz = pg_num << pg_shift;
        } else {
                sz = RTE_ALIGN_CEIL(elt_sz, pg_sz) * elt_num;
        }

-       return (sz);
+       return sz;
 }

 /*
@@ -335,9 +339,9 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz, uint32_t pg_shift)
  */
 static void
 mempool_lelem_iter(void *arg, __rte_unused void *start, void *end,
-        __rte_unused uint32_t idx)
+                       __rte_unused uint32_t idx)
 {
-        *(uintptr_t *)arg = (uintptr_t)end;
+       *(uintptr_t *)arg = (uintptr_t)end;
 }

 ssize_t
@@ -352,15 +356,16 @@ rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
        va = (uintptr_t)vaddr;
        uv = va;

-       if ((n = rte_mempool_obj_iter(vaddr, elt_num, elt_sz, 1,
+       n = rte_mempool_obj_iter(vaddr, elt_num, elt_sz, 1,
                        paddr, pg_num, pg_shift, mempool_lelem_iter,
-                       &uv)) != elt_num) {
+                       &uv);
+
+       if (n != elt_num)
                return (-n);
-       }

        uv = RTE_ALIGN_CEIL(uv, pg_sz);
        usz = uv - va;
-       return (usz);
+       return usz;
 }

 /* create the mempool */
@@ -491,16 +496,16 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
        private_data_size = (private_data_size +
                             RTE_CACHE_LINE_MASK) & (~RTE_CACHE_LINE_MASK);

-       if (! rte_eal_has_hugepages()) {
+       if (!rte_eal_has_hugepages()) {
                /*
                 * expand private data size to a whole page, so that the
                 * first pool element will start on a new standard page
                 */
                int head = sizeof(struct rte_mempool);
                int new_size = (private_data_size + head) % page_size;
-               if (new_size) {
+
+               if (new_size)
                        private_data_size += page_size - new_size;
-               }
        }

        /* try to allocate tailq entry */
@@ -519,7 +524,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
        if (vaddr == NULL)
                mempool_size += (size_t)objsz.total_size * n;

-       if (! rte_eal_has_hugepages()) {
+       if (!rte_eal_has_hugepages()) {
                /*
                 * we want the memory pool to start on a page boundary,
                 * because pool elements crossing page boundaries would
@@ -542,15 +547,16 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
        }

        if (rte_eal_has_hugepages()) {
-               startaddr = (void*)mz->addr;
+               startaddr = (void *)mz->addr;
        } else {
                /* align memory pool start address on a page boundary */
                unsigned long addr = (unsigned long)mz->addr;
+
                if (addr & (page_size - 1)) {
                        addr += page_size;
                        addr &= ~(page_size - 1);
                }
-               startaddr = (void*)addr;
+               startaddr = (void *)addr;
        }

        /* init the mempool structure */
@@ -587,7 +593,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
        /* mempool elements in a separate chunk of memory. */
        } else {
                mp->elt_va_start = (uintptr_t)vaddr;
-               memcpy(mp->elt_pa, paddr, sizeof (mp->elt_pa[0]) * pg_num);
+               memcpy(mp->elt_pa, paddr, sizeof(mp->elt_pa[0]) * pg_num);
        }

        mp->elt_va_end = mp->elt_va_start;
@@ -619,6 +625,7 @@ rte_mempool_count(const struct rte_mempool *mp)
 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
        {
                unsigned lcore_id;
+
                if (mp->cache_size == 0)
                        return count;

@@ -720,7 +727,7 @@ mempool_audit_cookies(const struct rte_mempool *mp)
 #pragma GCC diagnostic error "-Wcast-qual"
 #endif
 #else
-#define mempool_audit_cookies(mp) do {} while(0)
+#define mempool_audit_cookies(mp) do {} while (0)
 #endif

 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
@@ -730,6 +737,7 @@ mempool_audit_cache(const struct rte_mempool *mp)
 {
        /* check cache size consistency */
        unsigned lcore_id;
+
        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                if (mp->local_cache[lcore_id].len > mp->cache_flushthresh) {
                        RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n",
@@ -739,7 +747,7 @@ mempool_audit_cache(const struct rte_mempool *mp)
        }
 }
 #else
-#define mempool_audit_cache(mp) do {} while(0)
+#define mempool_audit_cache(mp) do {} while (0)
 #endif


@@ -831,8 +839,9 @@ rte_mempool_list_dump(FILE *f)
        struct rte_tailq_entry *te;
        struct rte_mempool_list *mempool_list;

-       if ((mempool_list =
-            RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
+       mempool_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL,
+                               rte_mempool_list);
+       if (mempool_list == NULL) {
                rte_errno = E_RTE_NO_TAILQ;
                return;
        }
@@ -855,8 +864,10 @@ rte_mempool_lookup(const char *name)
        struct rte_tailq_entry *te;
        struct rte_mempool_list *mempool_list;

-       if ((mempool_list =
-            RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
+       mempool_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL,
+                                       rte_mempool_list);
+
+       if (mempool_list == NULL) {
                rte_errno = E_RTE_NO_TAILQ;
                return NULL;
        }
@@ -885,8 +896,9 @@ void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *),
        struct rte_tailq_entry *te = NULL;
        struct rte_mempool_list *mempool_list;

-       if ((mempool_list =
-            RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
+       mempool_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL,
+                               rte_mempool_list);
+       if (mempool_list == NULL) {
                rte_errno = E_RTE_NO_TAILQ;
                return;
        }
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 3314651..2da5425 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -179,7 +179,9 @@ struct rte_mempool {
        uintptr_t   elt_va_end;
        /**< Virtual address of the <size + 1> mempool object. */
        phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
-       /**< Array of physical pages addresses for the mempool objects buffer. */
+       /**< Array of physical pages addresses for the
+        * mempool objects buffer.
+        */

 }  __rte_cache_aligned;

@@ -200,11 +202,12 @@ struct rte_mempool {
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 #define __MEMPOOL_STAT_ADD(mp, name, n) do {                   \
                unsigned __lcore_id = rte_lcore_id();           \
+                                                               \
                mp->stats[__lcore_id].name##_objs += n;         \
                mp->stats[__lcore_id].name##_bulk += 1;         \
-       } while(0)
+       } while (0)
 #else
-#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
+#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
 #endif

 /**
@@ -216,7 +219,7 @@ struct rte_mempool {
  */
 #define        MEMPOOL_HEADER_SIZE(mp, pgn)    (sizeof(*(mp)) + \
        RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \
-       sizeof ((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))
+       sizeof((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))

 /**
  * Returns TRUE if whole mempool is allocated in one contiguous block of memory.
@@ -257,6 +260,7 @@ static inline struct rte_mempool **__mempool_from_obj(void *obj)
 static inline const struct rte_mempool *rte_mempool_from_obj(void *obj)
 {
        struct rte_mempool * const *mpp;
+
        mpp = __mempool_from_obj(obj);
        return *mpp;
 }
@@ -272,6 +276,7 @@ static inline uint64_t __mempool_read_header_cookie(const void *obj)
 static inline uint64_t __mempool_read_trailer_cookie(void *obj)
 {
        struct rte_mempool **mpp = __mempool_from_obj(obj);
+
        return *(uint64_t *)((char *)obj + (*mpp)->elt_size);
 }

@@ -279,6 +284,7 @@ static inline uint64_t __mempool_read_trailer_cookie(void *obj)
 static inline void __mempool_write_header_cookie(void *obj, int free)
 {
        uint64_t *cookie_p;
+
        cookie_p = (uint64_t *)((char *)obj - sizeof(uint64_t));
        if (free == 0)
                *cookie_p = RTE_MEMPOOL_HEADER_COOKIE1;
@@ -292,6 +298,7 @@ static inline void __mempool_write_trailer_cookie(void *obj)
 {
        uint64_t *cookie_p;
        struct rte_mempool **mpp = __mempool_from_obj(obj);
+
        cookie_p = (uint64_t *)((char *)obj + (*mpp)->elt_size);
        *cookie_p = RTE_MEMPOOL_TRAILER_COOKIE;
 }
@@ -333,8 +340,8 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
                obj = obj_table[n];

                if (rte_mempool_from_obj(obj) != mp)
-                       rte_panic("MEMPOOL: object is owned by another "
-                                 "mempool\n");
+                       rte_panic(
+                       "MEMPOOL: object is owned by another mempool\n");

                cookie = __mempool_read_header_cookie(obj);

@@ -342,30 +349,29 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
                        if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
                                rte_log_set_history(0);
                                RTE_LOG(CRIT, MEMPOOL,
-                                       "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-                                       obj, mp, cookie);
+                               "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+                               obj, mp, cookie);
                                rte_panic("MEMPOOL: bad header cookie (put)\n");
                        }
                        __mempool_write_header_cookie(obj, 1);
-               }
-               else if (free == 1) {
+               } else if (free == 1) {
                        if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
                                rte_log_set_history(0);
                                RTE_LOG(CRIT, MEMPOOL,
-                                       "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-                                       obj, mp, cookie);
+                               "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+                               obj, mp, cookie);
                                rte_panic("MEMPOOL: bad header cookie (get)\n");
                        }
                        __mempool_write_header_cookie(obj, 0);
-               }
-               else if (free == 2) {
+               } else if (free == 2) {
                        if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
                            cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
                                rte_log_set_history(0);
                                RTE_LOG(CRIT, MEMPOOL,
-                                       "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-                                       obj, mp, cookie);
-                               rte_panic("MEMPOOL: bad header cookie (audit)\n");
+                               "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+                               obj, mp, cookie);
+                               rte_panic(
+                               "MEMPOOL: bad header cookie (audit)\n");
                        }
                }
                cookie = __mempool_read_trailer_cookie(obj);
@@ -382,7 +388,7 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
 #pragma GCC diagnostic error "-Wcast-qual"
 #endif
 #else
-#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
+#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while (0)
 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

 /**
@@ -807,8 +813,7 @@ ring_enqueue:
        if (is_mp) {
                if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
                        rte_panic("cannot put objects in mempool\n");
-       }
-       else {
+       } else {
                if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
                        rte_panic("cannot put objects in mempool\n");
        }
@@ -963,8 +968,11 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
                /* No. Backfill the cache first, and then fill from it */
                uint32_t req = n + (cache_size - cache->len);

-               /* How many do we require i.e. number to fill the cache + the request */
-               ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
+               /* How many do we require i.e. number to fill the
+                * cache + the request
+                */
+               ret = rte_ring_mc_dequeue_bulk(mp->ring,
+                                       &cache->objs[cache->len], req);
                if (unlikely(ret < 0)) {
                        /*
                         * In the offchance that we are buffer constrained,
@@ -979,7 +987,8 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
        }

        /* Now fill in the response ... */
-       for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
+       for (index = 0, len = cache->len - 1; index < n;
+               ++index, len--, obj_table++)
                *obj_table = cache_objs[len];

        cache->len -= n;
@@ -1027,6 +1036,7 @@ static inline int __attribute__((always_inline))
 rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
        int ret;
+
        ret = __mempool_get_bulk(mp, obj_table, n, 1);
        if (ret == 0)
                __mempool_check_cookies(mp, obj_table, n, 1);
@@ -1056,6 +1066,7 @@ static inline int __attribute__((always_inline))
 rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
        int ret;
+
        ret = __mempool_get_bulk(mp, obj_table, n, 0);
        if (ret == 0)
                __mempool_check_cookies(mp, obj_table, n, 1);
@@ -1088,6 +1099,7 @@ static inline int __attribute__((always_inline))
 rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
        int ret;
+
        ret = __mempool_get_bulk(mp, obj_table, n,
                                 !(mp->flags & MEMPOOL_F_SC_GET));
        if (ret == 0)
-- 
1.9.1

Reply via email to