Go through the linuxapp eal code and clean up whitespace
issues reported by checkpatch.
---
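
The warnings referred to above come from the Linux kernel checkpatch script
run over the EAL sources. A typical invocation (the script path below is an
assumption about the local tree layout, not something this patch relies on)
might be:

    $ /path/to/linux/scripts/checkpatch.pl --no-tree --file \
          lib/librte_eal/linuxapp/eal/eal_alarm.c
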
 lib/librte_eal/linuxapp/eal/eal_alarm.c         |  17 ++--
 lib/librte_eal/linuxapp/eal/eal_debug.c         |   2 +-
 lib/librte_eal/linuxapp/eal/eal_hugepage_info.c |  57 ++++++-------
 lib/librte_eal/linuxapp/eal/eal_interrupts.c    |  26 +++---
 lib/librte_eal/linuxapp/eal/eal_ivshmem.c       |  76 ++++++++---------
 lib/librte_eal/linuxapp/eal/eal_lcore.c         |   5 +-
 lib/librte_eal/linuxapp/eal/eal_memory.c        | 103 +++++++++++++-----------
 lib/librte_eal/linuxapp/eal/eal_pci.c           |  25 +++---
 lib/librte_eal/linuxapp/eal/eal_pci_uio.c       |  12 ++-
 lib/librte_eal/linuxapp/eal/eal_pci_vfio.c      |   3 +-
 lib/librte_eal/linuxapp/eal/eal_timer.c         |  11 +--
 lib/librte_eal/linuxapp/eal/eal_xen_memory.c    |  41 +++++-----
 12 files changed, 199 insertions(+), 179 deletions(-)
diff --git a/lib/librte_eal/linuxapp/eal/eal_alarm.c b/lib/librte_eal/linuxapp/eal/eal_alarm.c
index a0eae1e..762f162 100644
--- a/lib/librte_eal/linuxapp/eal/eal_alarm.c
+++ b/lib/librte_eal/linuxapp/eal/eal_alarm.c
@@ -97,16 +97,17 @@ error:

 static void
 eal_alarm_callback(struct rte_intr_handle *hdl __rte_unused,
-               void *arg __rte_unused)
+                  void *arg __rte_unused)
 {
        struct timeval now;
        struct alarm_entry *ap;

        rte_spinlock_lock(&alarm_list_lk);
-       while ((ap = LIST_FIRST(&alarm_list)) !=NULL &&
-                       gettimeofday(&now, NULL) == 0 &&
-                       (ap->time.tv_sec < now.tv_sec || (ap->time.tv_sec == now.tv_sec &&
-                                               ap->time.tv_usec <= now.tv_usec))){
+       while ((ap = LIST_FIRST(&alarm_list)) != NULL &&
+              gettimeofday(&now, NULL) == 0 &&
+              (ap->time.tv_sec < now.tv_sec ||
+               (ap->time.tv_sec == now.tv_sec &&
+                ap->time.tv_usec <= now.tv_usec))){
                ap->executing = 1;
                ap->executing_id = pthread_self();
                rte_spinlock_unlock(&alarm_list_lk);
@@ -162,7 +163,7 @@ rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb_fn, void *cb_arg)
        rte_spinlock_lock(&alarm_list_lk);
        if (!handler_registered) {
                ret |= rte_intr_callback_register(&intr_handle,
-                               eal_alarm_callback, NULL);
+                                                 eal_alarm_callback, NULL);
                handler_registered = (ret == 0) ? 1 : 0;
        }

@@ -171,8 +172,8 @@ rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb_fn, void *cb_arg)
        else {
                LIST_FOREACH(ap, &alarm_list, next) {
                        if (ap->time.tv_sec > new_alarm->time.tv_sec ||
-                                       (ap->time.tv_sec == new_alarm->time.tv_sec &&
-                                                       ap->time.tv_usec > new_alarm->time.tv_usec)){
+                           (ap->time.tv_sec == new_alarm->time.tv_sec &&
+                            ap->time.tv_usec > new_alarm->time.tv_usec)){
                                LIST_INSERT_BEFORE(ap, new_alarm, next);
                                break;
                        }
diff --git a/lib/librte_eal/linuxapp/eal/eal_debug.c b/lib/librte_eal/linuxapp/eal/eal_debug.c
index 44fc4f3..c825057 100644
--- a/lib/librte_eal/linuxapp/eal/eal_debug.c
+++ b/lib/librte_eal/linuxapp/eal/eal_debug.c
@@ -56,7 +56,7 @@ void rte_dump_stack(void)
        while (size > 0) {
                rte_log(RTE_LOG_ERR, RTE_LOGTYPE_EAL,
                        "%d: [%s]\n", size, symb[size - 1]);
-               size --;
+               size--;
        }
 }

diff --git a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
index 028e309..4d4e226 100644
--- a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
+++ b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
@@ -65,7 +65,7 @@ static int32_t
 get_num_hugepages(const char *subdir)
 {
        char path[PATH_MAX];
-       long unsigned resv_pages, num_pages = 0;
+       unsigned long resv_pages, num_pages = 0;
        const char *nr_hp_file;
        const char *nr_rsvd_file = "resv_hugepages";

@@ -112,8 +112,8 @@ get_default_hp_size(void)
        FILE *fd = fopen(proc_meminfo, "r");
        if (fd == NULL)
                rte_panic("Cannot open %s\n", proc_meminfo);
-       while(fgets(buffer, sizeof(buffer), fd)){
-               if (strncmp(buffer, str_hugepagesz, hugepagesz_len) == 0){
+       while (fgets(buffer, sizeof(buffer), fd)) {
+               if (strncmp(buffer, str_hugepagesz, hugepagesz_len) == 0) {
                        size = rte_str_to_size(&buffer[hugepagesz_len]);
                        break;
                }
@@ -152,7 +152,7 @@ get_hugepage_dir(uint64_t hugepage_sz)
        if (default_size == 0)
                default_size = get_default_hp_size();

-       while (fgets(buf, sizeof(buf), fd)){
+       while (fgets(buf, sizeof(buf), fd)) {
                if (rte_strsplit(buf, sizeof(buf), splitstr, _FIELDNAME_MAX,
                                split_tok) != _FIELDNAME_MAX) {
                        RTE_LOG(ERR, EAL, "Error parsing %s\n", proc_mounts);
@@ -164,12 +164,12 @@ get_hugepage_dir(uint64_t hugepage_sz)
                                strcmp(splitstr[MOUNTPT], internal_config.hugepage_dir) != 0)
                        continue;

-               if (strncmp(splitstr[FSTYPE], hugetlbfs_str, htlbfs_str_len) == 0){
+               if (strncmp(splitstr[FSTYPE], hugetlbfs_str, htlbfs_str_len) == 0) {
                        const char *pagesz_str = strstr(splitstr[OPTIONS], pagesize_opt);

                        /* if no explicit page size, the default page size is compared */
-                       if (pagesz_str == NULL){
-                               if (hugepage_sz == default_size){
+                       if (pagesz_str == NULL) {
+                               if (hugepage_sz == default_size) {
                                        retval = strdup(splitstr[MOUNTPT]);
                                        break;
                                }
@@ -204,7 +204,7 @@ swap_hpi(struct hugepage_info *a, struct hugepage_info *b)
  * if it's in use by another DPDK process).
  */
 static int
-clear_hugedir(const char * hugedir)
+clear_hugedir(const char *hugedir)
 {
        DIR *dir;
        struct dirent *dirent;
@@ -227,7 +227,7 @@ clear_hugedir(const char * hugedir)
                goto error;
        }

-       while(dirent != NULL){
+       while (dirent != NULL) {
                /* skip files that don't match the hugepage pattern */
                if (fnmatch(filter, dirent->d_name, 0) > 0) {
                        dirent = readdir(dir);
@@ -251,7 +251,7 @@ clear_hugedir(const char * hugedir)
                        flock(fd, LOCK_UN);
                        unlinkat(dir_fd, dirent->d_name, 0);
                }
-               close (fd);
+               close(fd);
                dirent = readdir(dir);
        }

@@ -283,24 +283,27 @@ eal_hugepage_info_init(void)
        DIR *dir = opendir(sys_dir_path);
        if (dir == NULL)
                rte_panic("Cannot open directory %s to read system hugepage info\n",
-                               sys_dir_path);
+                         sys_dir_path);

        struct dirent *dirent = readdir(dir);
-       while(dirent != NULL){
-               if (strncmp(dirent->d_name, dirent_start_text, dirent_start_len) == 0){
-                       struct hugepage_info *hpi = \
-                                       &internal_config.hugepage_info[num_sizes];
+       while (dirent != NULL) {
+               if (strncmp(dirent->d_name, dirent_start_text, dirent_start_len) == 0) {
+                       struct hugepage_info *hpi =
+                               &internal_config.hugepage_info[num_sizes];
+
                        hpi->hugepage_sz = rte_str_to_size(&dirent->d_name[dirent_start_len]);
                        hpi->hugedir = get_hugepage_dir(hpi->hugepage_sz);

                        /* first, check if we have a mountpoint */
-                       if (hpi->hugedir == NULL){
-                               int32_t num_pages;
-                               if ((num_pages = get_num_hugepages(dirent->d_name)) > 0)
-                                       RTE_LOG(INFO, EAL, "%u hugepages of size %llu reserved, "\
-                                                       "but no mounted hugetlbfs found for that size\n",
-                                                       (unsigned)num_pages,
-                                                       (unsigned long long)hpi->hugepage_sz);
+                       if (hpi->hugedir == NULL) {
+                               int32_t num_pages
+                                       = get_num_hugepages(dirent->d_name);
+
+                               if (num_pages > 0)
+                                       RTE_LOG(INFO, EAL, "%u hugepages of size %llu reserved, "
+                                               "but no mounted hugetlbfs found for that size\n",
+                                               (unsigned)num_pages,
+                                               (unsigned long long)hpi->hugepage_sz);
                        } else {
                                /* try to obtain a writelock */
                                hpi->lock_descriptor = open(hpi->hugedir, O_RDONLY);
@@ -324,7 +327,7 @@ eal_hugepage_info_init(void)
 #ifndef RTE_ARCH_64
                                /* for 32-bit systems, limit number of hugepages to 1GB per page size */
                                hpi->num_pages[0] = RTE_MIN(hpi->num_pages[0],
-                                               RTE_PGSIZE_1G / hpi->hugepage_sz);
+                                                           RTE_PGSIZE_1G / hpi->hugepage_sz);
 #endif

                                num_sizes++;
@@ -336,19 +339,19 @@ eal_hugepage_info_init(void)
        internal_config.num_hugepage_sizes = num_sizes;

        /* sort the page directory entries by size, largest to smallest */
-       for (i = 0; i < num_sizes; i++){
+       for (i = 0; i < num_sizes; i++) {
                unsigned j;
                for (j = i+1; j < num_sizes; j++)
                        if (internal_config.hugepage_info[j-1].hugepage_sz < \
-                                       internal_config.hugepage_info[j].hugepage_sz)
+                           internal_config.hugepage_info[j].hugepage_sz)
                                swap_hpi(&internal_config.hugepage_info[j-1],
-                                               &internal_config.hugepage_info[j]);
+                                        &internal_config.hugepage_info[j]);
        }

        /* now we have all info, check we have at least one valid size */
        for (i = 0; i < num_sizes; i++)
                if (internal_config.hugepage_info[i].hugedir != NULL &&
-                               internal_config.hugepage_info[i].num_pages[0] > 0)
+                   internal_config.hugepage_info[i].num_pages[0] > 0)
                        return 0;

        /* no valid hugepage mounts available, return error */
diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
index 66deda2..b7fae8a 100644
--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -72,7 +72,7 @@
 /**
  * union for pipe fds.
  */
-union intr_pipefds{
+union intr_pipefds {
        struct {
                int pipefd[2];
        };
@@ -452,8 +452,8 @@ rte_intr_callback_register(struct rte_intr_handle *intr_handle,

        /* no existing callbacks for this - add new source */
        if (src == NULL) {
-               if ((src = rte_zmalloc("interrupt source list",
-                               sizeof(*src), 0)) == NULL) {
+               src = rte_zmalloc("interrupt source list", sizeof(*src), 0);
+               if (src == NULL) {
                        RTE_LOG(ERR, EAL, "Can not allocate memory\n");
                        rte_free(callback);
                        ret = -ENOMEM;
@@ -477,7 +477,7 @@ rte_intr_callback_register(struct rte_intr_handle *intr_handle,
                if (write(intr_pipe.writefd, "1", 1) < 0)
                        return -EPIPE;

-       return (ret);
+       return ret;
 }

 int
@@ -537,11 +537,10 @@ rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
        rte_spinlock_unlock(&intr_lock);

        /* notify the pipe fd waited by epoll_wait to rebuild the wait list */
-       if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
+       if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0)
                ret = -EPIPE;
-       }

-       return (ret);
+       return ret;
 }

 int
@@ -550,7 +549,7 @@ rte_intr_enable(struct rte_intr_handle *intr_handle)
        if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
                return -1;

-       switch (intr_handle->type){
+       switch (intr_handle->type) {
        /* write to the uio fd to enable the interrupt */
        case RTE_INTR_HANDLE_UIO:
                if (uio_intr_enable(intr_handle))
@@ -590,7 +589,7 @@ rte_intr_disable(struct rte_intr_handle *intr_handle)
        if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
                return -1;

-       switch (intr_handle->type){
+       switch (intr_handle->type) {
        /* write to the uio fd to disable the interrupt */
        case RTE_INTR_HANDLE_UIO:
                if (uio_intr_disable(intr_handle))
@@ -639,7 +638,7 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
                 * if the pipe fd is ready to read, return out to
                 * rebuild the wait list.
                 */
-               if (events[n].data.fd == intr_pipe.readfd){
+               if (events[n].data.fd == intr_pipe.readfd) {
                        int r = read(intr_pipe.readfd, buf.charbuf,
                                        sizeof(buf.charbuf));
                        RTE_SET_USED(r);
@@ -650,7 +649,7 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
                        if (src->intr_handle.fd ==
                                        events[n].data.fd)
                                break;
-               if (src == NULL){
+               if (src == NULL) {
                        rte_spinlock_unlock(&intr_lock);
                        continue;
                }
@@ -739,7 +738,7 @@ eal_intr_handle_interrupts(int pfd, unsigned totalfds)
        struct epoll_event events[totalfds];
        int nfds = 0;

-       for(;;) {
+       for (;;) {
                nfds = epoll_wait(pfd, events, totalfds,
                        EAL_INTR_EPOLL_WAIT_FOREVER);
                /* epoll_wait fail */
@@ -818,8 +817,7 @@ eal_intr_thread_main(__rte_unused void *arg)
                                        src->intr_handle.fd, &ev) < 0){
                                rte_panic("Error adding fd %d epoll_ctl, %s\n",
                                        src->intr_handle.fd, strerror(errno));
-                       }
-                       else
+                       } else
                                numfds++;
                }
                rte_spinlock_unlock(&intr_lock);
diff --git a/lib/librte_eal/linuxapp/eal/eal_ivshmem.c b/lib/librte_eal/linuxapp/eal/eal_ivshmem.c
index 2deaeb7..9951fa0 100644
--- a/lib/librte_eal/linuxapp/eal/eal_ivshmem.c
+++ b/lib/librte_eal/linuxapp/eal/eal_ivshmem.c
@@ -71,9 +71,9 @@
 #define FULL (PHYS|VIRT|IOREMAP)

 #define METADATA_SIZE_ALIGNED \
-       (RTE_ALIGN_CEIL(sizeof(struct rte_ivshmem_metadata),pagesz))
+       (RTE_ALIGN_CEIL(sizeof(struct rte_ivshmem_metadata), pagesz))

-#define CONTAINS(x,y)\
+#define CONTAINS(x, y)\
        (((y).addr_64 >= (x).addr_64) && ((y).addr_64 < (x).addr_64 + (x).len))

 #define DIM(x) (sizeof(x)/sizeof(x[0]))
@@ -95,7 +95,8 @@ struct ivshmem_shared_config {
        struct ivshmem_pci_device pci_devs[RTE_LIBRTE_IVSHMEM_MAX_PCI_DEVS];
        uint32_t pci_devs_idx;
 };
-static struct ivshmem_shared_config * ivshmem_config;
+
+static struct ivshmem_shared_config *ivshmem_config;
 static int memseg_idx;
 static int pagesz;

@@ -107,7 +108,7 @@ TAILQ_HEAD(rte_ring_list, rte_tailq_entry);
  */

 static int
-is_ivshmem_device(struct rte_pci_device * dev)
+is_ivshmem_device(struct rte_pci_device *dev)
 {
        return (dev->id.vendor_id == PCI_VENDOR_ID_IVSHMEM
                        && dev->id.device_id == PCI_DEVICE_ID_IVSHMEM);
@@ -124,7 +125,7 @@ map_metadata(int fd, uint64_t len)
 }

 static void
-unmap_metadata(void * ptr)
+unmap_metadata(void *ptr)
 {
        munmap(ptr, sizeof(struct rte_ivshmem_metadata));
 }
@@ -133,14 +134,14 @@ static int
 has_ivshmem_metadata(int fd, uint64_t len)
 {
        struct rte_ivshmem_metadata metadata;
-       void * ptr;
+       void *ptr;

        ptr = map_metadata(fd, len);

        if (ptr == MAP_FAILED)
                return -1;

-       metadata = *(struct rte_ivshmem_metadata*) (ptr);
+       metadata = *(struct rte_ivshmem_metadata *) (ptr);

        unmap_metadata(ptr);

@@ -148,7 +149,7 @@ has_ivshmem_metadata(int fd, uint64_t len)
 }

 static void
-remove_segment(struct ivshmem_segment * ms, int len, int idx)
+remove_segment(struct ivshmem_segment *ms, int len, int idx)
 {
        int i;

@@ -158,7 +159,7 @@ remove_segment(struct ivshmem_segment * ms, int len, int idx)
 }

 static int
-overlap(const struct rte_memzone * mz1, const struct rte_memzone * mz2)
+overlap(const struct rte_memzone *mz1, const struct rte_memzone *mz2)
 {
        uint64_t start1, end1, start2, end2;
        uint64_t p_start1, p_end1, p_start2, p_end2;
@@ -205,7 +206,7 @@ overlap(const struct rte_memzone * mz1, const struct rte_memzone * mz2)
 }

 static int
-adjacent(const struct rte_memzone * mz1, const struct rte_memzone * mz2)
+adjacent(const struct rte_memzone *mz1, const struct rte_memzone *mz2)
 {
        uint64_t start1, end1, start2, end2;
        uint64_t p_start1, p_end1, p_start2, p_end2;
@@ -252,7 +253,7 @@ adjacent(const struct rte_memzone * mz1, const struct rte_memzone * mz2)
 }

 static int
-has_adjacent_segments(struct ivshmem_segment * ms, int len)
+has_adjacent_segments(struct ivshmem_segment *ms, int len)
 {
        int i, j, a;

@@ -271,7 +272,7 @@ has_adjacent_segments(struct ivshmem_segment * ms, int len)
 }

 static int
-has_overlapping_segments(struct ivshmem_segment * ms, int len)
+has_overlapping_segments(struct ivshmem_segment *ms, int len)
 {
        int i, j;

@@ -283,10 +284,10 @@ has_overlapping_segments(struct ivshmem_segment * ms, int len)
 }

 static int
-seg_compare(const void * a, const void * b)
+seg_compare(const void *a, const void *b)
 {
-       const struct ivshmem_segment * s1 = (const struct ivshmem_segment*) a;
-       const struct ivshmem_segment * s2 = (const struct ivshmem_segment*) b;
+       const struct ivshmem_segment *s1 = a;
+       const struct ivshmem_segment *s2 = b;

        /* move unallocated zones to the end */
        if (s1->entry.mz.addr == NULL && s2->entry.mz.addr == NULL)
@@ -324,19 +325,19 @@ entry_dump(struct rte_ivshmem_metadata_entry *e)

 /* read through metadata mapped from the IVSHMEM device */
 static int
-read_metadata(char * path, int path_len, int fd, uint64_t flen)
+read_metadata(char *path, int path_len, int fd, uint64_t flen)
 {
        struct rte_ivshmem_metadata metadata;
-       struct rte_ivshmem_metadata_entry * entry;
+       struct rte_ivshmem_metadata_entry *entry;
        int idx, i;
-       void * ptr;
+       void *ptr;

        ptr = map_metadata(fd, flen);

        if (ptr == MAP_FAILED)
                return -1;

-       metadata = *(struct rte_ivshmem_metadata*) (ptr);
+       metadata = *(struct rte_ivshmem_metadata *)ptr;

        unmap_metadata(ptr);

@@ -374,9 +375,9 @@ read_metadata(char * path, int path_len, int fd, uint64_t flen)

 /* check through each segment and look for adjacent or overlapping ones. */
 static int
-cleanup_segments(struct ivshmem_segment * ms, int tbl_len)
+cleanup_segments(struct ivshmem_segment *ms, int tbl_len)
 {
-       struct ivshmem_segment * s, * tmp;
+       struct ivshmem_segment *s, *tmp;
        int i, j, concat, seg_adjacent, seg_overlapping;
        uint64_t start1, start2, end1, end2, p_start1, p_start2, i_start1, i_start2;

@@ -500,7 +501,8 @@ create_shared_config(void)

        /* change the exclusive lock we got earlier to a shared lock */
        if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
-               RTE_LOG(ERR, EAL, "Locking %s failed: %s \n", path, strerror(errno));
+               RTE_LOG(ERR, EAL,
+                       "Locking %s failed: %s\n", path, strerror(errno));
                return -1;
        }

@@ -564,7 +566,8 @@ open_shared_config(void)

        /* place a shared lock on config file */
        if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
-               RTE_LOG(ERR, EAL, "Locking %s failed: %s \n", path, strerror(errno));
+               RTE_LOG(ERR, EAL,
+                       "Locking %s failed: %s\n", path, strerror(errno));
                return -1;
        }

@@ -585,14 +588,14 @@ static inline int
 map_all_segments(void)
 {
        struct ivshmem_segment ms_tbl[RTE_MAX_MEMSEG];
-       struct ivshmem_pci_device * pci_dev;
-       struct rte_mem_config * mcfg;
-       struct ivshmem_segment * seg;
+       struct ivshmem_pci_device *pci_dev;
+       struct rte_mem_config *mcfg;
+       struct ivshmem_segment *seg;
        int fd, fd_zero;
        unsigned i, j;
        struct rte_memzone mz;
        struct rte_memseg ms;
-       void * base_addr;
+       void *base_addr;
        uint64_t align, len;
        phys_addr_t ioremap_addr;

@@ -748,11 +751,11 @@ map_all_segments(void)
 int
 rte_eal_ivshmem_obj_init(void)
 {
-       struct rte_ring_list* ring_list = NULL;
-       struct rte_mem_config * mcfg;
-       struct ivshmem_segment * seg;
-       struct rte_memzone * mz;
-       struct rte_ring * r;
+       struct rte_ring_list *ring_list = NULL;
+       struct rte_mem_config *mcfg;
+       struct ivshmem_segment *seg;
+       struct rte_memzone *mz;
+       struct rte_ring *r;
        struct rte_tailq_entry *te;
        unsigned i, ms, idx;
        uint64_t offset;
@@ -819,7 +822,7 @@ rte_eal_ivshmem_obj_init(void)
                                sizeof(RTE_RING_MZ_PREFIX) - 1) != 0)
                        continue;

-               r = (struct rte_ring*) (mz->addr_64);
+               r = (struct rte_ring *)(mz->addr_64);

                te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
                if (te == NULL) {
@@ -846,8 +849,8 @@ rte_eal_ivshmem_obj_init(void)
 /* initialize ivshmem structures */
 int rte_eal_ivshmem_init(void)
 {
-       struct rte_pci_device * dev;
-       struct rte_pci_resource * res;
+       struct rte_pci_device *dev;
+       struct rte_pci_resource *res;
        int fd, ret;
        char path[PATH_MAX];

@@ -956,8 +959,7 @@ int rte_eal_ivshmem_init(void)
                        RTE_LOG(ERR, EAL, "Mapping IVSHMEM segments failed!\n");
                        return -1;
                }
-       }
-       else {
+       } else {
                RTE_LOG(DEBUG, EAL, "No IVSHMEM configuration found! \n");
        }

diff --git a/lib/librte_eal/linuxapp/eal/eal_lcore.c b/lib/librte_eal/linuxapp/eal/eal_lcore.c
index ef8c433..a332885 100644
--- a/lib/librte_eal/linuxapp/eal/eal_lcore.c
+++ b/lib/librte_eal/linuxapp/eal/eal_lcore.c
@@ -98,7 +98,8 @@ eal_cpu_socket_id(unsigned lcore_id)
                        break;
                }
        }
-       if (endptr == NULL || *endptr!='\0' || endptr == e->d_name+prefix_len) {
+       if (endptr == NULL || *endptr != '\0' ||
+           endptr == e->d_name+prefix_len) {
                RTE_LOG(WARNING, EAL, "Cannot read numa node link "
                                "for lcore %u - using physical package id instead\n",
                                lcore_id);
@@ -188,7 +189,7 @@ rte_eal_cpu_init(void)
                                lcore_id,
                                lcore_config[lcore_id].core_id,
                                lcore_config[lcore_id].socket_id);
-               count ++;
+               count++;
        }
        /* Set the count of enabled logical cores of the EAL configuration */
        config->lcore_count = count;
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 5f9f92e..d140480 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -120,7 +120,7 @@ rte_mem_lock_page(const void *virt)
        unsigned long virtual = (unsigned long)virt;
        int page_size = getpagesize();
        unsigned long aligned = (virtual & ~ (page_size - 1));
-       return mlock((void*)aligned, page_size);
+       return mlock((void *)aligned, page_size);
 }

 /*
@@ -212,10 +212,10 @@ aslr_enabled(void)
        if (retval == 0)
                return -EIO;
        switch (c) {
-               case '0' : return 0;
-               case '1' : return 1;
-               case '2' : return 2;
-               default: return -EINVAL;
+       case '0': return 0;
+       case '1': return 1;
+       case '2': return 2;
+       default: return -EINVAL;
        }
 }

@@ -233,19 +233,20 @@ get_virtual_area(size_t *size, size_t hugepage_sz)
        int fd;
        long aligned_addr;

-       if (internal_config.base_virtaddr != 0) {
-               addr = (void*) (uintptr_t) (internal_config.base_virtaddr +
+       if (internal_config.base_virtaddr != 0)
+               addr = (void *) (uintptr_t) (internal_config.base_virtaddr +
                                baseaddr_offset);
-       }
-       else addr = NULL;
+       else
+               addr = NULL;

        RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

        fd = open("/dev/zero", O_RDONLY);
-       if (fd < 0){
+       if (fd < 0) {
                RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
                return NULL;
        }
+
        do {
                addr = mmap(addr,
                                (*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, fd, 0);
@@ -379,14 +380,13 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
                if (orig) {
                        hugepg_tbl[i].orig_va = virtaddr;
                        memset(virtaddr, 0, hugepage_sz);
-               }
-               else {
+               } else {
                        hugepg_tbl[i].final_va = virtaddr;
                }

                /* set shared flock on the file. */
                if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
-                       RTE_LOG(ERR, EAL, "%s(): Locking file failed:%s \n",
+                       RTE_LOG(ERR, EAL, "%s(): Locking file failed:%s\n",
                                __func__, strerror(errno));
                        close(fd);
                        return -1;
@@ -483,7 +483,7 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
                }

                total_size = 0;
-               for (;i < j; i++) {
+               for (; i < j; i++) {

                        /* unmap current segment */
                        if (total_size > 0)
@@ -502,7 +502,9 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
                                        PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

                        if (vma_addr == MAP_FAILED || vma_addr != old_addr) {
-                               RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__, strerror(errno));
+                               RTE_LOG(ERR, EAL,
+                                       "%s(): mmap failed: %s\n",
+                                       __func__, strerror(errno));
                                close(fd);
                                return -1;
                        }
@@ -512,19 +514,19 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
                         * the page and it is marked as used and gets into process' pagemap.
                         */
                        for (offset = 0; offset < total_size; offset += hugepage_sz)
-                               *((volatile uint8_t*) RTE_PTR_ADD(vma_addr, offset));
+                               *((volatile uint8_t *) RTE_PTR_ADD(vma_addr, offset));
                }

                /* set shared flock on the file. */
                if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
-                       RTE_LOG(ERR, EAL, "%s(): Locking file failed:%s \n",
+                       RTE_LOG(ERR, EAL, "%s(): Locking file failed:%s\n",
                                __func__, strerror(errno));
                        close(fd);
                        return -1;
                }

-               snprintf(hugepg_tbl[page_idx].filepath, MAX_HUGEPAGE_PATH, "%s",
-                               filepath);
+               snprintf(hugepg_tbl[page_idx].filepath, MAX_HUGEPAGE_PATH,
+                        "%s", filepath);

                physaddr = rte_mem_virt2phy(vma_addr);

@@ -567,23 +569,26 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
        }

        /* zero out the rest */
-       memset(&hugepg_tbl[page_idx], 0, (hpi->num_pages[0] - page_idx) * sizeof(struct hugepage_file));
+       memset(&hugepg_tbl[page_idx], 0,
+              (hpi->num_pages[0] - page_idx) * sizeof(struct hugepage_file));
        return page_idx;
 }
 #else/* RTE_EAL_SINGLE_FILE_SEGMENTS=n */

 /* Unmap all hugepages from original mapping */
 static int
-unmap_all_hugepages_orig(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
+unmap_all_hugepages_orig(struct hugepage_file *hugepg_tbl,
+                        struct hugepage_info *hpi)
 {
-        unsigned i;
-        for (i = 0; i < hpi->num_pages[0]; i++) {
-                if (hugepg_tbl[i].orig_va) {
-                        munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
-                        hugepg_tbl[i].orig_va = NULL;
-                }
-        }
-        return 0;
+       unsigned i;
+
+       for (i = 0; i < hpi->num_pages[0]; i++) {
+               if (hugepg_tbl[i].orig_va) {
+                       munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
+                       hugepg_tbl[i].orig_va = NULL;
+               }
+       }
+       return 0;
 }
 #endif /* RTE_EAL_SINGLE_FILE_SEGMENTS */

@@ -690,7 +695,7 @@ sort_by_physaddr(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
                 * browse all entries starting at 'i', and find the
                 * entry with the smallest addr
                 */
-               for (j=i; j< hpi->num_pages[0]; j++) {
+               for (j = i; j < hpi->num_pages[0]; j++) {

                        if (compare_addr == 0 ||
 #ifdef RTE_ARCH_PPC_64
@@ -744,8 +749,8 @@ create_shared_memory(const char *filename, const size_t mem_size)
  * destination is typically the shared memory.
  */
 static int
-copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
-               const struct hugepage_file * src, int src_size)
+copy_hugepages_to_shared_mem(struct hugepage_file *dst, int dest_size,
+               const struct hugepage_file *src, int src_size)
 {
        int src_pos, dst_pos = 0;

@@ -831,7 +836,7 @@ unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
                                                        uint64_t final_size = nr_pg_left * hp->size;
                                                        uint64_t seg_size = hp->repeated * hp->size;

-                                                       void * unmap_va = RTE_PTR_ADD(hp->final_va,
+                                                       void *unmap_va = RTE_PTR_ADD(hp->final_va,
                                                                        final_size);
                                                        int fd;

@@ -874,13 +879,13 @@ get_socket_mem_size(int socket)
        uint64_t size = 0;
        unsigned i;

-       for (i = 0; i < internal_config.num_hugepage_sizes; i++){
+       for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
                struct hugepage_info *hpi = &internal_config.hugepage_info[i];
                if (hpi->hugedir != NULL)
                        size += hpi->hugepage_sz * hpi->num_pages[socket];
        }

-       return (size);
+       return size;
 }

 /*
@@ -952,7 +957,7 @@ calc_num_pages_per_socket(uint64_t * memory,

        for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
                /* skips if the memory on specific socket wasn't requested */
-               for (i = 0; i < num_hp_info && memory[socket] != 0; i++){
+               for (i = 0; i < num_hp_info && memory[socket] != 0; i++) {
                        hp_used[i].hugedir = hp_info[i].hugedir;
                        hp_used[i].num_pages[socket] = RTE_MIN(
                                        memory[socket] / hp_info[i].hugepage_sz,
@@ -984,7 +989,7 @@ calc_num_pages_per_socket(uint64_t * memory,
                                hp_info[j].num_pages[socket];

                        /* is there enough other memory, if not allocate another page and quit */
-                       if (remaining_mem < memory[socket]){
+                       if (remaining_mem < memory[socket]) {
                                cur_mem = RTE_MIN(memory[socket],
                                                hp_info[i].hugepage_sz);
                                memory[socket] -= cur_mem;
@@ -1104,7 +1109,7 @@ rte_eal_hugepage_init(void)
        hp_offset = 0; /* where we start the current page size entries */

        /* map all hugepages and sort them */
-       for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
+       for (i = 0; i < (int)internal_config.num_hugepage_sizes; i++) {
                struct hugepage_info *hpi;

                /*
@@ -1118,20 +1123,20 @@ rte_eal_hugepage_init(void)
                        continue;

                /* map all hugepages available */
-               if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 1) < 0){
+               if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 1) < 0) {
                        RTE_LOG(DEBUG, EAL, "Failed to mmap %u MB hugepages\n",
                                        (unsigned)(hpi->hugepage_sz / 0x100000));
                        goto fail;
                }

                /* find physical addresses and sockets for each hugepage */
-               if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0){
+               if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
                        RTE_LOG(DEBUG, EAL, "Failed to find phys addr for %u MB pages\n",
                                        (unsigned)(hpi->hugepage_sz / 0x100000));
                        goto fail;
                }

-               if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
+               if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0) {
                        RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
                                        (unsigned)(hpi->hugepage_sz / 0x100000));
                        goto fail;
@@ -1143,7 +1148,7 @@ rte_eal_hugepage_init(void)
 #ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
                /* remap all hugepages into single file segments */
                new_pages_count[i] = remap_all_hugepages(&tmp_hp[hp_offset], hpi);
-               if (new_pages_count[i] < 0){
+               if (new_pages_count[i] < 0) {
                        RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
                                        (unsigned)(hpi->hugepage_sz / 0x100000));
                        goto fail;
@@ -1153,7 +1158,7 @@ rte_eal_hugepage_init(void)
                hp_offset += new_pages_count[i];
 #else
                /* remap all hugepages */
-               if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) < 0){
+               if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) < 0) {
                        RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
                                        (unsigned)(hpi->hugepage_sz / 0x100000));
                        goto fail;
@@ -1388,8 +1393,8 @@ rte_eal_hugepage_attach(void)
        if (internal_config.xen_dom0_support) {
 #ifdef RTE_LIBRTE_XEN_DOM0
                if (rte_xen_dom0_memory_attach() < 0) {
-                       RTE_LOG(ERR, EAL,"Failed to attach memory setments of primay "
-                                       "process\n");
+                       RTE_LOG(ERR, EAL,
+                               "Failed to attach memory segments of primary process\n");
                        return -1;
                }
                return 0;
@@ -1461,7 +1466,7 @@ rte_eal_hugepage_attach(void)
        RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);

        s = 0;
-       while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0){
+       while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0) {
                void *addr, *base_addr;
                uintptr_t offset = 0;
                size_t mapping_size;
@@ -1485,8 +1490,8 @@ rte_eal_hugepage_attach(void)
                /* find the hugepages for this segment and map them
                 * we don't need to worry about order, as the server sorted the
                 * entries before it did the second mmap of them */
-               for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++){
-                       if (hp[i].memseg_id == (int)s){
+               for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++) {
+                       if (hp[i].memseg_id == (int)s) {
                                fd = open(hp[i].filepath, O_RDWR);
                                if (fd < 0) {
                                        RTE_LOG(ERR, EAL, "Could not open %s\n",
@@ -1508,7 +1513,7 @@ rte_eal_hugepage_attach(void)
                                                hp[i].filepath);
                                        goto error;
                                }
-                               offset+=mapping_size;
+                               offset += mapping_size;
                        }
                }
                RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s,
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/lib/librte_eal/linuxapp/eal/eal_pci.c
index d2adc66..0c66d03 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci.c
@@ -83,14 +83,14 @@ pci_unbind_kernel_driver(struct rte_pci_device *dev)
        }
        if (fwrite(buf, n, 1, f) == 0) {
                RTE_LOG(ERR, EAL, "%s(): could not write to %s\n", __func__,
-                               filename);
+                       filename);
                goto error;
        }

        fclose(f);
        return 0;

-error:
+ error:
        fclose(f);
        return -1;
 }
@@ -357,8 +357,7 @@ pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
        /* device is valid, add in list (sorted) */
        if (TAILQ_EMPTY(&pci_device_list)) {
                TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
-       }
-       else {
+       } else {
                struct rte_pci_device *dev2 = NULL;
                int ret;

@@ -630,30 +629,30 @@ rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr, struct rte_pci_device *d

                /* check if device's identifiers match the driver's ones */
                if (id_table->vendor_id != dev->id.vendor_id &&
-                               id_table->vendor_id != PCI_ANY_ID)
+                   id_table->vendor_id != PCI_ANY_ID)
                        continue;
                if (id_table->device_id != dev->id.device_id &&
-                               id_table->device_id != PCI_ANY_ID)
+                   id_table->device_id != PCI_ANY_ID)
                        continue;
                if (id_table->subsystem_vendor_id != dev->id.subsystem_vendor_id &&
-                               id_table->subsystem_vendor_id != PCI_ANY_ID)
+                   id_table->subsystem_vendor_id != PCI_ANY_ID)
                        continue;
                if (id_table->subsystem_device_id != dev->id.subsystem_device_id &&
-                               id_table->subsystem_device_id != PCI_ANY_ID)
+                   id_table->subsystem_device_id != PCI_ANY_ID)
                        continue;

                struct rte_pci_addr *loc = &dev->addr;

                RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
-                               loc->domain, loc->bus, loc->devid, loc->function,
-                               dev->numa_node);
+                       loc->domain, loc->bus, loc->devid, loc->function,
+                       dev->numa_node);

                RTE_LOG(DEBUG, EAL, "  probe driver: %x:%x %s\n", dev->id.vendor_id,
-                               dev->id.device_id, dr->name);
+                       dev->id.device_id, dr->name);

                /* no initialization when blacklisted, return without error */
                if (dev->devargs != NULL &&
-                       dev->devargs->type == RTE_DEVTYPE_BLACKLISTED_PCI) {
+                   dev->devargs->type == RTE_DEVTYPE_BLACKLISTED_PCI) {
                        RTE_LOG(DEBUG, EAL, "  Device is blacklisted, not initializing\n");
                        return 1;
                }
@@ -671,7 +670,7 @@ rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr, struct rte_pci_device *d
                        if (ret != 0)
                                return ret;
                } else if (dr->drv_flags & RTE_PCI_DRV_FORCE_UNBIND &&
-                          rte_eal_process_type() == RTE_PROC_PRIMARY) {
+                          rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* unbind current driver */
                        if (pci_unbind_kernel_driver(dev) < 0)
                                return -1;
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_uio.c b/lib/librte_eal/linuxapp/eal/eal_pci_uio.c
index 2d1c69b..9948f84 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_uio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci_uio.c
@@ -92,7 +92,8 @@ pci_uio_map_secondary(struct rte_pci_device *dev)
 {
        int fd, i;
        struct mapped_pci_resource *uio_res;
-       struct mapped_pci_res_list *uio_res_list = RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+       struct mapped_pci_res_list *uio_res_list
+               = RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);

        TAILQ_FOREACH(uio_res, uio_res_list, next) {

@@ -272,7 +273,8 @@ pci_uio_map_resource(struct rte_pci_device *dev)
        uint64_t phaddr;
        struct rte_pci_addr *loc = &dev->addr;
        struct mapped_pci_resource *uio_res;
-       struct mapped_pci_res_list *uio_res_list = RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+       struct mapped_pci_res_list *uio_res_list
+               = RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
        struct pci_map *maps;

        dev->intr_handle.fd = -1;
@@ -412,7 +414,8 @@ static struct mapped_pci_resource *
 pci_uio_find_resource(struct rte_pci_device *dev)
 {
        struct mapped_pci_resource *uio_res;
-       struct mapped_pci_res_list *uio_res_list = RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+       struct mapped_pci_res_list *uio_res_list
+               = RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);

        if (dev == NULL)
                return NULL;
@@ -431,7 +434,8 @@ void
 pci_uio_unmap_resource(struct rte_pci_device *dev)
 {
        struct mapped_pci_resource *uio_res;
-       struct mapped_pci_res_list *uio_res_list = RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+       struct mapped_pci_res_list *uio_res_list
+               = RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);

        if (dev == NULL)
                return;
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c b/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
index aea1fb1..18ea694 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
@@ -551,7 +551,8 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
        struct rte_pci_addr *loc = &dev->addr;
        int i, ret, msix_bar;
        struct mapped_pci_resource *vfio_res = NULL;
-       struct mapped_pci_res_list *vfio_res_list = RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+       struct mapped_pci_res_list *vfio_res_list
+               = RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);

        struct pci_map *maps;
        uint32_t msix_table_offset = 0;
diff --git a/lib/librte_eal/linuxapp/eal/eal_timer.c b/lib/librte_eal/linuxapp/eal/eal_timer.c
index 169c6e1..83ba7ea 100644
--- a/lib/librte_eal/linuxapp/eal/eal_timer.c
+++ b/lib/librte_eal/linuxapp/eal/eal_timer.c
@@ -127,7 +127,7 @@ hpet_msb_inc(__attribute__((unused)) void *arg)
        while (1) {
                t = (eal_hpet->counter_l >> 30);
                if (t != (eal_hpet_msb & 3))
-                       eal_hpet_msb ++;
+                       eal_hpet_msb++;
                sleep(10);
        }
 }
@@ -135,7 +135,7 @@ hpet_msb_inc(__attribute__((unused)) void *arg)
 uint64_t
 rte_get_hpet_hz(void)
 {
-       if(internal_config.no_hpet)
+       if (internal_config.no_hpet)
                rte_panic("Error, HPET called, but no HPET present\n");

        return eal_hpet_resolution_hz;
@@ -147,7 +147,7 @@ rte_get_hpet_cycles(void)
        uint32_t t, msb;
        uint64_t ret;

-       if(internal_config.no_hpet)
+       if (internal_config.no_hpet)
                rte_panic("Error, HPET called, but no HPET present\n");

        t = eal_hpet->counter_l;
@@ -252,7 +252,7 @@ check_tsc_flags(void)
                return;
        }

-       while (fgets(line, sizeof line, stream)) {
+       while (fgets(line, sizeof(line), stream)) {
                char *constant_tsc;
                char *nonstop_tsc;

@@ -287,7 +287,8 @@ set_tsc_freq_from_clock(void)

        if (clock_gettime(CLOCK_MONOTONIC_RAW, &t_start) == 0) {
                uint64_t ns, end, start = rte_rdtsc();
-               nanosleep(&sleeptime,NULL);
+
+               nanosleep(&sleeptime, NULL);
                clock_gettime(CLOCK_MONOTONIC_RAW, &t_end);
                end = rte_rdtsc();
                ns = ((t_end.tv_sec - t_start.tv_sec) * NS_PER_SEC);
diff --git a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
index 9246f83..075d3cb 100644
--- a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
@@ -89,10 +89,11 @@ xen_get_virtual_area(size_t *size, size_t mem_size)
        RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%zu bytes\n", *size);

        fd = open("/dev/zero", O_RDONLY);
-       if (fd < 0){
+       if (fd < 0) {
                RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
                return NULL;
        }
+
        do {
                addr = mmap(NULL, (*size) + mem_size, PROT_READ,
                        MAP_PRIVATE, fd, 0);
@@ -112,7 +113,7 @@ xen_get_virtual_area(size_t *size, size_t mem_size)
        /* align addr to a mem_size boundary */
        aligned_addr = (uintptr_t)addr;
        aligned_addr = RTE_ALIGN_CEIL(aligned_addr, mem_size);
-        addr = (void *)(aligned_addr);
+       addr = (void *)(aligned_addr);

        RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%zx)\n",
                addr, *size);
@@ -133,21 +134,24 @@ get_xen_memory_size(void)

        file_name = "memsize";
        snprintf(path, sizeof(path), "%s/%s",
-                       sys_dir_path, file_name);
+                sys_dir_path, file_name);

        if (eal_parse_sysfs_value(path, &mem_size) < 0)
                return -1;

        if (mem_size == 0)
-               rte_exit(EXIT_FAILURE,"XEN-DOM0:the %s/%s was not"
-                       " configured.\n",sys_dir_path, file_name);
+               rte_exit(EXIT_FAILURE,
+                        "XEN-DOM0:the %s/%s was not configured.\n",
+                        sys_dir_path, file_name);
        if (mem_size % 2)
-               rte_exit(EXIT_FAILURE,"XEN-DOM0:the %s/%s must be"
-                       " even number.\n",sys_dir_path, file_name);
+               rte_exit(EXIT_FAILURE,
+                        "XEN-DOM0:the %s/%s must be even number.\n",
+                        sys_dir_path, file_name);

        if (mem_size > DOM0_CONFIG_MEMSIZE)
-               rte_exit(EXIT_FAILURE,"XEN-DOM0:the %s/%s should not be larger"
-                       " than %d mB\n",sys_dir_path, file_name, DOM0_CONFIG_MEMSIZE);
+               rte_exit(EXIT_FAILURE,
+                        "XEN-DOM0:the %s/%s should not be larger than %d mB\n",
+                        sys_dir_path, file_name, DOM0_CONFIG_MEMSIZE);

        return mem_size;
 }
@@ -195,8 +199,9 @@ rte_xen_dom0_memory_init(void)
        requested = (unsigned) (total_mem / 0x100000);
        if (requested > mem_size)
                /* if we didn't satisfy total memory requirements */
-               rte_exit(EXIT_FAILURE,"Not enough memory available! Requested: %uMB,"
-                               " available: %uMB\n", requested, mem_size);
+               rte_exit(EXIT_FAILURE,
+                        "Not enough memory available! Requested: %uMB, available: %uMB\n",
+                        requested, mem_size);
        else if (total_mem != 0)
                mem_size = requested;

@@ -231,7 +236,7 @@ rte_xen_dom0_memory_init(void)
                goto fail;
        }

-       if(num_memseg > RTE_MAX_MEMSEG){
+       if (num_memseg > RTE_MAX_MEMSEG) {
                RTE_LOG(ERR, EAL, "XEN DOM0: the memseg count %d is greater"
                        " than max memseg %d.\n",num_memseg, RTE_MAX_MEMSEG);
                err = -EIO;
@@ -247,8 +252,7 @@ rte_xen_dom0_memory_init(void)
        }

        /* map all memory segments to contiguous user space */
-       for (memseg_idx = 0; memseg_idx < num_memseg; memseg_idx++)
-       {
+       for (memseg_idx = 0; memseg_idx < num_memseg; memseg_idx++) {
                vma_len = seginfo[memseg_idx].size;

                /**
@@ -275,9 +279,9 @@ rte_xen_dom0_memory_init(void)

                memseg[memseg_idx].addr = vir_addr;
                memseg[memseg_idx].phys_addr = page_size *
-                       seginfo[memseg_idx].pfn ;
+                       seginfo[memseg_idx].pfn;
                memseg[memseg_idx].len = seginfo[memseg_idx].size;
-               for ( i = 0; i < seginfo[memseg_idx].size / RTE_PGSIZE_2M; i++)
+               for (i = 0; i < seginfo[memseg_idx].size / RTE_PGSIZE_2M; i++)
                        memseg[memseg_idx].mfn[i] = seginfo[memseg_idx].mfn[i];

                /* MFNs are continuous in 2M, so assume that page size is 2M */
@@ -322,7 +326,8 @@ rte_xen_dom0_memory_attach(void)
        if (xen_fd < 0) {
                xen_fd = open(DOM0_MM_DEV, O_RDWR);
                if (xen_fd < 0) {
-                       RTE_LOG(ERR, EAL, "Can not open %s\n",DOM0_MM_DEV);
+                       RTE_LOG(ERR, EAL, "Can not open %s\n",
+                               DOM0_MM_DEV);
                        goto error;
                }
        }
@@ -333,7 +338,7 @@ rte_xen_dom0_memory_attach(void)
        /* attach to memory segments of primary process */
        ret = ioctl(xen_fd, RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG, name);
        if (ret) {
-               RTE_LOG(ERR, EAL,"attach memory segments fail.\n");
+               RTE_LOG(ERR, EAL, "attach memory segments fail.\n");
                goto error;
        }

-- 
2.1.4
