Hi all,

Any comments on this patch?

It is a serious issue in DPDK 1.8-rc2 and it blocks testing on the i686
platform (a minimal reproduction is sketched below).

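For anyone who wants to see the failure outside of DPDK, here is a
minimal, hypothetical standalone example (PGSIZE_16G is only a stand-in
for RTE_PGSIZE_16G, not real DPDK code). Compiling it 32-bit with
something like "gcc -m32 -Wall -Werror=type-limits" reproduces the
"comparison is always false" error at build time; without -Werror it
also shows how the 64-bit constant is truncated when forced into a
32-bit pointer-sized type:

#include <stdint.h>
#include <stdio.h>

/* 64-bit constant, analogous to RTE_PGSIZE_16G = 1ULL << 34 */
#define PGSIZE_16G (1ULL << 34)

int main(void)
{
	size_t hugepage_sz = 1UL << 21;	/* a 2M page, fits in 32 bits */
	uintptr_t truncated;

	/*
	 * On i686, size_t is 32-bit, so this comparison can never be
	 * true and GCC emits "comparison is always false due to
	 * limited range of data type" under -Wtype-limits.
	 */
	if (hugepage_sz == PGSIZE_16G)
		printf("16G page\n");

	/*
	 * Converting the 64-bit constant to a 32-bit uintptr_t drops
	 * the high bits (this prints 0 on i686), which is the kind of
	 * narrowing that icc error #2259 complains about in eal.c.
	 */
	truncated = (uintptr_t)PGSIZE_16G;
	printf("truncated: %lu\n", (unsigned long)truncated);

	return 0;
}
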
Thanks,
Michael
On 12/3/2014 4:11 PM, Michael Qiu wrote:
> lib/librte_eal/linuxapp/eal/eal_memory.c:324:4: error: comparison
> is always false due to limited range of data type [-Werror=type-limits]
>     || (hugepage_sz == RTE_PGSIZE_16G)) {
>     ^
> cc1: all warnings being treated as errors
>
> lib/librte_eal/linuxapp/eal/eal.c(461): error #2259: non-pointer
> conversion from "long long" to "void *" may lose significant bits
>    RTE_PTR_ALIGN_CEIL((uintptr_t)addr, RTE_PGSIZE_16M);
>
> This was introduced by commit b77b5639:
>         mem: add huge page sizes for IBM Power
>
> The root cause is that size_t and uintptr_t are 32-bit on the i686
> platform, while RTE_PGSIZE_16M and RTE_PGSIZE_16G are always defined as
> 64-bit values.
>
> Define RTE_PGSIZE_16G only on 64-bit platforms to avoid
> this issue.
>
> Signed-off-by: Michael Qiu <michael.qiu at intel.com>
> ---
>  app/test/test_memzone.c                    | 18 ++++++++++++------
>  lib/librte_eal/common/eal_common_memzone.c |  2 ++
>  lib/librte_eal/common/include/rte_memory.h | 14 ++++++++------
>  lib/librte_eal/linuxapp/eal/eal_memory.c   | 12 +++++-------
>  4 files changed, 27 insertions(+), 19 deletions(-)
>
> diff --git a/app/test/test_memzone.c b/app/test/test_memzone.c
> index 5da6903..7bab8b5 100644
> --- a/app/test/test_memzone.c
> +++ b/app/test/test_memzone.c
> @@ -145,8 +145,10 @@ test_memzone_reserve_flags(void)
>                       hugepage_1GB_avail = 1;
>               if (ms[i].hugepage_sz == RTE_PGSIZE_16M)
>                       hugepage_16MB_avail = 1;
> +#ifdef RTE_ARCH_64
>               if (ms[i].hugepage_sz == RTE_PGSIZE_16G)
>                       hugepage_16GB_avail = 1;
> +#endif
>       }
>       /* Display the availability of 2MB ,1GB, 16MB, 16GB pages */
>       if (hugepage_2MB_avail)
> @@ -234,8 +236,8 @@ test_memzone_reserve_flags(void)
>                       return -1;
>               }
>  
> -             /* Check if 1GB huge pages are unavailable, that function fails unless
> -              * HINT flag is indicated
> +             /* Check if 2MB huge pages are unavailable, that function
> +              * fails unless HINT flag is indicated
>                */
>               if (!hugepage_2MB_avail) {
>                       mz = rte_memzone_reserve("flag_zone_2M_HINT", size, 
> SOCKET_ID_ANY,
> @@ -295,8 +297,9 @@ test_memzone_reserve_flags(void)
>                       return -1;
>               }
>  
> -             /* Check if 1GB huge pages are unavailable, that function fails
> -              * unless HINT flag is indicated
> +#ifdef RTE_ARCH_64
> +             /* Check if 16GB huge pages are unavailable, that function
> +              * fails unless HINT flag is indicated
>                */
>               if (!hugepage_16GB_avail) {
>                       mz = rte_memzone_reserve("flag_zone_16G_HINT", size,
> @@ -318,7 +321,9 @@ test_memzone_reserve_flags(void)
>                               return -1;
>                       }
>               }
> +#endif
>       }
> +#ifdef RTE_ARCH_64
>       /*As with 16MB tests above for 16GB huge page requests*/
>       if (hugepage_16GB_avail) {
>               mz = rte_memzone_reserve("flag_zone_16G", size, SOCKET_ID_ANY,
> @@ -343,8 +348,8 @@ test_memzone_reserve_flags(void)
>                       return -1;
>               }
>  
> -             /* Check if 1GB huge pages are unavailable, that function fails
> -              * unless HINT flag is indicated
> +             /* Check if 16MB huge pages are unavailable, that function
> +              * fails unless HINT flag is indicated
>                */
>               if (!hugepage_16MB_avail) {
>                       mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
> @@ -376,6 +381,7 @@ test_memzone_reserve_flags(void)
>                       }
>               }
>       }
> +#endif
>       return 0;
>  }
>  
> diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
> index b5a5d72..ee233ad 100644
> --- a/lib/librte_eal/common/eal_common_memzone.c
> +++ b/lib/librte_eal/common/eal_common_memzone.c
> @@ -221,12 +221,14 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
>               if ((flags & RTE_MEMZONE_1GB) &&
>                               free_memseg[i].hugepage_sz == RTE_PGSIZE_2M)
>                       continue;
> +#ifdef RTE_ARCH_64
>               if ((flags & RTE_MEMZONE_16MB) &&
>                               free_memseg[i].hugepage_sz == RTE_PGSIZE_16G)
>                       continue;
>               if ((flags & RTE_MEMZONE_16GB) &&
>                               free_memseg[i].hugepage_sz == RTE_PGSIZE_16M)
>                       continue;
> +#endif
>  
>               /* this segment is the best until now */
>               if (memseg_idx == -1) {
> diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
> index 1990833..6bcb92b 100644
> --- a/lib/librte_eal/common/include/rte_memory.h
> +++ b/lib/librte_eal/common/include/rte_memory.h
> @@ -53,12 +53,14 @@ extern "C" {
>  #endif
>  
>  enum rte_page_sizes {
> -     RTE_PGSIZE_4K = 1ULL << 12,
> -     RTE_PGSIZE_2M = 1ULL << 21,
> -     RTE_PGSIZE_1G = 1ULL << 30,
> -     RTE_PGSIZE_64K = 1ULL << 16,
> -     RTE_PGSIZE_16M = 1ULL << 24,
> -     RTE_PGSIZE_16G = 1ULL << 34
> +     RTE_PGSIZE_4K   = 1UL << 12,
> +     RTE_PGSIZE_2M   = 1UL << 21,
> +     RTE_PGSIZE_1G   = 1UL << 30,
> +     RTE_PGSIZE_64K  = 1UL << 16,
> +     RTE_PGSIZE_16M  = 1UL << 24,
> +#ifdef RTE_ARCH_64
> +     RTE_PGSIZE_16G  = 1ULL << 34
> +#endif
>  };
>  
>  #define SOCKET_ID_ANY -1                    /**< Any NUMA socket. */
> diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
> index e6cb919..833670c 100644
> --- a/lib/librte_eal/linuxapp/eal/eal_memory.c
> +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
> @@ -317,11 +317,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>                       hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
>               }
>  #ifndef RTE_ARCH_64
> -             /* for 32-bit systems, don't remap 1G and 16G pages, just reuse
> -              * original map address as final map address.
> +             /* for 32-bit systems, don't remap 1G pages (16G not defined),
> +              * just reuse original map address as final map address.
>                */
> -             else if ((hugepage_sz == RTE_PGSIZE_1G)
> -                     || (hugepage_sz == RTE_PGSIZE_16G)) {
> +             else if (hugepage_sz == RTE_PGSIZE_1G) {
>                       hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
>                       hugepg_tbl[i].orig_va = NULL;
>                       continue;
> @@ -422,11 +421,10 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
>       while (i < hpi->num_pages[0]) {
>  
>  #ifndef RTE_ARCH_64
> -             /* for 32-bit systems, don't remap 1G pages and 16G pages,
> +             /* for 32-bit systems, don't remap 1G pages (16G not defined),
>                * just reuse original map address as final map address.
>                */
> -             if ((hugepage_sz == RTE_PGSIZE_1G)
> -                     || (hugepage_sz == RTE_PGSIZE_16G)) {
> +             if (hugepage_sz == RTE_PGSIZE_1G) {
>                       hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
>                       hugepg_tbl[i].orig_va = NULL;
>                       i++;
