The IBM Power architecture has a different memory architecture from x86:
when the physical memory addresses are in ascending order, the mmapped
virtual addresses are in descending order. This patch modifies the memory
segment detection code to make it work on Power.

Signed-off-by: Chao Zhu <bjzhuc at cn.ibm.com>
---
 config/defconfig_ppc_64-native-linuxapp-gcc   |    1 +
 config/defconfig_x86_64-native-linuxapp-clang |    1 +
 config/defconfig_x86_64-native-linuxapp-gcc   |    1 +
 config/defconfig_x86_64-native-linuxapp-icc   |    1 +
 lib/librte_eal/linuxapp/eal/eal_memory.c      |   19 +++++++++++++++++--
 5 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/config/defconfig_ppc_64-native-linuxapp-gcc 
b/config/defconfig_ppc_64-native-linuxapp-gcc
index cc11cfc..c29888c 100644
--- a/config/defconfig_ppc_64-native-linuxapp-gcc
+++ b/config/defconfig_ppc_64-native-linuxapp-gcc
@@ -34,6 +34,7 @@ CONFIG_RTE_MACHINE="powerpc"

 CONFIG_RTE_ARCH="powerpc"
 CONFIG_RTE_ARCH_PPC_64=y
+CONFIG_RTE_ARCH_64=y
 CONFIG_RTE_ARCH_BIG_ENDIAN=y

 CONFIG_RTE_TOOLCHAIN="gcc"
diff --git a/config/defconfig_x86_64-native-linuxapp-clang 
b/config/defconfig_x86_64-native-linuxapp-clang
index bbda080..5f3074e 100644
--- a/config/defconfig_x86_64-native-linuxapp-clang
+++ b/config/defconfig_x86_64-native-linuxapp-clang
@@ -36,6 +36,7 @@ CONFIG_RTE_MACHINE="native"

 CONFIG_RTE_ARCH="x86_64"
 CONFIG_RTE_ARCH_X86_64=y
+CONFIG_RTE_ARCH_64=y

 CONFIG_RTE_TOOLCHAIN="clang"
 CONFIG_RTE_TOOLCHAIN_CLANG=y
diff --git a/config/defconfig_x86_64-native-linuxapp-gcc 
b/config/defconfig_x86_64-native-linuxapp-gcc
index 3de818a..60baf5b 100644
--- a/config/defconfig_x86_64-native-linuxapp-gcc
+++ b/config/defconfig_x86_64-native-linuxapp-gcc
@@ -36,6 +36,7 @@ CONFIG_RTE_MACHINE="native"

 CONFIG_RTE_ARCH="x86_64"
 CONFIG_RTE_ARCH_X86_64=y
+CONFIG_RTE_ARCH_64=y

 CONFIG_RTE_TOOLCHAIN="gcc"
 CONFIG_RTE_TOOLCHAIN_GCC=y
diff --git a/config/defconfig_x86_64-native-linuxapp-icc 
b/config/defconfig_x86_64-native-linuxapp-icc
index 795333b..71d1e28 100644
--- a/config/defconfig_x86_64-native-linuxapp-icc
+++ b/config/defconfig_x86_64-native-linuxapp-icc
@@ -36,6 +36,7 @@ CONFIG_RTE_MACHINE="native"

 CONFIG_RTE_ARCH="x86_64"
 CONFIG_RTE_ARCH_X86_64=y
+CONFIG_RTE_ARCH_64=y

 CONFIG_RTE_TOOLCHAIN="icc"
 CONFIG_RTE_TOOLCHAIN_ICC=y
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c 
b/lib/librte_eal/linuxapp/eal/eal_memory.c
index f2454f4..6694e08 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -316,7 +316,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 #endif
                        hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 
1] = '\0';
                }
-#ifndef RTE_ARCH_X86_64
+#ifndef RTE_ARCH_64
                /* for 32-bit systems, don't remap 1G pages, just reuse original
                 * map address as final map address.
                 */
@@ -412,7 +412,7 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, 
struct hugepage_info *hpi)

        while (i < hpi->num_pages[0]) {

-#ifndef RTE_ARCH_X86_64
+#ifndef RTE_ARCH_64
                /* for 32-bit systems, don't remap 1G pages, just reuse original
                 * map address as final map address.
                 */
@@ -1263,9 +1263,18 @@ rte_eal_hugepage_init(void)
                else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
                    hugepage[i].size)
                        new_memseg = 1;
+#ifdef RTE_ARCH_PPC_64
+               /* IBM Power architecture has different memory layout. 
+                * If the physical address is lower address first, the mmaped 
virtual
+                * address will be higher address first */
+               else if (((unsigned long)hugepage[i-1].final_va -
+                   (unsigned long)hugepage[i].final_va) != hugepage[i].size)
+                       new_memseg = 1;
+#else
                else if (((unsigned long)hugepage[i].final_va -
                    (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
                        new_memseg = 1;
+#endif

                if (new_memseg) {
                        j += 1;
@@ -1284,6 +1293,12 @@ rte_eal_hugepage_init(void)
                }
                /* continuation of previous memseg */
                else {
+#ifdef RTE_ARCH_PPC_64
+               /* Use the phy and virt address of the last page as segment 
address 
+                * for IBM Power architecture */ 
+                       mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+                       mcfg->memseg[j].addr = hugepage[i].final_va;
+#endif
                        mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
                }
                hugepage[i].memseg_id = j;
-- 
1.7.1

Reply via email to