Adding a new macro for specifying the __aligned__ attribute, and updating the current __rte_cache_aligned macro to use it.
Also adding a new macro to specify the __packed__ attribute.

Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy at intel.com>
Signed-off-by: Declan Doherty <declan.doherty at intel.com>
---
 lib/librte_eal/common/include/rte_memory.h | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 1bed415..18fd952 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -76,9 +76,19 @@ enum rte_page_sizes {
 /**< Return the first cache-aligned value greater or equal to size. */
 
 /**
+ * Force alignment
+ */
+#define __rte_aligned(a) __attribute__((__aligned__(a)))
+
+/**
  * Force alignment to cache line.
  */
-#define __rte_cache_aligned __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)))
+#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
+
+/**
+ * Force a structure to be packed
+ */
+#define __rte_packed __attribute__((__packed__))
 
 typedef uint64_t phys_addr_t; /**< Physical address definition. */
 #define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)
@@ -104,7 +114,7 @@ struct rte_memseg {
 	/**< store segment MFNs */
 	uint64_t mfn[DOM0_NUM_MEMBLOCK];
 #endif
-} __attribute__((__packed__));
+} __rte_packed;
 
 /**
  * Lock page in physical memory and prevent from swapping.
-- 
2.4.3
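For reference, a minimal usage sketch of the three macros (not part of
the patch; the names wire_hdr, core_stats and ring_buf below are
hypothetical, chosen only for illustration):

  #include <stdint.h>
  #include <rte_memory.h>

  /* __rte_packed removes inter-field padding: sizeof(struct wire_hdr)
   * is 7 here on a typical ABI, rather than the padded 8. */
  struct wire_hdr {
          uint8_t  version;
          uint16_t length;
          uint32_t seq;
  } __rte_packed;

  /* __rte_cache_aligned starts each instance on its own cache line,
   * e.g. to avoid false sharing between per-core counters. */
  struct core_stats {
          uint64_t rx_pkts;
          uint64_t tx_pkts;
  } __rte_cache_aligned;

  /* __rte_aligned takes an explicit byte alignment and also works on
   * variable declarations. */
  static uint8_t ring_buf[256] __rte_aligned(16);

As with the underlying GCC attribute, the macro goes after the closing
brace of a struct definition (applying to the type) or after a
variable's declarator.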