Add __rte_msvc_pack to all __rte_packed structs to cause packing
when building with MSVC.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 drivers/common/mlx5/mlx5_common_mr.h    |  4 ++++
 drivers/common/mlx5/mlx5_common_utils.h |  1 +
 drivers/common/mlx5/mlx5_prm.h          | 28 ++++++++++++++++++++++++++++
 3 files changed, 33 insertions(+)

diff --git a/drivers/common/mlx5/mlx5_common_mr.h 
b/drivers/common/mlx5/mlx5_common_mr.h
index 8789d40..4f4bd73 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -49,6 +49,7 @@ struct mlx5_mr {
 };
 
 /* Cache entry for Memory Region. */
+__rte_msvc_pack
 struct mr_cache_entry {
        uintptr_t start; /* Start address of MR. */
        uintptr_t end; /* End address of MR. */
@@ -56,6 +57,7 @@ struct mr_cache_entry {
 } __rte_packed;
 
 /* MR Cache table for Binary search. */
+__rte_msvc_pack
 struct mlx5_mr_btree {
        uint32_t len; /* Number of entries. */
        uint32_t size; /* Total number of entries. */
@@ -65,6 +67,7 @@ struct mlx5_mr_btree {
 struct mlx5_common_device;
 
 /* Per-queue MR control descriptor. */
+__rte_msvc_pack
 struct mlx5_mr_ctrl {
        uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
        uint32_t cur_gen; /* Generation number saved to flush caches. */
@@ -78,6 +81,7 @@ struct mlx5_mr_ctrl {
 LIST_HEAD(mlx5_mempool_reg_list, mlx5_mempool_reg);
 
 /* Global per-device MR cache. */
+__rte_msvc_pack
 struct mlx5_mr_share_cache {
        uint32_t dev_gen; /* Generation number to flush local caches. */
        rte_rwlock_t rwlock; /* MR cache Lock. */
diff --git a/drivers/common/mlx5/mlx5_common_utils.h 
b/drivers/common/mlx5/mlx5_common_utils.h
index ae15119..a44975c 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -27,6 +27,7 @@
  * Structure of the entry in the mlx5 list, user should define its own struct
  * that contains this in order to store the data.
  */
+__rte_msvc_pack
 struct mlx5_list_entry {
        LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
        uint32_t ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index c671c75..bf9ecd1 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -319,6 +319,7 @@ enum mlx5_mpw_mode {
 };
 
 /* WQE Control segment. */
+__rte_msvc_pack
 struct mlx5_wqe_cseg {
        uint32_t opcode;
        uint32_t sq_ds;
@@ -336,10 +337,12 @@ struct mlx5_wqe_cseg {
 #define WQE_CSEG_WQE_INDEX_OFFSET       8
 
 /* Header of data segment. Minimal size Data Segment */
+__rte_msvc_pack
 struct mlx5_wqe_dseg {
        uint32_t bcount;
        union {
                uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
+               __rte_msvc_pack
                struct {
                        uint32_t lkey;
                        uint64_t pbuf;
@@ -348,8 +351,10 @@ struct mlx5_wqe_dseg {
 } __rte_packed;
 
 /* Subset of struct WQE Ethernet Segment. */
+__rte_msvc_pack
 struct mlx5_wqe_eseg {
        union {
+               __rte_msvc_pack
                struct {
                        uint32_t swp_offs;
                        uint8_t cs_flags;
@@ -362,6 +367,7 @@ struct mlx5_wqe_eseg {
                                uint16_t vlan_tag;
                        };
                } __rte_packed;
+               __rte_msvc_pack
                struct {
                        uint32_t offsets;
                        uint32_t flags;
@@ -371,6 +377,7 @@ struct mlx5_wqe_eseg {
        };
 } __rte_packed;
 
+__rte_msvc_pack
 struct mlx5_wqe_qseg {
        uint32_t reserved0;
        uint32_t reserved1;
@@ -378,6 +385,7 @@ struct mlx5_wqe_qseg {
        uint32_t qpn_cqn;
 } __rte_packed;
 
+__rte_msvc_pack
 struct mlx5_wqe_wseg {
        uint32_t operation;
        uint32_t lkey;
@@ -388,6 +396,7 @@ struct mlx5_wqe_wseg {
 } __rte_packed;
 
 /* The title WQEBB, header of WQE. */
+__rte_msvc_pack
 struct mlx5_wqe {
        union {
                struct mlx5_wqe_cseg cseg;
@@ -437,6 +446,7 @@ struct mlx5_cqe {
        uint8_t lro_num_seg;
        union {
                uint8_t user_index_bytes[3];
+               __rte_msvc_pack
                struct {
                        uint8_t user_index_hi;
                        uint16_t user_index_low;
@@ -460,6 +470,7 @@ struct mlx5_cqe_ts {
        uint8_t op_own;
 };
 
+__rte_msvc_pack
 struct mlx5_wqe_rseg {
        uint64_t raddr;
        uint32_t rkey;
@@ -479,6 +490,7 @@ struct mlx5_wqe_rseg {
 #define MLX5_UMR_KLM_NUM_ALIGN \
        (MLX5_UMR_KLM_PTR_ALIGN / sizeof(struct mlx5_klm))
 
+__rte_msvc_pack
 struct mlx5_wqe_umr_cseg {
        uint32_t if_cf_toe_cq_res;
        uint32_t ko_to_bs;
@@ -486,6 +498,7 @@ struct mlx5_wqe_umr_cseg {
        uint32_t rsvd1[8];
 } __rte_packed;
 
+__rte_msvc_pack
 struct mlx5_wqe_mkey_cseg {
        uint32_t fr_res_af_sf;
        uint32_t qpn_mkey;
@@ -549,6 +562,7 @@ enum {
 #define MLX5_CRYPTO_MMO_TYPE_OFFSET 24
 #define MLX5_CRYPTO_MMO_OP_OFFSET 20
 
+__rte_msvc_pack
 struct mlx5_wqe_umr_bsf_seg {
        /*
         * bs_bpt_eo_es contains:
@@ -582,6 +596,7 @@ struct mlx5_wqe_umr_bsf_seg {
 #pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 
+__rte_msvc_pack
 struct mlx5_umr_wqe {
        struct mlx5_wqe_cseg ctr;
        struct mlx5_wqe_umr_cseg ucseg;
@@ -592,18 +607,21 @@ struct mlx5_umr_wqe {
        };
 } __rte_packed;
 
+__rte_msvc_pack
 struct mlx5_rdma_write_wqe {
        struct mlx5_wqe_cseg ctr;
        struct mlx5_wqe_rseg rseg;
        struct mlx5_wqe_dseg dseg[];
 } __rte_packed;
 
+__rte_msvc_pack
 struct mlx5_wqe_send_en_seg {
        uint32_t reserve[2];
        uint32_t sqnpc;
        uint32_t qpn;
 } __rte_packed;
 
+__rte_msvc_pack
 struct mlx5_wqe_send_en_wqe {
        struct mlx5_wqe_cseg ctr;
        struct mlx5_wqe_send_en_seg sseg;
@@ -650,6 +668,7 @@ struct mlx5_wqe_metadata_seg {
        uint64_t addr;
 };
 
+__rte_msvc_pack
 struct mlx5_gga_wqe {
        uint32_t opcode;
        uint32_t sq_ds;
@@ -663,16 +682,19 @@ struct mlx5_gga_wqe {
 } __rte_packed;
 
 union mlx5_gga_compress_opaque {
+       __rte_msvc_pack
        struct {
                uint32_t syndrome;
                uint32_t reserved0;
                uint32_t scattered_length;
                union {
+                       __rte_msvc_pack
                        struct {
                                uint32_t reserved1[5];
                                uint32_t crc32;
                                uint32_t adler32;
                        } v1 __rte_packed;
+                       __rte_msvc_pack
                        struct {
                                uint32_t crc32;
                                uint32_t adler32;
@@ -685,9 +707,11 @@ struct mlx5_gga_wqe {
 };
 
 union mlx5_gga_crypto_opaque {
+       __rte_msvc_pack
        struct {
                uint32_t syndrome;
                uint32_t reserved0[2];
+               __rte_msvc_pack
                struct {
                        uint32_t iv[3];
                        uint32_t tag_size;
@@ -4134,6 +4158,7 @@ enum mlx5_aso_op {
 #define MLX5_ASO_CSEG_READ_ENABLE 1
 
 /* ASO WQE CTRL segment. */
+__rte_msvc_pack
 struct mlx5_aso_cseg {
        uint32_t va_h;
        uint32_t va_l_r;
@@ -4150,6 +4175,7 @@ struct mlx5_aso_cseg {
 #define MLX5_MTR_MAX_TOKEN_VALUE INT32_MAX
 
 /* A meter data segment - 2 per ASO WQE. */
+__rte_msvc_pack
 struct mlx5_aso_mtr_dseg {
        uint32_t v_bo_sc_bbog_mm;
        /*
@@ -4191,6 +4217,7 @@ struct mlx5_aso_mtr_dseg {
 #define MLX5_ASO_MTRS_PER_POOL 128
 
 /* ASO WQE data segment. */
+__rte_msvc_pack
 struct mlx5_aso_dseg {
        union {
                uint8_t data[MLX5_ASO_WQE_DSEG_SIZE];
@@ -4199,6 +4226,7 @@ struct mlx5_aso_dseg {
 } __rte_packed;
 
 /* ASO WQE. */
+__rte_msvc_pack
 struct mlx5_aso_wqe {
        struct mlx5_wqe_cseg general_cseg;
        struct mlx5_aso_cseg aso_cseg;
-- 
1.8.3.1

Reply via email to