MSVC struct packing is not compatible with GCC. Replace macro
__rte_packed with the pair __rte_packed_begin, which pushes the
existing pack value and sets packing to 1 byte, and __rte_packed_end,
which restores the pack value that was in effect before the push.

Macro __rte_packed_end is deliberately written to trigger an MSVC
compiler warning if no packing has been pushed, allowing easy
identification of locations where the matching __rte_packed_begin is
missing.

Signed-off-by: Andre Muezerie <andre...@linux.microsoft.com>
Reviewed-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 drivers/common/cnxk/hw/sdp.h                |   4 +-
 drivers/common/cnxk/roc_npc.h               |  16 +--
 drivers/common/cnxk/roc_npc_mcam_dump.c     |   4 +-
 drivers/common/cnxk/roc_platform.h          |   3 +-
 drivers/common/dpaax/compat.h               |   3 -
 drivers/common/iavf/iavf_osdep.h            |   8 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h |  44 +++----
 drivers/common/idpf/base/idpf_osdep.h       |   8 +-
 drivers/common/mlx5/mlx5_common_mr.h        |  16 +--
 drivers/common/mlx5/mlx5_common_utils.h     |   4 +-
 drivers/common/mlx5/mlx5_prm.h              | 120 ++++++++++----------
 drivers/common/qat/qat_adf/icp_qat_fw_la.h  |   8 +-
 drivers/common/qat/qat_common.h             |   8 +-
 13 files changed, 122 insertions(+), 124 deletions(-)

diff --git a/drivers/common/cnxk/hw/sdp.h b/drivers/common/cnxk/hw/sdp.h
index 686f516097..5792f309d7 100644
--- a/drivers/common/cnxk/hw/sdp.h
+++ b/drivers/common/cnxk/hw/sdp.h
@@ -156,7 +156,7 @@
 #define SDP_VF_R_OUT_INT_LEVELS_TIMET (32)
 
 /* SDP Instruction Header */
-struct sdp_instr_ih {
+struct __plt_packed_begin sdp_instr_ih {
        /* Data Len */
        uint64_t tlen : 16;
 
@@ -177,6 +177,6 @@ struct sdp_instr_ih {
 
        /* Reserved2 */
        uint64_t rsvd2 : 1;
-} __plt_packed;
+} __plt_packed_end;
 
 #endif /* __SDP_HW_H_  */
diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h
index bf8c65aa9c..f7a1e5d810 100644
--- a/drivers/common/cnxk/roc_npc.h
+++ b/drivers/common/cnxk/roc_npc.h
@@ -97,10 +97,10 @@ struct roc_npc_flow_item_eth {
        uint32_t reserved : 31; /**< Reserved, must be zero. */
 };
 
-struct roc_vlan_hdr {
+struct __plt_packed_begin roc_vlan_hdr {
        uint16_t vlan_tci; /**< Priority (3) + CFI (1) + Identifier Code (12) */
        uint16_t eth_proto; /**< Ethernet type of encapsulated frame. */
-} __plt_packed;
+} __plt_packed_end;
 
 struct roc_npc_flow_item_vlan {
        union {
@@ -115,23 +115,23 @@ struct roc_npc_flow_item_vlan {
        uint32_t reserved : 31; /**< Reserved, must be zero. */
 };
 
-struct roc_ipv6_hdr {
+struct __plt_packed_begin roc_ipv6_hdr {
        uint32_t vtc_flow;    /**< IP version, traffic class & flow label. */
        uint16_t payload_len; /**< IP payload size, including ext. headers */
        uint8_t proto;        /**< Protocol, next header. */
        uint8_t hop_limits;   /**< Hop limits. */
        uint8_t src_addr[16]; /**< IP address of source host. */
        uint8_t dst_addr[16]; /**< IP address of destination host(s). */
-} __plt_packed;
+} __plt_packed_end;
 
-struct roc_ipv6_fragment_ext {
+struct __plt_packed_begin roc_ipv6_fragment_ext {
        uint8_t next_header; /**< Next header type */
        uint8_t reserved;    /**< Reserved */
        uint16_t frag_data;  /**< All fragmentation data */
        uint32_t id;         /**< Packet ID */
-} __plt_packed;
+} __plt_packed_end;
 
-struct roc_ipv6_routing_ext {
+struct __plt_packed_begin roc_ipv6_routing_ext {
        uint8_t next_hdr;       /**< Protocol, next header. */
        uint8_t hdr_len;        /**< Header length. */
        uint8_t type;           /**< Extension header type. */
@@ -145,7 +145,7 @@ struct roc_ipv6_routing_ext {
                };
        };
        /* Next are 128-bit IPv6 address fields to describe segments. */
-} __plt_packed;
+} __plt_packed_end;
 
 struct roc_flow_item_ipv6_ext {
        uint8_t next_hdr; /**< Next header. */
diff --git a/drivers/common/cnxk/roc_npc_mcam_dump.c 
b/drivers/common/cnxk/roc_npc_mcam_dump.c
index ebd2dd69c2..27a63cc92d 100644
--- a/drivers/common/cnxk/roc_npc_mcam_dump.c
+++ b/drivers/common/cnxk/roc_npc_mcam_dump.c
@@ -35,7 +35,7 @@
 #define NIX_TX_VTAGACT_VTAG1_OP_MASK    GENMASK(45, 44)
 #define NIX_TX_VTAGACT_VTAG1_DEF_MASK   GENMASK(57, 48)
 
-struct npc_rx_parse_nibble_s {
+struct __plt_packed_begin npc_rx_parse_nibble_s {
        uint16_t chan : 3;
        uint16_t errlev : 1;
        uint16_t errcode : 2;
@@ -56,7 +56,7 @@ struct npc_rx_parse_nibble_s {
        uint16_t lgtype : 1;
        uint16_t lhflags : 2;
        uint16_t lhtype : 1;
-} __plt_packed;
+} __plt_packed_end;
 
 static const char *const intf_str[] = {
        "NIX-RX",
diff --git a/drivers/common/cnxk/roc_platform.h 
b/drivers/common/cnxk/roc_platform.h
index df4f88f288..d6f3ea9acc 100644
--- a/drivers/common/cnxk/roc_platform.h
+++ b/drivers/common/cnxk/roc_platform.h
@@ -97,7 +97,8 @@
 
 #define __plt_cache_aligned __rte_cache_aligned
 #define __plt_always_inline __rte_always_inline
-#define __plt_packed       __rte_packed
+#define __plt_packed_begin     __rte_packed_begin
+#define __plt_packed_end       __rte_packed_end
 #define __plt_unused       __rte_unused
 #define __roc_api          __rte_internal
 #define plt_iova_t         rte_iova_t
diff --git a/drivers/common/dpaax/compat.h b/drivers/common/dpaax/compat.h
index a7df70d5e6..d59207245c 100644
--- a/drivers/common/dpaax/compat.h
+++ b/drivers/common/dpaax/compat.h
@@ -53,9 +53,6 @@
 #ifndef __always_unused
 #define __always_unused        __rte_unused
 #endif
-#ifndef __packed
-#define __packed       __rte_packed
-#endif
 #ifndef noinline
 #define noinline       __rte_noinline
 #endif
diff --git a/drivers/common/iavf/iavf_osdep.h b/drivers/common/iavf/iavf_osdep.h
index 1f2b7889cb..1b0ca933cc 100644
--- a/drivers/common/iavf/iavf_osdep.h
+++ b/drivers/common/iavf/iavf_osdep.h
@@ -158,17 +158,17 @@ do {                                                      
      \
 } while (0)
 
 /* memory allocation tracking */
-struct iavf_dma_mem {
+struct __rte_packed_begin iavf_dma_mem {
        void *va;
        u64 pa;
        u32 size;
        const void *zone;
-} __rte_packed;
+} __rte_packed_end;
 
-struct iavf_virt_mem {
+struct __rte_packed_begin iavf_virt_mem {
        void *va;
        u32 size;
-} __rte_packed;
+} __rte_packed_end;
 
 #define iavf_allocate_dma_mem(h, m, unused, s, a) \
                        iavf_allocate_dma_mem_d(h, m, s, a)
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h 
b/drivers/common/iavf/virtchnl_inline_ipsec.h
index 2f4bf15725..549d38f4d5 100644
--- a/drivers/common/iavf/virtchnl_inline_ipsec.h
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -109,7 +109,7 @@ enum inline_ipsec_ops {
 };
 
 /* Not all valid, if certain field is invalid, set 1 for all bits */
-struct virtchnl_algo_cap  {
+struct __rte_packed_begin virtchnl_algo_cap  {
        u32 algo_type;
 
        u16 block_size;
@@ -129,20 +129,20 @@ struct virtchnl_algo_cap  {
        u16 min_aad_size;
        u16 max_aad_size;
        u16 inc_aad_size;
-} __rte_packed;
+} __rte_packed_end;
 
 /* vf record the capability of crypto from the virtchnl */
-struct virtchnl_sym_crypto_cap {
+struct __rte_packed_begin virtchnl_sym_crypto_cap {
        u8 crypto_type;
        u8 algo_cap_num;
        struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
-} __rte_packed;
+} __rte_packed_end;
 
 /* VIRTCHNL_OP_GET_IPSEC_CAP
  * VF pass virtchnl_ipsec_cap to PF
  * and PF return capability of ipsec from virtchnl.
  */
-struct virtchnl_ipsec_cap {
+struct __rte_packed_begin virtchnl_ipsec_cap {
        /* max number of SA per VF */
        u16 max_sa_num;
 
@@ -169,10 +169,10 @@ struct virtchnl_ipsec_cap {
 
        /* crypto capabilities */
        struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
-} __rte_packed;
+} __rte_packed_end;
 
 /* configuration of crypto function */
-struct virtchnl_ipsec_crypto_cfg_item {
+struct __rte_packed_begin virtchnl_ipsec_crypto_cfg_item {
        u8 crypto_type;
 
        u32 algo_type;
@@ -191,7 +191,7 @@ struct virtchnl_ipsec_crypto_cfg_item {
 
        /* key data buffer */
        u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
-} __rte_packed;
+} __rte_packed_end;
 
 struct virtchnl_ipsec_sym_crypto_cfg {
        struct virtchnl_ipsec_crypto_cfg_item
@@ -203,7 +203,7 @@ struct virtchnl_ipsec_sym_crypto_cfg {
  * PF create SA as configuration and PF driver will return
  * an unique index (sa_idx) for the created SA.
  */
-struct virtchnl_ipsec_sa_cfg {
+struct __rte_packed_begin virtchnl_ipsec_sa_cfg {
        /* IPsec SA Protocol - AH/ESP */
        u8 virtchnl_protocol_type;
 
@@ -292,17 +292,17 @@ struct virtchnl_ipsec_sa_cfg {
 
        /* crypto configuration */
        struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
-} __rte_packed;
+} __rte_packed_end;
 
 /* VIRTCHNL_OP_IPSEC_SA_UPDATE
  * VF send configuration of index of SA to PF
  * PF will update SA according to configuration
  */
-struct virtchnl_ipsec_sa_update {
+struct __rte_packed_begin virtchnl_ipsec_sa_update {
        u32 sa_index; /* SA to update */
        u32 esn_hi; /* high 32 bits of esn */
        u32 esn_low; /* low 32 bits of esn */
-} __rte_packed;
+} __rte_packed_end;
 
 /* VIRTCHNL_OP_IPSEC_SA_DESTROY
  * VF send configuration of index of SA to PF
@@ -310,7 +310,7 @@ struct virtchnl_ipsec_sa_update {
  * flag bitmap indicate all SA or just selected SA will
  * be destroyed
  */
-struct virtchnl_ipsec_sa_destroy {
+struct __rte_packed_begin virtchnl_ipsec_sa_destroy {
        /* All zero bitmap indicates all SA will be destroyed.
         * Non-zero bitmap indicates the selected SA in
         * array sa_index will be destroyed.
@@ -319,13 +319,13 @@ struct virtchnl_ipsec_sa_destroy {
 
        /* selected SA index */
        u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
-} __rte_packed;
+} __rte_packed_end;
 
 /* VIRTCHNL_OP_IPSEC_SA_READ
  * VF send this SA configuration to PF using virtchnl;
  * PF read SA and will return configuration for the created SA.
  */
-struct virtchnl_ipsec_sa_read {
+struct __rte_packed_begin virtchnl_ipsec_sa_read {
        /* SA valid - invalid/valid */
        u8 valid;
 
@@ -424,14 +424,14 @@ struct virtchnl_ipsec_sa_read {
 
        /* crypto configuration. Salt and keys are set to 0 */
        struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
-} __rte_packed;
+} __rte_packed_end;
 
 
 #define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4    (0)
 #define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6    (1)
 
 /* Add allowlist entry in IES */
-struct virtchnl_ipsec_sp_cfg {
+struct __rte_packed_begin virtchnl_ipsec_sp_cfg {
        u32 spi;
        u32 dip[4];
 
@@ -455,15 +455,15 @@ struct virtchnl_ipsec_sp_cfg {
 
        /* NAT-T UDP port number. Only valid in case NAT-T supported */
        u16 udp_port;
-} __rte_packed;
+} __rte_packed_end;
 
 
 /* Delete allowlist entry in IES */
-struct virtchnl_ipsec_sp_destroy {
+struct __rte_packed_begin virtchnl_ipsec_sp_destroy {
        /* 0 for IPv4 table, 1 for IPv6 table. */
        u8 table_id;
        u32 rule_id;
-} __rte_packed;
+} __rte_packed_end;
 
 /* Response from IES to allowlist operations */
 struct virtchnl_ipsec_sp_cfg_resp {
@@ -494,7 +494,7 @@ struct virtchnl_ipsec_resp {
 };
 
 /* Internal message descriptor for VF <-> IPsec communication */
-struct inline_ipsec_msg {
+struct __rte_packed_begin inline_ipsec_msg {
        u16 ipsec_opcode;
        u16 req_id;
 
@@ -520,7 +520,7 @@ struct inline_ipsec_msg {
                /* Reserved */
                struct virtchnl_ipsec_sa_read sa_read[0];
        } ipsec_data;
-} __rte_packed;
+} __rte_packed_end;
 
 static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
 {
diff --git a/drivers/common/idpf/base/idpf_osdep.h 
b/drivers/common/idpf/base/idpf_osdep.h
index 250f0ec500..7b43df3079 100644
--- a/drivers/common/idpf/base/idpf_osdep.h
+++ b/drivers/common/idpf/base/idpf_osdep.h
@@ -182,17 +182,17 @@ static inline uint64_t idpf_read_addr64(volatile void 
*addr)
 #define BITS_PER_BYTE       8
 
 /* memory allocation tracking */
-struct idpf_dma_mem {
+struct __rte_packed_begin idpf_dma_mem {
        void *va;
        u64 pa;
        u32 size;
        const void *zone;
-} __rte_packed;
+} __rte_packed_end;
 
-struct idpf_virt_mem {
+struct __rte_packed_begin idpf_virt_mem {
        void *va;
        u32 size;
-} __rte_packed;
+} __rte_packed_end;
 
 #define idpf_malloc(h, s)      rte_zmalloc(NULL, s, 0)
 #define idpf_calloc(h, c, s)   rte_zmalloc(NULL, (c) * (s), 0)
diff --git a/drivers/common/mlx5/mlx5_common_mr.h 
b/drivers/common/mlx5/mlx5_common_mr.h
index a7f1042037..cf7c685e9b 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -49,36 +49,36 @@ struct mlx5_mr {
 };
 
 /* Cache entry for Memory Region. */
-struct mr_cache_entry {
+struct __rte_packed_begin mr_cache_entry {
        uintptr_t start; /* Start address of MR. */
        uintptr_t end; /* End address of MR. */
        uint32_t lkey; /* rte_cpu_to_be_32(lkey). */
-} __rte_packed;
+} __rte_packed_end;
 
 /* MR Cache table for Binary search. */
-struct mlx5_mr_btree {
+struct __rte_packed_begin mlx5_mr_btree {
        uint32_t len; /* Number of entries. */
        uint32_t size; /* Total number of entries. */
        struct mr_cache_entry (*table)[];
-} __rte_packed;
+} __rte_packed_end;
 
 struct mlx5_common_device;
 
 /* Per-queue MR control descriptor. */
-struct mlx5_mr_ctrl {
+struct __rte_packed_begin mlx5_mr_ctrl {
        uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
        uint32_t cur_gen; /* Generation number saved to flush caches. */
        uint16_t mru; /* Index of last hit entry in top-half cache. */
        uint16_t head; /* Index of the oldest entry in top-half cache. */
        struct mr_cache_entry cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
        struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
-} __rte_packed;
+} __rte_packed_end;
 
 LIST_HEAD(mlx5_mr_list, mlx5_mr);
 LIST_HEAD(mlx5_mempool_reg_list, mlx5_mempool_reg);
 
 /* Global per-device MR cache. */
-struct mlx5_mr_share_cache {
+struct __rte_packed_begin mlx5_mr_share_cache {
        uint32_t dev_gen; /* Generation number to flush local caches. */
        rte_rwlock_t rwlock; /* MR cache Lock. */
        rte_rwlock_t mprwlock; /* Mempool Registration Lock. */
@@ -88,7 +88,7 @@ struct mlx5_mr_share_cache {
        struct mlx5_mempool_reg_list mempool_reg_list; /* Mempool database. */
        mlx5_reg_mr_t reg_mr_cb; /* Callback to reg_mr func */
        mlx5_dereg_mr_t dereg_mr_cb; /* Callback to dereg_mr func */
-} __rte_packed;
+} __rte_packed_end;
 
 /* Multi-Packet RQ buffer header. */
 struct __rte_cache_aligned mlx5_mprq_buf {
diff --git a/drivers/common/mlx5/mlx5_common_utils.h 
b/drivers/common/mlx5/mlx5_common_utils.h
index 9139bc6829..e56f367bf2 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -27,7 +27,7 @@ struct mlx5_list;
  * Structure of the entry in the mlx5 list, user should define its own struct
  * that contains this in order to store the data.
  */
-struct mlx5_list_entry {
+struct __rte_packed_begin mlx5_list_entry {
        LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
        alignas(8) RTE_ATOMIC(uint32_t) ref_cnt; /* 0 means, entry is invalid. 
*/
        uint32_t lcore_idx;
@@ -35,7 +35,7 @@ struct mlx5_list_entry {
                struct mlx5_list_entry *gentry;
                uint32_t bucket_idx;
        };
-} __rte_packed;
+} __rte_packed_end;
 
 struct __rte_cache_aligned mlx5_list_cache {
        LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 2d82807bc2..0ecddaa36b 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -323,12 +323,12 @@ enum mlx5_mpw_mode {
 };
 
 /* WQE Control segment. */
-struct __rte_aligned(MLX5_WSEG_SIZE) mlx5_wqe_cseg {
+struct __rte_aligned(MLX5_WSEG_SIZE) __rte_packed_begin mlx5_wqe_cseg {
        uint32_t opcode;
        uint32_t sq_ds;
        uint32_t flags;
        uint32_t misc;
-} __rte_packed;
+} __rte_packed_end;
 
 /*
  * WQE CSEG opcode field size is 32 bits, divided:
@@ -340,21 +340,21 @@ struct __rte_aligned(MLX5_WSEG_SIZE) mlx5_wqe_cseg {
 #define WQE_CSEG_WQE_INDEX_OFFSET       8
 
 /* Header of data segment. Minimal size Data Segment */
-struct mlx5_wqe_dseg {
+struct __rte_packed_begin mlx5_wqe_dseg {
        uint32_t bcount;
        union {
                uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
-               struct {
+               struct __rte_packed_begin {
                        uint32_t lkey;
                        uint64_t pbuf;
-               } __rte_packed;
+               } __rte_packed_end;
        };
-} __rte_packed;
+} __rte_packed_end;
 
 /* Subset of struct WQE Ethernet Segment. */
-struct mlx5_wqe_eseg {
+struct __rte_packed_begin mlx5_wqe_eseg {
        union {
-               struct {
+               struct __rte_packed_begin {
                        uint32_t swp_offs;
                        uint8_t cs_flags;
                        uint8_t swp_flags;
@@ -365,34 +365,34 @@ struct mlx5_wqe_eseg {
                                uint16_t inline_data;
                                uint16_t vlan_tag;
                        };
-               } __rte_packed;
-               struct {
+               } __rte_packed_end;
+               struct __rte_packed_begin {
                        uint32_t offsets;
                        uint32_t flags;
                        uint32_t flow_metadata;
                        uint32_t inline_hdr;
-               } __rte_packed;
+               } __rte_packed_end;
        };
-} __rte_packed;
+} __rte_packed_end;
 
-struct mlx5_wqe_qseg {
+struct __rte_packed_begin mlx5_wqe_qseg {
        uint32_t reserved0;
        uint32_t reserved1;
        uint32_t max_index;
        uint32_t qpn_cqn;
-} __rte_packed;
+} __rte_packed_end;
 
-struct mlx5_wqe_wseg {
+struct __rte_packed_begin mlx5_wqe_wseg {
        uint32_t operation;
        uint32_t lkey;
        uint32_t va_high;
        uint32_t va_low;
        uint64_t value;
        uint64_t mask;
-} __rte_packed;
+} __rte_packed_end;
 
 /* The title WQEBB, header of WQE. */
-struct mlx5_wqe {
+struct __rte_packed_begin mlx5_wqe {
        union {
                struct mlx5_wqe_cseg cseg;
                uint32_t ctrl[4];
@@ -402,7 +402,7 @@ struct mlx5_wqe {
                struct mlx5_wqe_dseg dseg[2];
                uint8_t data[MLX5_ESEG_EXTRA_DATA_SIZE];
        };
-} __rte_packed;
+} __rte_packed_end;
 
 /* WQE for Multi-Packet RQ. */
 struct mlx5_wqe_mprq {
@@ -464,10 +464,10 @@ struct mlx5_cqe {
        uint8_t lro_num_seg;
        union {
                uint8_t user_index_bytes[3];
-               struct {
+               struct __rte_packed_begin {
                        uint8_t user_index_hi;
                        uint16_t user_index_low;
-               } __rte_packed;
+               } __rte_packed_end;
        };
        uint32_t flow_table_metadata;
        uint8_t rsvd4[4];
@@ -487,11 +487,11 @@ struct mlx5_cqe_ts {
        uint8_t op_own;
 };
 
-struct mlx5_wqe_rseg {
+struct __rte_packed_begin mlx5_wqe_rseg {
        uint64_t raddr;
        uint32_t rkey;
        uint32_t reserved;
-} __rte_packed;
+} __rte_packed_end;
 
 #define MLX5_UMRC_IF_OFFSET 31u
 #define MLX5_UMRC_KO_OFFSET 16u
@@ -506,14 +506,14 @@ struct mlx5_wqe_rseg {
 #define MLX5_UMR_KLM_NUM_ALIGN \
        (MLX5_UMR_KLM_PTR_ALIGN / sizeof(struct mlx5_klm))
 
-struct mlx5_wqe_umr_cseg {
+struct __rte_packed_begin mlx5_wqe_umr_cseg {
        uint32_t if_cf_toe_cq_res;
        uint32_t ko_to_bs;
        uint64_t mkey_mask;
        uint32_t rsvd1[8];
-} __rte_packed;
+} __rte_packed_end;
 
-struct mlx5_wqe_mkey_cseg {
+struct __rte_packed_begin mlx5_wqe_mkey_cseg {
        uint32_t fr_res_af_sf;
        uint32_t qpn_mkey;
        uint32_t reserved2;
@@ -525,7 +525,7 @@ struct mlx5_wqe_mkey_cseg {
        uint32_t translations_octword_size;
        uint32_t res4_lps;
        uint32_t reserved;
-} __rte_packed;
+} __rte_packed_end;
 
 enum {
        MLX5_BSF_SIZE_16B = 0x0,
@@ -576,7 +576,7 @@ enum {
 #define MLX5_CRYPTO_MMO_TYPE_OFFSET 24
 #define MLX5_CRYPTO_MMO_OP_OFFSET 20
 
-struct mlx5_wqe_umr_bsf_seg {
+struct __rte_packed_begin mlx5_wqe_umr_bsf_seg {
        /*
         * bs_bpt_eo_es contains:
         * bs   bsf_size                2 bits at MLX5_BSF_SIZE_OFFSET
@@ -603,13 +603,13 @@ struct mlx5_wqe_umr_bsf_seg {
        uint32_t reserved1;
        uint64_t keytag;
        uint32_t reserved2[4];
-} __rte_packed;
+} __rte_packed_end;
 
 #ifdef PEDANTIC
 #pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 
-struct mlx5_umr_wqe {
+struct __rte_packed_begin mlx5_umr_wqe {
        struct mlx5_wqe_cseg ctr;
        struct mlx5_wqe_umr_cseg ucseg;
        struct mlx5_wqe_mkey_cseg mkc;
@@ -617,24 +617,24 @@ struct mlx5_umr_wqe {
                struct mlx5_wqe_dseg kseg[0];
                struct mlx5_wqe_umr_bsf_seg bsf[0];
        };
-} __rte_packed;
+} __rte_packed_end;
 
-struct mlx5_rdma_write_wqe {
+struct __rte_packed_begin mlx5_rdma_write_wqe {
        struct mlx5_wqe_cseg ctr;
        struct mlx5_wqe_rseg rseg;
        struct mlx5_wqe_dseg dseg[];
-} __rte_packed;
+} __rte_packed_end;
 
-struct mlx5_wqe_send_en_seg {
+struct __rte_packed_begin mlx5_wqe_send_en_seg {
        uint32_t reserve[2];
        uint32_t sqnpc;
        uint32_t qpn;
-} __rte_packed;
+} __rte_packed_end;
 
-struct mlx5_wqe_send_en_wqe {
+struct __rte_packed_begin mlx5_wqe_send_en_wqe {
        struct mlx5_wqe_cseg ctr;
        struct mlx5_wqe_send_en_seg sseg;
-} __rte_packed;
+} __rte_packed_end;
 
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
@@ -677,7 +677,7 @@ struct mlx5_wqe_metadata_seg {
        uint64_t addr;
 };
 
-struct mlx5_gga_wqe {
+struct __rte_packed_begin mlx5_gga_wqe {
        uint32_t opcode;
        uint32_t sq_ds;
        uint32_t flags;
@@ -687,40 +687,40 @@ struct mlx5_gga_wqe {
        uint64_t opaque_vaddr;
        struct mlx5_wqe_dseg gather;
        struct mlx5_wqe_dseg scatter;
-} __rte_packed;
+} __rte_packed_end;
 
 union mlx5_gga_compress_opaque {
-       struct {
+       struct __rte_packed_begin {
                uint32_t syndrome;
                uint32_t reserved0;
                uint32_t scattered_length;
                union {
-                       struct {
+                       struct __rte_packed_begin {
                                uint32_t reserved1[5];
                                uint32_t crc32;
                                uint32_t adler32;
-                       } v1 __rte_packed;
-                       struct {
+                       } v1 __rte_packed_end;
+                       struct __rte_packed_begin {
                                uint32_t crc32;
                                uint32_t adler32;
                                uint32_t crc32c;
                                uint32_t xxh32;
-                       } v2 __rte_packed;
+                       } v2 __rte_packed_end;
                };
-       } __rte_packed;
+       } __rte_packed_end;
        uint32_t data[64];
 };
 
 union mlx5_gga_crypto_opaque {
-       struct {
+       struct __rte_packed_begin {
                uint32_t syndrome;
                uint32_t reserved0[2];
-               struct {
+               struct __rte_packed_begin {
                        uint32_t iv[3];
                        uint32_t tag_size;
                        uint32_t aad_size;
-               } cp __rte_packed;
-       } __rte_packed;
+               } cp __rte_packed_end;
+       } __rte_packed_end;
        uint8_t data[64];
 };
 
@@ -931,7 +931,7 @@ mlx5_regc_value(uint8_t regc_ix)
 
 /* Modification sub command. */
 struct mlx5_modification_cmd {
-       union {
+       union __rte_packed_begin {
                uint32_t data0;
                struct {
                        unsigned int length:5;
@@ -941,8 +941,8 @@ struct mlx5_modification_cmd {
                        unsigned int field:12;
                        unsigned int action_type:4;
                };
-       } __rte_packed;
-       union {
+       } __rte_packed_end;
+       union __rte_packed_begin {
                uint32_t data1;
                uint8_t data[4];
                struct {
@@ -952,7 +952,7 @@ struct mlx5_modification_cmd {
                        unsigned int dst_field:12;
                        unsigned int rsvd4:4;
                };
-       } __rte_packed;
+       } __rte_packed_end;
 };
 
 typedef uint64_t u64;
@@ -4191,7 +4191,7 @@ enum mlx5_aso_op {
 #define MLX5_ASO_CSEG_READ_ENABLE 1
 
 /* ASO WQE CTRL segment. */
-struct mlx5_aso_cseg {
+struct __rte_packed_begin mlx5_aso_cseg {
        uint32_t va_h;
        uint32_t va_l_r;
        uint32_t lkey;
@@ -4202,12 +4202,12 @@ struct mlx5_aso_cseg {
        uint32_t condition_1_mask;
        uint64_t bitwise_data;
        uint64_t data_mask;
-} __rte_packed;
+} __rte_packed_end;
 
 #define MLX5_MTR_MAX_TOKEN_VALUE INT32_MAX
 
 /* A meter data segment - 2 per ASO WQE. */
-struct mlx5_aso_mtr_dseg {
+struct __rte_packed_begin mlx5_aso_mtr_dseg {
        uint32_t v_bo_sc_bbog_mm;
        /*
         * bit 31: valid, 30: bucket overflow, 28-29: start color,
@@ -4227,7 +4227,7 @@ struct mlx5_aso_mtr_dseg {
         */
        uint32_t e_tokens;
        uint64_t timestamp;
-} __rte_packed;
+} __rte_packed_end;
 
 #define ASO_DSEG_VALID_OFFSET 31
 #define ASO_DSEG_BO_OFFSET 30
@@ -4248,19 +4248,19 @@ struct mlx5_aso_mtr_dseg {
 #define MLX5_ASO_MTRS_PER_POOL 128
 
 /* ASO WQE data segment. */
-struct mlx5_aso_dseg {
+struct __rte_packed_begin mlx5_aso_dseg {
        union {
                uint8_t data[MLX5_ASO_WQE_DSEG_SIZE];
                struct mlx5_aso_mtr_dseg mtrs[MLX5_ASO_METERS_PER_WQE];
        };
-} __rte_packed;
+} __rte_packed_end;
 
 /* ASO WQE. */
-struct mlx5_aso_wqe {
+struct __rte_packed_begin mlx5_aso_wqe {
        struct mlx5_wqe_cseg general_cseg;
        struct mlx5_aso_cseg aso_cseg;
        struct mlx5_aso_dseg aso_dseg;
-} __rte_packed;
+} __rte_packed_end;
 
 enum {
        MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h 
b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index fe32b66c50..835674d91b 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -357,7 +357,7 @@ struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
 #define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET 24
 #define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
 
-struct icp_qat_fw_la_cipher_req_params {
+struct __rte_packed_begin icp_qat_fw_la_cipher_req_params {
        uint32_t cipher_offset;
        uint32_t cipher_length;
        union {
@@ -372,9 +372,9 @@ struct icp_qat_fw_la_cipher_req_params {
        uint16_t spc_aad_sz;
        uint8_t reserved;
        uint8_t spc_auth_res_sz;
-} __rte_packed;
+} __rte_packed_end;
 
-struct icp_qat_fw_la_auth_req_params {
+struct __rte_packed_begin icp_qat_fw_la_auth_req_params {
        uint32_t auth_off;
        uint32_t auth_len;
        union {
@@ -389,7 +389,7 @@ struct icp_qat_fw_la_auth_req_params {
        uint8_t resrvd1;
        uint8_t hash_state_sz;
        uint8_t auth_res_sz;
-} __rte_packed;
+} __rte_packed_end;
 
 struct icp_qat_fw_la_auth_req_params_resrvd_flds {
        uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index 703534dc15..51d20267bd 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -57,11 +57,11 @@ enum qat_svc_list {
 };
 
 /**< Common struct for scatter-gather list operations */
-struct qat_flat_buf {
+struct __rte_packed_begin qat_flat_buf {
        uint32_t len;
        uint32_t resrvd;
        uint64_t addr;
-} __rte_packed;
+} __rte_packed_end;
 
 #define qat_sgl_hdr  struct { \
        uint64_t resrvd; \
@@ -70,11 +70,11 @@ struct qat_flat_buf {
 }
 
 __extension__
-struct __rte_cache_aligned qat_sgl {
+struct __rte_cache_aligned __rte_packed_begin qat_sgl {
        qat_sgl_hdr;
        /* flexible array of flat buffers*/
        struct qat_flat_buf buffers[0];
-} __rte_packed;
+} __rte_packed_end;
 
 /** Common, i.e. not service-specific, statistics */
 struct qat_common_stats {
-- 
2.47.0.vfs.0.3

Reply via email to