On 12/30/20 4:14 AM, Xia, Chenbo wrote:
> Hi Maxime,
>
>> -----Original Message-----
>> From: Maxime Coquelin <maxime.coque...@redhat.com>
>> Sent: Monday, December 21, 2020 5:14 AM
>> To: dev@dpdk.org; Xia, Chenbo <chenbo....@intel.com>; olivier.m...@6wind.com;
>> amore...@redhat.com; david.march...@redhat.com
>> Cc: Maxime Coquelin <maxime.coque...@redhat.com>
>> Subject: [PATCH 17/40] net/virtio: move features definition to generic header
>>
>> This patch moves all the Virtio feature definitions to the generic
>> header. It also renames some helpers so that they no longer
>> reference PCI.
>>
>> Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
>> ---
>> drivers/net/virtio/meson.build | 3 +-
>> drivers/net/virtio/virtio.c | 22 ++++
>> drivers/net/virtio/virtio.h | 94 +++++++++++++++
>> drivers/net/virtio/virtio_ethdev.c | 110 +++++++++---------
>> drivers/net/virtio/virtio_pci.c | 21 +---
>> drivers/net/virtio/virtio_pci.h | 90 --------------
>> drivers/net/virtio/virtio_ring.h | 2 +-
>> drivers/net/virtio/virtio_rxtx.c | 38 +++---
>> drivers/net/virtio/virtio_rxtx_packed_avx.c | 6 +-
>> .../net/virtio/virtio_user/vhost_kernel_tap.c | 2 +-
>> drivers/net/virtio/virtio_user_ethdev.c | 6 +-
>> drivers/net/virtio/virtqueue.c | 4 +-
>> drivers/net/virtio/virtqueue.h | 8 +-
>> 13 files changed, 209 insertions(+), 197 deletions(-)
>> create mode 100644 drivers/net/virtio/virtio.c
>>
>> diff --git a/drivers/net/virtio/meson.build b/drivers/net/virtio/meson.build
>> index 0b62418f33..7de41cd04d 100644
>> --- a/drivers/net/virtio/meson.build
>> +++ b/drivers/net/virtio/meson.build
>> @@ -1,7 +1,8 @@
>> # SPDX-License-Identifier: BSD-3-Clause
>> # Copyright(c) 2018 Intel Corporation
>>
>> -sources += files('virtio_ethdev.c',
>> +sources += files('virtio.c',
>> + 'virtio_ethdev.c',
>
> Better to align the file names 😊
Done.
>
>> 'virtio_pci_ethdev.c',
>> 'virtio_pci.c',
>> 'virtio_rxtx.c',
>> diff --git a/drivers/net/virtio/virtio.c b/drivers/net/virtio/virtio.c
>> new file mode 100644
>> index 0000000000..d8d6bf7add
>> --- /dev/null
>> +++ b/drivers/net/virtio/virtio.c
>> @@ -0,0 +1,22 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2010-2014 Intel Corporation
>> + * Copyright(c) 2020 Red Hat, Inc.
>> + */
>> +
>> +#include "virtio.h"
>> +
>> +uint64_t
>> +virtio_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
>> +{
>> + uint64_t features;
>
> [snip]
>
>> @@ -1664,9 +1664,9 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
>> eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
>>
>> /* Setting up rx_header size for the device */
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
>> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
>> - vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
>> + virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
>> + virtio_with_feature(hw, VIRTIO_F_RING_PACKED))
>
> There is mixed usage of virtio_with_packed_queue() and
> virtio_with_feature(hw, VIRTIO_F_RING_PACKED). I think we should use only
> one. Since virtio_with_packed_queue() has been introduced, should we keep
> only that one? What do you think?
Yes, it may be better to use only one, as that makes it easier to grep for.
I will apply your suggestion in v2.
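
For reference, a minimal sketch of what the renamed helpers presumably look
like once moved to virtio.h, reconstructed from the vtpci_* versions removed
from virtio_pci.h below (exact placement in v2 may differ):

    static inline int
    virtio_with_feature(struct virtio_hw *hw, uint64_t bit)
    {
            /* Same body as the removed vtpci_with_feature(). */
            return (hw->guest_features & (1ULL << bit)) != 0;
    }

    static inline int
    virtio_with_packed_queue(struct virtio_hw *hw)
    {
            /* Thin wrapper over the generic feature check. */
            return virtio_with_feature(hw, VIRTIO_F_RING_PACKED);
    }

Keeping only virtio_with_packed_queue() at the call sites then amounts to
replacing the remaining virtio_with_feature(hw, VIRTIO_F_RING_PACKED)
occurrences with this wrapper.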
Thanks!
Maxime
> Thanks
> Chenbo
>
>> hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
>> else
>> hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
>> @@ -1681,7 +1681,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
>> hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
>>
>> if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
>> config = &local_config;
>> vtpci_read_dev_config(hw,
>> offsetof(struct virtio_net_config, speed),
>> @@ -1697,14 +1697,14 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
>> hw->duplex = ETH_LINK_FULL_DUPLEX;
>> PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
>> hw->speed, hw->duplex);
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
>> config = &local_config;
>>
>> vtpci_read_dev_config(hw,
>> offsetof(struct virtio_net_config, mac),
>> &config->mac, sizeof(config->mac));
>>
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
>> vtpci_read_dev_config(hw,
>> offsetof(struct virtio_net_config, status),
>> &config->status, sizeof(config->status));
>> @@ -1714,7 +1714,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
>> config->status = 0;
>> }
>>
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_MQ)) {
>> vtpci_read_dev_config(hw,
>> offsetof(struct virtio_net_config,
>> max_virtqueue_pairs),
>> &config->max_virtqueue_pairs,
>> @@ -1727,7 +1727,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
>>
>> hw->max_queue_pairs = config->max_virtqueue_pairs;
>>
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_MTU)) {
>> vtpci_read_dev_config(hw,
>> offsetof(struct virtio_net_config, mtu),
>> &config->mtu,
>> @@ -1838,7 +1838,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
>> goto err_virtio_init;
>>
>> if (vectorized) {
>> - if (!vtpci_packed_queue(hw)) {
>> + if (!virtio_with_packed_queue(hw)) {
>> hw->use_vec_rx = 1;
>> } else {
>> #if !defined(CC_AVX512_SUPPORT)
>> @@ -1965,17 +1965,17 @@ virtio_dev_devargs_parse(struct rte_devargs *devargs, uint32_t *speed, int *vect
>> static bool
>> rx_offload_enabled(struct virtio_hw *hw)
>> {
>> - return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
>> - vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
>> - vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
>> + return virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
>> + virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
>> + virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
>> }
>>
>> static bool
>> tx_offload_enabled(struct virtio_hw *hw)
>> {
>> - return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
>> - vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
>> - vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
>> + return virtio_with_feature(hw, VIRTIO_NET_F_CSUM) ||
>> + virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
>> + virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
>> }
>>
>> /*
>> @@ -2048,29 +2048,29 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>>
>> if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
>> DEV_RX_OFFLOAD_TCP_CKSUM)) &&
>> - !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
>> + !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
>> PMD_DRV_LOG(ERR,
>> "rx checksum not available on this host");
>> return -ENOTSUP;
>> }
>>
>> if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
>> - (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
>> - !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
>> + (!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
>> + !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
>> PMD_DRV_LOG(ERR,
>> "Large Receive Offload not available on this host");
>> return -ENOTSUP;
>> }
>>
>> /* start control queue */
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
>> virtio_dev_cq_start(dev);
>>
>> if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
>> hw->vlan_strip = 1;
>>
>> - if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
>> - && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
>> + if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
>> + !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
>> PMD_DRV_LOG(ERR,
>> "vlan filtering not available on this host");
>> return -ENOTSUP;
>> @@ -2087,12 +2087,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>> return -EBUSY;
>> }
>>
>> - if (vtpci_packed_queue(hw)) {
>> + if (virtio_with_packed_queue(hw)) {
>> #if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT)
>> if ((hw->use_vec_rx || hw->use_vec_tx) &&
>> (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
>> - !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
>> - !vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
>> + !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
>> + !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
>> rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
>> PMD_DRV_LOG(INFO,
>> "disabled packed ring vectorized path for
>> requirements
>> not met");
>> @@ -2105,7 +2105,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>> #endif
>>
>> if (hw->use_vec_rx) {
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> PMD_DRV_LOG(INFO,
>> "disabled packed ring vectorized rx for
>> mrg_rxbuf enabled");
>> hw->use_vec_rx = 0;
>> @@ -2118,7 +2118,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>> }
>> }
>> } else {
>> - if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
>> + if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER)) {
>> hw->use_inorder_tx = 1;
>> hw->use_inorder_rx = 1;
>> hw->use_vec_rx = 0;
>> @@ -2132,7 +2132,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>> hw->use_vec_rx = 0;
>> }
>> #endif
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> PMD_DRV_LOG(INFO,
>> "disabled split ring vectorized rx for
>> mrg_rxbuf
>> enabled");
>> hw->use_vec_rx = 0;
>> @@ -2350,7 +2350,7 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
>> if (!hw->started) {
>> link.link_status = ETH_LINK_DOWN;
>> link.link_speed = ETH_SPEED_NUM_NONE;
>> - } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
>> + } else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
>> PMD_INIT_LOG(DEBUG, "Get link status from hw");
>> vtpci_read_dev_config(hw,
>> offsetof(struct virtio_net_config, status),
>> @@ -2381,7 +2381,7 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
>>
>> if (mask & ETH_VLAN_FILTER_MASK) {
>> if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
>> - !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
>> + !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
>>
>> PMD_DRV_LOG(NOTICE,
>> "vlan filtering not available on this host");
>> diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
>> index df69fcdd45..9c07ebad00 100644
>> --- a/drivers/net/virtio/virtio_pci.c
>> +++ b/drivers/net/virtio/virtio_pci.c
>> @@ -356,7 +356,7 @@ modern_set_features(struct virtio_hw *hw, uint64_t features)
>> static int
>> modern_features_ok(struct virtio_hw *hw)
>> {
>> - if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
>> + if (!virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
>> PMD_INIT_LOG(ERR, "Version 1+ required with modern devices\n");
>> return -1;
>> }
>> @@ -479,12 +479,12 @@ modern_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
>> {
>> uint32_t notify_data;
>>
>> - if (!vtpci_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
>> + if (!virtio_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
>> rte_write16(vq->vq_queue_index, vq->notify_addr);
>> return;
>> }
>>
>> - if (vtpci_with_feature(hw, VIRTIO_F_RING_PACKED)) {
>> + if (virtio_with_feature(hw, VIRTIO_F_RING_PACKED)) {
>> /*
>> * Bit[0:15]: vq queue index
>> * Bit[16:30]: avail index
>> @@ -548,21 +548,6 @@ vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
>> VIRTIO_OPS(hw)->write_dev_cfg(hw, offset, src, length);
>> }
>>
>> -uint64_t
>> -vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
>> -{
>> - uint64_t features;
>> -
>> - /*
>> - * Limit negotiated features to what the driver, virtqueue, and
>> - * host all support.
>> - */
>> - features = host_features & hw->guest_features;
>> - VIRTIO_OPS(hw)->set_features(hw, features);
>> -
>> - return features;
>> -}
>> -
>> void
>> vtpci_reset(struct virtio_hw *hw)
>> {
>> diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
>> index 8b07c4a369..b02e5c15f5 100644
>> --- a/drivers/net/virtio/virtio_pci.h
>> +++ b/drivers/net/virtio/virtio_pci.h
>> @@ -79,83 +79,6 @@ struct virtnet_ctl;
>> */
>> #define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
>>
>> -/* The feature bitmap for virtio net */
>> -#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
>> -#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
>> -#define VIRTIO_NET_F_MTU 3 /* Initial MTU advice. */
>> -#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
>> -#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
>> -#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
>> -#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */
>> -#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */
>> -#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
>> -#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
>> -#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
>> -#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
>> -#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
>> -#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */
>> -#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
>> -#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
>> -#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
>> -#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
>> -#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the
>> - * network */
>> -#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
>> - * Steering */
>> -#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
>> -
>> -/* Do we get callbacks when the ring is completely used, even if we've
>> - * suppressed them? */
>> -#define VIRTIO_F_NOTIFY_ON_EMPTY 24
>> -
>> -/* Can the device handle any descriptor layout? */
>> -#define VIRTIO_F_ANY_LAYOUT 27
>> -
>> -/* We support indirect buffer descriptors */
>> -#define VIRTIO_RING_F_INDIRECT_DESC 28
>> -
>> -#define VIRTIO_F_VERSION_1 32
>> -#define VIRTIO_F_IOMMU_PLATFORM 33
>> -#define VIRTIO_F_RING_PACKED 34
>> -
>> -/*
>> - * Some VirtIO feature bits (currently bits 28 through 31) are
>> - * reserved for the transport being used (eg. virtio_ring), the
>> - * rest are per-device feature bits.
>> - */
>> -#define VIRTIO_TRANSPORT_F_START 28
>> -#define VIRTIO_TRANSPORT_F_END 34
>> -
>> -/*
>> - * Inorder feature indicates that all buffers are used by the device
>> - * in the same order in which they have been made available.
>> - */
>> -#define VIRTIO_F_IN_ORDER 35
>> -
>> -/*
>> - * This feature indicates that memory accesses by the driver and the device
>> - * are ordered in a way described by the platform.
>> - */
>> -#define VIRTIO_F_ORDER_PLATFORM 36
>> -
>> -/*
>> - * This feature indicates that the driver passes extra data (besides
>> - * identifying the virtqueue) in its device notifications.
>> - */
>> -#define VIRTIO_F_NOTIFICATION_DATA 38
>> -
>> -/* Device set linkspeed and duplex */
>> -#define VIRTIO_NET_F_SPEED_DUPLEX 63
>> -
>> -/* The Guest publishes the used index for which it expects an interrupt
>> - * at the end of the avail ring. Host should ignore the avail->flags field. */
>> -/* The Host publishes the avail index for which it expects a kick
>> - * at the end of the used ring. Guest should ignore the used->flags field. */
>> -#define VIRTIO_RING_F_EVENT_IDX 29
>> -
>> -#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
>> -#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
>> -
>> /*
>> * Maximum number of virtqueues per device.
>> */
>> @@ -271,17 +194,6 @@ enum virtio_msix_status {
>> VIRTIO_MSIX_ENABLED = 2
>> };
>>
>> -static inline int
>> -vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
>> -{
>> - return (hw->guest_features & (1ULL << bit)) != 0;
>> -}
>> -
>> -static inline int
>> -vtpci_packed_queue(struct virtio_hw *hw)
>> -{
>> - return vtpci_with_feature(hw, VIRTIO_F_RING_PACKED);
>> -}
>>
>> /*
>> * Function declaration from virtio_pci.c
>> @@ -294,8 +206,6 @@ void vtpci_reinit_complete(struct virtio_hw *);
>> uint8_t vtpci_get_status(struct virtio_hw *);
>> void vtpci_set_status(struct virtio_hw *, uint8_t);
>>
>> -uint64_t vtpci_negotiate_features(struct virtio_hw *, uint64_t);
>> -
>> void vtpci_write_dev_config(struct virtio_hw *, size_t, const void *, int);
>>
>> void vtpci_read_dev_config(struct virtio_hw *, size_t, void *, int);
>> diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
>> index 0f6574f684..17a56b0a73 100644
>> --- a/drivers/net/virtio/virtio_ring.h
>> +++ b/drivers/net/virtio/virtio_ring.h
>> @@ -133,7 +133,7 @@ vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
>> {
>> size_t size;
>>
>> - if (vtpci_packed_queue(hw)) {
>> + if (virtio_with_packed_queue(hw)) {
>> size = num * sizeof(struct vring_packed_desc);
>> size += sizeof(struct vring_packed_desc_event);
>> size = RTE_ALIGN_CEIL(size, align);
>> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
>> index 93fe856cbd..10989118b0 100644
>> --- a/drivers/net/virtio/virtio_rxtx.c
>> +++ b/drivers/net/virtio/virtio_rxtx.c
>> @@ -685,14 +685,14 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
>> struct rte_mbuf *m;
>> uint16_t desc_idx;
>> int error, nbufs, i;
>> - bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
>> + bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
>>
>> PMD_INIT_FUNC_TRACE();
>>
>> /* Allocate blank mbufs for the each rx descriptor */
>> nbufs = 0;
>>
>> - if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
>> + if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
>> for (desc_idx = 0; desc_idx < vq->vq_nentries;
>> desc_idx++) {
>> vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
>> @@ -710,12 +710,12 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
>> &rxvq->fake_mbuf;
>> }
>>
>> - if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
>> + if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
>> while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
>> virtio_rxq_rearm_vec(rxvq);
>> nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
>> }
>> - } else if (!vtpci_packed_queue(vq->hw) && in_order) {
>> + } else if (!virtio_with_packed_queue(vq->hw) && in_order) {
>> if ((!virtqueue_full(vq))) {
>> uint16_t free_cnt = vq->vq_free_cnt;
>> struct rte_mbuf *pkts[free_cnt];
>> @@ -741,7 +741,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
>> break;
>>
>> /* Enqueue allocated buffers */
>> - if (vtpci_packed_queue(vq->hw))
>> + if (virtio_with_packed_queue(vq->hw))
>> error = virtqueue_enqueue_recv_refill_packed(vq,
>> &m, 1);
>> else
>> @@ -754,7 +754,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
>> nbufs++;
>> }
>>
>> - if (!vtpci_packed_queue(vq->hw))
>> + if (!virtio_with_packed_queue(vq->hw))
>> vq_update_avail_idx(vq);
>> }
>>
>> @@ -829,8 +829,8 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
>>
>> PMD_INIT_FUNC_TRACE();
>>
>> - if (!vtpci_packed_queue(hw)) {
>> - if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
>> + if (!virtio_with_packed_queue(hw)) {
>> + if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
>> vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
>> }
>>
>> @@ -847,7 +847,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
>> * Requeue the discarded mbuf. This should always be
>> * successful since it was just dequeued.
>> */
>> - if (vtpci_packed_queue(vq->hw))
>> + if (virtio_with_packed_queue(vq->hw))
>> error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
>> else
>> error = virtqueue_enqueue_recv_refill(vq, &m, 1);
>> @@ -1209,7 +1209,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
>> ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
>> - hdr_size);
>>
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> seg_num = header->num_buffers;
>> if (seg_num == 0)
>> seg_num = 1;
>> @@ -1735,7 +1735,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
>> struct virtio_hw *hw = vq->hw;
>> uint16_t hdr_size = hw->vtnet_hdr_size;
>> uint16_t nb_tx = 0;
>> - bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
>> + bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
>>
>> if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
>> return nb_tx;
>> @@ -1754,8 +1754,8 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
>> int can_push = 0, use_indirect = 0, slots, need;
>>
>> /* optimize ring usage */
>> - if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> + if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> + virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> rte_mbuf_refcnt_read(txm) == 1 &&
>> RTE_MBUF_DIRECT(txm) &&
>> txm->nb_segs == 1 &&
>> @@ -1763,7 +1763,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
>> rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
>> __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
>> can_push = 1;
>> - else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
>> + else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
>> txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
>> use_indirect = 1;
>> /* How many main ring entries are needed to this Tx?
>> @@ -1835,8 +1835,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>> int can_push = 0, use_indirect = 0, slots, need;
>>
>> /* optimize ring usage */
>> - if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> + if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> + virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> rte_mbuf_refcnt_read(txm) == 1 &&
>> RTE_MBUF_DIRECT(txm) &&
>> txm->nb_segs == 1 &&
>> @@ -1844,7 +1844,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>> rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
>> __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
>> can_push = 1;
>> - else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
>> + else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
>> txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
>> use_indirect = 1;
>>
>> @@ -1937,8 +1937,8 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>> int slots;
>>
>> /* optimize ring usage */
>> - if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> + if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> + virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> rte_mbuf_refcnt_read(txm) == 1 &&
>> RTE_MBUF_DIRECT(txm) &&
>> txm->nb_segs == 1 &&
>> diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.c b/drivers/net/virtio/virtio_rxtx_packed_avx.c
>> index a6a49ec439..c272766a9f 100644
>> --- a/drivers/net/virtio/virtio_rxtx_packed_avx.c
>> +++ b/drivers/net/virtio/virtio_rxtx_packed_avx.c
>> @@ -211,14 +211,14 @@ virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
>> int16_t need;
>>
>> /* optimize ring usage */
>> - if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> + if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> + virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> rte_mbuf_refcnt_read(txm) == 1 &&
>> RTE_MBUF_DIRECT(txm) &&
>> txm->nb_segs == 1 &&
>> rte_pktmbuf_headroom(txm) >= hdr_size)
>> can_push = 1;
>> - else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
>> + else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
>> txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
>> use_indirect = 1;
>> /* How many main ring entries are needed to this Tx?
>> diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
>> index 79b8446f8e..eade702c5c 100644
>> --- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
>> +++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
>> @@ -16,7 +16,7 @@
>>
>> #include "vhost_kernel_tap.h"
>> #include "../virtio_logs.h"
>> -#include "../virtio_pci.h"
>> +#include "../virtio.h"
>>
>> int
>> vhost_kernel_tap_set_offload(int fd, uint64_t features)
>> diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
>> index 14468ddf52..d05613ba3b 100644
>> --- a/drivers/net/virtio/virtio_user_ethdev.c
>> +++ b/drivers/net/virtio/virtio_user_ethdev.c
>> @@ -122,7 +122,7 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
>> dev->features &= dev->device_features;
>>
>> /* For packed ring, resetting queues is required in reconnection. */
>> - if (vtpci_packed_queue(hw) &&
>> + if (virtio_with_packed_queue(hw) &&
>> (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
>> PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
>> " when packed ring reconnecting.");
>> @@ -423,7 +423,7 @@ virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
>> {
>> struct virtio_user_dev *dev = virtio_user_get_dev(hw);
>>
>> - if (vtpci_packed_queue(hw))
>> + if (virtio_with_packed_queue(hw))
>> virtio_user_setup_queue_packed(vq, dev);
>> else
>> virtio_user_setup_queue_split(vq, dev);
>> @@ -456,7 +456,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
>> struct virtio_user_dev *dev = virtio_user_get_dev(hw);
>>
>> if (hw->cvq && (hw->cvq->vq == vq)) {
>> - if (vtpci_packed_queue(vq->hw))
>> + if (virtio_with_packed_queue(vq->hw))
>> virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
>> else
>> virtio_user_handle_cq(dev, vq->vq_queue_index);
>> diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
>> index 2702e120ee..59a2cb6599 100644
>> --- a/drivers/net/virtio/virtqueue.c
>> +++ b/drivers/net/virtio/virtqueue.c
>> @@ -32,7 +32,7 @@ virtqueue_detach_unused(struct virtqueue *vq)
>> end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
>>
>> for (idx = 0; idx < vq->vq_nentries; idx++) {
>> - if (hw->use_vec_rx && !vtpci_packed_queue(hw) &&
>> + if (hw->use_vec_rx && !virtio_with_packed_queue(hw) &&
>> type == VTNET_RQ) {
>> if (start <= end && idx >= start && idx < end)
>> continue;
>> @@ -137,7 +137,7 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
>> {
>> struct virtio_hw *hw = vq->hw;
>>
>> - if (vtpci_packed_queue(hw))
>> + if (virtio_with_packed_queue(hw))
>> virtqueue_rxvq_flush_packed(vq);
>> else
>> virtqueue_rxvq_flush_split(vq);
>> diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
>> index 9d2089766b..6c1df6f8e5 100644
>> --- a/drivers/net/virtio/virtqueue.h
>> +++ b/drivers/net/virtio/virtqueue.h
>> @@ -12,7 +12,7 @@
>> #include <rte_mempool.h>
>> #include <rte_net.h>
>>
>> -#include "virtio_pci.h"
>> +#include "virtio.h"
>> #include "virtio_ring.h"
>> #include "virtio_logs.h"
>> #include "virtio_rxtx.h"
>> @@ -386,7 +386,7 @@ virtqueue_disable_intr_split(struct virtqueue *vq)
>> static inline void
>> virtqueue_disable_intr(struct virtqueue *vq)
>> {
>> - if (vtpci_packed_queue(vq->hw))
>> + if (virtio_with_packed_queue(vq->hw))
>> virtqueue_disable_intr_packed(vq);
>> else
>> virtqueue_disable_intr_split(vq);
>> @@ -420,7 +420,7 @@ virtqueue_enable_intr_split(struct virtqueue *vq)
>> static inline void
>> virtqueue_enable_intr(struct virtqueue *vq)
>> {
>> - if (vtpci_packed_queue(vq->hw))
>> + if (virtio_with_packed_queue(vq->hw))
>> virtqueue_enable_intr_packed(vq);
>> else
>> virtqueue_enable_intr_split(vq);
>> @@ -573,7 +573,7 @@ virtqueue_notify(struct virtqueue *vq)
>> used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
>> __ATOMIC_RELAXED); \
>> nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
>> - if (vtpci_packed_queue((vq)->hw)) { \
>> + if (virtio_with_packed_queue((vq)->hw)) { \
>> PMD_INIT_LOG(DEBUG, \
>> "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
>> " cached_flags=0x%x; used_wrap_counter=%d", \
>> --
>> 2.29.2
>