Il 18/04/2013 14:50, Anthony Liguori ha scritto: > "Michael S. Tsirkin" <m...@redhat.com> writes: > >> On Thu, Apr 11, 2013 at 04:30:01PM +0200, fred.kon...@greensocs.com wrote: >>> From: KONRAD Frederic <fred.kon...@greensocs.com> >>> >>> As the virtio-net-pci and virtio-net-s390 are switched to the new API, >>> we can use QOM casts. >>> >>> Signed-off-by: KONRAD Frederic <fred.kon...@greensocs.com> >>> --- >>> hw/net/virtio-net.c | 141 >>> +++++++++++++++++++++-------------------- >>> include/hw/virtio/virtio-net.h | 2 +- >>> 2 files changed, 75 insertions(+), 68 deletions(-) >>> >>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c >>> index 988fe03..09890c1 100644 >>> --- a/hw/net/virtio-net.c >>> +++ b/hw/net/virtio-net.c >>> @@ -65,17 +65,9 @@ static int vq2q(int queue_index) >>> * - we could suppress RX interrupt if we were so inclined. >>> */ >>> >>> -/* >>> - * Moving to QOM later in this serie. >>> - */ >>> -static VirtIONet *to_virtio_net(VirtIODevice *vdev) >>> -{ >>> - return (VirtIONet *)vdev; >>> -} >>> - >>> static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config) >>> { >>> - VirtIONet *n = to_virtio_net(vdev); >>> + VirtIONet *n = VIRTIO_NET(vdev); >>> struct virtio_net_config netcfg; >>> >>> stw_p(&netcfg.status, n->status); >>> @@ -86,12 +78,12 @@ static void virtio_net_get_config(VirtIODevice *vdev, >>> uint8_t *config) >>> >>> static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t >>> *config) >>> { >>> - VirtIONet *n = to_virtio_net(vdev); >>> + VirtIONet *n = VIRTIO_NET(vdev); >>> struct virtio_net_config netcfg = {}; >>> >>> memcpy(&netcfg, config, n->config_size); >>> >>> - if (!(n->vdev.guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) && >>> + if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) && >>> memcmp(netcfg.mac, n->mac, ETH_ALEN)) { >>> memcpy(n->mac, netcfg.mac, ETH_ALEN); >>> qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); >>> @@ -100,12 +92,14 @@ static void 
virtio_net_set_config(VirtIODevice *vdev, >>> const uint8_t *config) >>> >>> static bool virtio_net_started(VirtIONet *n, uint8_t status) >>> { >>> + VirtIODevice *vdev = VIRTIO_DEVICE(n); >>> return (status & VIRTIO_CONFIG_S_DRIVER_OK) && >>> - (n->status & VIRTIO_NET_S_LINK_UP) && n->vdev.vm_running; >>> + (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running; >>> } >>> >>> static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) >>> { >>> + VirtIODevice *vdev = VIRTIO_DEVICE(n); >>> NetClientState *nc = qemu_get_queue(n->nic); >>> int queues = n->multiqueue ? n->max_queues : 1; >>> >>> @@ -126,25 +120,25 @@ static void virtio_net_vhost_status(VirtIONet *n, >>> uint8_t status) >>> } >>> if (!n->vhost_started) { >>> int r; >>> - if (!vhost_net_query(tap_get_vhost_net(nc->peer), &n->vdev)) { >>> + if (!vhost_net_query(tap_get_vhost_net(nc->peer), vdev)) { >>> return; >>> } >>> n->vhost_started = 1; >>> - r = vhost_net_start(&n->vdev, n->nic->ncs, queues); >>> + r = vhost_net_start(vdev, n->nic->ncs, queues); >>> if (r < 0) { >>> error_report("unable to start vhost net: %d: " >>> "falling back on userspace virtio", -r); >>> n->vhost_started = 0; >>> } >>> } else { >>> - vhost_net_stop(&n->vdev, n->nic->ncs, queues); >>> + vhost_net_stop(vdev, n->nic->ncs, queues); >>> n->vhost_started = 0; >>> } >>> } >>> >>> static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t >>> status) >>> { >>> - VirtIONet *n = to_virtio_net(vdev); >>> + VirtIONet *n = VIRTIO_NET(vdev); >>> VirtIONetQueue *q; >>> int i; >>> uint8_t queue_status; >>> @@ -184,6 +178,7 @@ static void virtio_net_set_status(struct VirtIODevice >>> *vdev, uint8_t status) >>> static void virtio_net_set_link_status(NetClientState *nc) >>> { >>> VirtIONet *n = qemu_get_nic_opaque(nc); >>> + VirtIODevice *vdev = VIRTIO_DEVICE(n); >>> uint16_t old_status = n->status; >>> >>> if (nc->link_down) >>> @@ -192,14 +187,14 @@ static void virtio_net_set_link_status(NetClientState >>> *nc) >>> n->status 
|= VIRTIO_NET_S_LINK_UP; >>> >>> if (n->status != old_status) >>> - virtio_notify_config(&n->vdev); >>> + virtio_notify_config(vdev); >>> >>> - virtio_net_set_status(&n->vdev, n->vdev.status); >>> + virtio_net_set_status(vdev, vdev->status); >>> } >>> >>> static void virtio_net_reset(VirtIODevice *vdev) >>> { >>> - VirtIONet *n = to_virtio_net(vdev); >>> + VirtIONet *n = VIRTIO_NET(vdev); >>> >>> /* Reset back to compatibility mode */ >>> n->promisc = 1; >>> @@ -318,7 +313,7 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int >>> multiqueue, int ctrl); >>> >>> static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t >>> features) >>> { >>> - VirtIONet *n = to_virtio_net(vdev); >>> + VirtIONet *n = VIRTIO_NET(vdev); >>> NetClientState *nc = qemu_get_queue(n->nic); >>> >>> features |= (1 << VIRTIO_NET_F_MAC); >>> @@ -366,7 +361,7 @@ static uint32_t virtio_net_bad_features(VirtIODevice >>> *vdev) >>> >>> static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features) >>> { >>> - VirtIONet *n = to_virtio_net(vdev); >>> + VirtIONet *n = VIRTIO_NET(vdev); >>> int i; >>> >>> virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)), >>> @@ -534,6 +529,7 @@ static int virtio_net_handle_vlan_table(VirtIONet *n, >>> uint8_t cmd, >>> static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, >>> struct iovec *iov, unsigned int iov_cnt) >>> { >>> + VirtIODevice *vdev = VIRTIO_DEVICE(n); >>> struct virtio_net_ctrl_mq mq; >>> size_t s; >>> uint16_t queues; >>> @@ -559,14 +555,14 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t >>> cmd, >>> n->curr_queues = queues; >>> /* stop the backend before changing the number of queues to avoid >>> handling a >>> * disabled queue */ >>> - virtio_net_set_status(&n->vdev, n->vdev.status); >>> + virtio_net_set_status(vdev, vdev->status); >>> virtio_net_set_queues(n); >>> >>> return VIRTIO_NET_OK; >>> } >>> static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) >>> { >>> - VirtIONet 
*n = to_virtio_net(vdev); >>> + VirtIONet *n = VIRTIO_NET(vdev); >>> struct virtio_net_ctrl_hdr ctrl; >>> virtio_net_ctrl_ack status = VIRTIO_NET_ERR; >>> VirtQueueElement elem; >>> @@ -609,7 +605,7 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, >>> VirtQueue *vq) >>> >>> static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq) >>> { >>> - VirtIONet *n = to_virtio_net(vdev); >>> + VirtIONet *n = VIRTIO_NET(vdev); >>> int queue_index = vq2q(virtio_get_queue_index(vq)); >>> >>> qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index)); >>> @@ -618,9 +614,10 @@ static void virtio_net_handle_rx(VirtIODevice *vdev, >>> VirtQueue *vq) >>> static int virtio_net_can_receive(NetClientState *nc) >>> { >>> VirtIONet *n = qemu_get_nic_opaque(nc); >>> + VirtIODevice *vdev = VIRTIO_DEVICE(n); >>> VirtIONetQueue *q = virtio_net_get_subqueue(nc); >>> >>> - if (!n->vdev.vm_running) { >>> + if (!vdev->vm_running) { >>> return 0; >>> } >>> >> >> BTW this is data path so was supposed to use the faster non-QOM casts. > > No, we're not. I don't know where you got that idea from. > > Unless you have actual performance numbers to show that it matters, then > you're just speculating.
It is slow in two ways. First, type_get_by_name is a useless hashtable lookup. This one is pretty hard to deny, we could memoize it like this: diff --git a/include/qom/object.h b/include/qom/object.h index d0f99c5..4c19978 100644 --- a/include/qom/object.h +++ b/include/qom/object.h @@ -475,8 +475,11 @@ struct TypeInfo * If an invalid object is passed to this function, a run time assert will be * generated. */ +#define TYPE_GET_BY_NAME(name) \ + ({ static Type _type; \ + if (!__builtin_constant_p(name)) { \ + extern void do_not_use_TYPE_GET_BY_NAME(void); \ + do_not_use_TYPE_GET_BY_NAME(); \ + } \ + _type ?: (_type = type_get_by_name(name)); }) + #define OBJECT_CHECK(type, obj, name) \ - ((type *)object_dynamic_cast_assert(OBJECT(obj), (name))) + ((type *)object_dynamic_cast_with_type_assert(OBJECT(obj), \ + __builtin_constant_p((name)) ? TYPE_GET_BY_NAME(name) \ + : type_get_by_name(name))) /** * OBJECT_CLASS_CHECK: (incomplete of course). (What glib does is instead a function call). Second, it doesn't apply in this case, but this: if (type->class->interfaces && type_is_ancestor(target_type, type_interface)) { is pretty awful. We really should cache the type_is_ancestor call and make it something like if (target_type->type_is_interface && type->class->interfaces) Paolo