A NetQueue can be accessed by its NetClientState (nc) and the nc's peers at the same time, so a lock is needed to protect it.
Signed-off-by: Liu Ping Fan <pingf...@linux.vnet.ibm.com>
---
 include/net/net.h |  1 +
 net/queue.c       | 13 +++++++++++++
 2 files changed, 14 insertions(+)

diff --git a/include/net/net.h b/include/net/net.h
index 43d85a1..2f72b26 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -5,6 +5,7 @@
 #include "qemu-common.h"
 #include "qapi/qmp/qdict.h"
 #include "qemu/option.h"
+#include "qemu/thread.h"
 #include "net/queue.h"
 #include "migration/vmstate.h"
 #include "qapi-types.h"
diff --git a/net/queue.c b/net/queue.c
index d508b7a..26399a1 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -56,6 +56,7 @@ struct NetQueue {
     uint32_t nq_maxlen;
     uint32_t nq_count;
 
+    QemuMutex lock;
     QTAILQ_HEAD(packets, NetPacket) packets;
 
     unsigned delivering : 1;
@@ -88,6 +89,7 @@ NetQueue *qemu_new_net_queue(NetClientState *nc)
     queue->nq_maxlen = 10000;
     queue->nq_count = 0;
 
+    qemu_mutex_init(&queue->lock);
     QTAILQ_INIT(&queue->packets);
 
     queue->delivering = 0;
@@ -116,7 +118,9 @@ static void qemu_net_queue_append(NetQueue *queue,
 {
     NetPacket *packet;
 
+    qemu_mutex_lock(&queue->lock);
     if (queue->nq_count >= queue->nq_maxlen && !sent_cb) {
+        qemu_mutex_unlock(&queue->lock);
         return; /* drop if queue full and no callback */
     }
     packet = g_malloc(sizeof(NetPacket) + size);
@@ -128,6 +132,7 @@
 
     queue->nq_count++;
     QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
+    qemu_mutex_unlock(&queue->lock);
 }
 
 static void qemu_net_queue_append_iov(NetQueue *queue,
@@ -141,7 +146,9 @@ static void qemu_net_queue_append_iov(NetQueue *queue,
     size_t max_len = 0;
     int i;
 
+    qemu_mutex_lock(&queue->lock);
     if (queue->nq_count >= queue->nq_maxlen && !sent_cb) {
+        qemu_mutex_unlock(&queue->lock);
         return; /* drop if queue full and no callback */
     }
     for (i = 0; i < iovcnt; i++) {
@@ -163,6 +170,7 @@ static void qemu_net_queue_append_iov(NetQueue *queue,
 
     queue->nq_count++;
     QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
+    qemu_mutex_unlock(&queue->lock);
 }
 
 static ssize_t qemu_net_queue_deliver(NetQueue *queue,
@@ -273,6 +281,7 @@ void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
 {
     NetPacket *packet, *next;
 
+    qemu_mutex_lock(&queue->lock);
     QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
         if (packet->sender == from) {
             QTAILQ_REMOVE(&queue->packets, packet, entry);
@@ -280,10 +289,12 @@ void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
             g_free(packet);
         }
     }
+    qemu_mutex_unlock(&queue->lock);
 }
 
 bool qemu_net_queue_flush(NetQueue *queue)
 {
+    qemu_mutex_lock(&queue->lock);
     while (!QTAILQ_EMPTY(&queue->packets)) {
         NetPacket *packet;
         int ret;
@@ -300,6 +311,7 @@ bool qemu_net_queue_flush(NetQueue *queue)
         if (ret == 0) {
             queue->nq_count++;
             QTAILQ_INSERT_HEAD(&queue->packets, packet, entry);
+            qemu_mutex_unlock(&queue->lock);
             return false;
         }
 
@@ -309,5 +321,6 @@
         g_free(packet);
     }
 
+    qemu_mutex_unlock(&queue->lock);
     return true;
 }
-- 
1.8.1.4
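
For readers unfamiliar with the pattern, here is a minimal standalone sketch of the approach the patch takes: a bounded FIFO whose append and flush paths are serialized by a single mutex, with a full queue causing the packet to be dropped. It deliberately uses POSIX threads rather than QEMU's qemu/thread.h wrappers so it can compile on its own, and every name in it (demo_queue, demo_queue_append, and so on) is hypothetical, not part of QEMU.

/*
 * Standalone sketch (not QEMU code) of a mutex-protected bounded
 * packet queue: append drops when full, flush drains under the lock.
 * Build with: cc -pthread demo_queue.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_packet {
    struct demo_packet *next;
    int payload;
};

struct demo_queue {
    pthread_mutex_t lock;               /* plays the role of NetQueue::lock */
    struct demo_packet *head, *tail;
    unsigned count, maxlen;
};

static void demo_queue_init(struct demo_queue *q, unsigned maxlen)
{
    pthread_mutex_init(&q->lock, NULL); /* mirrors qemu_mutex_init() */
    q->head = q->tail = NULL;
    q->count = 0;
    q->maxlen = maxlen;
}

/* Append under the lock; drop the packet when the queue is full. */
static bool demo_queue_append(struct demo_queue *q, int payload)
{
    pthread_mutex_lock(&q->lock);
    if (q->count >= q->maxlen) {
        pthread_mutex_unlock(&q->lock);
        return false;                   /* dropped, as in the patch */
    }
    struct demo_packet *pkt = malloc(sizeof(*pkt));
    pkt->next = NULL;
    pkt->payload = payload;
    if (q->tail) {
        q->tail->next = pkt;
    } else {
        q->head = pkt;
    }
    q->tail = pkt;
    q->count++;
    pthread_mutex_unlock(&q->lock);
    return true;
}

/* Drain the queue under the same lock so a concurrent appender cannot
 * corrupt the list while it is being walked. */
static void demo_queue_flush(struct demo_queue *q)
{
    pthread_mutex_lock(&q->lock);
    while (q->head) {
        struct demo_packet *pkt = q->head;
        q->head = pkt->next;
        if (!q->head) {
            q->tail = NULL;
        }
        q->count--;
        printf("delivered %d\n", pkt->payload);
        free(pkt);
    }
    pthread_mutex_unlock(&q->lock);
}

int main(void)
{
    struct demo_queue q;
    demo_queue_init(&q, 4);
    for (int i = 0; i < 6; i++) {
        if (!demo_queue_append(&q, i)) {
            printf("dropped %d\n", i);
        }
    }
    demo_queue_flush(&q);
    return 0;
}

Unlike the real qemu_net_queue_flush(), the sketch does not re-queue undelivered packets or invoke sent callbacks; it only illustrates where the lock/unlock pairs sit around the queue operations.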