From: Liu Ping Fan <pingf...@linux.vnet.ibm.com>

A NetQueue can be accessed concurrently by its NetClientState and that
client's peers, so a lock is needed to protect it.
Signed-off-by: Liu Ping Fan <pingf...@linux.vnet.ibm.com> --- include/net/net.h | 1 + net/queue.c | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/include/net/net.h b/include/net/net.h index 43d85a1..2f72b26 100644 --- a/include/net/net.h +++ b/include/net/net.h @@ -5,6 +5,7 @@ #include "qemu-common.h" #include "qapi/qmp/qdict.h" #include "qemu/option.h" +#include "qemu/thread.h" #include "net/queue.h" #include "migration/vmstate.h" #include "qapi-types.h" diff --git a/net/queue.c b/net/queue.c index 859d02a..c6d4241 100644 --- a/net/queue.c +++ b/net/queue.c @@ -53,6 +53,7 @@ struct NetQueue { uint32_t nq_maxlen; uint32_t nq_count; + QemuMutex lock; QTAILQ_HEAD(packets, NetPacket) packets; unsigned delivering : 1; @@ -68,6 +69,7 @@ NetQueue *qemu_new_net_queue(void *opaque) queue->nq_maxlen = 10000; queue->nq_count = 0; + qemu_mutex_init(&queue->lock); QTAILQ_INIT(&queue->packets); queue->delivering = 0; @@ -96,7 +98,9 @@ static void qemu_net_queue_append(NetQueue *queue, { NetPacket *packet; + qemu_mutex_lock(&queue->lock); if (queue->nq_count >= queue->nq_maxlen && !sent_cb) { + qemu_mutex_unlock(&queue->lock); return; /* drop if queue full and no callback */ } packet = g_malloc(sizeof(NetPacket) + size); @@ -108,6 +112,7 @@ static void qemu_net_queue_append(NetQueue *queue, queue->nq_count++; QTAILQ_INSERT_TAIL(&queue->packets, packet, entry); + qemu_mutex_unlock(&queue->lock); } static void qemu_net_queue_append_iov(NetQueue *queue, @@ -121,7 +126,9 @@ static void qemu_net_queue_append_iov(NetQueue *queue, size_t max_len = 0; int i; + qemu_mutex_lock(&queue->lock); if (queue->nq_count >= queue->nq_maxlen && !sent_cb) { + qemu_mutex_unlock(&queue->lock); return; /* drop if queue full and no callback */ } for (i = 0; i < iovcnt; i++) { @@ -143,6 +150,7 @@ static void qemu_net_queue_append_iov(NetQueue *queue, queue->nq_count++; QTAILQ_INSERT_TAIL(&queue->packets, packet, entry); + qemu_mutex_unlock(&queue->lock); } static ssize_t 
qemu_net_queue_deliver(NetQueue *queue, @@ -229,6 +237,7 @@ void qemu_net_queue_purge(NetQueue *queue, NetClientState *from) { NetPacket *packet, *next; + qemu_mutex_lock(&queue->lock); QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) { if (packet->sender == from) { QTAILQ_REMOVE(&queue->packets, packet, entry); @@ -236,10 +245,12 @@ void qemu_net_queue_purge(NetQueue *queue, NetClientState *from) g_free(packet); } } + qemu_mutex_unlock(&queue->lock); } bool qemu_net_queue_flush(NetQueue *queue) { + qemu_mutex_lock(&queue->lock); while (!QTAILQ_EMPTY(&queue->packets)) { NetPacket *packet; int ret; @@ -256,6 +267,7 @@ bool qemu_net_queue_flush(NetQueue *queue) if (ret == 0) { queue->nq_count++; QTAILQ_INSERT_HEAD(&queue->packets, packet, entry); + qemu_mutex_unlock(&queue->lock); return false; } @@ -265,5 +277,6 @@ bool qemu_net_queue_flush(NetQueue *queue) g_free(packet); } + qemu_mutex_unlock(&queue->lock); return true; } -- 1.8.1.4