On Fri, 28 Apr 2023, Devin Heitmueller wrote:

The existing DecklinkQueue implementation was using the PacketList
structure but wasn't using the standard avpriv_packet_list_get and
avpriv_packet_list_put functions.  Convert to using them so we
eliminate the duplicate logic, per Marton Balint's suggestion.

Signed-off-by: Devin Heitmueller <dheitmuel...@ltnglobal.com>
---
libavdevice/decklink_common.cpp | 45 +++++++++++------------------------------
1 file changed, 12 insertions(+), 33 deletions(-)

diff --git a/libavdevice/decklink_common.cpp b/libavdevice/decklink_common.cpp
index 74e26e9..af1b731 100644
--- a/libavdevice/decklink_common.cpp
+++ b/libavdevice/decklink_common.cpp
@@ -402,16 +402,12 @@ void ff_decklink_packet_queue_init(AVFormatContext *avctx, DecklinkPacketQueue *

void ff_decklink_packet_queue_flush(DecklinkPacketQueue *q)
{
-    PacketListEntry *pkt, *pkt1;
+    AVPacket pkt;

    pthread_mutex_lock(&q->mutex);
-    for (pkt = q->pkt_list.head; pkt != NULL; pkt = pkt1) {
-        pkt1 = pkt->next;
-        av_packet_unref(&pkt->pkt);
-        av_freep(&pkt);
+    while (avpriv_packet_list_get(&q->pkt_list, &pkt) == 0) {
+        av_packet_unref(&pkt);
    }
-    q->pkt_list.head = NULL;
-    q->pkt_list.tail = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    pthread_mutex_unlock(&q->mutex);
@@ -435,7 +431,8 @@ unsigned long long ff_decklink_packet_queue_size(DecklinkPacketQueue *q)

int ff_decklink_packet_queue_put(DecklinkPacketQueue *q, AVPacket *pkt)
{
-    PacketListEntry *pkt1;
+    size_t pkt_size = pkt->size;

AVPacket->size is an int, so size_t is a bit strange here.
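I.e. just using the same type would be simpler (untested, purely illustrative):

    int pkt_size = pkt->size;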

+    int ret;

    // Drop Packet if queue size is > maximum queue size
    if (ff_decklink_packet_queue_size(q) > (uint64_t)q->max_q_size) {
@@ -449,26 +446,14 @@ int ff_decklink_packet_queue_put(DecklinkPacketQueue *q, AVPacket *pkt)
        return -1;
    }

-    pkt1 = (PacketListEntry *)av_malloc(sizeof(*pkt1));
-    if (!pkt1) {
-        av_packet_unref(pkt);
-        return -1;
-    }
-    av_packet_move_ref(&pkt1->pkt, pkt);
-    pkt1->next = NULL;
-
    pthread_mutex_lock(&q->mutex);

-    if (!q->pkt_list.tail) {
-        q->pkt_list.head = pkt1;
-    } else {
-        q->pkt_list.tail->next = pkt1;
+    ret = avpriv_packet_list_put(&q->pkt_list, pkt, NULL, 0);
+    if (ret == 0) {
+        q->nb_packets++;
+        q->size += pkt_size + sizeof(AVPacket);
    }

Else av_packet_unref() so we don't leak the packet data in case of failure? (See the sketch below.)


-    q->pkt_list.tail = pkt1;
-    q->nb_packets++;
-    q->size += pkt1->pkt.size + sizeof(*pkt1);
-
    pthread_cond_signal(&q->cond);

Maybe only cond_signal if avpriv_packet_list_put was successful?


    pthread_mutex_unlock(&q->mutex);

You have to return ret, not 0, at the end of the function in order to propagate avpriv_packet_list_put failures.
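Putting the three comments above together, the tail of ff_decklink_packet_queue_put() could look roughly like this (untested sketch based on your patch, just to illustrate):

    pthread_mutex_lock(&q->mutex);

    ret = avpriv_packet_list_put(&q->pkt_list, pkt, NULL, 0);
    if (ret == 0) {
        q->nb_packets++;
        q->size += pkt_size + sizeof(AVPacket);
        /* only wake a consumer if a packet was actually queued */
        pthread_cond_signal(&q->cond);
    } else {
        /* avpriv_packet_list_put() leaves the packet untouched on failure,
         * so unref it here to avoid leaking its data */
        av_packet_unref(pkt);
    }

    pthread_mutex_unlock(&q->mutex);

    /* propagate the avpriv_packet_list_put() error instead of returning 0 */
    return ret;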

@@ -482,16 +467,10 @@ int ff_decklink_packet_queue_get(DecklinkPacketQueue *q, AVPacket *pkt, int bloc
    pthread_mutex_lock(&q->mutex);

    for (;; ) {
-        PacketListEntry *pkt1 = q->pkt_list.head;
-        if (pkt1) {
-            q->pkt_list.head = pkt1->next;
-            if (!q->pkt_list.head) {
-                q->pkt_list.tail = NULL;
-            }
+        ret = avpriv_packet_list_get(&q->pkt_list, pkt);
+        if (ret == 0) {
            q->nb_packets--;
-            q->size -= pkt1->pkt.size + sizeof(*pkt1);
-            *pkt     = pkt1->pkt;
-            av_free(pkt1);
+            q->size -= pkt->size + sizeof(AVPacket);
            ret = 1;
            break;
        } else if (!block) {

Thanks,
Marton

--
1.8.3.1

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
