Hi Everyone! My client application uses vapi to connect to vpp. When vpp restarts, my application gets stuck at the vapi call. When I also restart my application, everything becomes normal. *I guess this is due to a broken connection.* Is there a way to reconnect without restarting my application? Any clues are greatly appreciated.
vpp version: v20.01 master branch. The debug stack is as follows: Thread 2 "agent" hit Breakpoint 1, CVapiClient::SetIpfixExporter (this=0x6e5be0 <CHttpServer::m_vapiClient>, srcAddr="10.10.0.1", colAddr="10.10.0.10", tmpInterval=20, port=8877, mtu=1450) at vapiclient/vapiclient.cpp:219 219 if (VAPI_OK != (rv = vapi_set_ipfix_exporter(ctx, sv, sw_interface__set_ipfix_cb, &called))) (gdb) s vapi_set_ipfix_exporter (ctx=0x7676c0, msg=0x1300883a0, callback=0x4a0a62 <sw_interface__set_ipfix_cb(vapi_ctx_s*, void*, vapi_error_e, bool, vapi_payload_set_ipfix_exporter_reply*)>, callback_ctx=0x7ffff0a8c960) at /data/kans/archives/ntc/vpp/build-root/install-vpp-native/vpp/include/vapi/ipfix_export.api.vapi.h:951 951 if (!msg || !callback) { (gdb) n 954 if (vapi_is_nonblocking(ctx) && vapi_requests_full(ctx)) { (gdb) 958 if (VAPI_OK != (rv = vapi_producer_lock (ctx))) { (gdb) 961 u32 req_context = vapi_gen_req_context(ctx); (gdb) 962 msg->header.context = req_context; (gdb) 963 vapi_msg_set_ipfix_exporter_hton(msg); (gdb) 964 if (VAPI_OK == (rv = vapi_send (ctx, msg))) { (gdb) 965 vapi_store_request(ctx, req_context, false, (vapi_cb_t)callback, callback_ctx); (gdb) 966 if (VAPI_OK != vapi_producer_unlock (ctx)) { (gdb) 969 if (vapi_is_nonblocking(ctx)) { (gdb) 972 rv = vapi_dispatch(ctx); (gdb) s vapi_dispatch (ctx=0x7676c0) at /data/kans/archives/ntc/vpp/src/vpp-api/vapi/vapi.c:815 815 vapi_error_e rv = VAPI_OK; (gdb) n 816 while (!vapi_requests_empty (ctx)) (gdb) 818 rv = vapi_dispatch_one (ctx); (gdb) s vapi_dispatch_one (ctx=0x7676c0) at /data/kans/archives/ntc/vpp/src/vpp-api/vapi/vapi.c:761 761 vapi_error_e rv = vapi_recv (ctx, &msg, &size, SVM_Q_WAIT, 0); (gdb) s vapi_recv (ctx=0x7676c0, msg=0x7ffff0a8c888, msg_size=0x7ffff0a8c890, cond=SVM_Q_WAIT, time=0) at /data/kans/archives/ntc/vpp/src/vpp-api/vapi/vapi.c:538 538 if (!ctx || !ctx->connected || !msg || !msg_size) (gdb) n 542 vapi_error_e rv = VAPI_OK; (gdb) n 543 api_main_t *am = vlibapi_get_main (); (gdb) n 546 if 
(am->our_pid == 0) (gdb) n 551 svm_queue_t *q = am->vl_input_queue; (gdb) n 555 int tmp = svm_queue_sub (q, (u8 *) & data, cond, time); (gdb) s svm_queue_sub (q=0x1301caa00, elem=0x7ffff0a8c828 "", cond=SVM_Q_WAIT, time=0) at /data/kans/archives/ntc/vpp/src/svm/queue.c:360 360 int need_broadcast = 0; (gdb) n 361 int rc = 0; (gdb) n 363 if (cond == SVM_Q_NOWAIT) (gdb) n 372 svm_queue_lock (q); (gdb) p cond $17 = SVM_Q_WAIT (gdb) n 374 if (PREDICT_FALSE (q->cursize == 0)) (gdb) p *q $18 = {mutex = {__data = {__lock = 1, __count = 0, __owner = 16263, __nusers = 2, __kind = 128, __spins = 0, __elision = 0, __list = {__prev = 0x0, __next = 0x0}}, __size = "\001\000\000\000\000\000\000\000\207?\000\000\002\000\000\000\200", '\000' <repeats 22 times>, __align = 1}, condvar = {__data = {__lock = 0, __futex = 1, __total_seq = 1, __wakeup_seq = 0, __woken_seq = 0, __mutex = 0xffffffffffffffff, __nwaiters = 2, __broadcast_seq = 0}, __size = "\000\000\000\000\001\000\000\000\001", '\000' <repeats 23 times>, "\377\377\377\377\377\377\377\377\002\000\000\000\000\000\000", __align = 4294967296}, head = 1, tail = 1, cursize = 0, maxsize = 32, elsize = 8, consumer_pid = 16262, producer_evtfd = -1, consumer_evtfd = -1, data = 0x1301caa78 "\310\305\004\060\001"} (gdb) n 376 if (cond == SVM_Q_NOWAIT) (gdb) n 381 else if (cond == SVM_Q_TIMEDWAIT) (gdb) n 394 while (q->cursize == 0) (gdb) p q->cursize $19 = 0 (gdb) n *395 svm_queue_wait_inline (q); // stuck here* (gdb) n Best Regards, Xiaopeng
-=-=-=-=-=-=-=-=-=-=-=- Links: You receive all messages sent to this group. View/Reply Online (#16909): https://lists.fd.io/g/vpp-dev/message/16909 Mute This Topic: https://lists.fd.io/mt/75370134/21656 Mute #vapi: https://lists.fd.io/g/fdio+vpp-dev/mutehashtag/vapi Group Owner: vpp-dev+ow...@lists.fd.io Unsubscribe: https://lists.fd.io/g/vpp-dev/unsub [arch...@mail-archive.com] -=-=-=-=-=-=-=-=-=-=-=-