Hi all,
I sent L2 packets with 40000 different MAC addresses to VPP, and it crashed (in "show l2fib", judging by the backtrace below).
Is this an out-of-memory problem? How can I fix it?


(gdb) bt
#0  0x00000000004074ef in debug_sigabrt (sig=<optimized out>)
    at /syslog/share/VBRASV100R001_new_trunk/vpp1704/build-data/../src/vpp/vnet/main.c:63
#1  <signal handler called>
#2  0x00007f439ca84a0d in raise () from /lib/libc.so.6
#3  0x00007f439ca85944 in abort () from /lib/libc.so.6
#4  0x00000000004076be in os_panic ()
    at /syslog/share/VBRASV100R001_new_trunk/vpp1704/build-data/../src/vpp/vnet/main.c:290
#5  0x00007f439ddd4c7f in clib_mem_alloc_aligned_at_offset (
    os_out_of_memory_on_failure=1, align_offset=<optimized out>, align=4,
    size=7825458)
    at /syslog/share/VBRASV100R001_new_trunk/vpp1704/build-data/../src/vppinfra/mem.h:102
#6  vec_resize_allocate_memory (v=v@entry=0x3055afb0,
    length_increment=length_increment@entry=100, data_bytes=<optimized out>,
    header_bytes=<optimized out>, header_bytes@entry=0,
    data_align=data_align@entry=4)
    at /syslog/share/VBRASV100R001_new_trunk/vpp1704/build-data/../src/vppinfra/vec.c:84
#7  0x00000000004277d8 in _vec_resize (data_align=0, header_bytes=0,
    data_bytes=<optimized out>, length_increment=100, v=<optimized out>)
    at /syslog/share/VBRASV100R001_new_trunk/vpp1704/build-data/../src/vppinfra/vec.h:142
#8  shmem_cli_output (arg=139927313067576,
    buffer=0x7f43608fc8bc "  7a:7a:c0:a8:f8:66        1            4       0/1            3              -            -          -              GigabitEthernet0/0/3          \n",
    buffer_bytes=100)
    at /syslog/share/VBRASV100R001_new_trunk/vpp1704/build-data/../src/vpp/api/api.c:1095
#9  0x00007f43a4d8222e in vlib_cli_output (vm=<optimized out>,
    fmt=fmt@entry=0x7f439ef16c90 "%=19U%=7d%=7d %3d/%-3d%=9v%=7s%=7s%=5s%=30U")
    at /syslog/share/VBRASV100R001_new_trunk/vpp1704/build-data/../src/vlib/cli.c:594
#10 0x00007f439eb2581a in display_l2fib_entry (key=...,
    result=result@entry=..., s=s@entry=0x7f43608b07c0 "3")
    at /syslog/share/VBRASV100R001_new_trunk/vpp1704/build-data/../src/vnet/l2/l2_fib.c:120
#11 0x00007f439eb25af6 in show_l2fib (vm=0x7f43a4fe19c0 <vlib_global_main>,
    input=<optimized out>, cmd=<optimized out>)
    at /syslog/share/VBRASV100R001_new_trunk/vpp1704/build-data/../src/vnet/l2/l2_fib.c:274
#12 0x00007f43a4d82641 in vlib_cli_dispatch_sub_commands (
    vm=vm@entry=0x7f43a4fe19c0 <vlib_global_main>,
    cm=cm@entry=0x7f43a4fe1c28 <vlib_global_main+616>,
    input=input@entry=0x7f435dca2e40, parent_command_index=<optimized out>)
    at /syslog/share/VBRASV100R001_new_trunk/vpp1704/build-data/../src/vlib/cli.c
    [I quit the pager here; the rest of the backtrace is omitted]
(gdb) frame 10
#10 0x00007f439eb2581a in display_l2fib_entry (key=...,
    result=result@entry=..., s=s@entry=0x7f43608b07c0 "3")
    at /syslog/share/VBRASV100R001_new_trunk/vpp1704/build-data/../src/vnet/l2/l2_fib.c:120
120     /syslog/share/VBRASV100R001_new_trunk/vpp1704/build-data/../src/vnet/l2/l2_fib.c: No such file or directory.
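For context, my reading of frames #5-#8: shmem_cli_output() appends each formatted chunk of the "show l2fib" output to a single growing vppinfra vector, so the whole reply has to fit in one contiguous allocation from VPP's main heap; the failing request is size=7825458 (about 7.8 MB) in frame #5, and the allocator panics on failure because os_out_of_memory_on_failure=1. A minimal sketch of that accumulation pattern, using the vec.h macros named in the trace (simplified, not the actual 17.04 source):

/* Sketch only: the allocation pattern I believe is behind frames #5-#8.
 * Each 100-byte CLI chunk is appended to one growing vector. */
#include <vppinfra/vec.h>

static u8 *reply_vector;        /* grows without bound as lines are added */

static void
append_cli_chunk (u8 * buffer, u32 buffer_bytes)
{
  /* vec_add() -> _vec_resize() -> vec_resize_allocate_memory()
   * -> clib_mem_alloc_aligned_at_offset(), which calls os_panic()
   * when the allocation fails. */
  vec_add (reply_vector, buffer, buffer_bytes);
}

So with 40000 learned MACs the reply vector alone needs a multi-megabyte contiguous allocation, which appears to be what fails here.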


The system itself still has plenty of free memory:


root@QWAC:/# cat /proc/meminfo
MemTotal:        3938480 kB
MemFree:         1956388 kB
MemAvailable:    2299188 kB
Buffers:          179036 kB
Cached:           186656 kB
SwapCached:            0 kB
Active:           482992 kB
Inactive:         134164 kB
Active(anon):     253420 kB
Inactive(anon):    11816 kB
Active(file):     229572 kB
Inactive(file):   122348 kB
Unevictable:           0 kB
Mlocked:               0 kB
SwapTotal:             0 kB
SwapFree:              0 kB
Dirty:                 0 kB
Writeback:             0 kB
AnonPages:        252064 kB
Mapped:           208352 kB
Shmem:             13276 kB
Slab:              36312 kB
SReclaimable:      20616 kB
SUnreclaim:        15696 kB
KernelStack:        2016 kB
PageTables:         4236 kB
NFS_Unstable:          0 kB
Bounce:                0 kB
WritebackTmp:          0 kB
CommitLimit:     1444952 kB
Committed_AS:    2579656 kB
VmallocTotal:   34359738367 kB
VmallocUsed:      268632 kB
VmallocChunk:   34359462972 kB
HugePages_Total:     512
HugePages_Free:      384
HugePages_Rsvd:        0
HugePages_Surp:        0
Hugepagesize:       2048 kB
DirectMap4k:       13688 kB
DirectMap2M:     4071424 kB




unix {
  nodaemon
  log /tmp/vpp.log
  full-coredump
}


api-trace {
  on
}


api-segment {
  gid vpp
}


cpu {
## In VPP there is one main thread, and optionally the user can create worker(s)
## The main thread and worker thread(s) can be pinned to CPU core(s) manually
## or automatically


## Manual pinning of thread(s) to CPU core(s)


## Set logical CPU core where main thread runs
#main-core 1


## Set logical CPU core(s) where worker threads are running
#corelist-workers 2-3


## Automatic pinning of thread(s) to CPU core(s)


## Sets number of CPU core(s) to be skipped (1 ... N-1)
## Skipped CPU core(s) are not used for pinning the main thread and worker thread(s).
## The main thread is automatically pinned to the first available CPU core, and worker(s)
## are pinned to the next free CPU core(s) after the core assigned to the main thread
# skip-cores 4


## Specify the number of workers to be created
## Workers are pinned to N consecutive CPU cores while skipping "skip-cores"
## CPU core(s) and the main thread's CPU core
# workers 2


## Set scheduling policy and priority of main and worker threads


## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH)
## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR)
# scheduler-policy fifo


## Scheduling priority is used only for "real-time" policies (fifo and rr),
## and has to be in the range of priorities supported for a particular policy
# scheduler-priority 50
}


# dpdk {
## Change default settings for all interfaces
# dev default {
## Number of receive queues, enables RSS
## Default is 1
# num-rx-queues 3


## Number of transmit queues. Default is equal
## to the number of worker threads, or 1 if there are no worker threads
# num-tx-queues 3


## Number of descriptors in transmit and receive rings
## Increasing or reducing the number can impact performance
## Default is 1024 for both rx and tx
# num-rx-desc 512
# num-tx-desc 512


## VLAN strip offload mode for interface
## Default is off
# vlan-strip-offload on
# }


## Whitelist specific interface by specifying PCI address
# dev 0000:02:00.0


## Whitelist specific interface by specifying PCI address and in
## addition specify custom parameters for this interface
# dev 0000:02:00.1 {
#num-rx-queues 2
# }


## Change the UIO driver used by VPP. Options are: igb_uio, vfio-pci
## and uio_pci_generic (default)
# uio-driver vfio-pci


## Disable multi-segment buffers, improves performance but
## disables Jumbo MTU support
# no-multi-seg


## Increase number of buffers allocated, needed only in scenarios with
## a large number of interfaces and worker threads. Value is per CPU socket.
## Default is 16384
# num-mbufs 128000


## Change hugepages allocation per-socket, needed only if there is need for
## a larger number of mbufs. Default is 256M on each detected CPU socket
# socket-mem 2048,2048
# }
snat { deterministic }
heapsize 1G
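
Given the backtrace, would the right fix simply be to grow the main heap beyond the 1G configured above? E.g. (the value is just my guess):

heapsize 3G

Or is there a recommended way to keep "show l2fib" from building the entire output in a single vector?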


Regards,
Xlangyun
