Alexander Sack wrote:
Oleksandr:

Are you using DEVICE_POLLING by chance?  If so, have you tried turning
it off (e.g. ifconfig em0 -polling)?  Just curious.


Surely, no :)

# ifconfig em0
em0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
        options=19b<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,VLAN_HWCSUM,TSO4>


I'm just trying the same configuration on i386.


On Sat, May 3, 2008 at 6:16 PM, Oleksandr Samoylyk
<[EMAIL PROTECTED]> wrote:
Hi!

 I'm running a SMP FreeBSD box with mpd5 on it.

 # uname -a
 FreeBSD xxx.xxxxxxxxx.xxx 7.0-STABLE FreeBSD 7.0-STABLE #0: Sat May  3
 12:40:02 EEST 2008
 [EMAIL PROTECTED]:/usr/obj/usr/src/sys/XXXX  amd64

 # mpd5 -v
 Version 5.1 ([EMAIL PROTECTED] 09:53  1-May-2008)

 Somehow em0 begins to eat all CPU time of one core.

 # top -S
 last pid: 55827;  load averages:  3.76,  3.42,  3.08
      up 0+03:27:38  16:24:20
 104 processes: 11 running, 81 sleeping, 12 waiting
 CPU states:  1.7% user,  0.0% nice, 21.4% system,  3.0% interrupt, 73.9% idle
 Mem: 71M Active, 89M Inact, 340M Wired, 336K Cache, 214M Buf, 7418M Free
 Swap: 4096M Total, 4096M Free

  PID USERNAME  THR PRI NICE   SIZE    RES STATE  C   TIME   WCPU COMMAND
   29 root        1 -68    -     0K    16K CPU5   5 196:41 100.00% em0 taskq
   11 root        1 171 ki31     0K    16K CPU7   7 175:41 94.09% idle: cpu7
   16 root        1 171 ki31     0K    16K CPU2   2 175:45 91.26% idle: cpu2
   15 root        1 171 ki31     0K    16K CPU3   3 180:18 89.45% idle: cpu3
   14 root        1 171 ki31     0K    16K CPU4   4 177:13 87.89% idle: cpu4
   17 root        1 171 ki31     0K    16K CPU1   1 165:27 86.87% idle: cpu1
   12 root        1 171 ki31     0K    16K CPU6   6 176:18 83.25% idle: cpu6
   18 root        1 171 ki31     0K    16K RUN    0 157:44 80.66% idle: cpu0
  611 root        6  58    0   133M 44320K select 0   0:00 66.26% mpd5
   21 root        1 -44    -     0K    16K CPU4   4  48:38 21.39% swi1: net
   30 root        1 -68    -     0K    16K -      6  21:41 10.25% em1 taskq

 Everything is OK with outbound interface - em1.

 Current bandwidth - ~ 80 Mbit/s

 There are a lot of input errors on em0 (but none on em1):

 # netstat -w 1 -I em0
            input          (em0)           output
   packets  errs      bytes    packets  errs      bytes colls
      8012   923    2838565      12504     0    7943345     0
      7934   874    2469244      12555     0    7728764     0
      7931   976    2712035      12482     0    8006760     0
      8015   813    2694716      10669     0    7796656     0
      7975   733    2475193      12306     0    8032129     0
      7871   825    2548198      12269     0    7789452     0
      8072   961    2647014      11924     0    7260788     0
      7909   983    2576145      10552     0    7479881     0
 ^C

 And systat -v looks strange with no interrupts on em0:

    2 users    Load  1.34  1.61  1.62                  May  3 14:04

 Mem:KB    REAL            VIRTUAL                       VN PAGER   SWAP PAGER
        Tot   Share      Tot    Share    Free           in   out     in   out
 Act   68152    9452   231584    11936 7786368  count
 All  108516   10676  4486380    15448          pages
 Proc:                                                            Interrupts
  r   p   d   s   w   Csw  Trp  Sys  Int  Sof  Flt   3981 cow   22705 total
             47       46k  10k 268k 6697  23k  10k   3973 zfod atkbd0 1
                                                          ozfod ata0 irq14
 18.3%Sys   2.3%Intr  1.8%User  0.0%Nice 77.6%Idle        %ozfod atapci1 19
 |    |    |    |    |    |    |    |    |    |    |       daefr  2001 cpu0: 
time
 =========+>                                          5699 prcfr     2 em0 
irq256
                                        55 dtbuf    12110 totfr  6695 em1 irq257
 Namei     Name-cache   Dir-cache    100000 desvn          react  2001 cpu3: 
time
   Calls    hits   %    hits   %      4217 numvn          pdwak  2001 cpu1: time
   12005   12004 100                   304 frevn          pdpgs  2001 cpu2: time
                                                       13 intrn  2001 cpu4: time
 Disks   ad4                                        232692 wire   2001 cpu5: 
time
 KB/t   0.00                                         60640 act    2001 cpu7: 
time
 tps       0                                         28784 inact  2001 cpu6: 
time
 MB/s   0.00                                           336 cache
 %busy     0                                       7786032 free
                                                   219632 buf

 Latency grows up to 400 ms:
 # ping 10.0.0.1
 PING 10.0.0.1 (10.0.0.1): 56 data bytes
 64 bytes from 10.0.0.1: icmp_seq=0 ttl=64 time=17.619 ms
 64 bytes from 10.0.0.1: icmp_seq=1 ttl=64 time=27.497 ms
 64 bytes from 10.0.0.1: icmp_seq=3 ttl=64 time=16.481 ms
 64 bytes from 10.0.0.1: icmp_seq=4 ttl=64 time=24.535 ms
 64 bytes from 10.0.0.1: icmp_seq=5 ttl=64 time=13.058 ms
 ^C
 --- 10.0.0.1 ping statistics ---
 6 packets transmitted, 5 packets received, 16.7% packet loss
 round-trip min/avg/max/stddev = 13.058/19.838/27.497/5.346 ms

 # top -S
 last pid: 55827;  load averages:  3.76,  3.42,  3.08
      up 0+03:27:38  16:24:20
 104 processes: 11 running, 81 sleeping, 12 waiting
 CPU states:  1.7% user,  0.0% nice, 21.4% system,  3.0% interrupt, 73.9% idle
 Mem: 71M Active, 89M Inact, 340M Wired, 336K Cache, 214M Buf, 7418M Free
 Swap: 4096M Total, 4096M Free

  PID USERNAME  THR PRI NICE   SIZE    RES STATE  C   TIME   WCPU COMMAND
   29 root        1 -68    -     0K    16K CPU5   5 196:41 100.00% em0 taskq
   11 root        1 171 ki31     0K    16K CPU7   7 175:41 94.09% idle: cpu7
   16 root        1 171 ki31     0K    16K CPU2   2 175:45 91.26% idle: cpu2
   15 root        1 171 ki31     0K    16K CPU3   3 180:18 89.45% idle: cpu3
   14 root        1 171 ki31     0K    16K CPU4   4 177:13 87.89% idle: cpu4
   17 root        1 171 ki31     0K    16K CPU1   1 165:27 86.87% idle: cpu1
   12 root        1 171 ki31     0K    16K CPU6   6 176:18 83.25% idle: cpu6
   18 root        1 171 ki31     0K    16K RUN    0 157:44 80.66% idle: cpu0
  611 root        6  58    0   133M 44320K select 0   0:00 66.26% mpd5
   21 root        1 -44    -     0K    16K CPU4   4  48:38 21.39% swi1: net
   30 root        1 -68    -     0K    16K -      6  21:41 10.25% em1 taskq

 # sysctl dev.em.0
 dev.em.0.%desc: Intel(R) PRO/1000 Network Connection Version - 6.7.3
 dev.em.0.%driver: em
 dev.em.0.%location: slot=0 function=0
 dev.em.0.%pnpinfo: vendor=0x8086 device=0x1096 subvendor=0x15d9
 subdevice=0x0000 class=0x020000
 dev.em.0.%parent: pci6
 dev.em.0.debug: -1
 dev.em.0.stats: -1
 dev.em.0.rx_int_delay: 0
 dev.em.0.tx_int_delay: 66
 dev.em.0.rx_abs_int_delay: 66
 dev.em.0.tx_abs_int_delay: 66
 dev.em.0.rx_processing_limit: -1

 I've tried both:
 options         SCHED_ULE
 options         SCHED_4BSD

 I've added just the following lines in my kernel config:

 options         IPFIREWALL
 options         IPFIREWALL_DEFAULT_TO_ACCEPT

 options         NETGRAPH
 options         NETGRAPH_PPP
 options         NETGRAPH_PPTPGRE


 My sysctls:
 net.inet.ip.forwarding=1
 net.inet.ip.fastforwarding=1
 net.inet.ip.redirect=0
 net.inet.ip.random_id=1
 net.inet.ip.ttl=255
 net.inet.ip.intr_queue_maxlen=4096

 kern.maxfiles=131072
 kern.maxfilesperproc=32768
 kern.maxprocperuid=32768

 kern.ipc.somaxconn=65535
 kern.ipc.maxsockets=32768
 kern.ipc.maxsockbuf=16777216

 net.inet.tcp.rfc1323=1
 net.inet.tcp.recvspace=65536
 net.inet.tcp.sendspace=32768
 net.inet.tcp.sendbuf_max=16777216
 net.inet.tcp.recvbuf_max=16777216
 net.inet.tcp.sendbuf_auto=1
 net.inet.tcp.sendbuf_inc=8192
 net.inet.tcp.recvbuf_auto=1
 net.inet.tcp.recvbuf_inc=16384
 net.inet.tcp.maxtcptw=40960
 net.inet.tcp.msl=2500
 net.inet.tcp.delayed_ack=0
 net.inet.tcp.nolocaltimewait=1

 net.inet.udp.checksum=0
 net.inet.udp.recvspace=65535
 net.inet.udp.maxdgram=57344

 net.inet.icmp.icmplim=30

 net.inet.tcp.blackhole=2
 net.inet.udp.blackhole=1

 net.local.stream.recvspace=65535
 net.local.stream.sendspace=65535

 net.isr.direct=1

 kern.timecounter.hardware=TSC

 dev.em.0.rx_processing_limit=-1

 If I set net.isr.direct to "0", then swi1: net begins to eat 100% of a
 core, but without errors:
 # netstat -w 1 -I em0
            input          (em0)           output
   packets  errs      bytes    packets  errs      bytes colls
      6953     0    2860537       8703     0    4882814     0
      6785     0    2587635       7683     0    4443958     0
      7006     0    2576630       8718     0    4924591     0
      6887     0    2652461       8272     0    4548049     0
      6854     0    2610157       8689     0    5152459     0
      6889     0    2586067       8265     0    5010795     0
      6878     0    2586746       8255     0    4734959     0
 ^C

 Moreover, with net.isr.direct=0 I can't create a PPTP tunnel.

 Please, help to solve the problem. Thanks!

 --
  Oleksandr Samoylyk
  OVS-RIPE
 _______________________________________________
 freebsd-net@freebsd.org mailing list
 http://lists.freebsd.org/mailman/listinfo/freebsd-net
 To unsubscribe, send any mail to "[EMAIL PROTECTED]"



--
 Oleksandr Samoylyk
 OVS-RIPE
_______________________________________________
freebsd-net@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/freebsd-net
To unsubscribe, send any mail to "[EMAIL PROTECTED]"

Reply via email to