On Thursday 31 January 2013 20:30:18 Ethan Jackson wrote:
> Are you absolutely sure the traffic isn't egressing the first switch,
> and then ingressing the other switch into the bond?  It's often hard
> to tell with tcpdump which direction traffic is travelling.

That made me really think, and finally I have to confess: you're right, that's 
the explanation for the duplicate broadcast/multicast traffic. Why wasn't I able 
to see it with port mirroring on the blade center switches? Well, by default 
they mirror both ingress and egress traffic, just as tcpdump captures both 
directions. 
So I redid all my tests today, this time mirroring only ingress traffic on the 
blade center switch's internal port to the blade hosting my XCP test server. 
And voilà, I saw my broadcast traffic on only one of the two ports. 
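
Side note for anyone retracing this: a sufficiently recent tcpdump/libpcap can 
restrict the capture direction itself, which avoids the same trap - e.g. 
(assuming your tcpdump supports -Q):

# show only packets received on eth0, ignoring what we transmit ourselves
tcpdump -Q in -e -n -i eth0 ether broadcast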

Too bad, I still have my problem with the uplink switches going crazy because 
MAC addresses keep jumping between their uplink interfaces to the blade center 
switches, so my #1 explanation is ruled out. But what about the other problem I 
reported a few days ago (Subject: [BUG] SLB bonding & bond-rebalance-interval 
not working as expected)? I took a deeper look into it - it seems to be quite a 
good explanation as well, so please find my (admittedly lengthy) report below. 


> Could you please send problematic traffic through the switch, and
> while that's going either run ovs-bugtool and send us the tarball, or
> run the following commands

Not sure if you still need this, but it can't hurt - sadly XCP doesn't ship 
ovs-bugtool, but here we go:

[root@fraxcptest ~]# ovs-dpctl show -s
system@xapi1:
        lookups: hit:4475430 missed:676030 lost:0
        flows: 9
        port 0: xapi1 (internal)
                RX packets:1470928 errors:0 dropped:0 overruns:0 frame:0
                TX packets:3106056 errors:0 dropped:0 aborted:0 carrier:0
                collisions:0
                RX bytes:312425859 (298.0 MiB)  TX bytes:3414856958 (3.2 GiB)
        port 1: eth1
                RX packets:3086604 errors:0 dropped:0 overruns:0 frame:0
                TX packets:1465522 errors:0 dropped:0 aborted:0 carrier:0
                collisions:0
                RX bytes:3413109311 (3.2 GiB)  TX bytes:306152907 (292.0 MiB)
        port 2: eth0
                RX packets:470299 errors:0 dropped:0 overruns:0 frame:0
                TX packets:133084 errors:0 dropped:0 aborted:0 carrier:0
                collisions:0
                RX bytes:39932740 (38.1 MiB)  TX bytes:15932731 (15.2 MiB)
        port 6: vif4.0
                RX packets:8361 errors:0 dropped:0 overruns:0 frame:0
                TX packets:7025 errors:0 dropped:2 aborted:0 carrier:0
                collisions:0
                RX bytes:878048 (857.5 KiB)  TX bytes:605354 (591.2 KiB)
system@xenbr2:
        lookups: hit:0 missed:0 lost:0
        flows: 0
        port 0: xenbr2 (internal)
                RX packets:0 errors:0 dropped:0 overruns:0 frame:0
                TX packets:0 errors:0 dropped:0 aborted:0 carrier:0
                collisions:0
                RX bytes:0  TX bytes:0
        port 1: eth2
                RX packets:0 errors:0 dropped:0 overruns:0 frame:0
                TX packets:0 errors:0 dropped:0 aborted:0 carrier:0
                collisions:0
                RX bytes:0  TX bytes:0
system@xenbr3:
        lookups: hit:0 missed:0 lost:0
        flows: 0
        port 0: xenbr3 (internal)
                RX packets:0 errors:0 dropped:0 overruns:0 frame:0
                TX packets:0 errors:0 dropped:0 aborted:0 carrier:0
                collisions:0
                RX bytes:0  TX bytes:0
        port 1: eth3
                RX packets:0 errors:0 dropped:0 overruns:0 frame:0
                TX packets:0 errors:0 dropped:0 aborted:0 carrier:0
                collisions:0
                RX bytes:0  TX bytes:0
[root@fraxcptest ~]# 
[root@fraxcptest ~]# 
[root@fraxcptest ~]# ovs-dpctl dump-flows xapi1
priority(6),in_port(0),eth(src=00:1a:64:8f:a5:9e,dst=00:1e:37:48:c0:08),eth_type(0x0800),ipv4(src=192.168.0.171,dst=192.168.0.124,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=22,dst=60678), packets:824, bytes:119312, used:4.000s, actions:2
in_port(1),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:8, bytes:784, used:0.980s, actions:drop
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:8, bytes:784, used:0.980s, actions:2,0
in_port(1),eth(src=00:1e:37:48:c0:08,dst=00:1a:64:8f:a5:9e),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.171,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=32932,dst=22), packets:133, bytes:12006, used:0.030s, actions:0
in_port(2),eth(src=00:1e:37:48:c0:08,dst=00:1a:64:8f:a5:9e),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.171,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=32932,dst=22), packets:8, bytes:952, used:2.930s, actions:0
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=00:1e:37:48:c0:08),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.124,proto=6,tos=0,ttl=64,frag=no),tcp(src=22,dst=53232), packets:29, bytes:4849, used:3.000s, actions:2
in_port(2),eth(src=00:1e:37:48:c0:08,dst=b2:76:b4:94:5d:e1),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.157,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=53232,dst=22), packets:15, bytes:1134, used:3.000s, actions:6
priority(6),in_port(0),eth(src=00:1a:64:8f:a5:9e,dst=00:1e:37:48:c0:08),eth_type(0x0800),ipv4(src=192.168.0.171,dst=192.168.0.124,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=22,dst=60480), packets:1856, bytes:1435320, used:3.030s, actions:2
priority(6),in_port(0),eth(src=00:1a:64:8f:a5:9e,dst=00:1e:37:48:c0:08),eth_type(0x0800),ipv4(src=192.168.0.171,dst=192.168.0.124,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=22,dst=32932), packets:133, bytes:19578, used:0.030s, actions:1
[root@fraxcptest ~]# 
[root@fraxcptest ~]# 
[root@fraxcptest ~]# ovs-appctl bond/list
bond    type    slaves
bond0   balance-slb     eth0, eth1
[root@fraxcptest ~]# 
[root@fraxcptest ~]# 
[root@fraxcptest ~]# ovs-appctl bond/show bond0
bond_mode: balance-slb
bond-hash-algorithm: balance-slb
bond-hash-basis: 0
updelay: 31000 ms
downdelay: 200 ms
next rebalance: 23952 ms
lacp_negotiated: false

slave eth0: enabled
        may_enable: true
        hash 122: 7 kB load

slave eth1: enabled
        active slave
        may_enable: true
        hash 93: 139 kB load
[root@fraxcptest ~]# 
[root@fraxcptest ~]# 
[root@fraxcptest ~]# ovs-appctl lacp/show
[root@fraxcptest ~]# 
[root@fraxcptest ~]# 
[root@fraxcptest ~]# ovs-appctl fdb/show xapi1
 port  VLAN  MAC                Age
    1     0  00:12:79:85:87:00   48
    6     0  b2:76:b4:94:5d:e1    1
    1     0  00:1e:37:48:c0:08    1
    0     0  00:1a:64:8f:a5:9e    1


##########################################################################
##########################################################################
##########################################################################


Below is the report I mentioned above - I really hope it contains all the 
information you need to assist. 


As you've seen above, only xapi1 is interesting. Port 1 is eth1 and port 2 is 
eth0 - a bit confusing, but we can work with that. 
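
If in doubt, the port-number-to-name mapping can be re-checked at any time by 
filtering the ovs-dpctl output shown earlier:

ovs-dpctl show xapi1 | grep 'port '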

What about MAC addresses? b2:76:b4:94:5d:e1 is the VM we're interested in. So 
let's look up its flows:
[root@fraxcptest ~]# ovs-dpctl dump-flows xapi1 | fgrep b2:76:b4:94:5d:e1
[root@fraxcptest ~]# 

OK, currently no active flows, that's good. (Idle datapath flows expire after a 
few seconds, so a quiet VM quickly disappears from the dump.) 

What about the hash of the VM's MAC:
[root@fraxcptest ~]# ovs-appctl bond/hash b2:76:b4:94:5d:e1
122
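
For context: balance-slb maps each source MAC (and VLAN) to one of 256 hash 
buckets, so bond/hash always returns a number between 0 and 255. A quick loop 
shows how a handful of MACs spread across the buckets, e.g.:

for mac in b2:76:b4:94:5d:e1 00:1e:37:48:c0:08; do
    echo -n "$mac -> "; ovs-appctl bond/hash $mac
done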

And where does that hash currently live?
[root@fraxcptest ~]# ovs-appctl bond/show bond0
bond_mode: balance-slb
bond-hash-algorithm: balance-slb
bond-hash-basis: 0
updelay: 31000 ms
downdelay: 200 ms
next rebalance: 93899 ms
lacp_negotiated: false

slave eth0: enabled
        may_enable: true
        hash 122: 1 kB load

slave eth1: enabled
        active slave
        may_enable: true
        hash 93: 69 kB load

It's on eth0. 

Now let's start some traffic in our VM. 
I'm starting a broadcast ping over SSH - that gives us a nice constant stream 
of ICMP and TCP packets (the TCP coming from the SSH connection itself). 
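Concretely, it's something along these lines run from my workstation (host name 
made up):

ssh root@testvm 'ping -b 192.168.0.255'
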
Open vSwitch will handle that as two flows, but let's check:
[root@fraxcptest ~]# ovs-dpctl dump-flows xapi1 | fgrep b2:76:b4:94:5d:e1
in_port(1),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:4, bytes:392, used:2.770s, actions:drop
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:4, bytes:392, used:2.770s, actions:2,0
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=00:1e:37:48:c0:08),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.124,proto=6,tos=0,ttl=64,frag=no),tcp(src=22,dst=52021), packets:8, bytes:1312, used:1.740s, actions:2
in_port(2),eth(src=00:1e:37:48:c0:08,dst=b2:76:b4:94:5d:e1),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.157,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=52021,dst=22), packets:11, bytes:822, used:1.730s, actions:6

OK, very good. The VM's two outgoing flows both leave via port 2 - that's eth0, 
which is correct. (In this output, "actions:2" means output to datapath port 2; 
"actions:2,0" means output to port 2 plus the local port 0.) 

Now let's wait a bit...
... and check if Open vSwitch has rebalanced in between:
[root@fraxcptest ~]# ovs-appctl bond/show bond0
bond_mode: balance-slb
bond-hash-algorithm: balance-slb
bond-hash-basis: 0
updelay: 31000 ms
downdelay: 200 ms
next rebalance: 67432 ms
lacp_negotiated: false

slave eth0: enabled
        may_enable: true
        hash 122: 12 kB load

slave eth1: enabled
        active slave
        may_enable: true
        hash 93: 182 kB load

Nope, hash 122 is still on eth0. Let's check the flow table:
[root@fraxcptest ~]# ovs-dpctl dump-flows xapi1 | fgrep b2:76:b4:94:5d:e1
in_port(1),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:146, bytes:14308, used:0.910s, actions:drop
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:146, bytes:14308, used:0.910s, actions:2,0
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=00:1e:37:48:c0:08),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.124,proto=6,tos=0,ttl=64,frag=no),tcp(src=22,dst=52021), packets:119, bytes:16238, used:0.850s, actions:2
in_port(2),eth(src=00:1e:37:48:c0:08,dst=b2:76:b4:94:5d:e1),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.157,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=52021,dst=22), packets:122, bytes:8148, used:0.850s, actions:6

Very good, the flow is still on port 2 (eth0). 

Now let's spice things up a bit and move hash 122 over to eth1 (something the 
rebalancer will do on its own from time to time):
[root@fraxcptest ~]# ovs-appctl bond/migrate bond0 122 eth1
migrated
[root@fraxcptest ~]# ovs-appctl bond/show bond0
bond_mode: balance-slb
bond-hash-algorithm: balance-slb
bond-hash-basis: 0
updelay: 31000 ms
downdelay: 200 ms
next rebalance: 65123 ms
lacp_negotiated: false

slave eth0: enabled
        may_enable: true

slave eth1: enabled
        active slave
        may_enable: true
        hash 93: 199 kB load
        hash 122: 13 kB load

OK, hash 122 is on eth1 now, let's check the flow table:
[root@fraxcptest ~]# ovs-dpctl dump-flows xapi1 | fgrep b2:76:b4:94:5d:e1
in_port(1),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:201, bytes:19698, used:0.880s, actions:drop
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:201, bytes:19698, used:0.880s, actions:2,0
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=00:1e:37:48:c0:08),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.124,proto=6,tos=0,ttl=64,frag=no),tcp(src=22,dst=52021), packets:159, bytes:21678, used:2.890s, actions:2
in_port(2),eth(src=00:1e:37:48:c0:08,dst=b2:76:b4:94:5d:e1),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.157,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=52021,dst=22), packets:162, bytes:10788, used:2.890s, actions:6

Wait a second, those flows are still using port 2 (eth0) - well, let's wait a 
bit, maybe it takes some time...

[root@fraxcptest ~]# sleep 60
[root@fraxcptest ~]# ovs-appctl bond/show bond0
bond_mode: balance-slb
bond-hash-algorithm: balance-slb
bond-hash-basis: 0
updelay: 31000 ms
downdelay: 200 ms
next rebalance: 106382 ms
lacp_negotiated: false

slave eth0: enabled
        may_enable: true
        hash 93: 163 kB load

slave eth1: enabled
        active slave
        may_enable: true
        hash 122: 12 kB load

OK, it looks like rebalancing kicked in in the meantime and shifted hash 93 
over to eth0, but that shouldn't matter as hash 122 is still on eth1. 
Let's check the flow table again:
[root@fraxcptest ~]# ovs-dpctl dump-flows xapi1 | fgrep b2:76:b4:94:5d:e1
in_port(1),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:323, bytes:31654, used:2.190s, actions:drop
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:323, bytes:31654, used:2.190s, actions:2,0
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=00:1e:37:48:c0:08),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.124,proto=6,tos=0,ttl=64,frag=no),tcp(src=22,dst=52021), packets:279, bytes:38030, used:1.140s, actions:2
in_port(2),eth(src=00:1e:37:48:c0:08,dst=b2:76:b4:94:5d:e1),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.157,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=52021,dst=22), packets:14, bytes:924, used:1.140s, actions:6

Nope, still using port 2 (eth0). 

Oh wait, what's that?
[root@fraxcptest ~]# ovs-dpctl dump-flows xapi1 | fgrep b2:76:b4:94:5d:e1
in_port(1),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:355, bytes:34790, used:1.140s, actions:drop
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=00:1e:37:48:c0:08),eth_type(0x0806),arp(sip=192.168.0.157,tip=192.168.0.124,op=2,sha=b2:76:b4:94:5d:e1,tha=00:1e:37:48:c0:08), packets:7, bytes:294, used:1.410s, actions:1
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:355, bytes:34790, used:1.140s, actions:2,0
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=00:1e:37:48:c0:08),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.124,proto=6,tos=0,ttl=64,frag=no),tcp(src=22,dst=52021), packets:316, bytes:43032, used:0.090s, actions:2
in_port(2),eth(src=00:1e:37:48:c0:08,dst=b2:76:b4:94:5d:e1),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.157,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=52021,dst=22), packets:1, bytes:66, used:0.090s, actions:6
in_port(1),eth(src=00:1e:37:48:c0:08,dst=b2:76:b4:94:5d:e1),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.157,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=52021,dst=22), packets:0, bytes:0, used:never, actions:6

Our test VM sent out an ARP reply, and that newly created flow used port 1 
(eth1), while the pre-existing ICMP and TCP flows still use port 2 (eth0). So 
new flows do pick up the hash's new slave - it's only the flows that already 
exist that keep their old output port.

OK, let's stop our tests and wait for the VM's flows to disappear from the 
flow table:
[root@fraxcptest ~]# ovs-dpctl dump-flows xapi1 | fgrep b2:76:b4:94:5d:e1
[root@fraxcptest ~]# 

(I was constantly monitoring "ovs-appctl bond/show bond0" in a second window to 
make sure rebalancing didn't kick in unintentionally.)
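
Something like this in the second window does the trick:

watch -n 1 'ovs-appctl bond/show bond0'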

And then start the tests again. 

Have a look at hashes and flow table:
[root@fraxcptest ~]# ovs-appctl bond/show bond0
bond_mode: balance-slb
bond-hash-algorithm: balance-slb
bond-hash-basis: 0
updelay: 31000 ms
downdelay: 200 ms
next rebalance: 11682 ms
lacp_negotiated: false

slave eth0: enabled
        may_enable: true
        hash 93: 263 kB load

slave eth1: enabled
        active slave
        may_enable: true
        hash 122: 21 kB load

[root@fraxcptest ~]# ovs-dpctl dump-flows xapi1 | fgrep b2:76:b4:94:5d:e1
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:12, bytes:1176, used:0.030s, actions:1,0
in_port(2),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:12, bytes:1176, used:0.030s, actions:drop
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=00:1e:37:48:c0:08),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.124,proto=6,tos=0,ttl=64,frag=no),tcp(src=22,dst=52021), packets:13, bytes:2026, used:2.050s, actions:1
in_port(1),eth(src=00:1e:37:48:c0:08,dst=b2:76:b4:94:5d:e1),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.157,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=52021,dst=22), packets:13, bytes:882, used:2.050s, actions:6

OK, without any changes to Open vSwitch in between and no shifting of hashes, 
the new flows use the correct outgoing port 1 (eth1). 

Let's play the same game again:
[root@fraxcptest ~]# ovs-appctl bond/migrate bond0 122 eth0
migrated
[root@fraxcptest ~]# ovs-dpctl dump-flows xapi1 | fgrep b2:76:b4:94:5d:e1
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:124, bytes:12152, used:2.290s, actions:1,0
in_port(2),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:124, bytes:12152, used:2.280s, actions:drop
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=00:1e:37:48:c0:08),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.124,proto=6,tos=0,ttl=64,frag=no),tcp(src=22,dst=52021), packets:90, bytes:12420, used:1.240s, actions:1
in_port(1),eth(src=00:1e:37:48:c0:08,dst=b2:76:b4:94:5d:e1),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.157,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=52021,dst=22), packets:90, bytes:6012, used:1.240s, actions:6

Traffic is still on port 1 (eth1), although the hash should now live on eth0. 

Maybe we can catch an ARP reply again?
[root@fraxcptest ~]# ovs-dpctl dump-flows xapi1 | fgrep b2:76:b4:94:5d:e1
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=00:1e:37:48:c0:08),eth_type(0x0806),arp(sip=192.168.0.157,tip=192.168.0.124,op=2,sha=b2:76:b4:94:5d:e1,tha=00:1e:37:48:c0:08), packets:7, bytes:294, used:3.790s, actions:2
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:165, bytes:16170, used:0.440s, actions:1,0
in_port(2),eth(src=b2:76:b4:94:5d:e1,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.255,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:165, bytes:16170, used:0.440s, actions:drop
in_port(6),eth(src=b2:76:b4:94:5d:e1,dst=00:1e:37:48:c0:08),eth_type(0x0800),ipv4(src=192.168.0.157,dst=192.168.0.124,proto=6,tos=0,ttl=64,frag=no),tcp(src=22,dst=52021), packets:134, bytes:18348, used:2.460s, actions:1
in_port(2),eth(src=00:1e:37:48:c0:08,dst=b2:76:b4:94:5d:e1),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.157,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=52021,dst=22), packets:0, bytes:0, used:never, actions:6
in_port(1),eth(src=00:1e:37:48:c0:08,dst=b2:76:b4:94:5d:e1),eth_type(0x0800),ipv4(src=192.168.0.124,dst=192.168.0.157,proto=6,tos=0x10,ttl=64,frag=no),tcp(src=52021,dst=22), packets:1, bytes:66, used:2.460s, actions:6

And here we go: the ARP reply is using port 2 (eth0) while the ICMP and TCP 
flows are still using port 1 (eth1). 

So to me it looks like the migration of existing flows during rebalancing 
doesn't work correctly: they seem to "stick" to the interface they started on 
and never move over. 
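
Until this is fixed, the only mitigations I can think of are avoiding 
rebalancing altogether - untested on my side, and assuming the XCP build 
honours these knobs instead of xapi overriding them:

# option 1: disable periodic rebalancing (a value of 0 disables it,
# assuming the OVS version supports that)
ovs-vsctl set port bond0 other_config:bond-rebalance-interval=0
# option 2: give up on load sharing and fall back to active-backup
ovs-vsctl set port bond0 bond_mode=active-backup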

Hope that helps. 

Regards,
Markus
