Subject: Any reason why arp monitor keeps emitting netlink failover events?
From: Jonathan Toppins
Date: 2022-06-14  2:59 UTC
To: netdev@vger.kernel.org
Cc: Jay Vosburgh, Veaceslav Falico, Andy Gospodarek, Hangbin Liu

On net-next/master as of today, I see netlink failover events being
emitted by an active-backup bond. In the ip monitor dump below, the bond
reports link up (according to its link status), yet it keeps emitting
failover events roughly once per arp_interval, and I am not sure why.
The same behavior also appears on Fedora 35 and CentOS 8 kernels. The
configuration looks correct to me, though I could be missing something.
Thoughts?
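
For anyone reproducing, the bonding driver's own view of the slaves
(active slave, per-slave MII status, ARP targets) is also visible via
the standard procfs status file:

# bonding driver state as the driver sees it, independent of RTNL link flags
cat /proc/net/bonding/bond0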

-Jon


Upstream Commit:
   c04245328dd7 net: make __sys_accept4_file() static

Console Log:
[root@fedora ~]# cat ./bond-bz2094911.sh
#!/bin/sh

set -e
set -x

# clean up any state left over from a previous run
dmesg -C
ip -all netns delete || true
ip link del link1_1 || true
ip link del link2_1 || true
modprobe -r bonding || true

# two veth pairs: link1_1/link2_1 will be enslaved to the bond; their
# peers sit behind a bridge in ns1 that stands in for the switch
ip link add name link1_1 type veth peer name link1_2
ip link add name link2_1 type veth peer name link2_2
ip netns add ns1
ip netns exec ns1 ip link add name br0 type bridge vlan_filtering 1
ip link set link1_2 up netns ns1
ip link set link2_2 up netns ns1
ip netns exec ns1 ip link set link1_2 master br0
ip netns exec ns1 ip link set link2_2 master br0
# both arp_ip_target addresses live on the bridge
ip netns exec ns1 ip addr add 192.168.30.5/24 dev br0
ip netns exec ns1 ip addr add 192.168.30.7/24 dev br0
ip netns exec ns1 ip link set br0 up
# active-backup bond driven purely by the ARP monitor (no miimon)
ip link add name bond0 type bond mode active-backup arp_all_targets any \
	arp_ip_target "192.168.30.7,192.168.30.5" arp_interval 1000 \
	fail_over_mac follow arp_validate active primary_reselect always \
	primary link1_1
ip link set bond0 up
ip addr add 192.168.30.10/24 dev bond0
ifenslave bond0 link1_1 link2_1
#ip -ts -o monitor link
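
To get a rough sense of the event rate, something like this (a
convenience one-liner, not part of the reproducer) counts failover
events over a ten-second window:

# count BONDING FAILOVER events seen in 10 seconds
timeout 10 ip -ts -o monitor link dev bond0 | grep -c 'BONDING FAILOVER'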


[root@fedora ~]# ip -ts -o monitor link dev bond0
[2022-06-13T22:20:35.289034] [2022-06-13T22:20:35.289846] [2022-06-13T22:20:35.289978] [2022-06-13T22:20:35.290089] [2022-06-13T22:20:35.290200] [2022-06-13T22:20:35.290311] 14: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default event BONDING FAILOVER \    link/ether fe:5b:52:88:61:68 brd ff:ff:ff:ff:ff:ff
[2022-06-13T22:20:35.291055] 14: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default event NOTIFY PEERS \    link/ether fe:5b:52:88:61:68 brd ff:ff:ff:ff:ff:ff
[2022-06-13T22:20:35.324494] 14: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default event RESEND IGMP \    link/ether fe:5b:52:88:61:68 brd ff:ff:ff:ff:ff:ff
[2022-06-13T22:20:36.312078] [2022-06-13T22:20:36.312296] [2022-06-13T22:20:36.312549] [2022-06-13T22:20:36.312670] [2022-06-13T22:20:36.312782] [2022-06-13T22:20:36.312893] 14: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default event BONDING FAILOVER \    link/ether fe:5b:52:88:61:68 brd ff:ff:ff:ff:ff:ff
[2022-06-13T22:20:36.313134] 14: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default event NOTIFY PEERS \    link/ether fe:5b:52:88:61:68 brd ff:ff:ff:ff:ff:ff
[2022-06-13T22:20:36.346157] 14: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default event RESEND IGMP \    link/ether fe:5b:52:88:61:68 brd ff:ff:ff:ff:ff:ff
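
In case it is useful, the ARP probes and replies can also be watched
from the bridge side (run in another terminal; interface names as in
the script above):

# ARP traffic arriving at the bridge port behind the primary slave
ip netns exec ns1 tcpdump -ni link1_2 arp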

[root@fedora tests]# ip -d link show dev bond0
14: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
     link/ether fe:5b:52:88:61:68 brd ff:ff:ff:ff:ff:ff promiscuity 0 minmtu 68 maxmtu 65535
     bond mode active-backup active_slave link1_1 miimon 0 updelay 0 downdelay 0 peer_notify_delay 0 use_carrier 1 arp_interval 1000 arp_missed_max 2 arp_ip_target 192.168.30.7,192.168.30.5 arp_validate active arp_all_targets any primary link1_1 primary_reselect always fail_over_mac follow xmit_hash_policy layer2 resend_igmp 1 num_grat_arp 1 all_slaves_active 0 min_links 0 lp_interval 1 packets_per_slave 1 lacp_active on lacp_rate slow ad_select stable tlb_dynamic_lb 1 addrgenmode eui64 numtxqueues 16 numrxqueues 16 gso_max_size 65536 gso_max_segs 65535 gro_max_size 65536
[root@fedora tests]#
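
The currently active slave can also be polled via the standard bonding
sysfs attribute; if real failovers are happening it should be seen
changing between link1_1 and link2_1:

# watch the active slave change as failovers occur
while sleep 1; do cat /sys/class/net/bond0/bonding/active_slave; done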

[root@fedora tests]# ip -d -s link show dev link1_1
11: link1_1@if10: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc noqueue master bond0 state UP mode DEFAULT group default qlen 1000
     link/ether aa:48:a3:a3:2b:2b brd ff:ff:ff:ff:ff:ff link-netns ns1 promiscuity 0 minmtu 68 maxmtu 65535
     veth
     bond_slave state BACKUP mii_status DOWN link_failure_count 466 perm_hwaddr b6:19:b6:e3:29:c5 queue_id 0 addrgenmode eui64 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535 gro_max_size 65536
     RX:  bytes packets errors dropped  missed   mcast
         295004    5622      0       0       0       0
     TX:  bytes packets errors dropped carrier collsns
         254824    4678      0       0       0       0

[root@fedora tests]# ip -d -s link show dev link2_1
13: link2_1@if12: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc noqueue master bond0 state UP mode DEFAULT group default qlen 1000
     link/ether aa:48:a3:a3:2b:2b brd ff:ff:ff:ff:ff:ff link-netns ns1 promiscuity 0 minmtu 68 maxmtu 65535
     veth
     bond_slave state BACKUP mii_status UP link_failure_count 0 perm_hwaddr aa:48:a3:a3:2b:2b queue_id 0 addrgenmode eui64 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535 gro_max_size 65536
     RX:  bytes packets errors dropped  missed   mcast
         303452    5776      0       0       0       0
     TX:  bytes packets errors dropped carrier collsns
         179592    2866      0       0       0       0
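
Note that link1_1 reports bond_slave mii_status DOWN with
link_failure_count 466 even though its link flags say UP, and both
slaves show the same link/ether address while their perm_hwaddr values
differ, as expected with fail_over_mac follow.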



Thread overview: 6+ messages
2022-06-14  2:59 Any reason why arp monitor keeps emitting netlink failover events? Jonathan Toppins
2022-06-14 15:29 ` Jay Vosburgh
2022-06-14 17:07   ` Jonathan Toppins
2022-06-15  0:26     ` Jay Vosburgh
2022-06-15 15:51       ` Jonathan Toppins
2022-06-16 18:52         ` Jay Vosburgh