Hi all,
I’m trying to run Babel over a GRE tunnel on VPP, but inbound Hellos from the
neighbor are being dropped with *“Multicast RPF check failed.”* The same Babel
setup works without VPP, so basic connectivity and host config look good.
I’ve attached detailed configs and packet traces from VPP1 and VPP2.
As a test, I explicitly allowed the Babel IPv6 multicast group (ff02::1:6) on
both WAN interfaces:
vppctl ip mroute add ff02::1:6/128 via :: WAN00
vppctl ip mroute add ff02::1:6/128 via :: WAN01
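I also plan to retry with explicit interface flags, i.e. Accept on the ingress interfaces plus a Forward leg towards the local (punt) path, similar to how the auto-created ff02::1 / ff02::2 entries appear in "sh ip6 mfib" further down (not yet verified on my setup):
# untested variant: Accept on each WAN interface, Forward towards the local path
vppctl ip mroute add ff02::1:6/128 via WAN00 Accept
vppctl ip mroute add ff02::1:6/128 via WAN01 Accept
vppctl ip mroute add ff02::1:6/128 via local Forward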
Unfortunately, the issue persists. Any pointers on what I might be missing on
the VPP side would be much appreciated.
Thanks
Nivethi
#**********VPP1 (Ubuntu 22.04.5 LTS) ***************
vppctl sh version
#-> vpp v25.10-rc0~210-g8941f9ade built by root on VPP2 at 2025-08-06T17:20:06
#vppctl lcp default
vppctl lcp lcp-sync enable
vppctl lcp lcp-sync on
#WAN00 <- TenGigabitEthernetc/0/0
#WAN01 <- TenGigabitEthernetc/0/1
#LAN00 <- TenGigabitEthernetb/0/0
#LAN01 <- TenGigabitEthernetb/0/1
# Bring up the WAN interfaces
# Assign IP address for WAN Interfaces
vppctl lcp create WAN00 host-if hWAN00
vppctl set int ip address WAN00 10.130.0.1/24
vppctl set int ip address WAN00 2001::1/64
vppctl lcp create WAN01 host-if hWAN01
vppctl set int ip address WAN01 10.140.0.1/24
vppctl set int ip address WAN01 2002::1/64
vppctl set int state WAN00 up
vppctl set int state WAN01 up
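# Sanity check: addresses should be visible both in VPP and on the host taps
vppctl show int addr
ip -br addr show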
# Create a loopback interface
vppctl create loopback interface mac aa:bb:cc:00:00:01 instance 0
vppctl lcp create loop0 host-if vxlanloop0
vppctl set int ip address loop0 10.0.1.0/30
vppctl set int state loop0 up
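# At this point all three lcp pairs (hWAN00, hWAN01, vxlanloop0) should be listed
vppctl show lcp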
# As the loopback interface doesn't learn MAC addresses via ARP, add the remote IP & MAC manually
vppctl set ip neighbor loop0 10.0.2.0 aa:bb:cc:00:00:02
#expect Babel to create this route after learning it from the VPP2 neighbor
#vppctl ip route add 10.0.2.0/32 via 10.130.0.2 WAN00
#vppctl ip route add 10.0.2.0/32 via 10.140.0.2 WAN01
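# For reference, how I'd verify the learned route on both sides once babeld installs it:
# ip route show proto babel            (kernel route installed by babeld)
# vppctl show ip fib 10.0.2.0/32       (should resolve via 10.130.0.2 / 10.140.0.2 once synced to VPP)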
# Create or overwrite the babeld conf file
cat << 'EOF' > "/etc/babeld.conf"
# For more information about this configuration file, refer to
# babeld(8)
# babeld -c /etc/babeld.conf -d 10
# Enable babel on specific interfaces
local-port 33123
interface hWAN00
interface hWAN01
#export-table 100
#default unicast true
# Redistribute connected routes
#redistribute connected
# Redistribute static routes
#redistribute static
# Redistribute a specific IP prefix
redistribute ip 10.0.1.0/30
redistribute local deny
# Optional: Define filter rules (in/out)
# in filter allows controlling what you accept
# out filter allows controlling what you announce
EOF
#BABEL HOST SIDE
ip link set vxlanloop0 up
ip link set hWAN00 up
ip link set hWAN01 up
exit 0
hWAN00 UNKNOWN 10.130.0.1/24 2001::1/64 fe80::227c:14ff:fef4:34bf/64
hWAN01 UP 10.140.0.1/24 2002::1/64 fe80::227c:14ff:fef4:34c0/64
vxlanloop0 UP 10.0.1.0/30 fe80::a8bb:ccff:fe00:1/64
root@VPP1:/home/zwan# ping 2001::2
PING 2001::2(2001::2) 56 data bytes
64 bytes from 2001::2: icmp_seq=1 ttl=64 time=1.54 ms
64 bytes from 2001::2: icmp_seq=2 ttl=64 time=0.171 ms
^C
#RUN babeld
babeld -c /etc/babeld.conf -d 10
My id 22:7c:14:ff:fe:f4:34:b8 seqno 50388
10.0.1.0/30 from 0.0.0.0/0 metric 0 (exported)
Sending hello 7586 (400) to hWAN00.
My id 22:7c:14:ff:fe:f4:34:b8 seqno 50388
10.0.1.0/30 from 0.0.0.0/0 metric 0 (exported)
Sending hello 24590 (400) to hWAN01.
root@VPP1:/home/zwan# vppctl sh ip fib
ipv4-VRF:0, fib_index:0, flow hash:[src dst sport dport proto flowlabel ] epoch:0 flags:none locks:[adjacency:1, default-route:1, ]
0.0.0.0/0
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:1 buckets:1 uRPF:0 to:[0:0]]
[0] [@0]: dpo-drop ip4
0.0.0.0/32
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:2 buckets:1 uRPF:1 to:[0:0]]
[0] [@0]: dpo-drop ip4
10.0.1.0/30
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:27 buckets:1 uRPF:38 to:[0:0]]
[0] [@4]: ipv4-glean: [src:10.0.1.0/30] loop0: mtu:9000 next:3 flags:[] ffffffffffffaabbcc0000010806
10.0.1.0/32
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:29 buckets:1 uRPF:39 to:[0:0]]
[0] [@13]: dpo-receive: 10.0.1.0 on loop0
10.0.1.3/32
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:28 buckets:1 uRPF:37 to:[0:0]]
[0] [@0]: dpo-drop ip4
10.0.2.0/32
UNRESOLVED
10.130.0.0/32
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:18 buckets:1 uRPF:23 to:[0:0]]
[0] [@0]: dpo-drop ip4
10.130.0.0/24
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:17 buckets:1 uRPF:22 to:[0:0]]
[0] [@4]: ipv4-glean: [src:10.130.0.0/24] WAN00: mtu:9000 next:1 flags:[] ffffffffffff207c14f434bf0806
10.130.0.1/32
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:20 buckets:1 uRPF:27 to:[4:336]]
[0] [@13]: dpo-receive: 10.130.0.1 on WAN00
10.130.0.2/32
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:30 buckets:1 uRPF:34 to:[0:0]]
[0] [@5]: ipv4 via 10.130.0.2 WAN00: mtu:9000 next:5 flags:[] 207c14f434b6207c14f434bf0800
10.130.0.255/32
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:19 buckets:1 uRPF:25 to:[0:0]]
[0] [@0]: dpo-drop ip4
10.140.0.0/32
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:24 buckets:1 uRPF:31 to:[0:0]]
[0] [@0]: dpo-drop ip4
10.140.0.0/24
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:23 buckets:1 uRPF:41 to:[0:0]]
[0] [@4]: ipv4-glean: [src:10.140.0.0/24] WAN01: mtu:9000 next:2 flags:[] ffffffffffff207c14f434c00806
10.140.0.1/32
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:26 buckets:1 uRPF:35 to:[0:0]]
[0] [@13]: dpo-receive: 10.140.0.1 on WAN01
10.140.0.255/32
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:25 buckets:1 uRPF:33 to:[0:0]]
[0] [@0]: dpo-drop ip4
224.0.0.0/4
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:4 buckets:1 uRPF:3 to:[0:0]]
[0] [@0]: dpo-drop ip4
240.0.0.0/4
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:3 buckets:1 uRPF:2 to:[0:0]]
[0] [@0]: dpo-drop ip4
255.255.255.255/32
unicast-ip4-chain
[@0]: dpo-load-balance: [proto:ip4 index:5 buckets:1 uRPF:4 to:[0:0]]
[0] [@0]: dpo-drop ip4
root@VPP1:/home/zwan# vppctl sh ip mfib
ipv4-VRF:0, fib_index:0 flags:none
(*, 0.0.0.0/0): flags:Drop,
Interfaces:
multicast-ip4-chain
[@0]: dpo-drop ip4
(*, 224.0.0.1/32):
Interfaces:
WAN01: Accept,
loop0: Accept,
WAN00: Accept,
multicast-ip4-chain
[@1]: dpo-replicate: [index:0 buckets:1 flags:[has-local ] to:[0:0]]
[0] [@1]: dpo-receive
(*, 224.0.0.2/32):
Interfaces:
WAN01: Accept,
loop0: Accept,
WAN00: Accept,
multicast-ip4-chain
[@1]: dpo-replicate: [index:1 buckets:1 flags:[has-local ] to:[0:0]]
[0] [@1]: dpo-receive
root@VPP1:/home/zwan# vppctl sh ip6 fib
ipv6-VRF:0, fib_index:0, flow hash:[src dst sport dport proto flowlabel ] epoch:0 flags:none locks:[adjacency:1, default-route:1, ]
::/0
unicast-ip6-chain
[@0]: dpo-load-balance: [proto:ip6 index:6 buckets:1 uRPF:5 to:[0:0]]
[0] [@0]: dpo-drop ip6
2001::/64
unicast-ip6-chain
[@0]: dpo-load-balance: [proto:ip6 index:15 buckets:1 uRPF:20 to:[0:0]]
[0] [@4]: ipv6-glean: [src:2001::/64] WAN00: mtu:9000 next:2 flags:[] ffffffffffff207c14f434bf86dd
2001::1/128
unicast-ip6-chain
[@0]: dpo-load-balance: [proto:ip6 index:16 buckets:1 uRPF:21 to:[3:280]]
[0] [@20]: dpo-receive: 2001::1 on WAN00
2001::2/128
unicast-ip6-chain
[@0]: dpo-load-balance: [proto:ip6 index:31 buckets:1 uRPF:40 to:[0:0]]
[0] [@5]: ipv6 via 2001::2 WAN00: mtu:9000 next:5 flags:[] 207c14f434b6207c14f434bf86dd
2002::/64
unicast-ip6-chain
[@0]: dpo-load-balance: [proto:ip6 index:21 buckets:1 uRPF:26 to:[0:0]]
[0] [@4]: ipv6-glean: [src:2002::/64] WAN01: mtu:9000 next:3 flags:[] ffffffffffff207c14f434c086dd
2002::1/128
unicast-ip6-chain
[@0]: dpo-load-balance: [proto:ip6 index:22 buckets:1 uRPF:29 to:[0:0]]
[0] [@20]: dpo-receive: 2002::1 on WAN01
fe80::/10
unicast-ip6-chain
[@0]: dpo-load-balance: [proto:ip6 index:7 buckets:1 uRPF:6 to:[0:0]]
[0] [@14]: ip6-link-local
root@VPP1:/home/zwan# vppctl sh ip6 mfib
ipv6-VRF:0, fib_index 0
(*, ::/0): flags:Drop,
Interfaces:
multicast-ip6-chain
[@0]: dpo-drop ip6
(*, ff02::1/128):
Interfaces:
WAN01: Accept,
WAN00: Accept,
multicast-ip6-chain
[@1]: dpo-replicate: [index:4 buckets:1 flags:[has-local ] to:[0:0]]
[0] [@1]: dpo-receive
(*, ff02::2/128):
Interfaces:
WAN01: Accept,
WAN00: Accept,
multicast-ip6-chain
[@1]: dpo-replicate: [index:3 buckets:1 flags:[has-local ] to:[0:0]]
[0] [@1]: dpo-receive
(*, ff02::16/128):
Interfaces:
WAN01: Accept,
WAN00: Accept,
multicast-ip6-chain
[@1]: dpo-replicate: [index:5 buckets:1 flags:[has-local ] to:[17:1972]]
[0] [@1]: dpo-receive
(*, ff02::1:ff00:0/104):
Interfaces:
WAN01: Accept,
WAN00: Accept,
multicast-ip6-chain
[@1]: dpo-replicate: [index:2 buckets:1 flags:[has-local ] to:[4:288]]
[0] [@1]: dpo-receive
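Note: there is no (*, ff02::1:6/128) entry in the dump above (this dump may pre-date the "ip mroute add" attempt); the specific group can be checked with:
vppctl show ip6 mfib ff02::1:6/128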
Trace at VPP1:
-------------
root@VPP1:/home/zwan# ./showtrace.sh
------------------- Start of thread 0 vpp_main -------------------
Packet 1
00:04:24:142986: virtio-input
virtio: hw_if_index 5 next-index 4 vring 0 len 74
hdr: flags 0x00 gso_type 0x00 hdr_len 0 gso_size 0 csum_start 0 csum_offset 0 num_buffers 1
00:04:24:143000: ethernet-input
frame: flags 0x1, hw-if-index 5, sw-if-index 5
IP6: 20:7c:14:f4:34:bf -> 33:33:00:01:00:06
00:04:24:143014: ip6-input
UDP: fe80::227c:14ff:fef4:34bf -> ff02::1:6
tos 0xc0, flow label 0xf2ebe, hop limit 1, payload length 20
00:04:24:143023: linux-cp-xc-ip6
lcp-xc: itf:3 adj:1
00:04:24:143029: WAN00-output
WAN00 flags 0x00180005
IP6: 20:7c:14:f4:34:bf -> 33:33:00:01:00:06
UDP: fe80::227c:14ff:fef4:34bf -> ff02::1:6
tos 0xc0, flow label 0xf2ebe, hop limit 1, payload length 20
00:04:24:143039: WAN00-tx
WAN00 tx queue 0
buffer 0x9e453: current data 0, length 74, buffer-pool 0, ref-count 1, trace handle 0x0
l2-hdr-offset 0 l3-hdr-offset 14
PKT MBUF: port 65535, nb_segs 1, pkt_len 74
buf_len 2176, data_len 74, ol_flags 0x0, data_off 128, phys_addr 0x2791540
packet_type 0x0 l2_len 0 l3_len 0 outer_l2_len 0 outer_l3_len 0
rss 0x0 fdir.hi 0x0 fdir.lo 0x0
IP6: 20:7c:14:f4:34:bf -> 33:33:00:01:00:06
UDP: fe80::227c:14ff:fef4:34bf -> ff02::1:6
tos 0xc0, flow label 0xf2ebe, hop limit 1, payload length 20
Packet 2
00:04:24:608684: dpdk-input
WAN00 rx queue 0
buffer 0x97a24: current data 0, length 74, buffer-pool 0, ref-count 1, trace handle 0x1
ext-hdr-valid
PKT MBUF: port 2, nb_segs 1, pkt_len 74
buf_len 2176, data_len 74, ol_flags 0x180, data_off 128, phys_addr 0x25e8980
packet_type 0x241 l2_len 0 l3_len 0 outer_l2_len 0 outer_l3_len 0
rss 0x0 fdir.hi 0x0 fdir.lo 0x0
Packet Offload Flags
PKT_RX_IP_CKSUM_GOOD (0x0080) IP cksum of RX pkt. is valid
PKT_RX_L4_CKSUM_GOOD (0x0100) L4 cksum of RX pkt. is valid
Packet Types
RTE_PTYPE_L2_ETHER (0x0001) Ethernet packet
RTE_PTYPE_L3_IPV6 (0x0040) IPv6 packet without extension headers
RTE_PTYPE_L4_UDP (0x0200) UDP packet
IP6: 20:7c:14:f4:34:b6 -> 33:33:00:01:00:06
UDP: fe80::227c:14ff:fef4:34b6 -> ff02::1:6
tos 0xc0, flow label 0x3d12, hop limit 1, payload length 20
00:04:24:608699: ethernet-input
frame: flags 0x3, hw-if-index 3, sw-if-index 3
IP6: 20:7c:14:f4:34:b6 -> 33:33:00:01:00:06
00:04:24:608709: ip6-input
UDP: fe80::227c:14ff:fef4:34b6 -> ff02::1:6
tos 0xc0, flow label 0x3d12, hop limit 1, payload length 20
00:04:24:608712: ip6-mfib-forward-lookup
fib 0 entry 3
00:04:24:608718: ip6-mfib-forward-rpf
entry 3 itf -1 flags
00:04:24:608721: ip6-drop
fib:0 adj:3 flow:0x00000000
UDP: fe80::227c:14ff:fef4:34b6 -> ff02::1:6
tos 0xc0, flow label 0x3d12, hop limit 1, payload length 20
00:04:24:608727: error-drop
rx:WAN00
00:04:24:608732: drop
ip6-input: Multicast RPF check failed
------
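(showtrace.sh is just a small wrapper around VPP's packet tracer, roughly:
vppctl clear trace
vppctl trace add virtio-input 50
vppctl trace add dpdk-input 50
sleep 5
vppctl show trace
)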
Without VPP, babeld worked over host interfaces:
My id 22:7c:14:ff:fe:f4:34:b8 seqno 50393
Neighbour fe80::41bc:6679:8288:7ba9 dev eno4 reach ff80 ureach 0000 rxcost 96 txcost 96 rtt 0.000 rttcost 0 chan -2.
Neighbour fe80::699:5600:b589:efa8 dev eno3 reach ff00 ureach 0000 rxcost 96 txcost 96 rtt 0.000 rttcost 0 chan -2.
172.1.0.0/24 from 0.0.0.0/0 metric 0 (exported)
172.2.0.0/24 metric 96 (154) refmetric 0 id 22:7c:14:ff:fe:f4:34:af seqno 28661 age 11 via eno4 neigh fe80::41bc:6679:8288:7ba9 nexthop 10.140.0.2 (installed)
172.2.0.0/24 metric 96 (205) refmetric 0 id 22:7c:14:ff:fe:f4:34:af seqno 28661 age 4 via eno3 neigh fe80::699:5600:b589:efa8 nexthop 10.130.0.2 (feasible)
Received hello 1327 (400) from fe80::41bc:6679:8288:7ba9 on eno4.
ip r
172.2.0.0/24 via 10.140.0.2 dev eno4 proto babel onlink
#**********VPP2 (Ubuntu 22.04.5 LTS) ***************
vppctl sh version
#-> vpp v25.10-rc0~210-g8941f9ade built by root on VPP2 at 2025-08-06T17:20:06
#vppctl lcp default
vppctl lcp lcp-sync enable
vppctl lcp lcp-sync on
vppctl lcp lcp-auto-subint enable
vppctl lcp lcp-auto-subint on
#WAN00 <- TenGigabitEthernetc/0/0
#WAN01 <- TenGigabitEthernetc/0/1
#LAN00 <- TenGigabitEthernetb/0/0
#LAN01 <- TenGigabitEthernetb/0/1
# Bring up the WAN interfaces
# Assign IP address for WAN Interfaces
vppctl lcp create WAN00 host-if hWAN00
vppctl set int ip address WAN00 10.130.0.2/24
vppctl set int ip address WAN00 2001::2/64
vppctl lcp create WAN01 host-if hWAN01
vppctl set int ip address WAN01 10.140.0.2/24
vppctl set int ip address WAN01 2002::2/64
vppctl set int state WAN00 up
vppctl set int state WAN01 up
# Create a loopback interface
vppctl create loopback interface mac aa:bb:cc:00:00:02 instance 0
vppctl lcp create loop0 host-if vxlanloop0
vppctl set int ip address loop0 10.0.2.0/30
vppctl set int state loop0 up
# As the loopback interface doesn't learn MAC addresses via ARP, add the remote IP & MAC manually
vppctl set ip neighbor loop0 10.0.1.0 aa:bb:cc:00:00:01
#expect Babel to create this route after learning it from the VPP1 neighbor
#vppctl ip route add 10.0.1.0/32 via 10.130.0.1 WAN00
#vppctl ip route add 10.0.1.0/32 via 10.140.0.1 WAN01
# Create or overwrite the babeld conf file
cat << 'EOF' > "/etc/babeld.conf"
# For more information about this configuration file, refer to
# babeld(8)
# babeld -c /etc/babeld.conf -d 10
# Enable babel on specific interfaces
local-port 33123
interface hWAN00
interface hWAN01
#export-table 100
#default unicast true
# Redistribute connected routes
#redistribute connected
# Redistribute static routes
#redistribute static
# Redistribute a specific IP prefix
redistribute ip 10.0.2.0/30
redistribute local deny
# Optional: Define filter rules (in/out)
# in filter allows controlling what you accept
# out filter allows controlling what you announce
EOF