Hi,
Sorry if this is the wrong list; I'm trying to find some help.
I am trying to set up 2 KVM hosts using Debian 9, connecting to a 3-node CentOS 7.4 Gluster cluster. To get the bandwidth I have bonded 2 NICs at each end. However, while the network works fine for the Debian VM host itself, the VM guests attached to br0 cannot connect or get an IP via DHCP. (The same setup on the CentOS end works fine, however.)
On the Debian host, if I attach a guest to br1, which is a bridge onto a single NIC port, it works fine. So my problem seems to be at the bonding layer for libvirt/KVM?
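I can gather more diagnostics if that helps; I assume the right places to look are roughly these (guessing here that the guest's tap device shows up as vnet0):

cat /proc/net/bonding/bond0             # bond mode and slave status
brctl show br0                          # is bond0 actually attached to br0?
bridge link                             # port state: forwarding vs blocking
tcpdump -ni bond0 port 67 or port 68    # do the guest's DHCP requests hit the wire?
tcpdump -ni vnet0 port 67 or port 68    # do any replies make it back to the tap?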
Here is my /etc/network/interfaces config; I cannot see the problem myself. The VM guest's disk is on the Gluster cluster and it boots fine, just with no networking.
Any ideas what I am doing wrong, please? Or a better group/list to go ask in?

auto eno1
iface eno1 inet manual
#iface eno1 inet static
    bond-master bond0
    #address 192.168.1.62
    #netmask 255.255.255.0
    #gateway 192.168.1.1
    #dns-nameservers 192.168.1.241 192.168.1.104
    # mtu 9000

auto enp1s0f1
iface enp1s0f1 inet manual
    bond-master bond0
    # mtu 9000

auto bond0
iface bond0 inet static
    bond-miimon 100
    bond-mode 6
    bond-updelay 200
    bond-xmit_hash_policy layer3+4
    bond-lacp-rate 1
    # mtu 9000
    # address 192.168.1.62
    # netmask 255.255.255.0
    # gateway 192.168.1.1
    # dns-nameservers 192.168.1.241 192.168.1.104
    slaves eno1 enp1s0f1
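    # Notes to self while debugging: mode 6 is balance-alb, which does its
    # receive balancing via ARP negotiation and rewriting slave MACs, so
    # maybe that fights with the bridge on top? As far as I know,
    # bond-lacp-rate only applies to 802.3ad (mode 4), so it is probably a
    # no-op here. And since br0 holds the IP, should this be "inet manual"
    # rather than "inet static" with no address?
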
auto br0
iface br0 inet static
    address 192.168.1.62
    netmask 255.255.255.0
    gateway 192.168.1.1
    dns-nameservers 192.168.1.241 192.168.1.104
    bridge_ports bond0
    #bridge_ports eno1 enp1s0f1
    bridge_stp on
    bridge_fd 0.0
    # mtu 9000
    #post-up ifconfig bond0 mtu 9000
    #post-up ip link set dev bond0 mtu 9000
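    # (Also not sure about bridge_stp on together with bridge_fd 0.0;
    # with STP enabled the port goes through listening/learning before it
    # forwards, which I guess could delay things long enough to eat the
    # guest's first DHCP attempts?)
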
#auto bond1
#iface bond1 inet static
#    address 10.100.200.62
#    netmask 255.255.255.0
#    bond_slaves enp3s0f1
#    bond_primary enp3s0f1
#    mtu 9000

auto br1
iface br1 inet static
    address 10.100.200.62
    netmask 255.255.255.0
    gateway 10.100.200.1
    bridge_ports enp3s0f1
    bridge_stp on
    bridge_fd 0.0

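For what it's worth, if mode 6 turns out to be the culprit, my fallback plan is active-backup, something like this untested sketch (it gives up the aggregate bandwidth, and 802.3ad/mode 4 would instead need LACP configured on the switch), with br0 left unchanged on top:

auto bond0
iface bond0 inet manual
    bond-mode active-backup
    bond-miimon 100
    bond-primary eno1
    slaves eno1 enp1s0f1

Does that sound sane, or is there something else in the config above that jumps out?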