func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode componentconfig.HairpinMode, nonMasqueradeCIDR string) error {
	plugin.host = host
	plugin.hairpinMode = hairpinMode
	plugin.nonMasqueradeCIDR = nonMasqueradeCIDR
	plugin.cniConfig = &libcni.CNIConfig{
		Path: []string{DefaultCNIDir, plugin.vendorDir},
	}

	if link, err := findMinMTU(); err == nil {
		plugin.MTU = link.MTU
		glog.V(5).Infof("Using interface %s MTU %d as bridge MTU", link.Name, link.MTU)
	} else {
		glog.Warningf("Failed to find default bridge MTU: %v", err)
	}

	// Since this plugin uses a Linux bridge, setting bridge-nf-call-iptables=1
	// is necessary to ensure kube-proxy functions correctly.
	//
	// This will return an error on older kernel versions (< 3.18) where the module
	// was built-in; we simply ignore the error here. A better approach would be
	// to check the kernel version in the future.
	plugin.execer.Command("modprobe", "br-netfilter").CombinedOutput()
	err := utilsysctl.SetSysctl(sysctlBridgeCallIptables, 1)
	if err != nil {
		glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIptables, err)
	}

	plugin.loConfig, err = libcni.ConfFromBytes([]byte(`{
  "cniVersion": "0.1.0",
  "name": "kubenet-loopback",
  "type": "loopback"
}`))
	if err != nil {
		return fmt.Errorf("Failed to generate loopback config: %v", err)
	}

	plugin.nsenterPath, err = plugin.execer.LookPath("nsenter")
	if err != nil {
		return fmt.Errorf("Failed to find nsenter binary: %v", err)
	}

	// Need to SNAT outbound traffic from cluster
	if err = plugin.ensureMasqRule(); err != nil {
		return err
	}
	return nil
}
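// Note: findMinMTU is not part of this excerpt. For illustration only, a minimal
// sketch of such a helper is given below, assuming it scans the host's interfaces
// and returns the one with the smallest MTU among interfaces that are up and are
// neither loopback nor point-to-point. The bounds and error text here are
// assumptions, not the file's actual implementation.
func findMinMTUSketch() (*net.Interface, error) {
	intfs, err := net.Interfaces()
	if err != nil {
		return nil, err
	}

	mtu := 999999
	defIntfIndex := -1
	for i, intf := range intfs {
		// Skip interfaces that are down, loopback, or point-to-point.
		if (intf.Flags&net.FlagUp) != 0 && intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) == 0 {
			if intf.MTU < mtu {
				mtu = intf.MTU
				defIntfIndex = i
			}
		}
	}

	if mtu >= 999999 || mtu < 576 || defIntfIndex < 0 {
		return nil, fmt.Errorf("no suitable interface found for bridge MTU")
	}

	return &intfs[defIntfIndex], nil
}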
func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interface{}) {
	if name != network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE {
		return
	}

	plugin.mu.Lock()
	defer plugin.mu.Unlock()

	podCIDR, ok := details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR].(string)
	if !ok {
		glog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE)
		return
	}

	if plugin.netConfig != nil {
		glog.V(5).Infof("Ignoring subsequent pod CIDR update to %s", podCIDR)
		return
	}

	glog.V(5).Infof("PodCIDR is set to %q", podCIDR)
	_, cidr, err := net.ParseCIDR(podCIDR)
	if err == nil {
		setHairpin := plugin.hairpinMode == componentconfig.HairpinVeth
		// Set bridge address to first address in IPNet
		cidr.IP.To4()[3] += 1

		json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.MTU, network.DefaultInterfaceName, setHairpin, podCIDR, cidr.IP.String())
		glog.V(2).Infof("CNI network config set to %v", json)
		plugin.netConfig, err = libcni.ConfFromBytes([]byte(json))
		if err == nil {
			glog.V(5).Infof("CNI network config:\n%s", json)

			// Ensure cbr0 has no conflicting addresses; CNI's 'bridge'
			// plugin will bail out if the bridge has an unexpected one
			plugin.clearBridgeAddressesExcept(cidr)
		}
	}

	if err != nil {
		glog.Warningf("Failed to generate CNI network config: %v", err)
	}
}
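// Note: NET_CONFIG_TEMPLATE is not shown in this excerpt. For illustration only,
// a plausible shape of the template is sketched below, inferred from the
// fmt.Sprintf call above: the verbs correspond to the bridge name, MTU, container
// interface name, hairpin mode, pod subnet, and bridge/gateway IP, feeding CNI's
// 'bridge' plugin with 'host-local' IPAM. The real constant in the file may differ.
const netConfigTemplateSketch = `{
  "cniVersion": "0.1.0",
  "name": "kubenet",
  "type": "bridge",
  "bridge": "%s",
  "mtu": %d,
  "addIf": "%s",
  "isGateway": true,
  "ipMasq": false,
  "hairpinMode": %t,
  "ipam": {
    "type": "host-local",
    "subnet": "%s",
    "gateway": "%s",
    "routes": [
      { "dst": "0.0.0.0/0" }
    ]
  }
}`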
func getLoNetwork(vendorDirPrefix string) *cniNetwork {
	loConfig, err := libcni.ConfFromBytes([]byte(`{
  "cniVersion": "0.1.0",
  "name": "cni-loopback",
  "type": "loopback"
}`))
	if err != nil {
		// The hardcoded config above should always be valid and unit tests will
		// catch this
		panic(err)
	}
	cninet := &libcni.CNIConfig{
		Path: []string{vendorCNIDir(vendorDirPrefix, loConfig.Network.Type), DefaultCNIDir},
	}
	loNetwork := &cniNetwork{
		name:          "lo",
		NetworkConfig: loConfig,
		CNIConfig:     cninet,
	}

	return loNetwork
}
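// Note: vendorCNIDir is not part of this excerpt. A minimal sketch is given below,
// assuming it simply maps a vendor prefix and CNI plugin type to a directory that
// is searched ahead of DefaultCNIDir; the exact path layout is an assumption.
func vendorCNIDirSketch(prefix, pluginType string) string {
	// e.g. "<prefix>/opt/<pluginType>/bin" (hypothetical layout)
	return fmt.Sprintf("%s/opt/%s/bin", prefix, pluginType)
}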