func cmdAdd(args *skel.CmdArgs) error {
	n, err := loadConf(args.StdinData)
	if err != nil {
		return err
	}

	netns, err := ns.GetNS(args.Netns)
	if err != nil {
		return fmt.Errorf("failed to open netns %q: %v", args.Netns, err)
	}
	defer netns.Close()

	if err = createMacvlan(n, args.IfName, netns); err != nil {
		return err
	}

	// run the IPAM plugin and get back the config to apply
	result, err := ipam.ExecAdd(n.IPAM.Type, args.StdinData)
	if err != nil {
		return err
	}
	if result.IP4 == nil {
		return errors.New("IPAM plugin returned missing IPv4 config")
	}

	err = netns.Do(func(_ ns.NetNS) error {
		return ipam.ConfigureIface(args.IfName, result)
	})
	if err != nil {
		return err
	}

	result.DNS = n.DNS
	return result.Print()
}
func cmdAdd(args *skel.CmdArgs) error {
	conf := NetConf{}
	if err := json.Unmarshal(args.StdinData, &conf); err != nil {
		return fmt.Errorf("failed to load netconf: %v", err)
	}

	if err := ip.EnableIP4Forward(); err != nil {
		return fmt.Errorf("failed to enable forwarding: %v", err)
	}

	// run the IPAM plugin and get back the config to apply
	result, err := ipam.ExecAdd(conf.IPAM.Type, args.StdinData)
	if err != nil {
		return err
	}
	if result.IP4 == nil {
		return errors.New("IPAM plugin returned missing IPv4 config")
	}

	hostVethName, err := setupContainerVeth(args.Netns, args.IfName, conf.MTU, result)
	if err != nil {
		return err
	}

	if err = setupHostVeth(hostVethName, result.IP4); err != nil {
		return err
	}

	if conf.IPMasq {
		chain := utils.FormatChainName(conf.Name, args.ContainerID)
		comment := utils.FormatComment(conf.Name, args.ContainerID)
		if err = ip.SetupIPMasq(&result.IP4.IP, chain, comment); err != nil {
			return err
		}
	}

	result.DNS = conf.DNS
	return result.Print()
}
func cmdAdd(args *skel.CmdArgs) error {
	// Unmarshal the network config, and perform validation
	conf := NetConf{}
	if err := json.Unmarshal(args.StdinData, &conf); err != nil {
		return fmt.Errorf("failed to load netconf: %v", err)
	}

	ConfigureLogging(conf.LogLevel)

	workload, orchestrator, err := GetIdentifiers(args)
	if err != nil {
		return err
	}
	logger := CreateContextLogger(workload)

	// Allow the hostname to be overridden by the network config
	if conf.Hostname != "" {
		hostname = conf.Hostname
	}

	logger.WithFields(log.Fields{
		"Orchestrator": orchestrator,
		"Node":         hostname,
	}).Info("Extracted identifiers")

	logger.WithFields(log.Fields{"NetConf": conf}).Info("Loaded CNI NetConf")
	calicoClient, err := CreateClient(conf)
	if err != nil {
		return err
	}

	// Always check if there's an existing endpoint.
	endpoints, err := calicoClient.WorkloadEndpoints().List(api.WorkloadEndpointMetadata{
		Node:         hostname,
		Orchestrator: orchestrator,
		Workload:     workload})
	if err != nil {
		return err
	}
	logger.Debugf("Retrieved endpoints: %v", endpoints)

	var endpoint *api.WorkloadEndpoint
	if len(endpoints.Items) == 1 {
		endpoint = &endpoints.Items[0]
	}
	fmt.Fprintf(os.Stderr, "Calico CNI checking for existing endpoint: %v\n", endpoint)

	// Collect the result in this variable - this is ultimately what gets "returned" by this function by printing
	// it to stdout.
	var result *types.Result

	// If running under Kubernetes then branch off into the kubernetes code, otherwise handle everything in this
	// function.
	if orchestrator == "k8s" {
		if result, err = k8s.CmdAddK8s(args, conf, hostname, calicoClient, endpoint); err != nil {
			return err
		}
	} else {
		// Default CNI behavior - use the CNI network name as the Calico profile.
		profileID := conf.Name

		if endpoint != nil {
			// There is an existing endpoint - no need to create another.
			// This occurs when adding an existing container to a new CNI network.
			// Find the IP address from the endpoint and use that in the response.
			// Don't create the veth or do any networking.
			// Just update the profile on the endpoint. The profile will be created if needed during the
			// profile processing step.
			fmt.Fprintf(os.Stderr, "Calico CNI appending profile: %s\n", profileID)
			endpoint.Spec.Profiles = append(endpoint.Spec.Profiles, profileID)
			result, err = CreateResultFromEndpoint(endpoint)
			logger.WithField("result", result).Debug("Created result from endpoint")
			if err != nil {
				return err
			}
		} else {
			// There's no existing endpoint, so we need to do the following:
			// 1) Call the configured IPAM plugin to get IP address(es)
			// 2) Configure the Calico endpoint
			// 3) Create the veth, configuring it on both the host and container namespace.

			// 1) Run the IPAM plugin and make sure there's an IP address returned.
			logger.WithFields(log.Fields{"paths": os.Getenv("CNI_PATH"), "type": conf.IPAM.Type}).Debug("Looking for IPAM plugin in paths")
			result, err = ipam.ExecAdd(conf.IPAM.Type, args.StdinData)
			logger.WithField("result", result).Info("Got result from IPAM plugin")
			if err != nil {
				return err
			}

			// Parse endpoint labels passed in by Mesos, and store in a map.
			labels := map[string]string{}
			for _, label := range conf.Args.Mesos.NetworkInfo.Labels.Labels {
				labels[label.Key] = label.Value
			}

			// 2) Create the endpoint object
			endpoint = api.NewWorkloadEndpoint()
			endpoint.Metadata.Name = args.IfName
			endpoint.Metadata.Node = hostname
			endpoint.Metadata.Orchestrator = orchestrator
			endpoint.Metadata.Workload = workload
			endpoint.Metadata.Labels = labels
			endpoint.Spec.Profiles = []string{profileID}
			logger.WithField("endpoint", endpoint).Debug("Populated endpoint (without nets)")

			if err = PopulateEndpointNets(endpoint, result); err != nil {
				// Cleanup IP allocation and return the error.
				ReleaseIPAllocation(logger, conf.IPAM.Type, args.StdinData)
				return err
			}
			logger.WithField("endpoint", endpoint).Info("Populated endpoint (with nets)")

			fmt.Fprintf(os.Stderr, "Calico CNI using IPs: %s\n", endpoint.Spec.IPNetworks)

			// 3) Set up the veth
			hostVethName, contVethMac, err := DoNetworking(args, conf, result, logger, "")
			if err != nil {
				// Cleanup IP allocation and return the error.
				ReleaseIPAllocation(logger, conf.IPAM.Type, args.StdinData)
				return err
			}
			logger.WithFields(log.Fields{
				"HostVethName":     hostVethName,
				"ContainerVethMac": contVethMac,
			}).Info("Networked namespace")

			mac, err := net.ParseMAC(contVethMac)
			if err != nil {
				// Cleanup IP allocation and return the error.
				ReleaseIPAllocation(logger, conf.IPAM.Type, args.StdinData)
				return err
			}
			endpoint.Spec.MAC = &cnet.MAC{HardwareAddr: mac}
			endpoint.Spec.InterfaceName = hostVethName
		}

		// Write the endpoint object (either the newly created one, or the updated one with a new ProfileIDs).
		if _, err := calicoClient.WorkloadEndpoints().Apply(endpoint); err != nil {
			// Cleanup IP allocation and return the error.
			ReleaseIPAllocation(logger, conf.IPAM.Type, args.StdinData)
			return err
		}
		logger.WithField("endpoint", endpoint).Info("Wrote endpoint to datastore")
	}

	// Handle profile creation - this is only done if there isn't a specific policy handler.
	if conf.Policy.PolicyType == "" {
		logger.Debug("Handling profiles")
		// Start by checking if the profile already exists. If it already exists then there is no work to do.
		// The CNI plugin never updates a profile.
		exists := true
		_, err = calicoClient.Profiles().Get(api.ProfileMetadata{Name: conf.Name})
		if err != nil {
			_, ok := err.(errors.ErrorResourceDoesNotExist)
			if ok {
				exists = false
			} else {
				// Cleanup IP allocation and return the error.
				ReleaseIPAllocation(logger, conf.IPAM.Type, args.StdinData)
				return err
			}
		}

		if !exists {
			// The profile doesn't exist so needs to be created. The rules vary depending on whether k8s is being used.
			// Under k8s (without full policy support) the rule is permissive and allows all traffic.
			// Otherwise, incoming traffic is only allowed from profiles with the same tag.
			fmt.Fprintf(os.Stderr, "Calico CNI creating profile: %s\n", conf.Name)
			var inboundRules []api.Rule
			if orchestrator == "k8s" {
				inboundRules = []api.Rule{{Action: "allow"}}
			} else {
				inboundRules = []api.Rule{{Action: "allow", Source: api.EntityRule{Tag: conf.Name}}}
			}

			profile := &api.Profile{
				Metadata: api.ProfileMetadata{
					Name: conf.Name,
					Tags: []string{conf.Name},
				},
				Spec: api.ProfileSpec{
					EgressRules:  []api.Rule{{Action: "allow"}},
					IngressRules: inboundRules,
				},
			}

			logger.WithField("profile", profile).Info("Creating profile")

			if _, err := calicoClient.Profiles().Create(profile); err != nil {
				// Cleanup IP allocation and return the error.
				ReleaseIPAllocation(logger, conf.IPAM.Type, args.StdinData)
				return err
			}
		}
	}

	return result.Print()
}
func cmdAdd(args *skel.CmdArgs) error {
	n, err := loadNetConf(args.StdinData)
	if err != nil {
		return err
	}

	br, err := setupBridge(n)
	if err != nil {
		return err
	}

	if err = setupVeth(args.Netns, br, args.IfName, n.MTU, n.HairpinMode); err != nil {
		return err
	}

	// run the IPAM plugin and get back the config to apply
	result, err := ipam.ExecAdd(n.IPAM.Type, args.StdinData)
	if err != nil {
		return err
	}
	if result.IP4 == nil {
		return errors.New("IPAM plugin returned missing IPv4 config")
	}

	if result.IP4.Gateway == nil && n.IsGW {
		result.IP4.Gateway = calcGatewayIP(&result.IP4.IP)
	}

	err = ns.WithNetNSPath(args.Netns, false, func(hostNS *os.File) error {
		return ipam.ConfigureIface(args.IfName, result)
	})
	if err != nil {
		return err
	}

	if n.IsGW {
		gwn := &net.IPNet{
			IP:   result.IP4.Gateway,
			Mask: result.IP4.IP.Mask,
		}

		if err = ensureBridgeAddr(br, gwn); err != nil {
			return err
		}

		if err := ip.EnableIP4Forward(); err != nil {
			return fmt.Errorf("failed to enable forwarding: %v", err)
		}
	}

	if n.IPMasq {
		chain := utils.FormatChainName(n.Name, args.ContainerID)
		comment := utils.FormatComment(n.Name, args.ContainerID)
		if err = ip.SetupIPMasq(ip.Network(&result.IP4.IP), chain, comment); err != nil {
			return err
		}
	}

	result.DNS = n.DNS
	return result.Print()
}
// CmdAddK8s performs the "ADD" operation on a kubernetes pod.
// Having kubernetes code in its own file avoids polluting the mainline code. It's expected that the kubernetes case
// will require more special casing than the mainline code.
func CmdAddK8s(args *skel.CmdArgs, conf utils.NetConf, hostname string, calicoClient *calicoclient.Client, endpoint *api.WorkloadEndpoint) (*types.Result, error) {
	var err error
	var result *types.Result

	k8sArgs := utils.K8sArgs{}
	err = types.LoadArgs(args.Args, &k8sArgs)
	if err != nil {
		return nil, err
	}

	utils.ConfigureLogging(conf.LogLevel)
	workload, orchestrator, err := utils.GetIdentifiers(args)
	if err != nil {
		return nil, err
	}
	logger := utils.CreateContextLogger(workload)
	logger.WithFields(log.Fields{
		"Orchestrator": orchestrator,
		"Node":         hostname,
	}).Info("Extracted identifiers for CmdAddK8s")

	if endpoint != nil {
		// This happens when Docker or the node restarts. K8s calls CNI with the same parameters as before.
		// Do the networking (since the network namespace was destroyed and recreated).
		// There's an existing endpoint - no need to create another. Find the IP address from the endpoint
		// and use that in the response.
		result, err = utils.CreateResultFromEndpoint(endpoint)
		if err != nil {
			return nil, err
		}
		logger.WithField("result", result).Debug("Created result from existing endpoint")
		// If any labels changed whilst the container was being restarted, they will be picked up by the policy
		// controller so there's no need to update the labels here.
	} else {
		client, err := newK8sClient(conf, logger)
		if err != nil {
			return nil, err
		}
		logger.WithField("client", client).Debug("Created Kubernetes client")

		if conf.IPAM.Type == "host-local" && strings.EqualFold(conf.IPAM.Subnet, "usePodCidr") {
			// We've been told to use the "host-local" IPAM plugin with the Kubernetes podCidr for this node.
			// Replace the actual value in the args.StdinData as that's what's passed to the IPAM plugin.
			fmt.Fprintf(os.Stderr, "Calico CNI fetching podCidr from Kubernetes\n")
			var stdinData map[string]interface{}
			if err := json.Unmarshal(args.StdinData, &stdinData); err != nil {
				return nil, err
			}
			podCidr, err := getPodCidr(client, conf, hostname)
			if err != nil {
				return nil, err
			}
			logger.WithField("podCidr", podCidr).Info("Fetched podCidr")
			stdinData["ipam"].(map[string]interface{})["subnet"] = podCidr
			fmt.Fprintf(os.Stderr, "Calico CNI passing podCidr to host-local IPAM: %s\n", podCidr)
			args.StdinData, err = json.Marshal(stdinData)
			if err != nil {
				return nil, err
			}
			logger.WithField("stdin", args.StdinData).Debug("Updated stdin data")
		}

		// Run the IPAM plugin
		logger.Debugf("Calling IPAM plugin %s", conf.IPAM.Type)
		result, err = ipam.ExecAdd(conf.IPAM.Type, args.StdinData)
		if err != nil {
			return nil, err
		}
		logger.Debugf("IPAM plugin returned: %+v", result)

		// Create the endpoint object and configure it.
		endpoint = api.NewWorkloadEndpoint()
		endpoint.Metadata.Name = args.IfName
		endpoint.Metadata.Node = hostname
		endpoint.Metadata.Orchestrator = orchestrator
		endpoint.Metadata.Workload = workload
		endpoint.Metadata.Labels = make(map[string]string)

		// Set the profileID according to whether Kubernetes policy is required.
		// If it's not, then just use the network name (which is the normal behavior),
		// otherwise use one based on the Kubernetes pod's Namespace.
		if conf.Policy.PolicyType == "k8s" {
			endpoint.Spec.Profiles = []string{fmt.Sprintf("k8s_ns.%s", k8sArgs.K8S_POD_NAMESPACE)}
		} else {
			endpoint.Spec.Profiles = []string{conf.Name}
		}

		// Populate the endpoint with the output from the IPAM plugin.
		if err = utils.PopulateEndpointNets(endpoint, result); err != nil {
			// Cleanup IP allocation and return the error.
			utils.ReleaseIPAllocation(logger, conf.IPAM.Type, args.StdinData)
			return nil, err
		}
		logger.WithField("endpoint", endpoint).Info("Populated endpoint")

		// Only attempt to fetch the labels from Kubernetes if the policy type has been set to "k8s".
		// This allows users to run the plugin under Kubernetes without needing it to access the Kubernetes API.
		if conf.Policy.PolicyType == "k8s" {
			labels, err := getK8sLabels(client, k8sArgs)
			if err != nil {
				// Cleanup IP allocation and return the error.
				utils.ReleaseIPAllocation(logger, conf.IPAM.Type, args.StdinData)
				return nil, err
			}
			logger.WithField("labels", labels).Info("Fetched K8s labels")
			endpoint.Metadata.Labels = labels
		}
	}
	fmt.Fprintf(os.Stderr, "Calico CNI using IPs: %s\n", endpoint.Spec.IPNetworks)

	// Whether the endpoint existed or not, the veth needs (re)creating.
	hostVethName := k8sbackend.VethNameForWorkload(workload)
	_, contVethMac, err := utils.DoNetworking(args, conf, result, logger, hostVethName)
	if err != nil {
		// Cleanup IP allocation and return the error.
		logger.Errorf("Error setting up networking: %s", err)
		utils.ReleaseIPAllocation(logger, conf.IPAM.Type, args.StdinData)
		return nil, err
	}

	mac, err := net.ParseMAC(contVethMac)
	if err != nil {
		// Cleanup IP allocation and return the error.
		logger.Errorf("Error parsing MAC (%s): %s", contVethMac, err)
		utils.ReleaseIPAllocation(logger, conf.IPAM.Type, args.StdinData)
		return nil, err
	}
	endpoint.Spec.MAC = &cnet.MAC{HardwareAddr: mac}
	endpoint.Spec.InterfaceName = hostVethName
	logger.WithField("endpoint", endpoint).Info("Added Mac and interface name to endpoint")

	// Write the endpoint object (either the newly created one, or the updated one)
	if _, err := calicoClient.WorkloadEndpoints().Apply(endpoint); err != nil {
		// Cleanup IP allocation and return the error.
		utils.ReleaseIPAllocation(logger, conf.IPAM.Type, args.StdinData)
		return nil, err
	}
	logger.Info("Wrote updated endpoint to datastore")

	return result, nil
}
func cmdAdd(args *skel.CmdArgs) error {
	n, err := loadNetConf(args.StdinData)
	if err != nil {
		return err
	}

	if n.IsDefaultGW {
		n.IsGW = true
	}

	br, err := setupBridge(n)
	if err != nil {
		return err
	}

	netns, err := ns.GetNS(args.Netns)
	if err != nil {
		return fmt.Errorf("failed to open netns %q: %v", args.Netns, err)
	}
	defer netns.Close()

	if err = setupVeth(netns, br, args.IfName, n.MTU, n.HairpinMode); err != nil {
		return err
	}

	// run the IPAM plugin and get back the config to apply
	result, err := ipam.ExecAdd(n.IPAM.Type, args.StdinData)
	if err != nil {
		return err
	}

	// TODO: make this optional when IPv6 is supported
	if result.IP4 == nil {
		return errors.New("IPAM plugin returned missing IPv4 config")
	}

	if result.IP4.Gateway == nil && n.IsGW {
		result.IP4.Gateway = calcGatewayIP(&result.IP4.IP)
	}

	if err := netns.Do(func(_ ns.NetNS) error {
		// set the default gateway if requested
		if n.IsDefaultGW {
			_, defaultNet, err := net.ParseCIDR("0.0.0.0/0")
			if err != nil {
				return err
			}

			for _, route := range result.IP4.Routes {
				if defaultNet.String() == route.Dst.String() {
					if route.GW != nil && !route.GW.Equal(result.IP4.Gateway) {
						return fmt.Errorf(
							"isDefaultGateway ineffective because IPAM sets default route via %q",
							route.GW,
						)
					}
				}
			}

			result.IP4.Routes = append(
				result.IP4.Routes,
				types.Route{Dst: *defaultNet, GW: result.IP4.Gateway},
			)

			// TODO: IPV6
		}

		if err := ipam.ConfigureIface(args.IfName, result); err != nil {
			return err
		}

		if err := ip.SetHWAddrByIP(args.IfName, result.IP4.IP.IP, nil /* TODO IPv6 */); err != nil {
			return err
		}

		return nil
	}); err != nil {
		return err
	}

	if n.IsGW {
		gwn := &net.IPNet{
			IP:   result.IP4.Gateway,
			Mask: result.IP4.IP.Mask,
		}

		if err = ensureBridgeAddr(br, gwn, n.ForceAddress); err != nil {
			return err
		}

		if err := ip.SetHWAddrByIP(n.BrName, gwn.IP, nil /* TODO IPv6 */); err != nil {
			return err
		}

		if err := ip.EnableIP4Forward(); err != nil {
			return fmt.Errorf("failed to enable forwarding: %v", err)
		}
	}

	if n.IPMasq {
		chain := utils.FormatChainName(n.Name, args.ContainerID)
		comment := utils.FormatComment(n.Name, args.ContainerID)
		if err = ip.SetupIPMasq(ip.Network(&result.IP4.IP), chain, comment); err != nil {
			return err
		}
	}

	result.DNS = n.DNS
	return result.Print()
}