// K8sNP2CP converts a Kubernetes v1beta1 NetworkPolicy into a Cilium policy
// node. It returns the name of the parent node the new node should be
// attached to, falling back to the global root when no parent annotation is
// set.
func K8sNP2CP(np *v1beta1.NetworkPolicy) (string, *policy.Node, error) {
	var parentNodeName, policyName string
	if np.Annotations[common.K8sAnnotationParentName] == "" {
		parentNodeName = common.GlobalLabelPrefix
	} else {
		parentNodeName = np.Annotations[common.K8sAnnotationParentName]
	}
	if np.Annotations[common.K8sAnnotationName] == "" {
		policyName = np.Name
	} else {
		policyName = np.Annotations[common.K8sAnnotationName]
	}

	allowRules := []policy.AllowRule{}
	for _, iRule := range np.Spec.Ingress {
		if iRule.From == nil {
			continue
		}
		for _, rule := range iRule.From {
			if rule.PodSelector != nil {
				// Allow traffic from pods matching the peer's pod selector.
				for k, v := range rule.PodSelector.MatchLabels {
					l := labels.NewLabel(k, v, "")
					if l.Source == common.CiliumLabelSource {
						l.Source = common.K8sLabelSource
					}
					allowRules = append(allowRules, policy.AllowRule{
						Action: policy.ALWAYS_ACCEPT,
						Label:  *l,
					})
				}
			} else if rule.NamespaceSelector != nil {
				// Allow traffic from any pod in the matching namespaces.
				for k := range rule.NamespaceSelector.MatchLabels {
					l := labels.NewLabel(common.K8sPodNamespaceLabel, k, common.K8sLabelSource)
					allowRules = append(allowRules, policy.AllowRule{
						Action: policy.ALWAYS_ACCEPT,
						Label:  *l,
					})
				}
			}
		}
	}

	// The resulting rule covers all pods selected by the NetworkPolicy's
	// pod selector.
	coverageLbls := labels.Map2Labels(np.Spec.PodSelector.MatchLabels, common.K8sLabelSource)
	pn := policy.NewNode(policyName, nil)
	pn.Rules = []policy.PolicyRule{
		&policy.PolicyRuleConsumers{
			Coverage: coverageLbls.ToSlice(),
			Allow:    allowRules,
		},
	}
	return parentNodeName, pn, nil
}
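// Hypothetical translation example (the selector values and the exact
// v1beta1 field layout in this sketch are illustrative, not taken from the
// source): a NetworkPolicy selecting role=db pods and allowing ingress from
// role=frontend pods would translate roughly as:
//
//	np := &v1beta1.NetworkPolicy{
//		ObjectMeta: v1.ObjectMeta{Name: "frontend-to-db"},
//		Spec: v1beta1.NetworkPolicySpec{
//			PodSelector: unversioned.LabelSelector{
//				MatchLabels: map[string]string{"role": "db"},
//			},
//			Ingress: []v1beta1.NetworkPolicyIngressRule{{
//				From: []v1beta1.NetworkPolicyPeer{{
//					PodSelector: &unversioned.LabelSelector{
//						MatchLabels: map[string]string{"role": "frontend"},
//					},
//				}},
//			}},
//		},
//	}
//
//	parent, node, _ := K8sNP2CP(np)
//	// parent == common.GlobalLabelPrefix (no parent annotation set);
//	// node carries one PolicyRuleConsumers covering role=db with an
//	// ALWAYS_ACCEPT allow rule for role=frontend.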
// policyAdd inserts node into the policy tree at the given dotted path,
// creating missing intermediate nodes along the way.
func (d *Daemon) policyAdd(path string, node *policy.Node) error {
	var (
		currNode, parentNode *policy.Node
		err                  error
	)

	// Normalize path and node name so that node.Name is always the leaf
	// element and path points at its parent.
	if node.Name == "" {
		path, node.Name = policy.SplitNodePath(path)
	} else if strings.Contains(node.Name, ".") && node.Name != common.GlobalLabelPrefix {
		path, node.Name = policy.SplitNodePath(path + "." + node.Name)
	}

	currNode, parentNode, err = d.findNode(path)
	if err != nil {
		return err
	}
	log.Debugf("Policy currNode %+v, parentNode %+v", currNode, parentNode)

	// eg. path = io.cilium.lizards.foo.db and io.cilium.lizards doesn't exist
	if (currNode == nil && parentNode == nil) ||
		// eg. path = io.cilium.lizards.foo and io.cilium.lizards.foo doesn't exist
		(currNode == nil && parentNode != nil) {
		// Recursively create the missing parent, then look the path up again.
		pn := policy.NewNode("", nil)
		if err := d.policyAdd(path, pn); err != nil {
			return err
		}
		currNode, parentNode, err = d.findNode(path)
		if err != nil {
			return err
		}
		log.Debugf("Policy currNode %+v, parentNode %+v", currNode, parentNode)
	}

	// eg. path = io.cilium
	if currNode != nil && parentNode == nil {
		if currNode.Name == node.Name {
			node.Path()
			if err := currNode.Merge(node); err != nil {
				return err
			}
		} else {
			if err := currNode.AddChild(node.Name, node); err != nil {
				return err
			}
		}
	} else if currNode != nil && parentNode != nil {
		// eg. path = io.cilium.lizards.db exists
		if err := currNode.AddChild(node.Name, node); err != nil {
			return err
		}
	}
	return nil
}
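// Path normalization sketch (names are hypothetical, and this assumes
// policy.SplitNodePath splits a dotted path at its last element): adding a
// node named "lizards.db" under "io.cilium" is rewritten before the lookup
// so that the leaf name and parent path line up:
//
//	path, name := policy.SplitNodePath("io.cilium" + "." + "lizards.db")
//	// path == "io.cilium.lizards", name == "db"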
// PolicyDelete deletes the policy set in path from the policy tree.
func (d *Daemon) PolicyDelete(path string) error {
	log.Debugf("Policy Delete Request: %s", path)

	d.policyMU.Lock()
	node, parent, err := d.findNode(path)
	if err != nil {
		d.policyMU.Unlock()
		return err
	}
	if parent == nil {
		d.policy.Root = policy.NewNode(common.GlobalLabelPrefix, nil)
		d.policy.Root.Path()
	} else {
		delete(parent.Children, node.Name)
	}
	d.policyMU.Unlock()

	d.triggerPolicyUpdates([]uint32{})
	return nil
}
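// Deletion semantics sketch (paths are hypothetical): a non-root subtree is
// unlinked from its parent's Children map, while deleting the root replaces
// the whole tree with a fresh, empty global node:
//
//	d.PolicyDelete("io.cilium.lizards") // delete(parent.Children, "lizards")
//	d.PolicyDelete("io.cilium")         // parent == nil: reset d.policy.Root
//
// Either way triggerPolicyUpdates is invoked, presumably so that endpoints
// pick up the changed policy.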
// NewDaemon creates and returns a new Daemon with the parameters set in c.
func NewDaemon(c *Config) (*Daemon, error) {
	if c == nil {
		return nil, fmt.Errorf("Configuration is nil")
	}

	// Pick the kvstore backend: Consul takes precedence, then etcd.
	var kvClient kvstore.KVClient
	if c.ConsulConfig != nil {
		consulClient, err := kvstore.NewConsulClient(c.ConsulConfig)
		if err != nil {
			return nil, err
		}
		kvClient = consulClient
	} else if c.EtcdCfgPath != "" || c.EtcdConfig != nil {
		etcdClient, err := kvstore.NewEtcdClient(c.EtcdConfig, c.EtcdCfgPath)
		if err != nil {
			return nil, err
		}
		kvClient = etcdClient
	} else {
		return nil, fmt.Errorf("empty KVStore configuration provided")
	}

	dockerClient, err := createDockerClient(c.DockerEndpoint)
	if err != nil {
		return nil, err
	}

	rootNode := policy.Tree{
		Root: policy.NewNode(common.GlobalLabelPrefix, nil),
	}
	rootNode.Root.Path()

	lb := types.NewLoadBalancer()

	d := Daemon{
		conf:                      c,
		kvClient:                  kvClient,
		dockerClient:              dockerClient,
		containers:                make(map[string]*types.Container),
		endpoints:                 make(map[uint16]*endpoint.Endpoint),
		endpointsDocker:           make(map[string]*endpoint.Endpoint),
		endpointsDockerEP:         make(map[string]*endpoint.Endpoint),
		endpointsLearning:         make(map[uint16]labels.LearningLabel),
		endpointsLearningRegister: make(chan labels.LearningLabel, 1),
		loadBalancer:              lb,
		cacheIteration:            1,
		reservedConsumables:       make([]*policy.Consumable, 0),
		policy:                    rootNode,
		uiTopo:                    types.NewUITopo(),
		uiListeners:               make(map[*Conn]bool),
		registerUIListener:        make(chan *Conn, 1),
	}

	if c.IsK8sEnabled() {
		d.k8sClient, err = createK8sClient(c.K8sEndpoint, c.K8sCfgPath)
		if err != nil {
			return nil, err
		}
	}

	if nodeName := os.Getenv(common.K8sEnvNodeNameSpec); nodeName != "" {
		// Try to retrieve the node's CIDR from the k8s configuration.
		if err := d.useK8sNodeCIDR(nodeName); err != nil {
			return nil, err
		}
	}

	// Set up ipam conf after init() because we might be running
	// d.conf.KVStoreIPv4Registration
	if d.ipamConf, err = d.conf.createIPAMConf(); err != nil {
		return nil, err
	}

	if err = d.init(); err != nil {
		log.Errorf("Error while initializing daemon: %s\n", err)
		return nil, err
	}

	if d.conf.IsUIEnabled() {
		d.ListenBuildUIEvents()
	}

	if c.RestoreState {
		if err := d.SyncState(common.CiliumPath, true); err != nil {
			log.Warningf("Error while recovering endpoints: %s\n", err)
		}
		if err := d.SyncLBMap(); err != nil {
			log.Warningf("Error while recovering load balancer maps: %s\n", err)
		}
	}

	d.endpointsMU.Lock()
	defer d.endpointsMU.Unlock()

	// Scan for and clean up stale BPF maps left behind by endpoints that no
	// longer exist.
	walker := func(path string, _ os.FileInfo, _ error) error {
		return d.staleMapWalker(path)
	}
	if err := filepath.Walk(common.BPFCiliumMaps, walker); err != nil {
		log.Warningf("Error while scanning for stale maps: %s", err)
	}

	return &d, nil
}
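// Minimal construction sketch (hypothetical values; assumes ConsulConfig is
// the standard Consul client configuration, and shows only the Config fields
// referenced above): a Consul-backed daemon that restores state from a
// previous run.
//
//	conf := &Config{
//		ConsulConfig:   &consulAPI.Config{Address: "127.0.0.1:8500"},
//		DockerEndpoint: "unix:///var/run/docker.sock",
//		RestoreState:   true,
//	}
//	d, err := NewDaemon(conf)
//	if err != nil {
//		log.Fatalf("Unable to create daemon: %s", err)
//	}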