// deleteDefaultPolicy deletes the default policy, thus enabling isolation,
// effectively setting DefaultDeny to on.
func deleteDefaultPolicy(o *v1.Namespace, l *KubeListener) {
	var err error
	// TODO this should be ExternalID, not Name...
	policyName := getDefaultPolicyName(o)

	policy := common.Policy{Name: policyName}
	policyURL, err := l.restClient.GetServiceUrl("policy")
	if err != nil {
		log.Errorf("In deleteDefaultPolicy :: Failed to find policy service: %s\n", err)
		log.Errorf("In deleteDefaultPolicy :: Failed to delete default policy: %s\n", policyName)
		return
	}

	policyURL = fmt.Sprintf("%s/find/policies/%s", policyURL, policy.Name)
	err = l.restClient.Get(policyURL, &policy)
	if err != nil {
		// An annotation to set isolation on may be issued multiple times.
		// If it already was reacted to and the default policy was dropped,
		// then we don't do anything.
		log.Debugf("In deleteDefaultPolicy :: Failed to find policy %s: %s, ignoring\n", policyName, err)
		return
	}

	if err = l.deleteNetworkPolicyByID(policy.ID); err != nil {
		log.Errorf("In deleteDefaultPolicy :: Error :: failed to delete policy %d: %s\n", policy.ID, err)
	}
}
// addDefaultPolicy adds the default policy which is to allow
// all ingress.
func addDefaultPolicy(o *v1.Namespace, l *KubeListener) {
	var err error
	// TODO this should be ExternalID, not Name...
	policyName := getDefaultPolicyName(o)

	// Before adding the default policy, see if it may already exist.
	policy := common.Policy{Name: policyName}

	policyURL, err := l.restClient.GetServiceUrl("policy")
	if err != nil {
		log.Errorf("In addDefaultPolicy :: Failed to find policy service: %s\n", err)
		log.Errorf("In addDefaultPolicy :: Failed to add default policy: %s\n", policyName)
		return
	}

	policyURL = fmt.Sprintf("%s/find/policies/%s", policyURL, policy.Name)
	err = l.restClient.Get(policyURL, &policy)
	if err == nil {
		// An annotation to set isolation off may be issued multiple
		// times and we already have the default policy caused by that
		// in place. So we just do not do anything.
		log.Infof("In addDefaultPolicy :: Policy %s (%d) already exists, ignoring\n", policy.Name, policy.ID)
		return
	}

	// Find tenant, to properly set up policy.
	// TODO This really should be by external ID...
	tnt, err := l.resolveTenantByName(o.ObjectMeta.Name)
	if err != nil {
		log.Infof("In addDefaultPolicy :: Error :: failed to resolve tenant %s \n", err)
		return
	}

	romanaPolicy := &common.Policy{
		Direction: common.PolicyDirectionIngress,
		Name:      policyName,
		//	ExternalID: externalID,
		AppliedTo: []common.Endpoint{{TenantNetworkID: &tnt.NetworkID}},
		Ingress: []common.RomanaIngress{
			common.RomanaIngress{
				Peers: []common.Endpoint{{Peer: common.Wildcard}},
				Rules: []common.Rule{{Protocol: common.Wildcard}},
			},
		},
	}

	err = l.addNetworkPolicy(*romanaPolicy)
	switch err := err.(type) {
	default:
		log.Errorf("In addDefaultPolicy :: Error :: failed to create policy %s: %s\n", policyName, err)
	case nil:
		log.Debugf("In addDefaultPolicy :: Successfully created policy %s\n", policyName)
	case common.HttpError:
		if err.StatusCode == http.StatusConflict {
			log.Infof("In addDefaultPolicy :: Policy %s already exists.\n", policyName)
		} else {
			log.Errorf("In addDefaultPolicy :: Error :: failed to create policy %s: %s\n", policyName, err)
		}
	}
}
// process is a goroutine that consumes resource update events coming from
// Kubernetes and:
// 1. On receiving an added or deleted event:
//    i.  adds it to the queue
//    ii. on a timer event, sends the events to handleNetworkPolicyEvents and
//        empties the queue
// 2. On receiving a done event, exits the goroutine.
func (l *KubeListener) process(in <-chan Event, done chan struct{}) {
	log.Infof("KubeListener: process(): Entered with in %v, done %v", in, done)

	timer := time.Tick(processorTickTime * time.Second)
	var networkPolicyEvents []Event

	go func() {
		for {
			select {
			case <-timer:
				if len(networkPolicyEvents) > 0 {
					log.Infof("Calling network policy handler for scheduled %d events", len(networkPolicyEvents))
					handleNetworkPolicyEvents(networkPolicyEvents, l)
					networkPolicyEvents = nil
				}
			case e := <-in:
				log.Infof("KubeListener: process(): Got %v", e)
				switch obj := e.Object.(type) {
				case *v1beta1.NetworkPolicy:
					log.Tracef(trace.Inside, "Scheduling network policy action, now scheduled %d actions", len(networkPolicyEvents))
					networkPolicyEvents = append(networkPolicyEvents, e)
				case *v1.Namespace:
					log.Tracef(trace.Inside, "Processor received namespace")
					handleNamespaceEvent(e, l)
				default:
					log.Errorf("Processor received an event of unknown type %s, ignoring object %s", reflect.TypeOf(obj), obj)
				}
			case <-done:
				log.Infof("KubeListener: process(): Got done")
				return
			}
		}
	}()
	return
}
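// The select loop above batches policy events and flushes them on a tick.
// Below is a minimal, self-contained sketch of that same pattern
// (illustrative only, not part of the original source: plain strings stand
// in for Events, and the tick interval is arbitrary).
func exampleBatchOnTick(in <-chan string, done <-chan struct{}) {
	tick := time.Tick(100 * time.Millisecond) // stand-in for processorTickTime
	var batch []string
	for {
		select {
		case <-tick:
			if len(batch) > 0 {
				fmt.Println("flushing batch:", batch) // handleNetworkPolicyEvents stand-in
				batch = nil
			}
		case e := <-in:
			batch = append(batch, e)
		case <-done:
			return
		}
	}
}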
// Cleanup implements Firewall interface.
func (fw IPtables) Cleanup(netif FirewallEndpoint) error {
	if err := fw.deleteIPtablesRulesBySubstring(netif.GetName()); err != nil {
		log.Errorf("In Cleanup() failed to clean firewall for %s", netif.GetName())
		return err
	}
	return nil
}
// CreateDefaultRule creates an iptables rule for a chain with the
// specified target.
func (fw *IPtables) CreateDefaultRule(chain int, target string) error {
	log.Infof("In CreateDefaultRule() %s rules for chain %d", target, chain)
	chainName := fw.chains[chain].ChainName
	body := fmt.Sprintf("%s %s %s", chainName, "-j", target)
	rule := &IPtablesRule{
		Body:  body,
		State: setRuleInactive.String(),
	}

	// First create rule record in database.
	err0 := fw.addIPtablesRule(rule)
	if err0 != nil {
		log.Error("In CreateDefaultRule() failed to create db record for iptables rule ", rule.GetBody())
		return err0
	}

	err1 := fw.EnsureRule(rule, EnsureLast)
	if err1 != nil {
		log.Errorf("In CreateDefaultRule() %s rules failed", target)
		return err1
	}

	// Finally, set 'active' flag in database record.
	if err2 := fw.Store.switchIPtablesRule(rule, setRuleActive); err2 != nil {
		log.Error("In CreateDefaultRule() iptables rule created but activation failed ", rule.GetBody())
		return err2
	}

	log.Info("In CreateDefaultRule() success")
	return nil
}
// HandleDefaultPolicy handles the isolation flag on a namespace by creating
// or deleting the default network policy.
// See http://kubernetes.io/docs/user-guide/networkpolicies/
func HandleDefaultPolicy(o *v1.Namespace, l *KubeListener) {
	var defaultDeny bool
	annotationKey := "net.beta.kubernetes.io/networkpolicy"
	if np, ok := o.ObjectMeta.Annotations[annotationKey]; ok {
		log.Infof("Handling default policy on a namespace %s, policy is now %s \n", o.ObjectMeta.Name, np)
		// Annotations are stored in the Annotations map as raw JSON,
		// so we need to parse it.
		isolationPolicy := struct {
			Ingress struct {
				Isolation string `json:"isolation"`
			} `json:"ingress"`
		}{}
		// TODO change to json.Unmarshal. Stas
		err := json.NewDecoder(strings.NewReader(np)).Decode(&isolationPolicy)
		if err != nil {
			log.Errorf("In HandleDefaultPolicy :: Error decoding annotation %s: %s", annotationKey, err)
			return
		}
		log.Debugf("Decoded to policy: %v", isolationPolicy)
		defaultDeny = isolationPolicy.Ingress.Isolation == "DefaultDeny"
	} else {
		log.Infof("Handling default policy on a namespace, no annotation detected, assuming a non-isolated namespace\n")
		defaultDeny = false
	}
	if defaultDeny {
		deleteDefaultPolicy(o, l)
	} else {
		addDefaultPolicy(o, l)
	}
}
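// For reference, a namespace annotation value that HandleDefaultPolicy would
// decode looks like the JSON below. This is an illustrative sketch, not part
// of the original source; it assumes only the standard library.
func exampleDecodeIsolationAnnotation() {
	np := `{"ingress": {"isolation": "DefaultDeny"}}` // hypothetical annotation value
	isolationPolicy := struct {
		Ingress struct {
			Isolation string `json:"isolation"`
		} `json:"ingress"`
	}{}
	if err := json.Unmarshal([]byte(np), &isolationPolicy); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(isolationPolicy.Ingress.Isolation == "DefaultDeny") // prints: true
}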
// handleNetworkPolicyEvents handles network policy events by creating or
// deleting romana policies.
func handleNetworkPolicyEvents(events []Event, l *KubeListener) {
	// TODO optimise deletion: search the policy by name/id
	// and delete by id rather than sending the full policy body.
	// Stas.
	var deleteEvents []v1beta1.NetworkPolicy
	var createEvents []v1beta1.NetworkPolicy

	for _, event := range events {
		switch event.Type {
		case KubeEventAdded:
			createEvents = append(createEvents, *event.Object.(*v1beta1.NetworkPolicy))
		case KubeEventDeleted:
			deleteEvents = append(deleteEvents, *event.Object.(*v1beta1.NetworkPolicy))
		default:
			log.Tracef(trace.Inside, "Ignoring %s event in handleNetworkPolicyEvents", event.Type)
		}
	}

	// Translate new network policies into romana policies.
	createPolicyList, kubePolicy, err := PTranslator.Kube2RomanaBulk(createEvents)
	if err != nil {
		log.Errorf("Not all kubernetes policies could be translated to Romana policies. Attempted %d, success %d, fail %d, error %s", len(createEvents), len(createPolicyList), len(kubePolicy), err)
	}
	for kn := range kubePolicy {
		log.Errorf("Failed to translate kubernetes policy %v", kubePolicy[kn])
	}

	// Create new policies.
	for pn := range createPolicyList {
		err = l.addNetworkPolicy(createPolicyList[pn])
		if err != nil {
			log.Errorf("Error adding policy with Kubernetes ID %s: %s", createPolicyList[pn].ExternalID, err)
		}
	}

	// Delete old policies.
	for _, policy := range deleteEvents {
		// The policy name is derived as below in the translator, so use the
		// same technique to derive the policy name here for deleting it.
		policyName := fmt.Sprintf("kube.%s.%s", policy.ObjectMeta.Namespace, policy.ObjectMeta.Name)
		// TODO this must be changed to use External ID
		err = l.deleteNetworkPolicy(common.Policy{Name: policyName})
		if err != nil {
			log.Errorf("Error deleting policy %s (%s): %s", policyName, policy.GetUID(), err)
		}
	}
}
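// The deletion branch above depends on the kube.<namespace>.<name> naming
// convention shared with the translator. A minimal illustration (the
// namespace and policy name below are hypothetical):
func examplePolicyName() {
	policyName := fmt.Sprintf("kube.%s.%s", "default", "allow-frontend")
	fmt.Println(policyName) // prints: kube.default.allow-frontend
}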
// deleteIPtablesRule attempts to uninstall and delete the given rule.
func (fw *IPtables) deleteIPtablesRule(rule *IPtablesRule) error {
	if err := fw.Store.switchIPtablesRule(rule, setRuleInactive); err != nil {
		log.Error("In deleteIPtablesRule() failed to deactivate the rule ", rule.GetBody())
		return err
	}

	if err1 := fw.EnsureRule(rule, EnsureAbsent); err1 != nil {
		log.Errorf("In deleteIPtablesRule() rule %s set inactive but failed to uninstall", rule.GetBody())
		return err1
	}

	if err2 := fw.Store.deleteIPtablesRule(rule); err2 != nil {
		log.Errorf("In deleteIPtablesRule() rule %s set inactive and uninstalled but failed to delete DB record", rule.GetBody())
		return err2
	}
	return nil
}
// updateCache contacts the romana Tenant service, lists
// all resources and loads them into memory.
func (t *Translator) updateCache() error {
	log.Info("In updateCache")

	if t.restClient == nil {
		log.Critical("REST client is nil")
		os.Exit(255)
	}

	tenantURL, err := t.restClient.GetServiceUrl("tenant")
	if err != nil {
		return TranslatorError{ErrorCacheUpdate, err}
	}

	tenants := []tenant.Tenant{}
	err = t.restClient.Get(tenantURL+"/tenants", &tenants)
	if err != nil {
		log.Errorf("updateCache(): Error getting tenant information: %s", err)
		return TranslatorError{ErrorCacheUpdate, err}
	}

	t.cacheMu.Lock()
	defer func() {
		log.Infof("Exiting updateCache with %d tenants", len(t.tenantsCache))
		t.cacheMu.Unlock()
	}()

	t.tenantsCache = nil
	for _, ten := range tenants {
		segments := []tenant.Segment{}
		fullUrl := fmt.Sprintf("%s/tenants/%d/segments", tenantURL, ten.ID)
		err = t.restClient.Get(fullUrl, &segments)

		// Ignore a 404 error here: it means the tenant has no segments,
		// which is treated as zero segments rather than an error.
		if err != nil && !checkHttp404(err) {
			log.Errorf("updateCache(): Error getting segment information for tenant %d: %s", ten.ID, err)
			return TranslatorError{ErrorCacheUpdate, err}
		}
		t.tenantsCache = append(t.tenantsCache, TenantCacheEntry{ten, segments})
	}
	return nil
}
// DivertTrafficToRomanaIPtablesChain injects iptables rules to send traffic
// into the ROMANA chain.
// We need to do this for each tenant/segment pair, as each pair has a
// different chain name.
func (fw *IPtables) DivertTrafficToRomanaIPtablesChain(chain IPtablesChain, opType opDivertTrafficAction) error {
	// The resulting rule should look like:
	// iptables -A INPUT -i tap1234 -j ROMANA-T0S1-INPUT
	log.Infof("In DivertTrafficToRomanaIPtablesChain() processing chain %v with state %s", chain, opType)

	var state RuleState
	switch opType {
	case installDivertRules:
		state = EnsureLast
	case removeDivertRules:
		state = EnsureAbsent
	}

	for _, directionLiteral := range chain.Directions {
		direction := fmt.Sprintf("-%s", directionLiteral)
		body := fmt.Sprintf("%s %s %s %s %s", chain.BaseChain, direction, fw.interfaceName, "-j", chain.ChainName)
		rule := &IPtablesRule{
			Body:  body,
			State: setRuleInactive.String(),
		}

		// First create rule record in database.
		err0 := fw.addIPtablesRule(rule)
		if err0 != nil {
			log.Errorf("In DivertTrafficToRomanaIPtablesChain() failed to process chain %v", chain)
			return err0
		}

		// Then create actual rule in the system.
		if err1 := fw.EnsureRule(rule, state); err1 != nil {
			log.Errorf("In DivertTrafficToRomanaIPtablesChain() failed to process chain %v", chain)
			return err1
		}

		// Finally, set 'active' flag in database record.
		if err2 := fw.Store.switchIPtablesRule(rule, setRuleActive); err2 != nil {
			log.Error("In DivertTrafficToRomanaIPtablesChain() iptables rule created but activation failed ", rule.GetBody())
			return err2
		}
	}
	log.Infof("DivertTrafficToRomanaIPtablesChain() successfully processed chain %v", chain)
	return nil
}
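// The rule body assembled inside the loop above, shown with the values from
// the comment at the top of DivertTrafficToRomanaIPtablesChain ("tap1234"
// and "ROMANA-T0S1-INPUT" are illustrative, not real configuration):
func exampleDivertRuleBody() {
	baseChain, direction, iface, chainName := "INPUT", "-i", "tap1234", "ROMANA-T0S1-INPUT"
	body := fmt.Sprintf("%s %s %s %s %s", baseChain, direction, iface, "-j", chainName)
	fmt.Println(body) // prints: INPUT -i tap1234 -j ROMANA-T0S1-INPUT
}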
func (t *Translator) Init(client *common.RestClient, segmentLabelName, tenantLabelName string) {
	t.cacheMu = &sync.Mutex{}
	t.restClient = client
	err := t.updateCache()
	if err == nil {
		log.Infof("Translator cache updated - have %d tenant entries", len(t.tenantsCache))
	} else {
		log.Errorf("Translator cache update failed, %s", err)
	}
	t.segmentLabelName = segmentLabelName
	t.tenantLabelName = tenantLabelName
}
// translateTarget analyzes kubePolicy and fills the romanaPolicy.AppliedTo field.
func (tg *TranslateGroup) translateTarget(translator *Translator) error {

	// Translate kubernetes namespace into romana tenant. Must be defined.
	tenantCacheEntry := translator.checkTenantInCache(tg.kubePolicy.ObjectMeta.Namespace)
	if tenantCacheEntry == nil {
		log.Errorf("Tenant not found when translating policy %v", tg.romanaPolicy)
		return TranslatorError{ErrorTenantNotInCache, nil}
	}

	// An empty PodSelector means the policy applies to the entire namespace.
	if len(tg.kubePolicy.Spec.PodSelector.MatchLabels) == 0 {
		tg.romanaPolicy.AppliedTo = []common.Endpoint{
			common.Endpoint{TenantID: tenantCacheEntry.Tenant.ID, TenantExternalID: tenantCacheEntry.Tenant.ExternalID},
		}
		log.Tracef(trace.Inside, "Segment was not specified in policy %v, assuming target is a namespace", tg.kubePolicy)
		return nil
	}

	// If PodSelector is not empty then the segment label must be defined.
	kubeSegmentID, ok := tg.kubePolicy.Spec.PodSelector.MatchLabels[translator.segmentLabelName]
	if !ok || kubeSegmentID == "" {
		log.Errorf("Expected segment to be specified in podSelector part as %s", translator.segmentLabelName)
		return common.NewError("Expected segment to be specified in podSelector part as '%s'", translator.segmentLabelName)
	}

	// Translate the kubernetes segment label into a romana segment.
	segment, err := translator.getOrAddSegment(tg.kubePolicy.ObjectMeta.Namespace, kubeSegmentID)
	if err != nil {
		log.Errorf("Error in translate while calling l.getOrAddSegment with %s and %s - error %s", tg.kubePolicy.ObjectMeta.Namespace, kubeSegmentID, err)
		return err
	}

	tg.romanaPolicy.AppliedTo = []common.Endpoint{
		common.Endpoint{TenantID: tenantCacheEntry.Tenant.ID, TenantExternalID: tenantCacheEntry.Tenant.ExternalID, SegmentID: segment.ID},
	}
	return nil
}
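// A podSelector that translateTarget resolves to a segment would carry the
// configured segment label. Illustrative sketch; the label key and value are
// hypothetical and depend on what Init() was given:
func exampleSegmentFromPodSelector() {
	segmentLabelName := "romana.io/segment" // hypothetical configured key
	matchLabels := map[string]string{segmentLabelName: "frontend"}
	if kubeSegmentID, ok := matchLabels[segmentLabelName]; ok && kubeSegmentID != "" {
		fmt.Println("target segment:", kubeSegmentID) // prints: target segment: frontend
	}
}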
// createNewDbRules is a helper method that puts a list of firewall rules
// into firewall storage.
func (i IPTsaveFirewall) createNewDbRules(ruleList []*IPtablesRule) error {
	for ruleNum := range ruleList {
		rule := ruleList[ruleNum]
		log.Tracef(trace.Inside, "In createNewDbRules() storing rule %p", rule)
		err0 := i.Store.ensureIPtablesRule(rule)
		if err0 != nil {
			log.Errorf("In createNewDbRules() failed to store rule %v", rule)
			return err0
		}
	}
	return nil
}
// enableNewDbRules is a helper method that sets the `enabled` flag for
// a list of firewall rules in firewall storage.
func (i IPTsaveFirewall) enableNewDbRules(ruleList []*IPtablesRule) error {
	for ruleNum := range ruleList {
		rule := ruleList[ruleNum]
		log.Tracef(trace.Inside, "In enableNewDbRules() activating rule %p", rule)
		err0 := i.Store.switchIPtablesRule(rule, setRuleActive)
		if err0 != nil {
			log.Errorf("In enableNewDbRules() failed to enable rule %v", rule)
			return err0
		}
	}
	return nil
}
// deleteDbRules is a helper method that deletes a list of firewall rules
// from firewall storage.
func (i IPTsaveFirewall) deleteDbRules(ruleList []*IPtablesRule) error {
	for ruleNum := range ruleList {
		rule := ruleList[ruleNum]
		log.Tracef(trace.Inside, "In deleteDbRules() deleting rule %p", rule)
		err0 := i.Store.deleteIPtablesRule(rule)
		if err0 != nil {
			log.Errorf("In deleteDbRules() failed to delete rule %v", rule)
			return err0
		}
	}
	return nil
}
// EnsureRule verifies whether the given iptables rule exists and creates it if not.
func (fw IPtables) EnsureRule(rule FirewallRule, opType RuleState) error {
	ruleExists := fw.isRuleExist(rule)

	args := []string{}
	if ruleExists {
		switch opType {
		case EnsureAbsent:
			args = append(args, "-D")
		default:
			log.Info("In EnsureRule - nothing to do ", rule.GetBody())
			return nil
		}
	} else {
		switch opType {
		case EnsureLast:
			args = append(args, "-A")
		case EnsureFirst:
			args = append(args, "-I")
		default:
			log.Info("In EnsureRule - nothing to do ", rule.GetBody())
			return nil
		}
	}

	args = append(args, strings.Split(rule.GetBody(), " ")...)
	cmdStr := iptablesCmd + " " + strings.Join(args, " ")
	out, err := fw.os.Exec(iptablesCmd, args)
	if err != nil {
		log.Errorf("[%s]: %s failed for rule %s with error %v, saying [%s]", cmdStr, opType, rule.GetBody(), err, string(out))
	} else {
		if len(out) > 0 {
			log.Infof("%s success %s: [%s]", opType, rule.GetBody(), string(out))
		} else {
			log.Infof("%s success %s", opType, rule.GetBody())
		}
	}

	return err
}
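// The command EnsureRule ends up executing, shown for a hypothetical rule
// body (the chain and interface names are invented for illustration, and
// "iptables" stands in for iptablesCmd):
func exampleEnsureRuleCommand() {
	body := "ROMANA-T0S1-INPUT -i tap1234 -j ACCEPT"
	args := append([]string{"-A"}, strings.Split(body, " ")...) // EnsureLast on a missing rule
	fmt.Println("iptables " + strings.Join(args, " "))
	// prints: iptables -A ROMANA-T0S1-INPUT -i tap1234 -j ACCEPT
}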
// ensureIPtablesRule checks if the given rule exists in the database and, if not, creates it.
func (firewallStore *firewallStore) ensureIPtablesRule(rule *IPtablesRule) error {
	log.Info("Acquiring store mutex for ensureIPtablesRule")
	firewallStore.mu.Lock()
	defer func() {
		log.Info("Releasing store mutex for ensureIPtablesRule")
		firewallStore.mu.Unlock()
	}()
	log.Info("Acquired store mutex for ensureIPtablesRule")

	if firewallStore.DbStore.Db.Where("body = ?", rule.Body).First(rule).RecordNotFound() {
		log.Tracef(trace.Inside, "In ensureIPtablesRule(), rule %s not found in db - creating", rule.Body)
		err0 := firewallStore.addIPtablesRuleUnsafe(rule)
		if err0 != nil {
			log.Errorf("In ensureIPtablesRule() failed to store rule %v", rule)
			return err0
		}
	} else {
		log.Tracef(trace.Inside, "In ensureIPtablesRule(), rule %s already in db - nothing to do", rule.Body)
	}

	return nil
}
// Kube2RomanaBulk attempts to translate a list of kubernetes policies into
// their romana representation. It returns a list of translated policies and
// a list of policies, in their original form, that could not be translated.
func (t Translator) Kube2RomanaBulk(kubePolicies []v1beta1.NetworkPolicy) ([]common.Policy, []v1beta1.NetworkPolicy, error) {
	log.Info("In Kube2RomanaBulk")
	var returnRomanaPolicy []common.Policy
	var returnKubePolicy []v1beta1.NetworkPolicy

	err := t.updateCache()
	if err != nil {
		return returnRomanaPolicy, returnKubePolicy, TranslatorError{ErrorCacheUpdate, err}
	}

	for kubePolicyNumber := range kubePolicies {
		romanaPolicy, err := t.translateNetworkPolicy(&kubePolicies[kubePolicyNumber])
		if err != nil {
			log.Errorf("Error during policy translation %s", err)
			returnKubePolicy = append(returnKubePolicy, kubePolicies[kubePolicyNumber])
		} else {
			returnRomanaPolicy = append(returnRomanaPolicy, romanaPolicy)
		}
	}

	return returnRomanaPolicy, returnKubePolicy, nil
}
// ProduceNewPolicyEvents produces kubernetes network policy events that aren't
// applied in the romana policy service yet.
func ProduceNewPolicyEvents(out chan Event, done <-chan struct{}, KubeListener *KubeListener) {
	var sleepTime = time.Second
	log.Infof("Listening for kubernetes network policies")

	// watcher watches all network policies.
	watcher := cache.NewListWatchFromClient(
		KubeListener.kubeClient.ExtensionsClient,
		"networkpolicies",
		api.NamespaceAll,
		fields.Everything(),
	)
	store, controller := cache.NewInformer(
		watcher,
		&v1beta1.NetworkPolicy{},
		0,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				out <- Event{
					Type:   KubeEventAdded,
					Object: obj,
				}
			},
			UpdateFunc: func(old, obj interface{}) {
				out <- Event{
					Type:   KubeEventModified,
					Object: obj,
				}
			},
			DeleteFunc: func(obj interface{}) {
				out <- Event{
					Type:   KubeEventDeleted,
					Object: obj,
				}
			},
		})

	go controller.Run(done)
	time.Sleep(sleepTime)

	var kubePolicyList []v1beta1.NetworkPolicy
	for _, kp := range store.List() {
		kubePolicyList = append(kubePolicyList, kp.(v1beta1.NetworkPolicy))
	}

	newEvents, oldPolicies, err := KubeListener.syncNetworkPolicies(kubePolicyList)
	if err != nil {
		log.Errorf("Failed to sync romana policies with kube policies, sync failed with %s", err)
	}

	log.Infof("Produce policies detected %d new kubernetes policies and %d old romana policies", len(newEvents), len(oldPolicies))

	// Create new kubernetes policies.
	for en := range newEvents {
		out <- newEvents[en]
	}

	// Delete old romana policies.
	// TODO find a way to remove policy deletion from this function. Stas.
	policyUrl, err := KubeListener.restClient.GetServiceUrl("policy")
	if err != nil {
		log.Errorf("Failed to discover policy url before deleting outdated romana policies")
		return
	}

	for k := range oldPolicies {
		err = KubeListener.restClient.Delete(fmt.Sprintf("%s/policies/%d", policyUrl, oldPolicies[k].ID), nil, &oldPolicies)
		if err != nil {
			log.Errorf("Sync policies detected obsolete policy %d but failed to delete, %s", oldPolicies[k].ID, err)
		}
	}
}
// makeNextIngressPeer analyzes the current Ingress rule and adds a new Peer
// to romanaPolicy.Peers.
func (tg *TranslateGroup) makeNextIngressPeer(translator *Translator) error {
	ingress := tg.kubePolicy.Spec.Ingress[tg.ingressIndex]

	for _, fromEntry := range ingress.From {
		tenantCacheEntry := &TenantCacheEntry{}

		// Exactly one of From.PodSelector or From.NamespaceSelector must be specified.
		if fromEntry.PodSelector == nil && fromEntry.NamespaceSelector == nil {
			log.Errorf("Either PodSelector or NamespaceSelector must be specified")
			return common.NewError("Either PodSelector or NamespaceSelector must be specified")
		} else if fromEntry.PodSelector != nil && fromEntry.NamespaceSelector != nil {
			log.Errorf("Exactly one of PodSelector or NamespaceSelector must be specified")
			return common.NewError("Exactly one of PodSelector or NamespaceSelector must be specified")
		}

		// This ingress field matches a namespace, which will be our source tenant.
		if fromEntry.NamespaceSelector != nil {
			tenantName, ok := fromEntry.NamespaceSelector.MatchLabels[translator.tenantLabelName]
			if !ok || tenantName == "" {
				log.Errorf("Expected tenant name to be specified in NamespaceSelector field with a key %s", translator.tenantLabelName)
				return common.NewError("Expected tenant name to be specified in NamespaceSelector field with a key %s", translator.tenantLabelName)
			}

			tenantCacheEntry = translator.checkTenantInCache(tenantName)
			if tenantCacheEntry == nil {
				log.Errorf("Tenant not found when translating policy %v", tg.romanaPolicy)
				return TranslatorError{ErrorTenantNotInCache, nil}
			}

			// Found a source tenant, let's register it as a romana Peer.
			tg.romanaPolicy.Ingress[tg.ingressIndex].Peers = append(tg.romanaPolicy.Ingress[tg.ingressIndex].Peers,
				common.Endpoint{TenantID: tenantCacheEntry.Tenant.ID, TenantExternalID: tenantCacheEntry.Tenant.ExternalID})
		}

		// This ingress field matches a segment, and the source tenant is the same as the target tenant.
		if fromEntry.PodSelector != nil {

			// Check if the source/target tenant is in the cache.
			tenantCacheEntry = translator.checkTenantInCache(tg.kubePolicy.ObjectMeta.Namespace)
			if tenantCacheEntry == nil {
				log.Errorf("Tenant not found when translating policy %v", tg.romanaPolicy)
				return TranslatorError{ErrorTenantNotInCache, nil}
			}

			// If podSelector is empty, match all traffic from the tenant.
			if len(fromEntry.PodSelector.MatchLabels) == 0 {
				tg.romanaPolicy.Ingress[tg.ingressIndex].Peers = append(tg.romanaPolicy.Ingress[tg.ingressIndex].Peers,
					common.Endpoint{TenantID: tenantCacheEntry.Tenant.ID, TenantExternalID: tenantCacheEntry.Tenant.ExternalID})
				log.Tracef(trace.Inside, "No segment specified when translating ingress rule %v", tg.kubePolicy.Spec.Ingress[tg.ingressIndex])
				return nil
			}

			// Get the segment name from podSelector.
			kubeSegmentID, ok := fromEntry.PodSelector.MatchLabels[translator.segmentLabelName]
			if !ok || kubeSegmentID == "" {
				log.Errorf("Expected segment to be specified in podSelector part as %s", translator.segmentLabelName)
				return common.NewError("Expected segment to be specified in podSelector part as '%s'", translator.segmentLabelName)
			}

			// Translate the kubernetes segment name into a romana segment.
			segment, err := translator.getOrAddSegment(tenantCacheEntry.Tenant.Name, kubeSegmentID)
			if err != nil {
				log.Errorf("Error in translate while calling l.getOrAddSegment with %s and %s - error %s", tenantCacheEntry.Tenant.Name, kubeSegmentID, err)
				return err
			}

			// Register the source tenant/segment as a romana Peer.
			tg.romanaPolicy.Ingress[tg.ingressIndex].Peers = append(tg.romanaPolicy.Ingress[tg.ingressIndex].Peers,
				common.Endpoint{TenantID: tenantCacheEntry.Tenant.ID, TenantExternalID: tenantCacheEntry.Tenant.ExternalID, SegmentID: segment.ID})
		}
	}
	return nil
}