Example No. 1
// applyRules renders desired rules and passes them as stdin to iptables-restore.
func (i *IPTsaveFirewall) applyRules(iptables *iptsave.IPtables) error {
	cmd := i.os.Cmd(iptablesRestoreBin, []string{"--noflush"})
	reader := bytes.NewReader([]byte(iptables.Render()))

	log.Tracef(trace.Inside, "In applyRules allocating stdin pipe")
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return fmt.Errorf("Failed to allocate stdin for iptables-restore - %s", err)
	}

	log.Tracef(trace.Inside, "In applyRules starting the command")
	if err := cmd.Start(); err != nil {
		return err
	}

	log.Tracef(trace.Inside, "In applyRules sending the rules")
	_, err = reader.WriteTo(stdin)
	if err != nil {
		return err
	}

	stdin.Close()

	log.Tracef(trace.Inside, "In applyRules waiting for command to complete")
	if err := cmd.Wait(); err != nil {
		log.Tracef(trace.Inside, "In applyRules failed to apply")
		return err
	}

	return nil
}
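
The pattern in applyRules — render the payload, open a stdin pipe, start the command, stream the bytes, close stdin, then wait — is the standard way to feed data to a child process in Go. Below is a minimal, self-contained sketch of that same pattern using os/exec directly; the command (cat) and payload are placeholder choices for illustration, not part of the firewall code above.

package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// pipeToCommand streams payload to the command's stdin and waits for it to exit.
func pipeToCommand(name string, payload []byte, args ...string) error {
	cmd := exec.Command(name, args...)

	stdin, err := cmd.StdinPipe()
	if err != nil {
		return fmt.Errorf("failed to allocate stdin for %s: %s", name, err)
	}

	if err := cmd.Start(); err != nil {
		return err
	}

	// Write the payload and close stdin so the child sees EOF.
	if _, err := bytes.NewReader(payload).WriteTo(stdin); err != nil {
		stdin.Close()
		return err
	}
	stdin.Close()

	return cmd.Wait()
}

func main() {
	// cat consumes its stdin here, standing in for iptables-restore.
	if err := pipeToCommand("cat", []byte("hello\n")); err != nil {
		fmt.Println("error:", err)
	}
}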
Example No. 2
// process is a goroutine that consumes resource update events coming from
// Kubernetes and:
// 1. On receiving an added or deleted event:
//    i. add it to the queue
//    ii. on a timer event, send the events to handleNetworkPolicyEvents and empty the queue
// 2. On receiving a done event, exit the goroutine
func (l *KubeListener) process(in <-chan Event, done chan struct{}) {
	log.Infof("KubeListener: process(): Entered with in %v, done %v", in, done)

	timer := time.Tick(processorTickTime * time.Second)
	var networkPolicyEvents []Event

	go func() {
		for {
			select {
			case <-timer:
				if len(networkPolicyEvents) > 0 {
					log.Infof("Calling network policy handler for scheduled %d events", len(networkPolicyEvents))
					handleNetworkPolicyEvents(networkPolicyEvents, l)
					networkPolicyEvents = nil
				}
			case e := <-in:
				log.Infof("KubeListener: process(): Got %v", e)
				switch obj := e.Object.(type) {
				case *v1beta1.NetworkPolicy:
					log.Tracef(trace.Inside, "Scheduling network policy action, %d actions already scheduled", len(networkPolicyEvents))
					networkPolicyEvents = append(networkPolicyEvents, e)
				case *v1.Namespace:
					log.Tracef(trace.Inside, "Processor received namespace")
					handleNamespaceEvent(e, l)
				default:
					log.Errorf("Processor received an event of unknown type %s, ignoring object %s", reflect.TypeOf(obj), obj)
				}
			case <-done:
				log.Infof("KubeListener: process(): Got done")
				return
			}
		}
	}()
	return
}
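
The goroutine above batches policy events and flushes them on a timer. Here is a hedged, stand-alone sketch of that batching idea; the names (batchProcess, flush) and the use of time.NewTicker instead of time.Tick are illustrative choices, not part of the KubeListener.

package main

import (
	"fmt"
	"time"
)

// batchProcess accumulates strings from in and flushes them every interval,
// stopping when done is closed.
func batchProcess(in <-chan string, done <-chan struct{}, interval time.Duration, flush func([]string)) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	var batch []string
	for {
		select {
		case <-ticker.C:
			if len(batch) > 0 {
				flush(batch)
				batch = nil
			}
		case e := <-in:
			batch = append(batch, e)
		case <-done:
			return
		}
	}
}

func main() {
	in := make(chan string)
	done := make(chan struct{})

	go batchProcess(in, done, 100*time.Millisecond, func(b []string) {
		fmt.Printf("flushing %d events: %v\n", len(b), b)
	})

	in <- "added"
	in <- "deleted"
	time.Sleep(200 * time.Millisecond)
	close(done)
}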
Example No. 3
// SetEndpoint implements Firewall interface. It initializes
// the endpoint-dependent values of the firewall.
func (i *IPTsaveFirewall) SetEndpoint(netif FirewallEndpoint) error {
	log.Infof("In SetEndpoint() with endpoint <iface=%s, ip=%s, mac=%s>", netif.GetName(), netif.GetIP(), netif.GetMac())

	var err error
	i.interfaceName = netif.GetName()

	i.u32filter, i.chainPrefix, err = prepareU32Rules(netif.GetIP(), i.networkConfig)
	if err != nil {
		return err
	}

	// Assemble firewall rules needed to divert traffic
	// to/from the endpoint.
	divertFilter := makeDivertRules(netif)
	log.Tracef(trace.Inside, "In SetEndpoint() after divertFilter with\n%s", divertFilter.RenderFooter())

	// Compare the list of divert rules with the list of current rules and
	// build a list of chains containing only the divert rules that still
	// need to be created to match the current rules.
	backendFilter := i.CurrentState.TableByName("filter")
	newChains := iptsave.MergeTables(backendFilter, divertFilter)

	// schedule divert rules that don't exist yet for installation.
	newFilter := i.DesiredState.TableByName("filter")
	newFilter.Chains = append(newFilter.Chains, newChains...)

	log.Tracef(trace.Inside, "In SetEndpoint after merge\n%s", i.CurrentState.Render())

	return err
}
Example No. 4
// ensureIPtablesChain ensures that the given iptables chain is in the desired state.
func (fw *IPtables) ensureIPtablesChain(chainName string, opType chainState) error {
	log.Infof("In ensureIPtablesChain(): %s %s", opType.String(), chainName)

	log.Tracef(trace.Inside, "In ensureIPtablesChain(): Testing chain %s", chainName)
	exists := fw.isChainExistByName(chainName)
	log.Tracef(trace.Inside, "In ensureIPtablesChain(): Test for chain %s returned %t", chainName, exists)

	var iptablesAction string
	switch opType {
	case ensureChainExists:
		if exists {
			log.Tracef(trace.Inside, "In ensureIPtablesChain(): nothing to do for chain %s", chainName)
			return nil
		} else {
			iptablesAction = "-N"
		}

	case ensureChainAbsent:
		if exists {
			iptablesAction = "-X"
		} else {
			log.Tracef(trace.Inside, "In ensureIPtablesChain(): nothing to do for chain %s", chainName)
			return nil
		}
	}

	args := []string{iptablesAction, chainName}
	_, err := fw.os.Exec(iptablesCmd, args)
	return err
}
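
For context, here is a stand-alone sketch of the same exists-then-act flow using os/exec directly, assuming an iptables binary is available on the host: listing a chain with iptables -n --list exits non-zero when the chain is absent, -N creates a chain, and -X removes an empty user-defined chain. This only illustrates the pattern; it is not the fw.isChainExistByName / fw.os.Exec plumbing used above.

package main

import (
	"fmt"
	"os/exec"
)

// chainExists reports whether an iptables chain is present by asking
// iptables to list it; a non-zero exit status means it is absent.
func chainExists(chain string) bool {
	return exec.Command("iptables", "-n", "--list", chain).Run() == nil
}

// ensureChain creates or removes a user-defined chain so that its
// presence matches the desired flag. It is a no-op when already satisfied.
func ensureChain(chain string, desired bool) error {
	exists := chainExists(chain)
	switch {
	case desired && !exists:
		return exec.Command("iptables", "-N", chain).Run()
	case !desired && exists:
		return exec.Command("iptables", "-X", chain).Run()
	default:
		return nil // already in the desired state
	}
}

func main() {
	// ROMANA-TEST is a hypothetical chain name; running this requires root.
	if err := ensureChain("ROMANA-TEST", true); err != nil {
		fmt.Println("ensure failed:", err)
	}
}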
Example No. 5
// ensureInterHostRoutes ensures we have routes to every other host.
func (h Helper) ensureInterHostRoutes() error {
	log.Trace(trace.Inside, "Acquiring mutex ensureInterHostRoutes")
	h.ensureInterHostRoutesMutex.Lock()
	defer func() {
		log.Trace(trace.Inside, "Releasing mutex ensureInterHostRoutes")
		h.ensureInterHostRoutesMutex.Unlock()
	}()
	log.Trace(trace.Inside, "Acquired mutex ensureInterHostRoutes")

	via := "via"
	log.Tracef(trace.Inside, "In ensureInterHostRoutes over %v\n", h.Agent.networkConfig.otherHosts)
	for _, host := range h.Agent.networkConfig.otherHosts {
		log.Tracef(trace.Inside, "In ensureInterHostRoutes ensuring route for %v\n", host)
		_, romanaCidr, err := net.ParseCIDR(host.RomanaIp)
		if err != nil {
			return failedToParseOtherHosts(host.RomanaIp)
		}
		romanaMaskInt, _ := romanaCidr.Mask.Size()
		romanaMask := fmt.Sprintf("%d", romanaMaskInt)
		dest := host.Ip

		// If the route doesn't exist yet (isRouteExist returns an error
		// when the route is missing),
		if err := h.isRouteExist(romanaCidr.IP, romanaMask); err != nil {

			// Create it
			err2 := h.createRoute(romanaCidr.IP, romanaMask, via, dest)
			if err2 != nil {
				return routeCreateError(err2, romanaCidr.IP.String(), romanaMask, dest)
			}
		}
	}
	return nil
}
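
The CIDR handling above reduces to net.ParseCIDR plus Mask.Size(). A small sketch with a hard-coded example network (10.1.0.0/16 is an illustrative value, not taken from the code above):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Parse a host's Romana CIDR and extract the network address and prefix length.
	_, cidr, err := net.ParseCIDR("10.1.0.0/16")
	if err != nil {
		fmt.Println("failed to parse CIDR:", err)
		return
	}

	ones, _ := cidr.Mask.Size()
	fmt.Printf("route target: %s/%d\n", cidr.IP, ones)
	// A route could then be installed roughly as: ip route add 10.1.0.0/16 via <host IP>
}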
Example No. 6
// syncNetworkPolicies compares a list of kubernetes network policies with romana network policies.
// It returns a list of kubernetes policies that don't yet have a corresponding romana policy,
// and a list of romana policies that represent kubernetes policies which no longer exist.
func (l *KubeListener) syncNetworkPolicies(kubePolicies []v1beta1.NetworkPolicy) (kubernetesEvents []Event, romanaPolicies []common.Policy, err error) {
	log.Infof("In syncNetworkPolicies with %v", kubePolicies)

	policies, err := getAllPoliciesFunc(l.restClient)
	if err != nil {
		return
	}

	log.Infof("In syncNetworkPolicies fetched %d romana policies", len(policies))

	// Compare kubernetes policies and all romana policies by name.
	// TODO Comparing by name is fragile; it should be `external_id == UID`. Stas.

	// Prepare a list of kubernetes policies that don't have corresponding
	// romana policy.
	var found bool
	accountedRomanaPolicies := make(map[int]bool)

	for kn, kubePolicy := range kubePolicies {
		namespacePolicyNamePrefix := "kube.default."
		found = false
		for pn, policy := range policies {
			fullPolicyName := fmt.Sprintf("%s%s", namespacePolicyNamePrefix, kubePolicy.ObjectMeta.Name)
			if fullPolicyName == policy.Name {
				found = true
				accountedRomanaPolicies[pn] = true
				break
			}
		}

		if !found {
			log.Tracef(trace.Inside, "Sync policies detected new kube policy %v", kubePolicies[kn])
			kubernetesEvents = append(kubernetesEvents, Event{KubeEventAdded, kubePolicies[kn]})
		}
	}

	// Delete romana policies that don't have corresponding
	// kubernetes policy.
	// Ignore policies that don't have "kube." prefix in the name.
	for k, _ := range policies {
		if !strings.HasPrefix(policies[k].Name, "kube.") {
			log.Tracef(trace.Inside, "Sync policies skipping policy %s since it doesn't match the prefix `kube.`", policies[k].Name)
			continue
		}

		if !accountedRomanaPolicies[k] {
			log.Infof("Sync policies detected that romana policy %d is obsolete - scheduling for deletion", policies[k].ID)
			log.Tracef(trace.Inside, "Sync policies detected that romana policy %d is obsolete - scheduling for deletion", policies[k].ID)
			romanaPolicies = append(romanaPolicies, policies[k])
		}
	}

	return
}
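
The comparison above is quadratic in the number of policies because of the nested loops. A common alternative, sketched below under the same name-matching assumption, indexes the romana policies by name first and computes both directions of the diff in linear time; the plain string slices stand in for the common.Policy and v1beta1.NetworkPolicy types used above.

package main

import (
	"fmt"
	"strings"
)

// diffByName returns kube policy names that have no romana counterpart, and
// romana policy names (carrying the given prefix) whose kube policy is gone.
func diffByName(kubeNames, romanaNames []string, prefix string) (newKube, obsoleteRomana []string) {
	romanaSet := make(map[string]bool, len(romanaNames))
	for _, name := range romanaNames {
		romanaSet[name] = true
	}

	kubeSet := make(map[string]bool, len(kubeNames))
	for _, name := range kubeNames {
		full := prefix + name
		kubeSet[full] = true
		if !romanaSet[full] {
			newKube = append(newKube, name)
		}
	}

	for _, name := range romanaNames {
		if strings.HasPrefix(name, prefix) && !kubeSet[name] {
			obsoleteRomana = append(obsoleteRomana, name)
		}
	}
	return newKube, obsoleteRomana
}

func main() {
	newKube, obsolete := diffByName(
		[]string{"allow-db", "allow-web"},
		[]string{"kube.default.allow-web", "kube.default.old-policy"},
		"kube.default.",
	)
	fmt.Println("new kube policies:", newKube)
	fmt.Println("obsolete romana policies:", obsolete)
}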
Example No. 7
// ProvisionEndpoint implements Firewall interface.
func (i *IPTsaveFirewall) ProvisionEndpoint() error {
	log.Tracef(trace.Public, "In ProvisionEndpoint\n%s", i.DesiredState.Render())

	// Generate a list of rules for firewall store.
	ruleList, err := makeDbRules(i.DesiredState)
	if err != nil {
		return err
	}

	// Create rules in firewall store.
	err = i.createNewDbRules(ruleList)
	if err != nil {
		return err
	}

	// Install iptables rules from desired state.
	err = i.applyRules(i.DesiredState)
	if err != nil {
		return err
	}

	// Activate rules in firewall store.
	err = i.enableNewDbRules(ruleList)
	if err != nil {
		return err
	}

	return nil
}
Example No. 8
// Init implements Firewall interface
func (i *IPTsaveFirewall) Init(exec utilexec.Executable, store FirewallStore, nc NetConfig) error {
	log.Infof("In Init()")

	fwstore := firewallStore{}
	fwstore.DbStore = store.GetDb()
	fwstore.mu = store.GetMutex()

	i.Store = fwstore
	i.os = exec
	i.networkConfig = nc

	// Read current iptables config.
	output, err := i.os.Exec(iptablesSaveBin, []string{})
	if err != nil {
		log.Infof("In Init(), failed to call iptables-save, %s", err)
		return err
	}

	// Parse iptables-save output.
	i.CurrentState = &iptsave.IPtables{}
	i.CurrentState.Parse(bytes.NewReader(output))

	// Initialize the desired state filter table.
	i.DesiredState = &iptsave.IPtables{}
	i.DesiredState.Tables = append(i.DesiredState.Tables, &iptsave.IPtable{Name: "filter"})
	log.Tracef(trace.Inside, "In Init(), iptables rules loaded\n%s", i.CurrentState.Render())

	return nil
}
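
For reference, iptables-save output introduces tables with a leading *, declares chains with leading : lines, and ends each table with COMMIT; the iptsave parser above builds a full object tree from that format. A minimal sketch that only extracts table and chain names from such text (the sample input is made up for illustration):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	sample := `*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:ROMANA-INPUT - [0:0]
-A INPUT -i eth0 -j ROMANA-INPUT
COMMIT
`
	scanner := bufio.NewScanner(strings.NewReader(sample))
	var table string
	for scanner.Scan() {
		line := scanner.Text()
		switch {
		case strings.HasPrefix(line, "*"):
			table = strings.TrimPrefix(line, "*")
			fmt.Println("table:", table)
		case strings.HasPrefix(line, ":"):
			// Chain declarations look like ":NAME POLICY [packets:bytes]".
			fields := strings.Fields(strings.TrimPrefix(line, ":"))
			fmt.Printf("  chain %s in table %s (policy %s)\n", fields[0], table, fields[1])
		}
	}
}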
Example No. 9
// MergeTables merges the source IPtable into the destination IPtable.
// It returns a list of chains containing only the rules from the source
// table that were propagated into the destination table.
func MergeTables(dstTable, srcTable *IPtable) []*IPchain {
	var returnChains []*IPchain
	var newChains []*IPchain
	var newChainFound bool

	// Walk through source and look for corresponding
	// dest chain. If dest chain exists, merge them,
	// otherwise add whole source chain to the dest table.
	for srcChainNum, srcChain := range srcTable.Chains {
		newChainFound = true
		for dstChainNum, dstChain := range dstTable.Chains {
			if dstChain.Name == srcChain.Name {
				newChainFound = false
				var newRules []*IPrule

				log.Tracef(trace.Inside, "In MergeTables, merging chain %s into table %s", srcChain.Name, dstTable.Name)

				// iptables-restore with the --noflush flag treats builtin and
				// user-defined chains differently: builtin chains are not
				// flushed, but user-defined chains are flushed regardless of
				// the flag (at least in v1.4.21), so different merge strategies are needed.
				if dstChain.IsBuiltin() {
					newRules = MergeChains(dstTable.Chains[dstChainNum], srcTable.Chains[srcChainNum])
				} else {
					newRules = MergeUserChains(dstTable.Chains[dstChainNum], srcTable.Chains[srcChainNum])
				}

				// Build a new chain like the source chain but containing only
				// the rules returned by the merge, and use it for the return value.
				returnChains = append(returnChains, &IPchain{Name: srcChain.Name, Policy: srcChain.Policy, Rules: newRules})
			}
		}

		if newChainFound {
			log.Tracef(trace.Inside, "In MergeTables, adding chain %s into table %s", srcChain.Name, dstTable.Name)
			newChains = append(newChains, srcTable.Chains[srcChainNum])
		}
	}
	dstTable.Chains = append(dstTable.Chains, newChains...)

	// Making sure that we are returning new IPchain structs
	// and not pointers to the original source chains.
	for _, c := range newChains {
		returnChains = append(returnChains, &IPchain{Name: c.Name, Policy: c.Policy, Rules: c.Rules})
	}
	return returnChains
}
Example No. 10
// deleteIPtablesRulesBySubstring deletes matching rules from the database and generates a patch
// for the desired state that, when applied, removes the same rules from the current state.
func (i *IPTsaveFirewall) deleteIPtablesRulesBySubstring(substring string) error {

	rulesPtr, err := i.Store.findIPtablesRules(substring)
	if err != nil {
		return err
	}
	log.Tracef(trace.Inside, "In Cleanup - found %d rules for interface %s", len(*rulesPtr), substring)

	// This function operates on "filter" table.
	tableDesired := i.DesiredState.TableByName("filter")
	if tableDesired == nil {
		return fmt.Errorf("In Cleanup() firewall doesn't have filter table")
	}

	tableCurrent := i.CurrentState.TableByName("filter")
	if tableCurrent == nil {
		return fmt.Errorf("In Cleanup() firewall doesn't have filter table")
	}

	// walk through rules from database, check if they are present
	// in current iptables config and schedule them for deletion if necessary.
	for _, rule := range *rulesPtr {
		log.Tracef(trace.Inside, "In Cleanup - deleting rule %s", rule.GetBody())

		// ignore inactive rules, they shouldn't be
		// in current state anyway
		if rule.State == setRuleInactive.String() {
			continue
		}

		err = i.Store.deleteIPtablesRule(&rule)
		if err != nil {
			return err
		}

		makeUndoRule(&rule, tableCurrent, tableDesired)

	}

	return nil
}
Example No. 11
File: store.go Project: romana/core
// ensureIPtablesRule checks if given rule exists in a database and if not, creates it.
func (firewallStore *firewallStore) ensureIPtablesRule(rule *IPtablesRule) error {
	log.Info("Acquiring store mutex for ensureIPtablesRule")
	firewallStore.mu.Lock()
	defer func() {
		log.Info("Releasing store mutex for ensureIPtablesRule")
		firewallStore.mu.Unlock()
	}()
	log.Info("Acquired store mutex for ensureIPtablesRule")

	if firewallStore.DbStore.Db.Where("body = ?", rule.Body).First(rule).RecordNotFound() {
		log.Tracef(trace.Inside, "In ensureIPtablesRule(), rule %s not found in db - creating", rule.Body)
		err0 := firewallStore.addIPtablesRuleUnsafe(rule)
		if err0 != nil {
			log.Errorf("In ensureIPtablesRule() failed to store rule %v", rule)
			return err0
		}
	} else {
		log.Tracef(trace.Inside, "In ensureIPtablesRule(), rule %s already in db - nothing to do", rule.Body)
	}

	return nil
}
Example No. 12
// deleteDbRules is a helper method that deletes a list of firewall rules
// from a firewall storage.
func (i IPTsaveFirewall) deleteDbRules(ruleList []*IPtablesRule) error {

	for ruleNum, _ := range ruleList {
		rule := ruleList[ruleNum]
		log.Tracef(trace.Inside, "In deleteDbRules() deleting rule %p", rule)
		err0 := i.Store.deleteIPtablesRule(rule)
		if err0 != nil {
			log.Errorf("In deleteDbRules() failed to delete rule %v", rule)
			return err0
		}
	}

	return nil
}
Example No. 13
// enableNewDbRules is a helper method that sets the `enabled` flag for
// a list of firewall rules in a firewall storage.
func (i IPTsaveFirewall) enableNewDbRules(ruleList []*IPtablesRule) error {

	for ruleNum, _ := range ruleList {
		rule := ruleList[ruleNum]
		log.Tracef(trace.Inside, "In enableNewDbRules() activating rule %p", rule)
		err0 := i.Store.switchIPtablesRule(rule, setRuleActive)
		if err0 != nil {
			log.Errorf("In enableNewDbRules() failed to enable rule %v", rule)
			return err0
		}
	}

	return nil
}
Example No. 14
// createNewDbRules is a helper method that puts a list of firewall rules
// in a firewall storage.
func (i IPTsaveFirewall) createNewDbRules(ruleList []*IPtablesRule) error {

	for ruleNum, _ := range ruleList {
		rule := ruleList[ruleNum]
		log.Tracef(trace.Inside, "In createNewDbRules() storing rule %p", rule)
		err0 := i.Store.ensureIPtablesRule(rule)
		if err0 != nil {
			log.Errorf("In createNewDbRules() failed to store rule %v", rule)
			return err0
		}
	}

	return nil
}
Example No. 15
// handleNetworkPolicyEvents handles a batch of network policy events by creating or deleting romana policies.
func handleNetworkPolicyEvents(events []Event, l *KubeListener) {
	// TODO optimise deletion: search the policy by name/id
	// and delete by id rather than sending the full policy body.
	// Stas.
	var deleteEvents []v1beta1.NetworkPolicy
	var createEvents []v1beta1.NetworkPolicy

	for _, event := range events {
		switch event.Type {
		case KubeEventAdded:
			createEvents = append(createEvents, *event.Object.(*v1beta1.NetworkPolicy))
		case KubeEventDeleted:
			deleteEvents = append(deleteEvents, *event.Object.(*v1beta1.NetworkPolicy))
		default:
			log.Tracef(trace.Inside, "Ignoring %s event in handleNetworkPolicyEvents", event.Type)
		}
	}

	// Translate new network policies into romana policies.
	createPolicyList, kubePolicy, err := PTranslator.Kube2RomanaBulk(createEvents)
	if err != nil {
		log.Errorf("Not all kubernetes policies could be translated to Romana policies. Attempted %d, success %d, fail %d, error %s", len(createEvents), len(createPolicyList), len(kubePolicy), err)
	}
	for kn, _ := range kubePolicy {
		log.Errorf("Failed to translate kubernetes policy %v", kubePolicy[kn])
	}

	// Create new policies.
	for pn, _ := range createPolicyList {
		err = l.addNetworkPolicy(createPolicyList[pn])
		if err != nil {
			log.Errorf("Error adding policy with Kubernetes ID %s: %s", createPolicyList[pn].ExternalID, err)
		}
	}

	// Delete old policies.
	for _, policy := range deleteEvents {
		// policy name is derived as below in translator and thus use the
		// same technique to derive the policy name here for deleting it.
		policyName := fmt.Sprintf("kube.%s.%s", policy.ObjectMeta.Namespace, policy.ObjectMeta.Name)
		// TODO this must be changed to use External ID
		err = l.deleteNetworkPolicy(common.Policy{Name: policyName})
		if err != nil {
			log.Errorf("Error deleting policy %s (%s): %s", policyName, policy.GetUID(), err)
		}
	}
}
Example No. 16
// waitForIface waits for network interface to become available in the system.
func (h Helper) waitForIface(expectedIface string) bool {
	for i := 0; i <= h.Agent.waitForIfaceTry; i++ {
		log.Tracef(trace.Inside, "Helper: Waiting for interface %s, attempt %d", expectedIface, i)
		ifaceList, err := net.Interfaces()
		log.Trace(trace.Inside, "Helper: Checking interface list for", expectedIface)
		if err != nil {
			log.Warn("Warning: Helper: failed to read net.Interfaces()")
		}
		for iface := range ifaceList {
			if ifaceList[iface].Name == expectedIface {
				return true
			}
		}
		time.Sleep(10 * time.Second)
	}
	return false
}
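
waitForIface is an instance of a generic poll-with-retry loop. Here is a hedged sketch of that loop on its own, with a pluggable check function; the condition used in main (looking for the loopback interface) is just a placeholder.

package main

import (
	"fmt"
	"net"
	"time"
)

// waitFor polls check up to attempts times, sleeping between tries,
// and reports whether the condition was ever satisfied.
func waitFor(attempts int, delay time.Duration, check func() bool) bool {
	for i := 0; i < attempts; i++ {
		if check() {
			return true
		}
		time.Sleep(delay)
	}
	return false
}

func main() {
	ok := waitFor(3, 100*time.Millisecond, func() bool {
		ifaces, err := net.Interfaces()
		if err != nil {
			return false
		}
		for _, iface := range ifaces {
			if iface.Name == "lo" { // placeholder condition
				return true
			}
		}
		return false
	})
	fmt.Println("interface found:", ok)
}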
Example No. 17
// MergeUserChains merges rules from the source chain into the destination chain,
// producing a list of rules that combines rules from both chains with their order
// preserved as much as possible.
func MergeUserChains(dstChain, srcChain *IPchain) []*IPrule {
	var retRules []*IPrule
	dstLen := len(dstChain.Rules)
	srcLen := len(srcChain.Rules)

	// if one chain is empty then the other chain is the result of the merge
	if srcLen == 0 {
		return dstChain.Rules
	}
	if dstLen == 0 {
		return srcChain.Rules
	}

	maxLen := 0
	if dstLen <= srcLen {
		maxLen = srcLen
	} else {
		maxLen = dstLen
	}

	// The merge strategy is to walk both rule lists at the same time and compare the rules at each position;
	// if the rules match, only one of them is added to the result, otherwise both are.
	for i := 0; i < maxLen; i++ {
		if i < dstLen && i < srcLen {
			log.Tracef(trace.Inside, "In MergeUserChains, counter=%d, src chain len=%d, dst chain len=%d", i, srcLen, dstLen)
			if dstChain.Rules[i].String() == srcChain.Rules[i].String() {
				retRules = append(retRules, dstChain.Rules[i])
			} else {
				retRules = append(retRules, dstChain.Rules[i])
				retRules = append(retRules, srcChain.Rules[i])

			}
		} else if i < dstLen {
			retRules = append(retRules, dstChain.Rules[i])
		} else if i < srcLen {
			retRules = append(retRules, srcChain.Rules[i])
		} else {
			// Should never get here.
			panic(fmt.Sprintf("Unexpected state in MergeUserChains, counter=%d, source len=%d, dst len=%d", i, srcLen, dstLen))
		}
	}

	return retRules
}
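
To make the positional merge strategy concrete without the IPchain/IPrule types, here is a simplified sketch over plain string slices: walk both lists in lockstep, keep a single copy when the entries at a position match, keep both otherwise, and append whatever remains of the longer list.

package main

import "fmt"

// mergeByPosition combines two rule lists, deduplicating entries that
// appear at the same position in both and preserving the original order.
func mergeByPosition(dst, src []string) []string {
	var out []string
	for i := 0; i < len(dst) || i < len(src); i++ {
		switch {
		case i < len(dst) && i < len(src):
			if dst[i] == src[i] {
				out = append(out, dst[i])
			} else {
				out = append(out, dst[i], src[i])
			}
		case i < len(dst):
			out = append(out, dst[i])
		default:
			out = append(out, src[i])
		}
	}
	return out
}

func main() {
	dst := []string{"-s 10.0.0.0/8 -j ACCEPT", "-j DROP"}
	src := []string{"-s 10.0.0.0/8 -j ACCEPT", "-j LOG", "-j DROP"}
	fmt.Println(mergeByPosition(dst, src))
}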
Example No. 18
// vmDownHandler handles HTTP requests for endpoints teardown.
func (a *Agent) vmDownHandler(input interface{}, ctx common.RestContext) (interface{}, error) {
	log.Tracef(trace.Private, "In vmDownHandler() with %T %v", input, input)
	netif := input.(*NetIf)
	if netif.Name == "" {
		// This is a request from the OpenStack Mech driver, which does not
		// provide a name; find the interface by its MAC address.
		err := a.store.findNetIf(netif)
		if err != nil {
			return nil, err
		}
	}
	log.Infof("Agent: Provisioning DHCP for %s, IP %s Mac %s\n", netif.Name, netif.IP, netif.Mac)

	if err := a.leaseFile.provisionLease(netif, leaseRemove); err != nil {
		log.Error(agentError(err))
		return "Error removing DHCP lease", agentError(err)
	}

	// We need a new firewall instance here to use its Cleanup()
	// to uninstall firewall rules related to the endpoint.
	fw, err := firewall.NewFirewall(a.getFirewallType())
	if err != nil {
		return nil, err
	}

	err = fw.Init(a.Helper.Executor, a.store, a.networkConfig)
	if err != nil {
		return nil, err
	}

	err = fw.Cleanup(netif)
	if err != nil {
		return nil, err
	}
	err = a.store.deleteNetIf(netif)
	if err != nil {
		return nil, err
	}
	return "OK", nil
}
Example No. 19
// translateTarget analyzes kubePolicy and fills the romanaPolicy.AppliedTo field.
func (tg *TranslateGroup) translateTarget(translator *Translator) error {

	// Translate kubernetes namespace into romana tenant. Must be defined.
	tenantCacheEntry := translator.checkTenantInCache(tg.kubePolicy.ObjectMeta.Namespace)
	if tenantCacheEntry == nil {
		log.Errorf("Tenant not found when translating policy %v", tg.romanaPolicy)
		return TranslatorError{ErrorTenantNotInCache, nil}
	}

	// Empty PodSelector means policy applied to the entire namespace.
	if len(tg.kubePolicy.Spec.PodSelector.MatchLabels) == 0 {
		tg.romanaPolicy.AppliedTo = []common.Endpoint{
			common.Endpoint{TenantID: tenantCacheEntry.Tenant.ID, TenantExternalID: tenantCacheEntry.Tenant.ExternalID},
		}

		log.Tracef(trace.Inside, "Segment was not specified in policy %v, assuming target is a namespace", tg.kubePolicy)
		return nil
	}

	// If PodSelector is not empty then segment label must be defined.
	kubeSegmentID, ok := tg.kubePolicy.Spec.PodSelector.MatchLabels[translator.segmentLabelName]
	if !ok || kubeSegmentID == "" {
		log.Errorf("Expected segment to be specified in podSelector part as %s", translator.segmentLabelName)
		return common.NewError("Expected segment to be specified in podSelector part as '%s'", translator.segmentLabelName)
	}

	// Translate kubernetes segment label into romana segment.
	segment, err := translator.getOrAddSegment(tg.kubePolicy.ObjectMeta.Namespace, kubeSegmentID)
	if err != nil {
		log.Errorf("Error in translate while calling l.getOrAddSegment with %s and %s - error %s", tg.kubePolicy.ObjectMeta.Namespace, kubeSegmentID, err)
		return err
	}

	tg.romanaPolicy.AppliedTo = []common.Endpoint{
		common.Endpoint{TenantID: tenantCacheEntry.Tenant.ID, TenantExternalID: tenantCacheEntry.Tenant.ExternalID, SegmentID: segment.ID},
	}

	return nil
}
Example No. 20
// Cleanup implements Firewall interface.
func (i *IPTsaveFirewall) Cleanup(netif FirewallEndpoint) error {

	// Delete netif-related rules from the firewall store and schedule
	// them for deletion from the current state.
	// TODO it is possible that someone will make a chain
	// with a name that matches the interface name, and this call
	// will delete all rules from such a chain.
	// This is very unlikely but still should be
	// addressed just in case. Stas.
	err := i.deleteIPtablesRulesBySubstring(netif.GetName())
	if err != nil {
		return err
	}

	// Delete netif related rules from current state.
	err = i.applyRules(i.DesiredState)
	if err != nil {
		return err
	}

	log.Tracef(trace.Inside, "In Cleanup \n%s", i.DesiredState.Render())
	return nil
}
Example No. 21
// handleAnnotations processes annotations on a namespace by implementing the extra features they request.
func handleAnnotations(o *v1.Namespace, l *KubeListener) {
	log.Tracef(trace.Private, "In handleAnnotations")

	// We only care about one annotation for now.
	HandleDefaultPolicy(o, l)
}
Example No. 22
// makeNextIngressPeer analyzes the current Ingress rule and adds a new Peer to romanaPolicy.Peers.
func (tg *TranslateGroup) makeNextIngressPeer(translator *Translator) error {
	ingress := tg.kubePolicy.Spec.Ingress[tg.ingressIndex]

	for _, fromEntry := range ingress.From {
		tenantCacheEntry := &TenantCacheEntry{}

		// Exactly one of From.PodSelector or From.NamespaceSelector must be specified.
		if fromEntry.PodSelector == nil && fromEntry.NamespaceSelector == nil {
			log.Errorf("Either PodSelector or NamespaceSelector must be specified")
			return common.NewError("Either PodSelector or NamespaceSelector must be specified")
		} else if fromEntry.PodSelector != nil && fromEntry.NamespaceSelector != nil {
			log.Errorf("Exactly one of PodSelector or NamespaceSelector must be specified")
			return common.NewError("Exactly one of PodSelector or NamespaceSelector must be specified")
		}

		// This ingress field matches a namespace, which will be our source tenant.
		if fromEntry.NamespaceSelector != nil {
			tenantName, ok := fromEntry.NamespaceSelector.MatchLabels[translator.tenantLabelName]
			if !ok || tenantName == "" {
				log.Errorf("Expected tenant name to be specified in NamespaceSelector field with a key %s", translator.tenantLabelName)
				return common.NewError("Expected tenant name to be specified in NamespaceSelector field with a key %s", translator.tenantLabelName)
			}

			tenantCacheEntry = translator.checkTenantInCache(tenantName)
			if tenantCacheEntry == nil {
				log.Errorf("Tenant not found when translating policy %v", tg.romanaPolicy)
				return TranslatorError{ErrorTenantNotInCache, nil}
			}

			// Found a source tenant; register it as a romana Peer.
			tg.romanaPolicy.Ingress[tg.ingressIndex].Peers = append(tg.romanaPolicy.Ingress[tg.ingressIndex].Peers,
				common.Endpoint{TenantID: tenantCacheEntry.Tenant.ID, TenantExternalID: tenantCacheEntry.Tenant.ExternalID})
		}

		// This ingress field matches a segment; the source tenant is the same as the target tenant.
		if fromEntry.PodSelector != nil {

			// Check if source/target tenant in cache.
			tenantCacheEntry = translator.checkTenantInCache(tg.kubePolicy.ObjectMeta.Namespace)
			if tenantCacheEntry == nil {
				log.Errorf("Tenant not found when translating policy %v", tg.romanaPolicy)
				return TranslatorError{ErrorTenantNotInCache, nil}
			}

			// If podSelector is empty match all traffic from the tenant.
			if len(fromEntry.PodSelector.MatchLabels) == 0 {
				tg.romanaPolicy.Ingress[tg.ingressIndex].Peers = append(tg.romanaPolicy.Ingress[tg.ingressIndex].Peers,
					common.Endpoint{TenantID: tenantCacheEntry.Tenant.ID, TenantExternalID: tenantCacheEntry.Tenant.ExternalID})

				log.Tracef(trace.Inside, "No segment specified when translating ingress rule %v", tg.kubePolicy.Spec.Ingress[tg.ingressIndex])
				return nil
			}

			// Get segment name from podSelector.
			kubeSegmentID, ok := fromEntry.PodSelector.MatchLabels[translator.segmentLabelName]
			if !ok || kubeSegmentID == "" {
				log.Errorf("Expected segment to be specified in podSelector part as %s", translator.segmentLabelName)
				return common.NewError("Expected segment to be specified in podSelector part as '%s'", translator.segmentLabelName)
			}

			// Translate kubernetes segment name into romana segment.
			segment, err := translator.getOrAddSegment(tenantCacheEntry.Tenant.Name, kubeSegmentID)
			if err != nil {
				log.Errorf("Error in translate while calling l.getOrAddSegment with %s and %s - error %s", tenantCacheEntry.Tenant.Name, kubeSegmentID, err)
				return err
			}

			// Register source tenant/segment as a romana Peer.
			tg.romanaPolicy.Ingress[tg.ingressIndex].Peers = append(tg.romanaPolicy.Ingress[tg.ingressIndex].Peers,
				common.Endpoint{TenantID: tenantCacheEntry.Tenant.ID, TenantExternalID: tenantCacheEntry.Tenant.ExternalID, SegmentID: segment.ID})
		}

	}
	return nil
}
Example No. 23
// parseItem installs given item in its appropriate place in IPtables.Tables tree.
func (i *IPtables) parseItem(item Item) {
	switch item.Type {
	case itemComment:
		// Ignore comment items.
		return
	case itemTable:
		// If item is a table, initialize a new IPtable.
		i.Tables = append(i.Tables, &IPtable{Name: item.Body})
	case itemChain:
		// If item is a chain, add a new chain to the last table.
		table := i.lastTable()
		if table == nil {
			panic("Chain before table")
		} // TODO crash here

		log.Tracef(trace.Inside, "In ParseItem adding chain %s to the table %s", item.Body, table.Name)

		table.Chains = append(table.Chains, &IPchain{Name: item.Body})
	case itemChainPolicy:
		// If item is a chain policy, set a policy for the last chain.
		table := i.lastTable()

		log.Tracef(trace.Inside, "In ParseItem table %s has %d chains", table.Name, len(table.Chains))

		chain := table.lastChain()
		if table == nil || chain == nil {
			panic("Chain policy before table/chain")
		} // TODO crash here

		chain.Policy = item.Body
	case itemChainCounter:
		// If item is a chain counter, set a chain counter for the last chain.
		table := i.lastTable()
		chain := table.lastChain()
		if table == nil || chain == nil {
			panic("Chain counter before table/chain")
		} // TODO crash here

		chain.Counters = item.Body
	case itemCommit:
		// Ignore COMMIT items.
		return // TODO, ignored for now, should probably be in the model
	case itemRule:
		// If item is a rule, add a new rule to the proper chain,
		// and initialize i.currentRule.
		table := i.lastTable()
		chain := table.ChainByName(item.Body)
		if table == nil || chain == nil {
			panic("Rule before table/chain")
		} // TODO crash here

		newRule := new(IPrule)
		chain.Rules = append(chain.Rules, newRule)

		i.currentRule = newRule
	case itemRuleMatch:
		// If item is a rule match, add new match to the current rule.
		if i.currentRule == nil {
			panic("RuleMatch before table/chain/rule")
		} // TODO crash here

		i.currentRule.Match = append(i.currentRule.Match, &Match{Body: item.Body})
	case itemAction:
		// If item is a rule action, add a new target to the current rule.
		if i.currentRule == nil {
			panic("Action before table/chain/rule")
		} // TODO crash here
		i.currentRule.Action = IPtablesAction{Body: item.Body}

	}

	return
}
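
parseItem builds the table/chain/rule tree by always attaching new items to the most recently added container. A stripped-down sketch of that "attach to the last container" idea with toy types (section and document are illustrative stand-ins for IPtable and IPtables, not the iptsave types):

package main

import "fmt"

type section struct {
	name    string
	entries []string
}

type document struct {
	sections []*section
}

// lastSection returns the most recently added section, or nil if none exist yet.
func (d *document) lastSection() *section {
	if len(d.sections) == 0 {
		return nil
	}
	return d.sections[len(d.sections)-1]
}

// parseToken attaches each token to the tree: "*name" starts a section,
// anything else becomes an entry of the current section.
func (d *document) parseToken(tok string) {
	if len(tok) > 0 && tok[0] == '*' {
		d.sections = append(d.sections, &section{name: tok[1:]})
		return
	}
	s := d.lastSection()
	if s == nil {
		panic("entry before section") // mirrors the "Chain before table" case above
	}
	s.entries = append(s.entries, tok)
}

func main() {
	d := &document{}
	for _, tok := range []string{"*filter", "INPUT", "FORWARD", "*nat", "PREROUTING"} {
		d.parseToken(tok)
	}
	for _, s := range d.sections {
		fmt.Println(s.name, s.entries)
	}
}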
Example No. 24
// makeDivertRules creates iptables "filter" table with rules to divert traffic
// to/from given endpoint into romana chains.
func makeDivertRules(netif FirewallEndpoint) *iptsave.IPtable {
	log.Tracef(trace.Private, "In makeDivertRules() with %s", netif.GetName())
	divertTable := iptsave.IPtable{
		Name: "filter",
		Chains: []*iptsave.IPchain{
			&iptsave.IPchain{
				Name:   "INPUT",
				Policy: "-",
				Rules: []*iptsave.IPrule{
					&iptsave.IPrule{
						Action: iptsave.IPtablesAction{
							Body: ChainNameEndpointToHost,
						},
						Match: []*iptsave.Match{
							&iptsave.Match{
								Negated: false,
								Body:    "-i " + netif.GetName(),
							},
						},
					},
				},
			},
			&iptsave.IPchain{
				Name:   "OUTPUT",
				Policy: "-",
				Rules: []*iptsave.IPrule{
					&iptsave.IPrule{
						Action: iptsave.IPtablesAction{
							Body: ChainNameHostToEndpoint,
						},
						Match: []*iptsave.Match{
							&iptsave.Match{
								Negated: false,
								Body:    "-o " + netif.GetName(),
							},
						},
					},
				},
			},
			&iptsave.IPchain{
				Name:   "FORWARD",
				Policy: "-",
				Rules: []*iptsave.IPrule{
					&iptsave.IPrule{
						Action: iptsave.IPtablesAction{
							Body: ChainNameEndpointEgress,
						},
						Match: []*iptsave.Match{
							&iptsave.Match{
								Negated: false,
								Body:    "-i " + netif.GetName(),
							},
						},
					},
					&iptsave.IPrule{
						Action: iptsave.IPtablesAction{
							Body: ChainNameEndpointIngress,
						},
						Match: []*iptsave.Match{
							&iptsave.Match{
								Negated: false,
								Body:    "-o " + netif.GetName(),
							},
						},
					},
				},
			},
			&iptsave.IPchain{
				Name:   ChainNameEndpointToHost,
				Policy: "-",
			},
			/* Skipping this chain because it is similar to ChainNameEndpointIngress
			&iptsave.IPchain{
				Name: ChainNameHostToEndpoint,
				Policy: "-",
			},
			*/
			&iptsave.IPchain{
				Name:   ChainNameEndpointEgress,
				Policy: "-",
			},
			&iptsave.IPchain{
				Name:   ChainNameEndpointIngress,
				Policy: "-",
			},
		},
	}

	return &divertTable

}
Example No. 25
// identifyCurrentHost discovers network configuration
// of the host we are running on.
// We need to know public IP and Romana gateway IP of the current host.
// This is done by matching current host IP addresses against what topology
// service thinks the host address is.
// If no match is found, we assume we are running on a host that is not
// part of the Romana setup and return an error.
func (a Agent) identifyCurrentHost() error {
	client, err := common.NewRestClient(common.GetRestClientConfig(a.config))

	if err != nil {
		return agentError(err)
	}
	topologyURL, err := client.GetServiceUrl("topology")
	if err != nil {
		return agentError(err)
	}
	index := common.IndexResponse{}
	err = client.Get(topologyURL, &index)
	if err != nil {
		return agentError(err)
	}
	dcURL := index.Links.FindByRel("datacenter")
	a.networkConfig.dc = common.Datacenter{}
	err = client.Get(dcURL, &a.networkConfig.dc)
	if err != nil {
		return agentError(err)
	}

	hostURL := index.Links.FindByRel("host-list")
	hosts := []common.Host{}
	err = client.Get(hostURL, &hosts)
	if err != nil {
		return agentError(err)
	}
	log.Trace(trace.Inside, "Retrieved hosts list, found", len(hosts), "hosts")

	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return err
	}

	log.Tracef(trace.Inside, "Searching %d interfaces for a matching host configuration: %v", len(addrs), addrs)

	// Find an interface that matches a Romana CIDR
	// and store that interface's IP address.
	// It will be used when configuring iptables and routes to tap interfaces.
	for i, host := range hosts {
		_, romanaCIDR, err := net.ParseCIDR(host.RomanaIp)
		if err != nil {
			log.Tracef(trace.Inside, "Unable to parse '%s' (%s). Skipping.", host.RomanaIp, err)
			continue
		}
		for _, addr := range addrs {
			ipnet, ok := addr.(*net.IPNet)
			if !ok {
				continue
			}
			if romanaCIDR.Contains(ipnet.IP) {
				// Check that it's the same subnet size
				s1, _ := romanaCIDR.Mask.Size()
				s2, _ := ipnet.Mask.Size()
				if s1 != s2 {
					continue
				}
				// OK, we're happy with this result
				a.networkConfig.romanaGW = ipnet.IP
				a.networkConfig.romanaGWMask = ipnet.Mask
				// Retain the other hosts that were listed.
				// This will be used for creating inter-host routes.
				a.networkConfig.otherHosts = append(a.networkConfig.otherHosts, hosts[0:i]...)
				a.networkConfig.otherHosts = append(a.networkConfig.otherHosts, hosts[i+1:]...)
				log.Trace(trace.Inside, "Found match for CIDR", romanaCIDR, "using address", ipnet.IP)
				return nil
			}
		}
	}
	return agentErrorString("Unable to find interface matching any Romana CIDR")
}
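
The core of identifyCurrentHost is matching local interface addresses against a CIDR that has the same prefix length. A self-contained sketch of just that check; the hard-coded 10.0.0.0/8 stands in for the CIDR the topology service would return.

package main

import (
	"fmt"
	"net"
)

// findLocalAddrIn returns the first local interface address that falls
// inside cidr and has the same prefix length, or nil if none matches.
func findLocalAddrIn(cidr *net.IPNet) *net.IPNet {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return nil
	}
	want, _ := cidr.Mask.Size()
	for _, addr := range addrs {
		ipnet, ok := addr.(*net.IPNet)
		if !ok {
			continue
		}
		got, _ := ipnet.Mask.Size()
		if cidr.Contains(ipnet.IP) && got == want {
			return ipnet
		}
	}
	return nil
}

func main() {
	_, cidr, _ := net.ParseCIDR("10.0.0.0/8") // illustrative value
	if match := findLocalAddrIn(cidr); match != nil {
		fmt.Println("matched local address:", match)
	} else {
		fmt.Println("no local address inside", cidr)
	}
}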
Example No. 26
func prepareFirewallRules(fw firewall.Firewall, nc *NetworkConfig, rules RuleSet, firewallProvider firewall.Provider) error {
	metadata := fw.Metadata()

	var defaultRules []firewall.FirewallRule
	var u32filter string = metadata["u32filter"].(string)
	var hostAddr = nc.RomanaGW()
	var formatBody string

	switch firewallProvider {
	case firewall.ShellexProvider:
		var chainNames []string = metadata["chains"].([]string)

		for _, rule := range rules {
			log.Tracef(trace.Inside, "In prepareFirewallRules(), with %v", rule)

			var currentChain string
			switch rule.Direction {
			case EgressLocalDirection:
				currentChain = chainNames[firewall.InputChainIndex]
			case EgressGlobalDirection:
				currentChain = chainNames[firewall.ForwardOutChainIndex]
			case IngressGlobalDirection:
				currentChain = chainNames[firewall.ForwardInChainIndex]
			default:
				return fmt.Errorf("Error, unsupported rule direction type with firewall provider %d", firewallProvider)
			}

			switch rule.Format {
			case FormatChain:
				formatBody = fmt.Sprintf(rule.Body, currentChain)
			case FormatChainHostU32TenantSegment:
				formatBody = fmt.Sprintf(rule.Body, currentChain, hostAddr, u32filter)
			default:
				return fmt.Errorf("Error, unsupported rule format type with firewall provider %d", firewallProvider)
			}

			r := firewall.NewFirewallRule()
			r.SetBody(formatBody)

			switch rule.Position {
			case DefaultPosition:
				defaultRules = append(defaultRules, r)
			default:
				return fmt.Errorf("Error, unsupported rule position with firewall provider %d", firewallProvider)
			}
		}
	case firewall.IPTsaveProvider:
		for _, rule := range rules {
			log.Tracef(trace.Inside, "In prepareFirewallRules(), with %v", rule)

			var currentChain string
			switch rule.Direction {
			case EgressLocalDirection:
				currentChain = firewall.ChainNameEndpointToHost
			case EgressGlobalDirection:
				currentChain = firewall.ChainNameEndpointEgress
			case IngressGlobalDirection:
				currentChain = firewall.ChainNameEndpointIngress
			default:
				return fmt.Errorf("Error, unsupported rule direction type with firewall provider %d", firewallProvider)
			}

			switch rule.Format {
			case FormatChain:
				formatBody = fmt.Sprintf(rule.Body, currentChain)
			case FormatChainHostU32TenantSegment:
				formatBody = fmt.Sprintf(rule.Body, currentChain, hostAddr, u32filter)
			default:
				return fmt.Errorf("Error, unsupported rule format type with firewall provider %d", firewallProvider)
			}

			r := firewall.NewFirewallRule()
			r.SetBody(formatBody)

			switch rule.Position {
			case TopPosition:
				fw.EnsureRule(r, firewall.EnsureFirst)
			case BottomPosition:
				fw.EnsureRule(r, firewall.EnsureLast)
			default:
				return fmt.Errorf("Error, unsupported rule position with firewall provider %d", firewallProvider)
			}
		}
	default:
		return fmt.Errorf("Error, unsupported firewall provider type when preparing firewall rules")
	}

	return nil
}