Example #1
func (proxy *ovsProxyPlugin) Start(baseHandler pconfig.EndpointsConfigHandler) error {
	glog.Infof("Starting multitenant SDN proxy endpoint filter")

	proxy.baseEndpointsHandler = baseHandler

	// Populate pod info map synchronously so that kube proxy can filter endpoints to support isolation
	pods, err := proxy.registry.GetAllPods()
	if err != nil {
		return err
	}

	policies, err := proxy.registry.GetEgressNetworkPolicies()
	if err != nil {
		return fmt.Errorf("Could not get EgressNetworkPolicies: %s", err)
	}
	for _, policy := range policies {
		proxy.updateNetworkPolicy(policy)
	}

	for _, pod := range pods {
		proxy.trackPod(&pod)
	}

	go utilwait.Forever(proxy.watchPods, 0)
	go utilwait.Forever(proxy.watchEgressNetworkPolicies, 0)

	return nil
}
Example #2
func (master *OsdnMaster) SubnetStartMaster(clusterNetwork *net.IPNet, hostSubnetLength uint32) error {
	subrange := make([]string, 0)
	subnets, err := master.osClient.HostSubnets().List(kapi.ListOptions{})
	if err != nil {
		log.Errorf("Error in initializing/fetching subnets: %v", err)
		return err
	}
	for _, sub := range subnets.Items {
		subrange = append(subrange, sub.Subnet)
		if err = master.networkInfo.validateNodeIP(sub.HostIP); err != nil {
			// Don't error out; just warn so the error can be corrected with 'oc'
			log.Errorf("Failed to validate HostSubnet %s: %v", hostSubnetToString(&sub), err)
		} else {
			log.Infof("Found existing HostSubnet %s", hostSubnetToString(&sub))
		}
	}

	master.subnetAllocator, err = netutils.NewSubnetAllocator(clusterNetwork.String(), hostSubnetLength, subrange)
	if err != nil {
		return err
	}

	go utilwait.Forever(master.watchNodes, 0)
	go utilwait.Forever(master.watchSubnets, 0)
	return nil
}
Example #3
// Run begins watching and syncing.
func (c *RouterController) Run() {
	glog.V(4).Info("Running router controller")
	if c.Namespaces != nil {
		c.HandleNamespaces()
		go utilwait.Forever(c.HandleNamespaces, c.NamespaceSyncInterval)
	}
	go utilwait.Forever(c.HandleRoute, 0)
	go utilwait.Forever(c.HandleEndpoints, 0)
}
Example #4
func (master *OsdnMaster) VnidStartMaster() error {
	err := master.vnids.populateVNIDs(master.osClient)
	if err != nil {
		return err
	}

	go utilwait.Forever(master.watchNamespaces, 0)
	go utilwait.Forever(master.watchNetNamespaces, 0)
	return nil
}
Example #5
func (oc *OsdnController) VnidStartNode() error {
	// Populate vnid map synchronously so that existing services can fetch vnid
	err := populateVNIDMap(oc)
	if err != nil {
		return err
	}

	go utilwait.Forever(oc.watchNetNamespaces, 0)
	go utilwait.Forever(oc.watchServices, 0)
	return nil
}
Example #6
func (node *OsdnNode) VnidStartNode() error {
	// Populate vnid map synchronously so that existing services can fetch vnid
	err := node.vnids.PopulateVNIDs(node.registry)
	if err != nil {
		return err
	}

	go utilwait.Forever(node.watchNetNamespaces, 0)
	go utilwait.Forever(node.watchServices, 0)
	return nil
}
Example #7
func (oc *OsdnController) SubnetStartMaster(clusterNetwork *net.IPNet, hostSubnetLength uint) error {
	subrange := make([]string, 0)
	subnets, err := oc.Registry.GetSubnets()
	if err != nil {
		log.Errorf("Error in initializing/fetching subnets: %v", err)
		return err
	}
	for _, sub := range subnets {
		subrange = append(subrange, sub.Subnet)
		if err := oc.validateNode(sub.HostIP); err != nil {
			// Don't error out; just warn so the error can be corrected with 'oc'
			log.Errorf("Failed to validate HostSubnet %s: %v", err)
		} else {
			log.Infof("Found existing HostSubnet %s", HostSubnetToString(&sub))
		}
	}

	oc.subnetAllocator, err = netutils.NewSubnetAllocator(clusterNetwork.String(), hostSubnetLength, subrange)
	if err != nil {
		return err
	}

	go utilwait.Forever(oc.watchNodes, 0)
	return nil
}
Example #8
0
func newNamer(kubeClient *client.Client, clusterName string) (*utils.Namer, error) {
	name, err := getClusterUID(kubeClient, clusterName)
	if err != nil {
		return nil, err
	}

	namer := utils.NewNamer(name)
	vault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)

	// Start a goroutine to poll the cluster UID config map
	// We don't watch because we know exactly which configmap we want and this
	// controller already watches 5 other resources, so it isn't worth the cost
	// of another connection and complexity.
	go wait.Forever(func() {
		uid, found, err := vault.Get()
		existing := namer.GetClusterName()
		if found && uid != existing {
			glog.Infof("Cluster uid changed from %v -> %v", existing, uid)
			namer.SetClusterName(uid)
		} else if err != nil {
			glog.Errorf("Failed to reconcile cluster uid %v, currently set to %v", err, existing)
		}
	}, 5*time.Second)
	return namer, nil
}
Example #9
0
// Start the CNIServer's local HTTP server on a root-owned Unix domain socket.
// requestFunc will be called to handle pod setup/teardown operations on each
// request to the CNIServer's HTTP server, and should return a PodResult
// when the operation has completed.
func (s *CNIServer) Start(requestFunc cniRequestFunc) error {
	if requestFunc == nil {
		return fmt.Errorf("no pod request handler")
	}
	s.requestFunc = requestFunc

	// Remove and re-create the socket directory with root-only permissions
	dirName := path.Dir(s.path)
	if err := os.RemoveAll(dirName); err != nil {
		return fmt.Errorf("failed to removing old pod info socket: %v", err)
	}
	if err := os.MkdirAll(dirName, 0700); err != nil {
		return fmt.Errorf("failed to create pod info socket directory: %v", err)
	}

	// On Linux the socket is created with the permissions of the directory
	// it is in, so as long as the directory is root-only we can avoid
	// racy umask manipulation.
	l, err := net.Listen("unix", s.path)
	if err != nil {
		return fmt.Errorf("failed to listen on pod info socket: %v", err)
	}
	if err := os.Chmod(s.path, 0600); err != nil {
		l.Close()
		return fmt.Errorf("failed to set pod info socket mode: %v", err)
	}

	s.SetKeepAlivesEnabled(false)
	go utilwait.Forever(func() {
		if err := s.Serve(l); err != nil {
			utilruntime.HandleError(fmt.Errorf("CNI server Serve() failed: %v", err))
		}
	}, 0)
	return nil
}
Example #10
func (plugin *OsdnNode) SetupEgressNetworkPolicy() error {
	policies, err := plugin.osClient.EgressNetworkPolicies(kapi.NamespaceAll).List(kapi.ListOptions{})
	if err != nil {
		return fmt.Errorf("could not get EgressNetworkPolicies: %s", err)
	}

	for _, policy := range policies.Items {
		vnid, err := plugin.vnids.GetVNID(policy.Namespace)
		if err != nil {
			glog.Warningf("Could not find netid for namespace %q: %v", policy.Namespace, err)
			continue
		}
		plugin.egressPolicies[vnid] = append(plugin.egressPolicies[vnid], policy)
	}

	for vnid := range plugin.egressPolicies {
		err := plugin.updateEgressNetworkPolicyRules(vnid)
		if err != nil {
			return err
		}
	}

	go utilwait.Forever(plugin.watchEgressNetworkPolicies, 0)
	return nil
}
Example #11
// CreateAwsManager constructs awsManager object.
func CreateAwsManager(configReader io.Reader) (*AwsManager, error) {
	if configReader != nil {
		var cfg provider_aws.AWSCloudConfig
		if err := gcfg.ReadInto(&cfg, configReader); err != nil {
			glog.Errorf("Couldn't read config: %v", err)
			return nil, err
		}
	}

	service := autoscaling.New(session.New())
	manager := &AwsManager{
		asgs:     make([]*asgInformation, 0),
		service:  service,
		asgCache: make(map[AwsRef]*Asg),
	}

	go wait.Forever(func() {
		manager.cacheMutex.Lock()
		defer manager.cacheMutex.Unlock()
		if err := manager.regenerateCache(); err != nil {
			glog.Errorf("Error while regenerating Asg cache: %v", err)
		}
	}, time.Hour)

	return manager, nil
}
Example #12
// Run starts the healthchecker main loop
func Run() {
	healthchecker = proxyHealthCheckFactory()
	// Wrap with a wait.Forever to handle panics.
	go wait.Forever(func() {
		healthchecker.handlerLoop()
	}, 0)
}
Example #13
// Run starts the loop to keep the status in sync
func (s statusSync) Run(stopCh <-chan struct{}) {
	go wait.Forever(s.elector.Run, 0)
	go s.run()

	go s.syncQueue.Run(time.Second, stopCh)

	<-stopCh
}
Example #14
// Run starts an http server for the static assets listening on the configured
// bind address
func (c *AssetConfig) Run() {
	publicURL, err := url.Parse(c.Options.PublicURL)
	if err != nil {
		glog.Fatal(err)
	}

	mux := http.NewServeMux()
	err = c.addHandlers(mux)
	if err != nil {
		glog.Fatal(err)
	}

	if publicURL.Path != "/" {
		mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
			http.Redirect(w, req, publicURL.Path, http.StatusFound)
		})
	}

	timeout := c.Options.ServingInfo.RequestTimeoutSeconds
	if timeout == -1 {
		timeout = 0
	}

	server := &http.Server{
		Addr:           c.Options.ServingInfo.BindAddress,
		Handler:        mux,
		ReadTimeout:    time.Duration(timeout) * time.Second,
		WriteTimeout:   time.Duration(timeout) * time.Second,
		MaxHeaderBytes: 1 << 20,
	}

	isTLS := configapi.UseTLS(c.Options.ServingInfo.ServingInfo)

	go utilwait.Forever(func() {
		if isTLS {
			extraCerts, err := configapi.GetNamedCertificateMap(c.Options.ServingInfo.NamedCertificates)
			if err != nil {
				glog.Fatal(err)
			}
			server.TLSConfig = crypto.SecureTLSConfig(&tls.Config{
				// Set SNI certificate func
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			})
			glog.Infof("Web console listening at https://%s", c.Options.ServingInfo.BindAddress)
			glog.Fatal(cmdutil.ListenAndServeTLS(server, c.Options.ServingInfo.BindNetwork, c.Options.ServingInfo.ServerCert.CertFile, c.Options.ServingInfo.ServerCert.KeyFile))
		} else {
			glog.Infof("Web console listening at http://%s", c.Options.ServingInfo.BindAddress)
			glog.Fatal(server.ListenAndServe())
		}
	}, 0)

	// Attempt to verify the server came up for 20 seconds (100 tries * 100ms, 100ms timeout per try)
	cmdutil.WaitForSuccessfulDial(isTLS, c.Options.ServingInfo.BindNetwork, c.Options.ServingInfo.BindAddress, 100*time.Millisecond, 100*time.Millisecond, 100)

	glog.Infof("Web console available at %s", c.Options.PublicURL)
}
Example #15
// Start eager background caching of volume stats.
func (s *fsResourceAnalyzer) Start() {
	s.startOnce.Do(func() {
		if s.calcPeriod <= 0 {
			glog.Info("Volume stats collection disabled.")
			return
		}
		glog.Info("Starting FS ResourceAnalyzer")
		go wait.Forever(func() { s.updateCachedPodVolumeStats() }, s.calcPeriod)
	})
}
Example #16
// RegisterMetricAndTrackRateLimiterUsage registers a metric ownerName_rate_limiter_use in prometheus to track
// how heavily the rateLimiter is used, and starts a goroutine that updates this metric every updatePeriod
func RegisterMetricAndTrackRateLimiterUsage(ownerName string, rateLimiter flowcontrol.RateLimiter) error {
	err := registerRateLimiterMetric(ownerName)
	if err != nil {
		return err
	}
	go wait.Forever(func() {
		metricsLock.Lock()
		defer metricsLock.Unlock()
		rateLimiterMetrics[ownerName].Set(rateLimiter.Saturation())
	}, updatePeriod)
	return nil
}
Example #17
// Start eager background caching of volume stats.
func (s *fsResourceAnalyzer) Start() {
	if s.calcVolumePeriod <= 0 {
		glog.Info("Volume stats collection disabled.")
		return
	}
	glog.Info("Starting FS ResourceAnalyzer")
	go wait.Forever(func() {
		startTime := time.Now()
		s.updateCachedPodVolumeStats()
		glog.V(3).Infof("Finished calculating volume stats in %v.", time.Now().Sub(startTime))
		metrics.MetricsVolumeCalcLatency.Observe(metrics.SinceInMicroseconds(startTime))
	}, s.calcVolumePeriod)
}
Example #18
func (node *OsdnNode) SubnetStartNode(mtu uint32) (bool, error) {
	err := node.initSelfSubnet()
	if err != nil {
		return false, err
	}

	networkChanged, err := node.SetupSDN(node.localSubnet.Subnet, node.networkInfo.ClusterNetwork.String(), node.networkInfo.ServiceNetwork.String(), mtu)
	if err != nil {
		return false, err
	}

	go utilwait.Forever(node.watchSubnets, 0)
	return networkChanged, nil
}
Example #19
func newPortRangeAllocator(r net.PortRange, autoFill bool) PortAllocator {
	if r.Base == 0 || r.Size == 0 {
		panic("illegal argument: may not specify an empty port range")
	}
	ra := &rangeAllocator{
		PortRange: r,
		ports:     make(chan int, portsBufSize),
		rand:      rand.New(rand.NewSource(time.Now().UnixNano())),
	}
	if autoFill {
		go wait.Forever(func() { ra.fillPorts() }, nextFreePortCooldown)
	}
	return ra
}
Example #20
func (plugin *cniNetworkPlugin) Init(host network.Host, hairpinMode componentconfig.HairpinMode, nonMasqueradeCIDR string) error {
	var err error
	plugin.nsenterPath, err = plugin.execer.LookPath("nsenter")
	if err != nil {
		return err
	}

	plugin.host = host

	// sync network config from pluginDir periodically to detect network config updates
	go wait.Forever(func() {
		plugin.syncNetworkConfig()
	}, 10*time.Second)
	return nil
}
Example #21
func (n *NodeIPTables) Setup() error {
	if err := n.syncIPTableRules(); err != nil {
		return err
	}

	// If firewalld is running, reload will call this method
	n.ipt.AddReloadFunc(func() {
		if err := n.syncIPTableRules(); err != nil {
			glog.Errorf("Reloading openshift iptables failed: %v", err)
		}
	})

	go utilwait.Forever(n.syncLoop, 0)
	return nil
}
Example #22
func probeNetworkPluginsWithVendorCNIDirPrefix(pluginDir, vendorCNIDirPrefix string) []network.NetworkPlugin {
	plugin := &cniNetworkPlugin{
		defaultNetwork: nil,
		loNetwork:      getLoNetwork(vendorCNIDirPrefix),
		execer:         utilexec.New(),
	}

	plugin.syncNetworkConfig(pluginDir, vendorCNIDirPrefix)
	// sync network config from pluginDir periodically to detect network config updates
	go wait.Forever(func() {
		plugin.syncNetworkConfig(pluginDir, vendorCNIDirPrefix)
	}, 10*time.Second)

	return []network.NetworkPlugin{plugin}
}
Example #23
// CreateGceManager constructs gceManager object.
func CreateGceManager(migs []*config.MigConfig) (*GceManager, error) {
	// Create Google Compute Engine service.
	client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))
	gceService, err := gce.New(client)
	if err != nil {
		return nil, err
	}

	manager := &GceManager{
		migs:     migs,
		service:  gceService,
		migCache: map[config.InstanceConfig]*config.MigConfig{},
	}

	go wait.Forever(func() { manager.regenerateCacheIgnoreError() }, time.Hour)

	return manager, nil
}
Example #24
func (oc *OsdnController) VnidStartMaster() error {
	err := populateVNIDMap(oc)
	if err != nil {
		return err
	}

	// VNID: 0 reserved for default namespace and can reach any network in the cluster
	// VNID: 1 to 9 are internally reserved for any special cases in the future
	oc.netIDManager, err = netutils.NewNetIDAllocator(10, MaxVNID, oc.getAllocatedVNIDs())
	if err != nil {
		return err
	}

	// 'default' namespace is currently always an admin namespace
	oc.adminNamespaces = append(oc.adminNamespaces, "default")

	go utilwait.Forever(oc.watchNamespaces, 0)
	return nil
}
Example #25
func (master *OsdnMaster) VnidStartMaster() error {
	err := master.vnids.PopulateVNIDs(master.registry)
	if err != nil {
		return err
	}

	// VNID: 0 reserved for default namespace and can reach any network in the cluster
	// VNID: 1 to 9 are internally reserved for any special cases in the future
	master.netIDManager, err = netutils.NewNetIDAllocator(10, MaxVNID, master.vnids.GetAllocatedVNIDs())
	if err != nil {
		return err
	}

	// 'default' namespace is currently always an admin namespace
	master.adminNamespaces = append(master.adminNamespaces, "default")

	go utilwait.Forever(master.watchNamespaces, 0)
	return nil
}
Example #26
func (node *OsdnNode) SubnetStartNode(mtu uint) (bool, error) {
	err := node.initSelfSubnet()
	if err != nil {
		return false, err
	}

	// Assume we are working with IPv4
	ni, err := node.registry.GetNetworkInfo()
	if err != nil {
		return false, err
	}
	networkChanged, err := node.SetupSDN(node.localSubnet.Subnet, ni.ClusterNetwork.String(), ni.ServiceNetwork.String(), mtu)
	if err != nil {
		return false, err
	}

	go utilwait.Forever(node.watchSubnets, 0)
	return networkChanged, nil
}
Example #27
File: gce.go Project: raggi/contrib
// CreateGceManager constructs gceManager object.
func CreateGceManager(migs []*config.MigConfig, configReader io.Reader) (*GceManager, error) {
	// Create Google Compute Engine token.
	tokenSource := google.ComputeTokenSource("")
	if configReader != nil {
		var cfg provider_gce.Config
		if err := gcfg.ReadInto(&cfg, configReader); err != nil {
			glog.Errorf("Couldn't read config: %v", err)
			return nil, err
		}
		if cfg.Global.TokenURL == "" {
			glog.Warning("Empty tokenUrl in cloud config")
		} else {
			glog.Infof("Using TokenSource from config %#v", tokenSource)
			tokenSource = provider_gce.NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody)
		}
	} else {
		glog.Infof("Using default TokenSource %#v", tokenSource)
	}

	// Create Google Compute Engine service.
	client := oauth2.NewClient(oauth2.NoContext, tokenSource)
	gceService, err := gce.New(client)
	if err != nil {
		return nil, err
	}

	migInfos := make([]*migInformation, 0, len(migs))
	for _, mig := range migs {
		migInfos = append(migInfos, &migInformation{
			config: mig,
		})
	}

	manager := &GceManager{
		migs:     migInfos,
		service:  gceService,
		migCache: map[config.InstanceConfig]*config.MigConfig{},
	}

	go wait.Forever(func() { manager.regenerateCacheIgnoreError() }, time.Hour)

	return manager, nil
}
Example #28
func (oc *OsdnController) SubnetStartNode(mtu uint) (bool, error) {
	err := oc.initSelfSubnet()
	if err != nil {
		return false, err
	}

	// Assume we are working with IPv4
	ni, err := oc.Registry.GetNetworkInfo()
	if err != nil {
		return false, err
	}
	networkChanged, err := oc.pluginHooks.SetupSDN(oc.localSubnet.Subnet, ni.ClusterNetwork.String(), ni.ServiceNetwork.String(), mtu)
	if err != nil {
		return false, err
	}

	go utilwait.Forever(oc.watchSubnets, 0)
	return networkChanged, nil
}
Example #29
func (p RESTStorageProvider) postStartHookFunc(hookContext genericapiserver.PostStartHookContext) error {
	clientset, err := extensionsclient.NewForConfig(hookContext.LoopbackClientConfig)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("unable to initialize clusterroles: %v", err))
		return nil
	}

	thirdPartyControl := ThirdPartyController{
		master: p.ResourceInterface,
		client: clientset,
	}
	go wait.Forever(func() {
		if err := thirdPartyControl.SyncResources(); err != nil {
			glog.Warningf("third party resource sync failed: %v", err)
		}
	}, 10*time.Second)

	return nil
}
Example #30
func (proxy *ovsProxyPlugin) Start(baseHandler pconfig.EndpointsConfigHandler) error {
	glog.Infof("Starting multitenant SDN proxy endpoint filter")

	proxy.baseEndpointsHandler = baseHandler

	policies, err := proxy.registry.GetEgressNetworkPolicies()
	if err != nil {
		if kapierrs.IsForbidden(err) {
			// controller.go will log an error about this
			return nil
		}
		return fmt.Errorf("could not get EgressNetworkPolicies: %s", err)
	}
	for _, policy := range policies {
		proxy.updateNetworkPolicy(policy)
	}

	go utilwait.Forever(proxy.watchEgressNetworkPolicies, 0)
	return nil
}
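
Taken together, these examples follow one startup pattern: populate or validate state synchronously so the component is consistent the moment Start returns, then hand the long-running watch and resync loops to goroutines driven by wait.Forever (or the vendored utilwait.Forever alias), which re-invokes the function each time it returns and sleeps for the given period in between. The following is a minimal, self-contained sketch of that pattern, not code from any of the projects quoted above; the exampleController type and its populateState/watchObjects/resyncCache methods are hypothetical, and the import path assumes the current k8s.io/apimachinery location of the wait package (older vendored code imports it from other paths).

package main

import (
	"log"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// exampleController is an illustrative stand-in for the masters, nodes, and
// proxies in the examples above.
type exampleController struct {
	state map[string]string
}

// populateState loads existing objects synchronously, before any watch loop
// starts, so consumers see consistent data as soon as Start returns.
func (c *exampleController) populateState() error {
	c.state = map[string]string{"seed": "value"}
	return nil
}

// watchObjects is one pass of a blocking watch/sync loop; with period 0,
// wait.Forever calls it again immediately each time it returns.
func (c *exampleController) watchObjects() {
	time.Sleep(time.Second) // stand-in for a blocking watch
}

// resyncCache is a periodic task; wait.Forever re-invokes it every period.
func (c *exampleController) resyncCache() {
	log.Printf("resynced %d entries", len(c.state))
}

func (c *exampleController) Start() error {
	if err := c.populateState(); err != nil {
		return err
	}
	go wait.Forever(c.watchObjects, 0)             // restart immediately on return
	go wait.Forever(c.resyncCache, 30*time.Second) // run every 30 seconds
	return nil
}

func main() {
	c := &exampleController{}
	if err := c.Start(); err != nil {
		log.Fatal(err)
	}
	select {} // block forever, as the daemons above do
}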