Example #1
func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubelet.PodUpdate, *PodConfig) {
	eventBroadcaster := record.NewBroadcaster()
	config := NewPodConfig(mode, eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet"}))
	channel := config.Channel(TestSource)
	ch := config.Updates()
	return channel, ch, config
}
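A hedged usage sketch for this helper: feed a kubelet.PodUpdate into the returned source channel and read the merged result from Updates(). The PodConfigNotificationIncremental mode, the Op/Pods/Source field names, and whether Pods holds values or pointers vary between releases, so treat the literal below as an assumption rather than the exact API.
func TestPodConfigDeliversAdd(t *testing.T) {
	// Hedged sketch only: field names and the pod slice type are assumptions
	// that may differ between releases of the kubelet config package.
	channel, updates, _ := createPodConfigTester(PodConfigNotificationIncremental)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test"}}
	channel <- kubelet.PodUpdate{Op: kubelet.ADD, Pods: []*api.Pod{pod}, Source: TestSource}
	got := <-updates
	if got.Op != kubelet.ADD || len(got.Pods) != 1 {
		t.Errorf("unexpected update: %+v", got)
	}
}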
Example #2
// Create constructs a BuildController
func (factory *BuildControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildController := &buildcontroller.BuildController{
		BuildUpdater:      factory.BuildUpdater,
		ImageStreamClient: client,
		PodManager:        client,
		BuildStrategy: &typeBasedFactoryStrategy{
			DockerBuildStrategy: factory.DockerBuildStrategy,
			SourceBuildStrategy: factory.SourceBuildStrategy,
			CustomBuildStrategy: factory.CustomBuildStrategy,
		},
		Recorder:         eventBroadcaster.NewRecorder(kapi.EventSource{Component: "build-controller"}),
		OpenshiftEnabled: factory.OpenshiftEnabled,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			limitedLogAndRetry(factory.BuildUpdater, 30*time.Minute),
			kutil.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			build := obj.(*buildapi.Build)
			return buildController.HandleBuild(build)
		},
	}
}
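limitedLogAndRetry is referenced above but not shown. Judging from the inline retry functions in the later examples, it has the shape func(obj interface{}, err error, retries controller.Retry) bool. A hypothetical, minimal variant might look like the following; the name, the count-based cutoff, and ignoring the timeout argument are assumptions, since the real function also inspects the build object itself.
// Hypothetical sketch of a retry predicate compatible with
// controller.NewQueueRetryManager, modeled on the inline functions in the
// DeploymentConfig examples further down. The real limitedLogAndRetry also
// uses the BuildUpdater and the timeout to give up on long-failing builds.
func logAndRetry(maxRetries int) func(obj interface{}, err error, retries controller.Retry) bool {
	return func(obj interface{}, err error, retries controller.Retry) bool {
		kutil.HandleError(err)
		// Retry a limited number of times, then drop the item from the queue.
		return retries.Count < maxRetries
	}
}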
Example #3
// Run runs the specified SchedulerServer.  This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(kubeClient)
	config, err := s.createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	config.Cloud, err = cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	sched := scheduler.New(config)
	sched.Run()

	select {}
}
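The examples so far stop once the recorder is wired to the broadcaster. As a reminder of what the recorder is then used for, here is a minimal, hedged sketch of emitting an event through it; the pod, reason, and message are illustrative. In this era the recorder takes (object, reason, messageFmt, args...), whereas newer client-go releases add an event-type argument before the reason.
// Minimal sketch (not part of the examples above): components emit events
// through the recorder returned by NewRecorder; the broadcaster fans each
// event out to every sink registered via StartLogging / StartRecordingToSink.
func recordScheduled(recorder record.EventRecorder, pod *api.Pod, node string) {
	recorder.Eventf(pod, "scheduled", "Successfully assigned %v to %v", pod.Name, node)
}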
Example #4
// RunKubelet is responsible for setting up and running a kubelet.  It is used in three different applications:
//   1 Integration tests
//   2 Kubelet binary
//   3 Standalone 'kubernetes' binary
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error {
	kcfg.Hostname = nodeutil.GetHostname(kcfg.HostnameOverride)

	if len(kcfg.NodeName) == 0 {
		// Query the cloud provider for our node name, default to Hostname
		nodeName := kcfg.Hostname
		if kcfg.Cloud != nil {
			var err error
			instances, ok := kcfg.Cloud.Instances()
			if !ok {
				return fmt.Errorf("failed to get instances from cloud provider")
			}

			nodeName, err = instances.CurrentNodeName(kcfg.Hostname)
			if err != nil {
				return fmt.Errorf("error fetching current instance name from cloud provider: %v", err)
			}

			glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)
		}

		kcfg.NodeName = nodeName
	}

	eventBroadcaster := record.NewBroadcaster()
	kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.NodeName})
	eventBroadcaster.StartLogging(glog.V(3).Infof)
	if kcfg.KubeClient != nil {
		glog.V(4).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
	} else {
		glog.Warning("No api server defined - no events will be sent to API server.")
	}
	capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources)

	credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory)

	if builder == nil {
		builder = createAndInitKubelet
	}
	if kcfg.OSInterface == nil {
		kcfg.OSInterface = kubecontainer.RealOS{}
	}
	k, podCfg, err := builder(kcfg)
	if err != nil {
		return fmt.Errorf("failed to create kubelet: %v", err)
	}
	// process pods and exit.
	if kcfg.Runonce {
		if _, err := k.RunOnce(podCfg.Updates()); err != nil {
			return fmt.Errorf("runonce failed: %v", err)
		}
		glog.Infof("Started kubelet as runonce")
	} else {
		startKubelet(k, podCfg, kcfg)
		glog.Infof("Started kubelet")
	}
	return nil
}
Example #5
// Create creates a DeploymentConfigChangeController.
func (factory *DeploymentConfigChangeControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	changeController := &DeploymentConfigChangeController{
		changeStrategy: &changeStrategyImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Generate(name)
			},
			updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Update(config)
			},
		},
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return changeController.Handle(config)
		},
	}
}
Example #6
func (k *KubernetesScheduler) NewPluginConfig(terminate <-chan struct{}, mux *http.ServeMux,
	podsWatcher *cache.ListWatch) *PluginConfig {

	// Watch and queue pods that need scheduling.
	updates := make(chan queue.Entry, k.schedcfg.UpdatesBacklog)
	podUpdates := &podStoreAdapter{queue.NewHistorical(updates)}
	reflector := cache.NewReflector(podsWatcher, &api.Pod{}, podUpdates, 0)

	// lock that guards critical sections that involve transferring pods from
	// the store (cache) to the scheduling queue; its purpose is to maintain
	// an ordering (vs interleaving) of operations that's easier to reason about.
	kapi := &k8smScheduler{internal: k}
	q := newQueuer(podUpdates)
	podDeleter := &deleter{
		api: kapi,
		qr:  q,
	}
	eh := &errorHandler{
		api:     kapi,
		backoff: backoff.New(k.schedcfg.InitialPodBackoff.Duration, k.schedcfg.MaxPodBackoff.Duration),
		qr:      q,
	}
	startLatch := make(chan struct{})
	eventBroadcaster := record.NewBroadcaster()
	runtime.On(startLatch, func() {
		eventBroadcaster.StartRecordingToSink(k.client.Events(""))
		reflector.Run() // TODO(jdef) should listen for termination
		podDeleter.Run(updates, terminate)
		q.Run(terminate)

		q.installDebugHandlers(mux)
		podtask.InstallDebugHandlers(k.taskRegistry, mux)
	})
	return &PluginConfig{
		Config: &plugin.Config{
			MinionLister: nil,
			Algorithm: &kubeScheduler{
				api:                      kapi,
				podUpdates:               podUpdates,
				defaultContainerCPULimit: k.defaultContainerCPULimit,
				defaultContainerMemLimit: k.defaultContainerMemLimit,
			},
			Binder:   &binder{api: kapi},
			NextPod:  q.yield,
			Error:    eh.handleSchedulingError,
			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
		},
		api:      kapi,
		client:   k.client,
		qr:       q,
		deleter:  podDeleter,
		starting: startLatch,
	}
}
Example #7
// RunScheduler starts the Kubernetes scheduler
func (c *MasterConfig) RunScheduler() {
	config, err := c.createSchedulerConfig()
	if err != nil {
		glog.Fatalf("Unable to start scheduler: %v", err)
	}
	eventcast := record.NewBroadcaster()
	config.Recorder = eventcast.NewRecorder(kapi.EventSource{Component: "scheduler"})
	eventcast.StartRecordingToSink(c.KubeClient.Events(""))

	s := scheduler.New(config)
	s.Run()
	glog.Infof("Started Kubernetes Scheduler")
}
Example #8
// New returns a new service controller to keep cloud provider service resources
// (like external load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient client.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})

	return &ServiceController{
		cloud:            cloud,
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
	}
}
// NewReplicationManager creates a new ReplicationManager.
func NewReplicationManager(kubeClient client.Interface) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: RealPodControl{
			kubeClient: kubeClient,
			recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
		},
	}

	rm.syncHandler = rm.syncReplicationController
	return rm
}
Example #10
func TestUnschedulableNodes(t *testing.T) {
	helper, err := framework.NewHelper()
	if err != nil {
		t.Fatalf("Couldn't create etcd helper: %v", err)
	}
	framework.DeleteAllEtcdKeys()

	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	m = master.New(&master.Config{
		EtcdHelper:            helper,
		KubeletClient:         client.FakeKubeletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableUISupport:       false,
		EnableIndex:           true,
		APIPrefix:             "/api",
		// Enable v1beta3 if we are testing that version.
		EnableV1Beta3:    testapi.Version() == "v1beta3",
		Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admit.NewAlwaysAdmit(),
	})

	restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	schedulerConfigFactory := factory.NewConfigFactory(restClient)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
	scheduler.New(schedulerConfig).Run()

	defer close(schedulerConfig.StopEverything)

	DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
}
Example #11
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
	cloud cloudprovider.Interface,
	kubeClient client.Interface,
	registerRetryCount int,
	podEvictionTimeout time.Duration,
	podEvictor *PodEvictor,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterCIDR *net.IPNet,
	allocateNodeCIDRs bool) *NodeController {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	eventBroadcaster.StartLogging(glog.Infof)
	if kubeClient != nil {
		glog.Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	} else {
		glog.Infof("No api server defined - no events will be sent to API server.")
	}
	if allocateNodeCIDRs && clusterCIDR == nil {
		glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
	}
	return &NodeController{
		cloud:                  cloud,
		kubeClient:             kubeClient,
		recorder:               recorder,
		registerRetryCount:     registerRetryCount,
		podEvictionTimeout:     podEvictionTimeout,
		podEvictor:             podEvictor,
		nodeStatusMap:          make(map[string]nodeStatusData),
		nodeMonitorGracePeriod: nodeMonitorGracePeriod,
		nodeMonitorPeriod:      nodeMonitorPeriod,
		nodeStartupGracePeriod: nodeStartupGracePeriod,
		lookupIP:               net.LookupIP,
		now:                    util.Now,
		clusterCIDR:            clusterCIDR,
		allocateNodeCIDRs:      allocateNodeCIDRs,
	}
}
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
	cloud cloudprovider.Interface,
	matchRE string,
	nodes []string,
	staticResources *api.NodeResources,
	kubeClient client.Interface,
	registerRetryCount int,
	podEvictionTimeout time.Duration,
	deletingPodsRateLimiter util.RateLimiter,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterName string) *NodeController {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	if kubeClient != nil {
		glog.Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	} else {
		glog.Infof("No api server defined - no events will be sent to API server.")
	}
	return &NodeController{
		cloud:                   cloud,
		matchRE:                 matchRE,
		nodes:                   nodes,
		staticResources:         staticResources,
		kubeClient:              kubeClient,
		recorder:                recorder,
		registerRetryCount:      registerRetryCount,
		podEvictionTimeout:      podEvictionTimeout,
		deletingPodsRateLimiter: deletingPodsRateLimiter,
		nodeStatusMap:           make(map[string]nodeStatusData),
		nodeMonitorGracePeriod:  nodeMonitorGracePeriod,
		nodeMonitorPeriod:       nodeMonitorPeriod,
		nodeStartupGracePeriod:  nodeStartupGracePeriod,
		lookupIP:                net.LookupIP,
		now:                     util.Now,
		clusterName:             clusterName,
	}
}
Example #13
// RunKubelet is responsible for setting up and running a kubelet.  It is used in three different applications:
//   1 Integration tests
//   2 Kubelet binary
//   3 Standalone 'kubernetes' binary
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error {
	kcfg.Hostname = util.GetHostname(kcfg.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.Hostname})
	eventBroadcaster.StartLogging(glog.V(3).Infof)
	if kcfg.KubeClient != nil {
		glog.V(4).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
	} else {
		glog.Warning("No api server defined - no events will be sent to API server.")
	}
	capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources)

	credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory)

	if builder == nil {
		builder = createAndInitKubelet
	}
	if kcfg.OSInterface == nil {
		kcfg.OSInterface = kubecontainer.RealOS{}
	}
	k, podCfg, err := builder(kcfg)
	if err != nil {
		return fmt.Errorf("failed to create kubelet: %v", err)
	}
	// process pods and exit.
	if kcfg.Runonce {
		if _, err := k.RunOnce(podCfg.Updates()); err != nil {
			return fmt.Errorf("runonce failed: %v", err)
		}
		glog.Infof("Started kubelet as runonce")
	} else {
		startKubelet(k, podCfg, kcfg)
		glog.Infof("Started kubelet")
	}
	return nil
}
Example #14
func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (string, string) {
	// Setup
	servers := []string{}
	glog.Infof("Creating etcd client pointing to %v", servers)

	handler := delegateHandler{}
	apiServer := httptest.NewServer(&handler)

	etcdClient := etcd.NewClient(servers)
	sleep := 4 * time.Second
	ok := false
	for i := 0; i < 3; i++ {
		keys, err := etcdClient.Get("/", false, false)
		if err != nil {
			glog.Warningf("Unable to list root etcd keys: %v", err)
			if i < 2 {
				time.Sleep(sleep)
				sleep = sleep * sleep
			}
			continue
		}
		for _, node := range keys.Node.Nodes {
			if _, err := etcdClient.Delete(node.Key, true); err != nil {
				glog.Fatalf("Unable delete key: %v", err)
			}
		}
		ok = true
		break
	}
	if !ok {
		glog.Fatalf("Failed to connect to etcd")
	}

	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: apiVersion})

	etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, latest.Version, etcdtest.PathPrefix())
	if err != nil {
		glog.Fatalf("Unable to get etcd storage: %v", err)
	}
	expEtcdStorage, err := master.NewEtcdStorage(etcdClient, explatest.InterfacesFor, explatest.Version, etcdtest.PathPrefix())
	if err != nil {
		glog.Fatalf("Unable to get etcd storage for experimental: %v", err)
	}

	// Master
	host, port, err := net.SplitHostPort(strings.TrimLeft(apiServer.URL, "http://"))
	if err != nil {
		glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
	}
	portNumber, err := strconv.Atoi(port)
	if err != nil {
		glog.Fatalf("Nonnumeric port? %v", err)
	}

	publicAddress := net.ParseIP(host)
	if publicAddress == nil {
		glog.Fatalf("no public address for %s", host)
	}

	// Create a master and install handlers into mux.
	m := master.New(&master.Config{
		DatabaseStorage:       etcdStorage,
		ExpDatabaseStorage:    expEtcdStorage,
		KubeletClient:         fakeKubeletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableProfiling:       true,
		APIPrefix:             "/api",
		ExpAPIPrefix:          "/experimental",
		Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:      admit.NewAlwaysAdmit(),
		ReadWritePort:         portNumber,
		PublicAddress:         publicAddress,
		CacheTimeout:          2 * time.Second,
	})
	handler.delegate = m.Handler

	// Scheduler
	schedulerConfigFactory := factory.NewConfigFactory(cl, nil)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		glog.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(cl.Events(""))
	scheduler.New(schedulerConfig).Run()

	endpoints := endpointcontroller.NewEndpointController(cl)
	// ensure the service endpoints are sync'd several times within the window that the integration tests wait
	go endpoints.Run(3, util.NeverStop)

	controllerManager := replicationControllerPkg.NewReplicationManager(cl, replicationControllerPkg.BurstReplicas)

	// TODO: Write an integration test for the replication controllers watch.
	go controllerManager.Run(3, util.NeverStop)

	nodeController := nodecontroller.NewNodeController(nil, cl, 5*time.Minute, nodecontroller.NewPodEvictor(util.NewFakeRateLimiter()),
		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
	nodeController.Run(5 * time.Second)
	cadvisorInterface := new(cadvisor.Fake)

	// Kubelet (localhost)
	testRootDir := makeTempDirOrDie("kubelet_integ_1.", "")
	configFilePath := makeTempDirOrDie("config", testRootDir)
	glog.Infof("Using %s as root dir for kubelet #1", testRootDir)
	fakeDocker1.VersionInfo = docker.Env{"ApiVersion=1.15"}
	kcfg := kubeletapp.SimpleKubelet(cl, &fakeDocker1, "localhost", testRootDir, firstManifestURL, "127.0.0.1", 10250, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface, configFilePath, nil, kubecontainer.FakeOS{})
	kubeletapp.RunKubelet(kcfg, nil)
	// Kubelet (machine)
	// Create a second kubelet so that the guestbook example's two redis slaves both
	// have a place they can schedule.
	testRootDir = makeTempDirOrDie("kubelet_integ_2.", "")
	glog.Infof("Using %s as root dir for kubelet #2", testRootDir)
	fakeDocker2.VersionInfo = docker.Env{"ApiVersion=1.15"}
	kcfg = kubeletapp.SimpleKubelet(cl, &fakeDocker2, "127.0.0.1", testRootDir, secondManifestURL, "127.0.0.1", 10251, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface, "", nil, kubecontainer.FakeOS{})
	kubeletapp.RunKubelet(kcfg, nil)
	return apiServer.URL, configFilePath
}
func TestScheduler(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	defer eventBroadcaster.StartLogging(t.Logf).Stop()
	errS := errors.New("scheduler")
	errB := errors.New("binder")

	table := []struct {
		injectBindError  error
		sendPod          *api.Pod
		algo             scheduler.Scheduler
		expectErrorPod   *api.Pod
		expectAssumedPod *api.Pod
		expectError      error
		expectBind       *api.Binding
		eventReason      string
	}{
		{
			sendPod:          podWithID("foo", ""),
			algo:             mockScheduler{"machine1", nil},
			expectBind:       &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: "machine1"}},
			expectAssumedPod: podWithID("foo", "machine1"),
			eventReason:      "scheduled",
		}, {
			sendPod:        podWithID("foo", ""),
			algo:           mockScheduler{"machine1", errS},
			expectError:    errS,
			expectErrorPod: podWithID("foo", ""),
			eventReason:    "failedScheduling",
		}, {
			sendPod:         podWithID("foo", ""),
			algo:            mockScheduler{"machine1", nil},
			expectBind:      &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: "machine1"}},
			injectBindError: errB,
			expectError:     errB,
			expectErrorPod:  podWithID("foo", ""),
			eventReason:     "failedScheduling",
		},
	}

	for i, item := range table {
		var gotError error
		var gotPod *api.Pod
		var gotAssumedPod *api.Pod
		var gotBinding *api.Binding
		c := &Config{
			Modeler: &FakeModeler{
				AssumePodFunc: func(pod *api.Pod) {
					gotAssumedPod = pod
				},
			},
			MinionLister: scheduler.FakeMinionLister(
				api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
			),
			Algorithm: item.algo,
			Binder: fakeBinder{func(b *api.Binding) error {
				gotBinding = b
				return item.injectBindError
			}},
			Error: func(p *api.Pod, err error) {
				gotPod = p
				gotError = err
			},
			NextPod: func() *api.Pod {
				return item.sendPod
			},
			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
		}
		s := New(c)
		called := make(chan struct{})
		events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
			if e, a := item.eventReason, e.Reason; e != a {
				t.Errorf("%v: expected %v, got %v", i, e, a)
			}
			close(called)
		})
		s.scheduleOne()
		if e, a := item.expectAssumedPod, gotAssumedPod; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: assumed pod: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectErrorPod, gotPod; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error pod: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectError, gotError; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectBind, gotBinding; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error: %s", i, util.ObjectDiff(e, a))
		}
		<-called
		events.Stop()
	}
}
func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	defer eventBroadcaster.StartLogging(t.Logf).Stop()

	// Setup modeler so we control the contents of all 3 stores: assumed,
	// scheduled and queued
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{scheduledPodStore}

	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{queuedPodStore}

	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	// Create a fake clock used to timestamp entries and calculate ttl. Nothing
	// will expire till we flip to something older than the ttl, at which point
	// all entries inserted with fakeTime will expire.
	ttl := 30 * time.Second
	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	fakeClock := &util.FakeClock{fakeTime}
	ttlPolicy := &cache.TTLPolicy{ttl, fakeClock}
	assumedPodsStore := cache.NewFakeExpirationStore(
		cache.MetaNamespaceKeyFunc, nil, ttlPolicy, fakeClock)
	modeler.assumedPods = &cache.StoreToPodLister{assumedPodsStore}

	// Port is the easiest way to cause a fit predicate failure
	podPort := 8080
	firstPod := podWithPort("foo", "", podPort)

	// Create the scheduler config
	algo := scheduler.NewGenericScheduler(
		map[string]scheduler.FitPredicate{"PodFitsPorts": scheduler.PodFitsPorts},
		[]scheduler.PriorityConfig{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	var gotBinding *api.Binding
	c := &Config{
		Modeler: modeler,
		MinionLister: scheduler.FakeMinionLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			scheduledPodStore.Add(podWithPort(b.Name, b.Target.Name, podPort))
			gotBinding = b
			return nil
		}},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
	}

	// First scheduling pass should schedule the pod
	s := New(c)
	called := make(chan struct{})
	events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	queuedPodStore.Add(firstPod)
	// queuedPodStore: [foo:8080]
	// scheduledPodStore: []
	// assumedPods: []

	s.scheduleOne()
	// queuedPodStore: []
	// scheduledPodStore: [foo:8080]
	// assumedPods: [foo:8080]

	pod, exists, _ := scheduledPodStore.GetByKey("foo")
	if !exists {
		t.Errorf("Expected scheduled pod store to contain pod")
	}
	pod, exists, _ = queuedPodStore.GetByKey("foo")
	if exists {
		t.Errorf("Did not expect a queued pod, found %+v", pod)
	}
	pod, exists, _ = assumedPodsStore.GetByKey("foo")
	if !exists {
		t.Errorf("Assumed pod store should contain stale pod")
	}

	expectBind := &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
	}

	<-called
	events.Stop()

	scheduledPodStore.Delete(pod)
	_, exists, _ = assumedPodsStore.Get(pod)
	if !exists {
		t.Errorf("Expected pod %#v in assumed pod store", pod)
	}

	secondPod := podWithPort("bar", "", podPort)
	queuedPodStore.Add(secondPod)
	// queuedPodStore: [bar:8080]
	// scheduledPodStore: []
	// assumedPods: [foo:8080]

	// Second scheduling pass will fail to schedule if the store hasn't expired
	// the deleted pod. This would normally happen with a timeout.
	//expirationPolicy.NeverExpire = util.NewStringSet()
	fakeClock.Time = fakeClock.Time.Add(ttl + 1)

	called = make(chan struct{})
	events = eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	s.scheduleOne()

	expectBind = &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "bar"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
	}
	<-called
	events.Stop()
}
Example #17
// Create creates a DeploymentController.
func (factory *DeploymentControllerFactory) Create() controller.RunnableController {
	deploymentLW := &deployutil.ListWatcherImpl{
		// TODO: Investigate specifying annotation field selectors to fetch only 'deployments'
		// Currently field selectors are not supported for replication controllers
		ListFunc: func() (runtime.Object, error) {
			return factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).List(labels.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	deploymentQueue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentLW, &kapi.ReplicationController{}, deploymentQueue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	deployController := &DeploymentController{
		serviceAccount: factory.ServiceAccount,
		deploymentClient: &deploymentClientImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Update(deployment)
			},
		},
		podClient: &podClientImpl{
			getPodFunc: func(namespace, name string) (*kapi.Pod, error) {
				return factory.KubeClient.Pods(namespace).Get(name)
			},
			createPodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
				return factory.KubeClient.Pods(namespace).Create(pod)
			},
			deletePodFunc: func(namespace, name string) error {
				return factory.KubeClient.Pods(namespace).Delete(name, nil)
			},
			updatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
				return factory.KubeClient.Pods(namespace).Update(pod)
			},
			// Find deployer pods using the label they should all have which
			// correlates them to the named deployment.
			getDeployerPodsForFunc: func(namespace, name string) ([]kapi.Pod, error) {
				labelSel, err := labels.Parse(fmt.Sprintf("%s=%s", deployapi.DeployerPodForDeploymentLabel, name))
				if err != nil {
					return []kapi.Pod{}, err
				}
				pods, err := factory.KubeClient.Pods(namespace).List(labelSel, fields.Everything())
				if err != nil {
					return []kapi.Pod{}, err
				}
				return pods.Items, nil
			},
		},
		makeContainer: func(strategy *deployapi.DeploymentStrategy) (*kapi.Container, error) {
			return factory.makeContainer(strategy)
		},
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: deploymentQueue,
		RetryManager: controller.NewQueueRetryManager(
			deploymentQueue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				if _, isFatal := err.(fatalError); isFatal {
					kutil.HandleError(err)
					return false
				}
				if retries.Count > 1 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			deployment := obj.(*kapi.ReplicationController)
			return deployController.Handle(deployment)
		},
	}
}
Example #18
// Create creates a DeploymentConfigController.
func (factory *DeploymentConfigControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	configController := &DeploymentConfigController{
		deploymentClient: &deploymentClientImpl{
			createDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Create(deployment)
			},
			listDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
				sel := deployutil.ConfigSelector(configName)
				list, err := factory.KubeClient.ReplicationControllers(namespace).List(sel)
				if err != nil {
					return nil, err
				}
				return list, nil
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Update(deployment)
			},
		},
		makeDeployment: func(config *deployapi.DeploymentConfig) (*kapi.ReplicationController, error) {
			return deployutil.MakeDeployment(config, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				config := obj.(*deployapi.DeploymentConfig)
				// no retries for a fatal error
				if _, isFatal := err.(fatalError); isFatal {
					glog.V(4).Infof("Will not retry fatal error for deploymentConfig %s/%s: %v", config.Namespace, config.Name, err)
					kutil.HandleError(err)
					return false
				}
				// infinite retries for a transient error
				if _, isTransient := err.(transientError); isTransient {
					glog.V(4).Infof("Retrying deploymentConfig %s/%s with error: %v", config.Namespace, config.Name, err)
					return true
				}
				kutil.HandleError(err)
				// no retries for anything else
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return configController.Handle(config)
		},
	}
}
// NewReplicationManager creates a new ReplicationManager.
func NewReplicationManager(kubeClient client.Interface, burstReplicas int) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewControllerExpectations(),
		queue:         workqueue.New(),
	}

	rm.rcStore.Store, rm.rcController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return rm.kubeClient.ReplicationControllers(api.NamespaceAll).List(labels.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return rm.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ReplicationController{},
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				// We only really need to do this when spec changes, but for correctness it is safer to
				// periodically double check. It is overkill for 2 reasons:
				// 1. Status.Replica updates will cause a sync
				// 2. Every 30s we will get a full resync (this will happen anyway every 5 minutes when pods relist)
				// However, it shouldn't be that bad as rcs that haven't met expectations won't sync, and all
				// the listing is done using local stores.
				oldRC := old.(*api.ReplicationController)
				curRC := cur.(*api.ReplicationController)
				if oldRC.Status.Replicas != curRC.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for rc: %v, %d->%d", curRC.Name, oldRC.Status.Replicas, curRC.Status.Replicas)
				}
				rm.enqueueController(cur)
			},
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rm.enqueueController,
		},
	)

	rm.podStore.Store, rm.podController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return rm.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return rm.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Pod{},
		PodRelistPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.addPod,
			// This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill
			// the most frequent pod update is status, and the associated rc will only list from local storage, so
			// it should be ok.
			UpdateFunc: rm.updatePod,
			DeleteFunc: rm.deletePod,
		},
	)

	rm.syncHandler = rm.syncReplicationController
	rm.podStoreSynced = rm.podController.HasSynced
	return rm
}