func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
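	// A fake API server serves canned HPA list, scale, pod list, and heapster responses;
	// a single reconcile pass by the controller should hit each handler exactly once.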

	hpaResponse := serverResponse{http.StatusOK, &expapi.HorizontalPodAutoscalerList{
		Items: []expapi.HorizontalPodAutoscaler{
			{
				ObjectMeta: api.ObjectMeta{
					Name:      "foo",
					Namespace: namespace,
				},
				Spec: expapi.HorizontalPodAutoscalerSpec{
					ScaleRef: &expapi.SubresourceReference{
						Kind:        "replicationController",
						Name:        rcName,
						Namespace:   namespace,
						Subresource: "scale",
					},
					MinCount: 1,
					MaxCount: 5,
					Target:   target,
				},
			}}}}

	scaleResponse := serverResponse{http.StatusOK, &expapi.Scale{
		ObjectMeta: api.ObjectMeta{
			Name:      "rcName",
			Namespace: namespace,
		},
		Spec: expapi.ScaleSpec{
			Replicas: 5,
		},
		Status: expapi.ScaleStatus{
			Replicas: 2,
			Selector: map[string]string{"name": podNameLabel},
		},
	}}

	podListResponse := serverResponse{http.StatusOK, &api.PodList{
		Items: []api.Pod{
			{
				ObjectMeta: api.ObjectMeta{
					Name:      podName,
					Namespace: namespace,
				},
			}}}}

	heapsterRawResponse := "UPADTE ME"
	heapsterResponse := serverResponse{http.StatusOK, &heapsterRawResponse}

	testServer, handlers := makeTestServer(t, hpaResponse, scaleResponse, podListResponse, heapsterResponse)
	defer testServer.Close()
	kubeClient := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	expClient := client.NewExperimentalOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})

	hpaController := New(kubeClient, expClient)
	err := hpaController.reconcileAutoscalers()
	if err != nil {
		t.Fatal("Failed to reconcile %v", err)
	}
	for _, h := range handlers {
		h.ValidateRequestCount(t, 1)
	}
}
Example #2
func startComponents(firstManifestURL, secondManifestURL string) (string, string) {
	// Setup
	servers := []string{}
	glog.Infof("Creating etcd client pointing to %v", servers)

	handler := delegateHandler{}
	apiServer := httptest.NewServer(&handler)

	etcdClient := etcd.NewClient(servers)
	sleep := 4 * time.Second
	ok := false
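	// Verify etcd is reachable and clear out any existing keys so the test starts
	// from an empty store, retrying a few times with a backoff between attempts.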
	for i := 0; i < 3; i++ {
		keys, err := etcdClient.Get("/", false, false)
		if err != nil {
			glog.Warningf("Unable to list root etcd keys: %v", err)
			if i < 2 {
				time.Sleep(sleep)
				sleep = sleep * 2 // double the backoff; multiplying two Durations would overflow int64
			}
			continue
		}
		for _, node := range keys.Node.Nodes {
			if _, err := etcdClient.Delete(node.Key, true); err != nil {
				glog.Fatalf("Unable delete key: %v", err)
			}
		}
		ok = true
		break
	}
	if !ok {
		glog.Fatalf("Failed to connect to etcd")
	}

	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Default.GroupAndVersion()})

	// TODO: caesarxuchao: hacky way to specify version of Experimental client.
	// We will fix this by supporting multiple group versions in Config
	cl.ExperimentalClient = client.NewExperimentalOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Experimental.GroupAndVersion()})

	storageVersions := make(map[string]string)
	etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("").InterfacesFor, testapi.Default.GroupAndVersion(), etcdtest.PathPrefix())
	storageVersions[""] = testapi.Default.GroupAndVersion()
	if err != nil {
		glog.Fatalf("Unable to get etcd storage: %v", err)
	}
	expEtcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("experimental").InterfacesFor, testapi.Experimental.GroupAndVersion(), etcdtest.PathPrefix())
	storageVersions["experimental"] = testapi.Experimental.GroupAndVersion()
	if err != nil {
		glog.Fatalf("Unable to get etcd storage for experimental: %v", err)
	}
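	// Route the core ("") and experimental API groups to their respective etcd storage backends.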
	storageDestinations := master.NewStorageDestinations()
	storageDestinations.AddAPIGroup("", etcdStorage)
	storageDestinations.AddAPIGroup("experimental", expEtcdStorage)

	// Master
	host, port, err := net.SplitHostPort(strings.TrimPrefix(apiServer.URL, "http://"))
	if err != nil {
		glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
	}
	portNumber, err := strconv.Atoi(port)
	if err != nil {
		glog.Fatalf("Nonnumeric port? %v", err)
	}

	publicAddress := net.ParseIP(host)
	if publicAddress == nil {
		glog.Fatalf("no public address for %s", host)
	}

	// Create a master and install handlers into mux.
	m := master.New(&master.Config{
		StorageDestinations:   storageDestinations,
		KubeletClient:         fakeKubeletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableProfiling:       true,
		APIPrefix:             "/api",
		APIGroupPrefix:        "/apis",
		Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:      admit.NewAlwaysAdmit(),
		ReadWritePort:         portNumber,
		PublicAddress:         publicAddress,
		CacheTimeout:          2 * time.Second,
		StorageVersions:       storageVersions,
	})
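	// Point the delegating test handler at the master so the httptest server now serves the real API.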
	handler.delegate = m.Handler

	// Scheduler
	schedulerConfigFactory := factory.NewConfigFactory(cl, nil)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		glog.Fatalf("Couldn't create scheduler config: %v", err)
	}
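	// Scheduler events are broadcast both to the log and to the API server's event sink.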
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(cl.Events(""))
	scheduler.New(schedulerConfig).Run()

	// Ensure the service endpoints are synced several times within the window the integration tests wait for.
	go endpointcontroller.NewEndpointController(cl, controller.NoResyncPeriodFunc).
		Run(3, util.NeverStop)

	// TODO: Write an integration test for the replication controllers watch.
	go replicationControllerPkg.NewReplicationManager(cl, controller.NoResyncPeriodFunc, replicationControllerPkg.BurstReplicas).
		Run(3, util.NeverStop)

	nodeController := nodecontroller.NewNodeController(nil, cl, 5*time.Minute, util.NewFakeRateLimiter(), util.NewFakeRateLimiter(),
		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
	nodeController.Run(5 * time.Second)
	cadvisorInterface := new(cadvisor.Fake)

	// Kubelet (localhost)
	testRootDir := makeTempDirOrDie("kubelet_integ_1.", "")
	configFilePath := makeTempDirOrDie("config", testRootDir)
	glog.Infof("Using %s as root dir for kubelet #1", testRootDir)
	fakeDocker1.VersionInfo = docker.Env{"ApiVersion=1.15"}

	kcfg := kubeletapp.SimpleKubelet(
		cl,
		&fakeDocker1,
		"localhost",
		testRootDir,
		firstManifestURL,
		"127.0.0.1",
		10250, /* KubeletPort */
		0,     /* ReadOnlyPort */
		api.NamespaceDefault,
		empty_dir.ProbeVolumePlugins(),
		nil,
		cadvisorInterface,
		configFilePath,
		nil,
		kubecontainer.FakeOS{},
		1*time.Second,  /* FileCheckFrequency */
		1*time.Second,  /* HTTPCheckFrequency */
		10*time.Second, /* MinimumGCAge */
		3*time.Second,  /* NodeStatusUpdateFrequency */
		10*time.Second, /* SyncFrequency */
		40 /* MaxPods */)

	kubeletapp.RunKubelet(kcfg)
	// Kubelet (machine)
	// Create a second kubelet so that the guestbook example's two redis slaves both
	// have a place they can schedule.
	testRootDir = makeTempDirOrDie("kubelet_integ_2.", "")
	glog.Infof("Using %s as root dir for kubelet #2", testRootDir)
	fakeDocker2.VersionInfo = docker.Env{"ApiVersion=1.15"}

	kcfg = kubeletapp.SimpleKubelet(
		cl,
		&fakeDocker2,
		"127.0.0.1",
		testRootDir,
		secondManifestURL,
		"127.0.0.1",
		10251, /* KubeletPort */
		0,     /* ReadOnlyPort */
		api.NamespaceDefault,
		empty_dir.ProbeVolumePlugins(),
		nil,
		cadvisorInterface,
		"",
		nil,
		kubecontainer.FakeOS{},
		1*time.Second,  /* FileCheckFrequency */
		1*time.Second,  /* HTTPCheckFrequency */
		10*time.Second, /* MinimumGCAge */
		3*time.Second,  /* NodeStatusUpdateFrequency */
		10*time.Second, /* SyncFrequency */
		40 /* MaxPods */)

	kubeletapp.RunKubelet(kcfg)
	return apiServer.URL, configFilePath
}
Example #3
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	addFlags(pflag.CommandLine)

	util.InitFlags()
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(maxTestTimeout)
		glog.Fatalf("This test has timed out.")
	}()

	glog.Infof("Running tests for APIVersion: %s", os.Getenv("KUBE_TEST_API"))

	firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
	secondManifestURL := ServeCachedManifestFile(testPodSpecFile)
	apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL)

	// Ok. we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)
	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: testapi.Default.GroupAndVersion()})
	// TODO: caesarxuchao: hacky way to specify version of Experimental client.
	// We will fix this by supporting multiple group versions in Config
	kubeClient.ExperimentalClient = client.NewExperimentalOrDie(&client.Config{Host: apiServerURL, Version: testapi.Experimental.GroupAndVersion()})

	// Run tests in parallel
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runPatchTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		func(c *client.Client) {
			runSelfLinkTestOnNamespace(c, api.NamespaceDefault)
			runSelfLinkTestOnNamespace(c, "other")
		},
	}

	// Only run at most maxConcurrency tests in parallel.
	if maxConcurrency <= 0 {
		maxConcurrency = len(testFuncs)
	}
	glog.Infof("Running %d tests in parallel.", maxConcurrency)
	ch := make(chan struct{}, maxConcurrency)
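	// The buffered channel acts as a counting semaphore: each test goroutine sends to claim a slot
	// and receives to release it when the test finishes.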

	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			ch <- struct{}{}
			f(kubeClient)
			<-ch
			wg.Done()
		}()
	}
	wg.Wait()
	close(ch)

	// Check that kubelet tried to make the containers.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so kubelet tries to create these multiple times.
	createdConts := sets.String{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	// We expect 12: 2 pod infra containers + 2 containers from the replication controller +
	//               1 pod infra container + 2 containers from the URL on first Kubelet +
	//               1 pod infra container + 2 containers from the URL on second Kubelet +
	//               1 pod infra container + 1 container from the service test.

	if len(createdConts) != 12 {
		glog.Fatalf("Expected 12 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created containers: %#v", createdConts.List())

	// This test doesn't run with the others because it can't run in
	// parallel and also it schedules extra pods which would change the
	// above pod counting logic.
	runSchedulerNoPhantomPodsTest(kubeClient)

	glog.Infof("\n\nLogging high latency metrics from the 10250 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10250")
	glog.Infof("\n\nLogging high latency metrics from the 10251 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10251")
}
func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {

	hpaResponse := serverResponse{http.StatusOK, &expapi.HorizontalPodAutoscalerList{
		Items: []expapi.HorizontalPodAutoscaler{
			{
				ObjectMeta: api.ObjectMeta{
					Name:      hpaName,
					Namespace: namespace,
				},
				Spec: expapi.HorizontalPodAutoscalerSpec{
					ScaleRef: &expapi.SubresourceReference{
						Kind:        "replicationController",
						Name:        rcName,
						Namespace:   namespace,
						Subresource: "scale",
					},
					MinCount: 1,
					MaxCount: 5,
					Target:   expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.3")},
				},
			}}}}

	scaleResponse := serverResponse{http.StatusOK, &expapi.Scale{
		ObjectMeta: api.ObjectMeta{
			Name:      rcName,
			Namespace: namespace,
		},
		Spec: expapi.ScaleSpec{
			Replicas: 1,
		},
		Status: expapi.ScaleStatus{
			Replicas: 1,
			Selector: map[string]string{"name": podNameLabel},
		},
	}}

	status := expapi.HorizontalPodAutoscalerStatus{
		CurrentReplicas: 1,
		DesiredReplicas: 3,
	}
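	// The controller is expected to write this status back by updating the HPA object;
	// updateHpaResponse below is the canned reply to that update request.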
	updateHpaResponse := serverResponse{http.StatusOK, &expapi.HorizontalPodAutoscaler{
		ObjectMeta: api.ObjectMeta{
			Name:      hpaName,
			Namespace: namespace,
		},
		Spec: expapi.HorizontalPodAutoscalerSpec{
			ScaleRef: &expapi.SubresourceReference{
				Kind:        "replicationController",
				Name:        rcName,
				Namespace:   namespace,
				Subresource: "scale",
			},
			MinCount: 1,
			MaxCount: 5,
			Target:   expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.3")},
		},
		Status: &status,
	}}

	testServer, handlers := makeTestServer(t,
		map[string]*serverResponse{
			hpaListHandler:   &hpaResponse,
			scaleHandler:     &scaleResponse,
			updateHpaHandler: &updateHpaResponse,
		})

	defer testServer.Close()
	kubeClient := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Experimental.Version()})
	expClient := client.NewExperimentalOrDie(&client.Config{Host: testServer.URL, Version: testapi.Experimental.Version()})

	fakeRC := fakeResourceConsumptionClient{metrics: map[api.ResourceName]expapi.ResourceConsumption{
		api.ResourceCPU: {Resource: api.ResourceCPU, Quantity: resource.MustParse("650m")},
	}}
	fake := fakeMetricsClient{consumption: &fakeRC}
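	// The fake metrics client reports 650m CPU for the single replica against the 300m target,
	// so the controller should request 3 replicas (matching updateHpaResponse above).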

	hpaController := New(kubeClient, expClient, &fake)

	err := hpaController.reconcileAutoscalers()
	if err != nil {
		t.Fatal("Failed to reconcile: %v", err)
	}
	for _, h := range handlers {
		h.ValidateRequestCount(t, 1)
	}
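	// Decode the body of the HPA update request to verify the status the controller wrote.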
	obj, err := expClient.Codec.Decode([]byte(handlers[updateHpaHandler].RequestBody))
	if err != nil {
		t.Fatal("Failed to decode: %v %v", err)
	}
	hpa, _ := obj.(*expapi.HorizontalPodAutoscaler)

	assert.Equal(t, 3, hpa.Status.DesiredReplicas)
	assert.Equal(t, int64(650), hpa.Status.CurrentConsumption.Quantity.MilliValue())
	assert.NotNil(t, hpa.Status.LastScaleTimestamp)
}