Example #1
func (sq *SubmitQueue) doGenCommitters(config *github_util.Config) error {
	pushUsers, pullUsers, err := config.UsersWithAccess()
	if err != nil {
		glog.Fatalf("Unable to read committers from github: %v", err)
	}

	pushSet := sets.NewString()
	for _, user := range pushUsers {
		pushSet.Insert(*user.Login)
	}
	pullSet := sets.NewString()
	for _, user := range pullUsers {
		pullSet.Insert(*user.Login)
	}

	if err = writeWhitelist(sq.Committers, "# auto-generated by "+os.Args[0]+" gen-committers; manual additions should go in the whitelist", pushSet); err != nil {
		glog.Fatalf("Unable to write committers: %v", err)
	}
	glog.Info("Successfully updated committers file.")

	existingWhitelist, err := loadWhitelist(sq.Whitelist)
	if err != nil {
		glog.Fatalf("error loading whitelist; it will not be updated: %v", err)
	}

	neededInWhitelist := existingWhitelist.Union(pullSet)
	neededInWhitelist = neededInWhitelist.Difference(pushSet)
	if err = writeWhitelist(sq.Whitelist, "# auto-generated by "+os.Args[0]+" gen-committers; manual additions may be added by hand", neededInWhitelist); err != nil {
		glog.Fatalf("Unable to write additional user whitelist: %v", err)
	}
	glog.Info("Successfully update whitelist file.")
	return nil
}
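The whitelist calculation above is plain set algebra: everyone with pull access, plus prior manual additions, minus anyone with push access (committers are already covered by the committers file). A minimal standalone sketch of that step, using the same sets package seen throughout these examples:

func neededWhitelist(existing, pull, push sets.String) sets.String {
	// (existing ∪ pull) \ push: keep pull users and manual
	// additions, but drop anyone who is already a committer.
	return existing.Union(pull).Difference(push)
}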
Example #2
// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}

	// Test Index
	expected := map[string]sets.String{}
	expected["b"] = sets.NewString("a", "c")
	expected["f"] = sets.NewString("e")
	expected["h"] = sets.NewString("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	for k, v := range expected {
		found := sets.String{}
		indexResults, err := indexer.Index("by_val", mkObj("", k))
		if err != nil {
			t.Errorf("Unexpected error %v", err)
		}
		for _, item := range indexResults {
			found.Insert(item.(testStoreObject).id)
		}
		items := v.List()
		if !found.HasAll(items...) {
			t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
		}
	}
}
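The test assumes the indexer was constructed with an index function registered under the name "by_val". A hedged sketch of such a function (the exact IndexFunc signature has varied across versions; the slice-returning form is assumed here):

// byValIndex indexes each testStoreObject under its val field, so
// Index("by_val", mkObj("", k)) returns every object whose val is k.
func byValIndex(obj interface{}) ([]string, error) {
	return []string{obj.(testStoreObject).val}, nil
}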
Example #3
func TestRequirementConstructor(t *testing.T) {
	requirementConstructorTests := []struct {
		Key     string
		Op      Operator
		Vals    sets.String
		Success bool
	}{
		{"x", InOperator, nil, false},
		{"x", NotInOperator, sets.NewString(), false},
		{"x", InOperator, sets.NewString("foo"), true},
		{"x", NotInOperator, sets.NewString("foo"), true},
		{"x", ExistsOperator, nil, true},
		{"x", DoesNotExistOperator, nil, true},
		{"1foo", InOperator, sets.NewString("bar"), true},
		{"1234", InOperator, sets.NewString("bar"), true},
		{strings.Repeat("a", 254), ExistsOperator, nil, false}, //breaks DNS rule that len(key) <= 253
	}
	for _, rc := range requirementConstructorTests {
		if _, err := NewRequirement(rc.Key, rc.Op, rc.Vals); err == nil && !rc.Success {
			t.Errorf("expected error with key:%#v op:%v vals:%v, got no error", rc.Key, rc.Op, rc.Vals)
		} else if err != nil && rc.Success {
			t.Errorf("expected no error with key:%#v op:%v vals:%v, got:%v", rc.Key, rc.Op, rc.Vals, err)
		}
	}
}
Example #4
func TestAdd(t *testing.T) {
	testCases := []struct {
		sel         Selector
		key         string
		operator    Operator
		values      []string
		refSelector Selector
	}{
		{
			LabelSelector{},
			"key",
			InOperator,
			[]string{"value"},
			LabelSelector{Requirement{"key", InOperator, sets.NewString("value")}},
		},
		{
			LabelSelector{Requirement{"key", InOperator, sets.NewString("value")}},
			"key2",
			EqualsOperator,
			[]string{"value2"},
			LabelSelector{
				Requirement{"key", InOperator, sets.NewString("value")},
				Requirement{"key2", EqualsOperator, sets.NewString("value2")},
			},
		},
	}
	for _, ts := range testCases {
		ts.sel = ts.sel.Add(ts.key, ts.operator, ts.values)
		if !reflect.DeepEqual(ts.sel, ts.refSelector) {
			t.Errorf("Expected %v found %v", ts.refSelector, ts.sel)
		}
	}
}
Example #5
func init() {
	groupMeta, err := latest.RegisterGroup("extensions")
	if err != nil {
		glog.V(4).Infof("%v", err)
		return
	}
	registeredGroupVersions := registered.GroupVersionsForGroup("extensions")
	groupVersion := registeredGroupVersions[0]
	*groupMeta = latest.GroupMeta{
		GroupVersion: groupVersion,
		Group:        apiutil.GetGroup(groupVersion),
		Version:      apiutil.GetVersion(groupVersion),
		Codec:        runtime.CodecFor(api.Scheme, groupVersion),
	}
	var versions []string
	var groupVersions []string
	for i := len(registeredGroupVersions) - 1; i >= 0; i-- {
		versions = append(versions, apiutil.GetVersion(registeredGroupVersions[i]))
		groupVersions = append(groupVersions, registeredGroupVersions[i])
	}
	groupMeta.Versions = versions
	groupMeta.GroupVersions = groupVersions

	groupMeta.SelfLinker = runtime.SelfLinker(accessor)

	// the list of kinds that are scoped at the root of the api hierarchy
	// if a kind is not enumerated here, it is assumed to have a namespace scope
	rootScoped := sets.NewString()

	ignoredKinds := sets.NewString()

	groupMeta.RESTMapper = api.NewDefaultRESTMapper("extensions", groupVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
	api.RegisterRESTMapper(groupMeta.RESTMapper)
	groupMeta.InterfacesFor = interfacesFor
}
Example #6
func TestWaitFlagNew(t *testing.T) {
	fcmd := exec.FakeCmd{
		CombinedOutputScript: []exec.FakeCombinedOutputAction{
			// iptables version check
			func() ([]byte, error) { return []byte("iptables v1.4.22"), nil },
			// Success.
			func() ([]byte, error) { return []byte{}, nil },
		},
	}
	fexec := exec.FakeExec{
		CommandScript: []exec.FakeCommandAction{
			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
		},
	}
	runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4)
	defer runner.Destroy()
	err := runner.DeleteChain(TableNAT, Chain("FOOBAR"))
	if err != nil {
		t.Errorf("expected success, got %v", err)
	}
	if fcmd.CombinedOutputCalls != 2 {
		t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls)
	}
	if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-w2") {
		t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1])
	}
	if sets.NewString(fcmd.CombinedOutputLog[1]...).HasAny("-w") {
		t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1])
	}
}
Example #7
func (s *serviceAccount) limitSecretReferences(serviceAccount *api.ServiceAccount, pod *api.Pod) error {
	// Ensure all secrets the pod references are allowed by the service account
	mountableSecrets := sets.NewString()
	for _, s := range serviceAccount.Secrets {
		mountableSecrets.Insert(s.Name)
	}
	for _, volume := range pod.Spec.Volumes {
		source := volume.VolumeSource
		if source.Secret == nil {
			continue
		}
		secretName := source.Secret.SecretName
		if !mountableSecrets.Has(secretName) {
			return fmt.Errorf("Volume with secret.secretName=\"%s\" is not allowed because service account %s does not reference that secret", secretName, serviceAccount.Name)
		}
	}

	// limit pull secret references as well
	pullSecrets := sets.NewString()
	for _, s := range serviceAccount.ImagePullSecrets {
		pullSecrets.Insert(s.Name)
	}
	for i, pullSecretRef := range pod.Spec.ImagePullSecrets {
		if !pullSecrets.Has(pullSecretRef.Name) {
			return fmt.Errorf(`imagePullSecrets[%d].name="%s" is not allowed because service account %s does not reference that imagePullSecret`, i, pullSecretRef.Name, serviceAccount.Name)
		}
	}
	return nil
}
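A hedged usage sketch (the service account and pod literals below are illustrative, not from the original source): a pod that mounts a secret the service account does not list is rejected.

func exampleLimitSecretReferences(s *serviceAccount) error {
	sa := &api.ServiceAccount{
		ObjectMeta: api.ObjectMeta{Name: "builder"},
		Secrets:    []api.ObjectReference{{Name: "builder-token"}},
	}
	pod := &api.Pod{
		Spec: api.PodSpec{
			Volumes: []api.Volume{{
				Name: "creds",
				VolumeSource: api.VolumeSource{
					Secret: &api.SecretVolumeSource{SecretName: "other-secret"},
				},
			}},
		},
	}
	// "other-secret" is not among the service account's secrets,
	// so this returns a non-nil error.
	return s.limitSecretReferences(sa, pod)
}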
Example #8
func init() {
	groupMeta, err := latest.RegisterGroup("")
	if err != nil {
		glog.V(4).Infof("%v", err)
		return
	}
	// Use the first API version in the list of registered versions as the latest.
	registeredGroupVersions := registered.GroupVersionsForGroup("")
	groupVersion := registeredGroupVersions[0]
	*groupMeta = latest.GroupMeta{
		GroupVersion: groupVersion,
		Group:        apiutil.GetGroup(groupVersion),
		Version:      apiutil.GetVersion(groupVersion),
		Codec:        runtime.CodecFor(api.Scheme, groupVersion),
	}
	var versions []string
	var groupVersions []string
	for i := len(registeredGroupVersions) - 1; i >= 0; i-- {
		versions = append(versions, apiutil.GetVersion(registeredGroupVersions[i]))
		groupVersions = append(groupVersions, registeredGroupVersions[i])
	}
	groupMeta.Versions = versions
	groupMeta.GroupVersions = groupVersions

	groupMeta.SelfLinker = runtime.SelfLinker(accessor)

	// the list of kinds that are scoped at the root of the api hierarchy
	// if a kind is not enumerated here, it is assumed to have a namespace scope
	rootScoped := sets.NewString(
		"Node",
		"Minion",
		"Namespace",
		"PersistentVolume",
	)

	// these kinds should be excluded from the list of resources
	ignoredKinds := sets.NewString(
		"ListOptions",
		"DeleteOptions",
		"Status",
		"PodLogOptions",
		"PodExecOptions",
		"PodAttachOptions",
		"PodProxyOptions",
		"ThirdPartyResource",
		"ThirdPartyResourceData",
		"ThirdPartyResourceList")

	mapper := api.NewDefaultRESTMapper("", versions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
	// setup aliases for groups of resources
	mapper.AddResourceAlias("all", userResources...)
	groupMeta.RESTMapper = mapper
	api.RegisterRESTMapper(groupMeta.RESTMapper)
	groupMeta.InterfacesFor = interfacesFor
}
Example #9
func TestPrinters(t *testing.T) {
	om := func(name string) api.ObjectMeta { return api.ObjectMeta{Name: name} }
	templatePrinter, err := NewTemplatePrinter([]byte("{{.name}}"))
	if err != nil {
		t.Fatal(err)
	}
	templatePrinter2, err := NewTemplatePrinter([]byte("{{len .items}}"))
	if err != nil {
		t.Fatal(err)
	}
	jsonpathPrinter, err := NewJSONPathPrinter("{.metadata.name}")
	if err != nil {
		t.Fatal(err)
	}
	printers := map[string]ResourcePrinter{
		"humanReadable":        NewHumanReadablePrinter(true, false, false, false, []string{}),
		"humanReadableHeaders": NewHumanReadablePrinter(false, false, false, false, []string{}),
		"json":                 &JSONPrinter{},
		"yaml":                 &YAMLPrinter{},
		"template":             templatePrinter,
		"template2":            templatePrinter2,
		"jsonpath":             jsonpathPrinter,
		"name":                 &NamePrinter{},
	}
	objects := map[string]runtime.Object{
		"pod":             &api.Pod{ObjectMeta: om("pod")},
		"emptyPodList":    &api.PodList{},
		"nonEmptyPodList": &api.PodList{Items: []api.Pod{{}}},
		"endpoints": &api.Endpoints{
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}, {IP: "localhost"}},
				Ports:     []api.EndpointPort{{Port: 8080}},
			}}},
	}
	// map of printer name to set of objects it should fail on.
	expectedErrors := map[string]sets.String{
		"template2": sets.NewString("pod", "emptyPodList", "endpoints"),
		"jsonpath":  sets.NewString("emptyPodList", "nonEmptyPodList", "endpoints"),
	}

	for pName, p := range printers {
		for oName, obj := range objects {
			b := &bytes.Buffer{}
			if err := p.PrintObj(obj, b); err != nil {
				if set, found := expectedErrors[pName]; found && set.Has(oName) {
					// expected error
					continue
				}
				t.Errorf("printer '%v', object '%v'; error: '%v'", pName, oName, err)
			}
		}
	}
}
Example #10
// Makes sure that exactly the specified hosts are registered as instances with the load balancer
func (s *AWSCloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances []*elb.Instance, instances []*ec2.Instance) error {
	expected := sets.NewString()
	for _, instance := range instances {
		expected.Insert(orEmpty(instance.InstanceId))
	}

	actual := sets.NewString()
	for _, lbInstance := range lbInstances {
		actual.Insert(orEmpty(lbInstance.InstanceId))
	}

	additions := expected.Difference(actual)
	removals := actual.Difference(expected)

	addInstances := []*elb.Instance{}
	for instanceId := range additions {
		addInstance := &elb.Instance{}
		addInstance.InstanceId = aws.String(instanceId)
		addInstances = append(addInstances, addInstance)
	}

	removeInstances := []*elb.Instance{}
	for instanceId := range removals {
		removeInstance := &elb.Instance{}
		removeInstance.InstanceId = aws.String(instanceId)
		removeInstances = append(removeInstances, removeInstance)
	}

	if len(addInstances) > 0 {
		registerRequest := &elb.RegisterInstancesWithLoadBalancerInput{}
		registerRequest.Instances = addInstances
		registerRequest.LoadBalancerName = aws.String(loadBalancerName)
		_, err := s.elb.RegisterInstancesWithLoadBalancer(registerRequest)
		if err != nil {
			return err
		}
		glog.V(1).Infof("Instances added to load-balancer %s", loadBalancerName)
	}

	if len(removeInstances) > 0 {
		deregisterRequest := &elb.DeregisterInstancesFromLoadBalancerInput{}
		deregisterRequest.Instances = removeInstances
		deregisterRequest.LoadBalancerName = aws.String(loadBalancerName)
		_, err := s.elb.DeregisterInstancesFromLoadBalancer(deregisterRequest)
		if err != nil {
			return err
		}
		glog.V(1).Infof("Instances removed from load-balancer %s", loadBalancerName)
	}

	return nil
}
Example #11
func (im *realImageManager) detectImages(detected time.Time) error {
	images, err := im.dockerClient.ListImages(docker.ListImagesOptions{})
	if err != nil {
		return err
	}
	containers, err := im.dockerClient.ListContainers(docker.ListContainersOptions{
		All: true,
	})
	if err != nil {
		return err
	}

	// Make a set of images in use by containers.
	imagesInUse := sets.NewString()
	for _, container := range containers {
		imagesInUse.Insert(container.Image)
	}

	// Add new images and record those being used.
	now := time.Now()
	currentImages := sets.NewString()
	im.imageRecordsLock.Lock()
	defer im.imageRecordsLock.Unlock()
	for _, image := range images {
		currentImages.Insert(image.ID)

		// New image, set it as detected now.
		if _, ok := im.imageRecords[image.ID]; !ok {
			im.imageRecords[image.ID] = &imageRecord{
				detected: detected,
			}
		}

		// Set last used time to now if the image is being used.
		if isImageUsed(&image, imagesInUse) {
			im.imageRecords[image.ID].lastUsed = now
		}

		im.imageRecords[image.ID].size = image.VirtualSize
	}

	// Remove old images from our records.
	for image := range im.imageRecords {
		if !currentImages.Has(image) {
			delete(im.imageRecords, image)
		}
	}

	return nil
}
Example #12
func defaultPriorities() sets.String {
	return sets.NewString(
		// Prioritize nodes by least requested utilization.
		factory.RegisterPriorityFunction("LeastRequestedPriority", priorities.LeastRequestedPriority, 10),
		// Prioritizes nodes to help achieve balanced resource usage
		factory.RegisterPriorityFunction("BalancedResourceAllocation", priorities.BalancedResourceAllocation, 20),
		// spreads pods by minimizing the number of pods (belonging to the same service or replication controller) on the same node.
		factory.RegisterPriorityFunction("LeastResourceRemain", priorities.LeastResourceRemain, 30),

		factory.RegisterPriorityConfigFactory(
			"SelectorSpreadPriority",
			factory.PriorityConfigFactory{
				Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction {
					return priorities.NewSelectorSpreadPriority(args.ServiceLister, args.ControllerLister)
				},
				Weight: 40,
			},
		),
		factory.RegisterPriorityConfigFactory(
			"NetworkSpreadPriority",
			factory.PriorityConfigFactory{
				Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction {
					return priorities.NewNetworkSpreadPriority(args.ServiceLister, args.ControllerLister)
				},
				Weight: 50,
			},
		),
	)
}
Example #13
func TestTTLExpirationBasic(t *testing.T) {
	testObj := testStoreObject{id: "foo", val: "bar"}
	deleteChan := make(chan string)
	ttlStore := NewFakeExpirationStore(
		testStoreKeyFunc, deleteChan,
		&FakeExpirationPolicy{
			NeverExpire: sets.NewString(),
			RetrieveKeyFunc: func(obj interface{}) (string, error) {
				return obj.(*timestampedEntry).obj.(testStoreObject).id, nil
			},
		},
		util.RealClock{},
	)
	err := ttlStore.Add(testObj)
	if err != nil {
		t.Errorf("Unable to add obj %#v", testObj)
	}
	item, exists, err := ttlStore.Get(testObj)
	if err != nil {
		t.Errorf("Failed to get from store, %v", err)
	}
	if exists || item != nil {
		t.Errorf("Got unexpected item %#v", item)
	}
	key, _ := testStoreKeyFunc(testObj)
	select {
	case delKey := <-deleteChan:
		if delKey != key {
			t.Errorf("Unexpected delete for key %s", key)
		}
	case <-time.After(util.ForeverTestTimeout):
		t.Errorf("Unexpected timeout waiting on delete")
	}
	close(deleteChan)
}
Example #14
// LabelSet returns the names of all labels applied to the object as a
// kubernetes string set.
func (obj *MungeObject) LabelSet() sets.String {
	out := sets.NewString()
	for _, label := range obj.Issue.Labels {
		out.Insert(*label.Name)
	}
	return out
}
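Callers typically use the returned set for simple membership checks, for example (the label name here is hypothetical):

func hasLGTM(obj *MungeObject) bool {
	// "lgtm" is an illustrative label name, not from the original source.
	return obj.LabelSet().Has("lgtm")
}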
Example #15
// getBackendNames returns the names of backends in this L7 urlmap.
func (l *L7) getBackendNames() []string {
	if l.um == nil {
		return []string{}
	}
	beNames := sets.NewString()
	for _, pathMatcher := range l.um.PathMatchers {
		for _, pathRule := range pathMatcher.PathRules {
			// This is gross, but the urlmap only has links to backend services.
			parts := strings.Split(pathRule.Service, "/")
			name := parts[len(parts)-1]
			if name != "" {
				beNames.Insert(name)
			}
		}
	}
	// The default Service recorded in the urlMap is a link to the backend.
	// Note that this can either be user specified, or the L7 controller's
	// global default.
	parts := strings.Split(l.um.DefaultService, "/")
	defaultBackendName := parts[len(parts)-1]
	if defaultBackendName != "" {
		beNames.Insert(defaultBackendName)
	}
	return beNames.List()
}
Example #16
func newFakeMirrorClient() *fakeMirrorClient {
	m := fakeMirrorClient{}
	m.mirrorPods = sets.NewString()
	m.createCounts = make(map[string]int)
	m.deleteCounts = make(map[string]int)
	return &m
}
Example #17
func getSecretReferences(serviceAccount *api.ServiceAccount) sets.String {
	references := sets.NewString()
	for _, secret := range serviceAccount.Secrets {
		references.Insert(secret.Name)
	}
	return references
}
Example #18
func computeStatus(combinedStatus *github.CombinedStatus, requiredContexts []string) string {
	states := sets.String{}
	providers := sets.String{}

	if len(requiredContexts) == 0 {
		return *combinedStatus.State
	}

	requires := sets.NewString(requiredContexts...)
	for _, status := range combinedStatus.Statuses {
		if !requires.Has(*status.Context) {
			continue
		}
		states.Insert(*status.State)
		providers.Insert(*status.Context)
	}

	missing := requires.Difference(providers)
	if missing.Len() != 0 {
		glog.V(8).Infof("Failed to find %v in CombinedStatus for %s", missing.List(), *combinedStatus.SHA)
		return "incomplete"
	}
	switch {
	case states.Has("pending"):
		return "pending"
	case states.Has("error"):
		return "error"
	case states.Has("failure"):
		return "failure"
	default:
		return "success"
	}
}
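A hedged usage sketch of computeStatus with go-github-style pointer fields (the github.String helper and these literal contexts are assumptions, not from the original source):

func exampleComputeStatus() string {
	status := &github.CombinedStatus{
		State: github.String("success"),
		SHA:   github.String("abc123"),
		Statuses: []github.RepoStatus{
			{Context: github.String("ci/build"), State: github.String("success")},
			{Context: github.String("ci/test"), State: github.String("pending")},
		},
	}
	// "ci/test" is pending, so the aggregate is "pending"; if a
	// required context were missing entirely, the result would be
	// "incomplete".
	return computeStatus(status, []string{"ci/build", "ci/test"})
}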
Example #19
// validateSystemRequirements checks whether the required cgroup subsystems are mounted.
// As of now, 'cpu', 'cpuacct', 'cpuset', and 'memory' are required.
func validateSystemRequirements(mountUtil mount.Interface) error {
	const (
		cgroupMountType = "cgroup"
		localErr        = "system validation failed"
	)
	mountPoints, err := mountUtil.List()
	if err != nil {
		return fmt.Errorf("%s - %v", localErr, err)
	}
	expectedCgroups := sets.NewString("cpu", "cpuacct", "cpuset", "memory")
	for _, mountPoint := range mountPoints {
		if mountPoint.Type == cgroupMountType {
			for _, opt := range mountPoint.Opts {
				if expectedCgroups.Has(opt) {
					expectedCgroups.Delete(opt)
				}
			}
		}
	}

	if expectedCgroups.Len() > 0 {
		return fmt.Errorf("%s - Following Cgroup subsystem not mounted: %v", localErr, expectedCgroups.List())
	}
	return nil
}
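A hedged test-style sketch (mount.FakeMounter and its fields are assumptions about the mount package's test utilities, not taken from the source):

func exampleValidateSystemRequirements() error {
	fake := &mount.FakeMounter{
		MountPoints: []mount.MountPoint{
			{Path: "/sys/fs/cgroup/cpu", Type: "cgroup", Opts: []string{"cpu", "cpuacct"}},
			{Path: "/sys/fs/cgroup/memory", Type: "cgroup", Opts: []string{"memory"}},
		},
	}
	// "cpuset" is never mounted here, so this returns an error
	// naming the missing subsystem.
	return validateSystemRequirements(fake)
}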
Example #20
func TestEnsureRuleAlreadyExists(t *testing.T) {
	fcmd := exec.FakeCmd{
		CombinedOutputScript: []exec.FakeCombinedOutputAction{
			// iptables version check
			func() ([]byte, error) { return []byte("iptables v1.9.22"), nil },
			// Success.
			func() ([]byte, error) { return []byte{}, nil },
		},
	}
	fexec := exec.FakeExec{
		CommandScript: []exec.FakeCommandAction{
			// iptables version check
			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
			// The second Command() call is checking the rule.  Success of that exec means "done".
			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
		},
	}
	runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4)
	defer runner.Destroy()
	exists, err := runner.EnsureRule(Append, TableNAT, ChainOutput, "abc", "123")
	if err != nil {
		t.Errorf("expected success, got %v", err)
	}
	if !exists {
		t.Errorf("expected exists = true")
	}
	if fcmd.CombinedOutputCalls != 2 {
		t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls)
	}
	if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-C", "OUTPUT", "abc", "123") {
		t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1])
	}
}
Example #21
// newFakeClusterManager creates a new fake ClusterManager.
func newFakeClusterManager(clusterName string) *fakeClusterManager {
	fakeLbs := newFakeLoadBalancers(clusterName)
	fakeBackends := newFakeBackendServices()
	fakeIGs := newFakeInstanceGroups(sets.NewString())
	fakeHCs := newFakeHealthChecks()

	nodePool := NewNodePool(fakeIGs)
	healthChecker := NewHealthChecker(fakeHCs, "/")
	backendPool := NewBackendPool(
		fakeBackends,
		healthChecker, nodePool)
	l7Pool := NewLoadBalancerPool(
		fakeLbs,
		// TODO: change this
		backendPool,
		testDefaultBeNodePort,
	)
	cm := &ClusterManager{
		ClusterName:  clusterName,
		instancePool: nodePool,
		backendPool:  backendPool,
		l7Pool:       l7Pool,
	}
	return &fakeClusterManager{cm, fakeLbs, fakeBackends, fakeIGs}
}
Example #22
func TestDeleteRuleNew(t *testing.T) {
	fcmd := exec.FakeCmd{
		CombinedOutputScript: []exec.FakeCombinedOutputAction{
			// iptables version check
			func() ([]byte, error) { return []byte("iptables v1.9.22"), nil },
			// Success on the first call.
			func() ([]byte, error) { return []byte{}, nil },
			// Success on the second call.
			func() ([]byte, error) { return []byte{}, nil },
		},
	}
	fexec := exec.FakeExec{
		CommandScript: []exec.FakeCommandAction{
			// iptables version check
			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
			// The second Command() call is checking the rule.  Success of that means delete it.
			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
		},
	}
	runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4)
	defer runner.Destroy()
	err := runner.DeleteRule(TableNAT, ChainOutput, "abc", "123")
	if err != nil {
		t.Errorf("expected success, got %v", err)
	}
	if fcmd.CombinedOutputCalls != 3 {
		t.Errorf("expected 3 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls)
	}
	if !sets.NewString(fcmd.CombinedOutputLog[2]...).HasAll("iptables", "-t", "nat", "-D", "OUTPUT", "abc", "123") {
		t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[2])
	}
}
Example #23
// Sync syncs kubernetes instances with the instances in the instance group.
func (i *Instances) Sync(nodes []string) (err error) {
	glog.V(3).Infof("Syncing nodes %v", nodes)

	defer func() {
		// The node pool is only responsible for syncing nodes to instance
		// groups. It never creates/deletes, so if an instance group is
		// not found there's nothing it can do about it anyway. In most
		// cases this happens because the backend pool has deleted the
		// instance group; if it happens because a user deleted the IG by
		// mistake, we should just wait till the backend pool fixes it.
		if isHTTPErrorCode(err, http.StatusNotFound) {
			glog.Infof("Node pool encountered a 404, ignoring: %v", err)
			err = nil
		}
	}()

	pool := i.pool.snapshot()
	for name := range pool {
		var gceNodes sets.String
		gceNodes, err = i.list(name)
		if err != nil {
			return err
		}
		kubeNodes := sets.NewString(nodes...)

		// A node deleted via kubernetes could still exist as a gce vm. We don't
		// want to route requests to it. Similarly, a node added to kubernetes
		// needs to get added to the instance group so we do route requests to it.

		removeNodes := gceNodes.Difference(kubeNodes).List()
		addNodes := kubeNodes.Difference(gceNodes).List()
		if len(removeNodes) != 0 {
			if err = i.Remove(name, removeNodes); err != nil {
				return err
			}
		}

		if len(addNodes) != 0 {
			if err = i.Add(name, addNodes); err != nil {
				return err
			}
		}
	}
	return nil
}
Example #24
// UpdateTCPLoadBalancer is an implementation of TCPLoadBalancer.UpdateTCPLoadBalancer.
func (gce *GCECloud) UpdateTCPLoadBalancer(name, region string, hosts []string) error {
	pool, err := gce.service.TargetPools.Get(gce.projectID, region, name).Do()
	if err != nil {
		return err
	}
	existing := sets.NewString()
	for _, instance := range pool.Instances {
		existing.Insert(hostURLToComparablePath(instance))
	}

	var toAdd []*compute.InstanceReference
	var toRemove []*compute.InstanceReference
	for _, host := range hosts {
		link := makeComparableHostPath(gce.zone, host)
		if !existing.Has(link) {
			toAdd = append(toAdd, &compute.InstanceReference{Instance: link})
		}
		existing.Delete(link)
	}
	for link := range existing {
		toRemove = append(toRemove, &compute.InstanceReference{Instance: link})
	}

	if len(toAdd) > 0 {
		add := &compute.TargetPoolsAddInstanceRequest{Instances: toAdd}
		op, err := gce.service.TargetPools.AddInstance(gce.projectID, region, name, add).Do()
		if err != nil {
			return err
		}
		if err := gce.waitForRegionOp(op, region); err != nil {
			return err
		}
	}

	if len(toRemove) > 0 {
		rm := &compute.TargetPoolsRemoveInstanceRequest{Instances: toRemove}
		op, err := gce.service.TargetPools.RemoveInstance(gce.projectID, region, name, rm).Do()
		if err != nil {
			return err
		}
		if err := gce.waitForRegionOp(op, region); err != nil {
			return err
		}
	}

	// Try to verify that the correct number of nodes are now in the target pool.
	// We've been bitten by a bug here before (#11327) where all nodes were
	// accidentally removed and want to make similar problems easier to notice.
	updatedPool, err := gce.service.TargetPools.Get(gce.projectID, region, name).Do()
	if err != nil {
		return err
	}
	if len(updatedPool.Instances) != len(hosts) {
		glog.Errorf("Unexpected number of instances (%d) in target pool %s after updating (expected %d). Instances in updated pool: %s",
			len(updatedPool.Instances), name, len(hosts), strings.Join(updatedPool.Instances, ","))
		return fmt.Errorf("Unexpected number of instances (%d) in target pool %s after update (expected %d)", len(updatedPool.Instances), name, len(hosts))
	}
	return nil
}
Example #25
// getGeneratedFiles populates the munger's cached set of automatically
// generated files in the repo. These include docs, deep_copy, and conversions.
//
// It would be 'better' to call this for every commit but that takes
// a whole lot of time for almost always the same information, and if
// our results are slightly wrong, who cares? Instead look for the
// generated files once and if someone changed what files are generated
// we'll size slightly wrong. No biggie.
func (s *SizeMunger) getGeneratedFiles(obj *github.MungeObject) {
	if s.genFiles != nil {
		return
	}
	files := sets.NewString()
	prefixes := []string{}
	s.genFiles = &files
	s.genPrefixes = &prefixes

	file := s.generatedFilesFile
	if len(file) == 0 {
		glog.Infof("No --generated-files-config= supplied, applying no labels")
		return
	}
	fp, err := os.Open(file)
	if err != nil {
		glog.Errorf("Unable to open %q: %v", file, err)
		return
	}

	defer fp.Close()
	scanner := bufio.NewScanner(fp)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "#") || line == "" {
			continue
		}
		fields := strings.Fields(line)
		if len(fields) != 2 {
			glog.Errorf("Invalid line in generated docs config %s: %q", file, line)
			continue
		}
		eType := fields[0]
		file := fields[1]
		if eType == "prefix" {
			prefixes = append(prefixes, file)
		} else if eType == "path" {
			files.Insert(file)
		} else if eType == "paths-from-repo" {
			docs, err := obj.GetFileContents(file, "")
			if err != nil {
				continue
			}
			docSlice := strings.Split(docs, "\n")
			files.Insert(docSlice...)
		} else {
			glog.Errorf("Invalid line in generated docs config, unknown type: %s, %q", eType, line)
			continue
		}
	}
	if err := scanner.Err(); err != nil {
		glog.Errorf("Error scanning %s: %v", file, err)
		return
	}
	}
	s.genFiles = &files
	s.genPrefixes = &prefixes

	return
}
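For reference, the config file parsed above holds one directive per line; '#' comments and blank lines are skipped, and each directive is a type followed by a single argument. An illustrative file (the paths are made up):

# prefixes match any file under the given directory
prefix docs/
# paths name individual generated files
path pkg/api/deep_copy_generated.go
# paths-from-repo reads a newline-separated file list from the repo
paths-from-repo docs/.generated_docs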
Example #26
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	flag.Parse()

	var funcOut io.Writer
	if *functionDest == "-" {
		funcOut = os.Stdout
	} else {
		file, err := os.Create(*functionDest)
		if err != nil {
			glog.Fatalf("Couldn't open %v: %v", *functionDest, err)
		}
		defer file.Close()
		funcOut = file
	}

	data := new(bytes.Buffer)

	group, version := path.Split(*groupVersion)
	group = strings.TrimRight(group, "/")

	_, err := data.WriteString(fmt.Sprintf("package %v\n", version))
	if err != nil {
		glog.Fatalf("error writing package line: %v", err)
	}

	versionPath := pkgPath(group, version)
	generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw(), versionPath)
	apiShort := generator.AddImport(path.Join(pkgBase, "api"))
	generator.AddImport(path.Join(pkgBase, "api/resource"))
	// TODO(wojtek-t): Change the overwrites to a flag.
	generator.OverwritePackage(version, "")
	for _, knownType := range api.Scheme.KnownTypes(*groupVersion) {
		if knownType.PkgPath() != versionPath {
			continue
		}
		if err := generator.GenerateConversionsForType(version, knownType); err != nil {
			glog.Errorf("error while generating conversion functions for %v: %v", knownType, err)
		}
	}
	generator.RepackImports(sets.NewString())
	if err := generator.WriteImports(data); err != nil {
		glog.Fatalf("error while writing imports: %v", err)
	}
	if err := generator.WriteConversionFunctions(data); err != nil {
		glog.Fatalf("error while writing conversion functions: %v", err)
	}
	if err := generator.RegisterConversionFunctions(data, fmt.Sprintf("%s.Scheme", apiShort)); err != nil {
		glog.Fatalf("error while registering conversion functions: %v", err)
	}

	b, err := imports.Process("", data.Bytes(), nil)
	if err != nil {
		glog.Fatalf("error while update imports: %v", err)
	}
	if _, err := funcOut.Write(b); err != nil {
		glog.Fatalf("error while writing out the resulting file: %v", err)
	}
}
Example #27
func TestFilterQuotaPods(t *testing.T) {
	pods := []api.Pod{
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-running"},
			Status:     api.PodStatus{Phase: api.PodRunning},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-pending"},
			Status:     api.PodStatus{Phase: api.PodPending},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-succeeded"},
			Status:     api.PodStatus{Phase: api.PodSucceeded},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-unknown"},
			Status:     api.PodStatus{Phase: api.PodUnknown},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed"},
			Status:     api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-always"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyAlways,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-on-failure"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyOnFailure,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-never"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyNever,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
	}
	expectedResults := sets.NewString("pod-running",
		"pod-pending", "pod-unknown", "pod-failed-with-restart-always",
		"pod-failed-with-restart-on-failure")

	actualResults := sets.String{}
	result := FilterQuotaPods(pods)
	for i := range result {
		actualResults.Insert(result[i].Name)
	}

	if len(expectedResults) != len(actualResults) || !actualResults.HasAll(expectedResults.List()...) {
		t.Errorf("Expected results %v, Actual results %v", expectedResults, actualResults)
	}
}
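A hedged sketch of FilterQuotaPods consistent with the expectations above (not the original implementation): a pod counts against quota unless it has reached a terminal state it cannot leave.

func filterQuotaPods(pods []api.Pod) []api.Pod {
	var result []api.Pod
	for i := range pods {
		pod := pods[i]
		switch pod.Status.Phase {
		case api.PodSucceeded:
			// Terminal; never restarts, so it holds no quota.
			continue
		case api.PodFailed:
			// Failed pods only come back if the kubelet will
			// restart their containers.
			if pod.Spec.RestartPolicy != api.RestartPolicyAlways &&
				pod.Spec.RestartPolicy != api.RestartPolicyOnFailure {
				continue
			}
		}
		result = append(result, pod)
	}
	return result
}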
Example #28
func TestTTLList(t *testing.T) {
	testObjs := []testStoreObject{
		{id: "foo", val: "bar"},
		{id: "foo1", val: "bar1"},
		{id: "foo2", val: "bar2"},
	}
	expireKeys := sets.NewString(testObjs[0].id, testObjs[2].id)
	deleteChan := make(chan string)
	defer close(deleteChan)

	ttlStore := NewFakeExpirationStore(
		testStoreKeyFunc, deleteChan,
		&FakeExpirationPolicy{
			NeverExpire: sets.NewString(testObjs[1].id),
			RetrieveKeyFunc: func(obj interface{}) (string, error) {
				return obj.(*timestampedEntry).obj.(testStoreObject).id, nil
			},
		},
		util.RealClock{},
	)
	for _, obj := range testObjs {
		err := ttlStore.Add(obj)
		if err != nil {
			t.Errorf("Unable to add obj %#v", obj)
		}
	}
	listObjs := ttlStore.List()
	if len(listObjs) != 1 || !reflect.DeepEqual(listObjs[0], testObjs[1]) {
		t.Errorf("List returned unexpected results %#v", listObjs)
	}

	// Make sure all our deletes come through at an acceptable rate (1/100ms)
	for expireKeys.Len() != 0 {
		select {
		case delKey := <-deleteChan:
			if !expireKeys.Has(delKey) {
				t.Errorf("Unexpected delete for key %s", delKey)
			}
			expireKeys.Delete(delKey)
		case <-time.After(util.ForeverTestTimeout):
			t.Errorf("Unexpected timeout waiting on delete")
			return
		}
	}
}
Example #29
// Creates new queue which will use given RateLimiter to oversee execution.
func NewRateLimitedTimedQueue(limiter util.RateLimiter) *RateLimitedTimedQueue {
	return &RateLimitedTimedQueue{
		queue: UniqueQueue{
			queue: TimedQueue{},
			set:   sets.NewString(),
		},
		limiter: limiter,
	}
}
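A hedged usage sketch (util.NewTokenBucketRateLimiter is assumed to be the token-bucket constructor in the util package of this era):

func newEvictionQueue() *RateLimitedTimedQueue {
	// Allow 10 operations per second with a burst of 100; both
	// numbers are illustrative.
	return NewRateLimitedTimedQueue(util.NewTokenBucketRateLimiter(10, 100))
}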
Example #30
// NewHandler creates a new base handler that handles the passed-in
// operations.
func NewHandler(ops ...Operation) *Handler {
	operations := sets.NewString()
	for _, op := range ops {
		operations.Insert(string(op))
	}
	return &Handler{
		operations: operations,
	}
}
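A natural companion accessor, sketched here rather than taken from the source, reports whether the handler was registered for a given operation:

// Handles is a sketch of the obvious membership check over the
// operations set built in NewHandler.
func (h *Handler) Handles(operation Operation) bool {
	return h.operations.Has(string(operation))
}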