func ingressWatchFunc(c *client.Client) func(rv string) (watch.Interface, error) {
	return func(rv string) (watch.Interface, error) {
		options := api.ListOptions{ResourceVersion: rv}
		return c.Extensions().Ingress(api.NamespaceAll).Watch(
			labels.Everything(), fields.Everything(), options)
	}
}
// waitForJobFinish waits for a job to reach the given number of completions.
func waitForJobFinish(c *client.Client, ns, jobName string, completions int) error {
	return wait.Poll(poll, jobTimeout, func() (bool, error) {
		curr, err := c.Extensions().Jobs(ns).Get(jobName)
		if err != nil {
			return false, err
		}
		return curr.Status.Succeeded == completions, nil
	})
}
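// exampleWaitForJob is an illustrative caller added by the editor; it is not
// part of the original source. It assumes a job named "example-job" with 3
// completions already exists in ns, and uses Failf from the e2e framework.
func exampleWaitForJob(c *client.Client, ns string) {
	if err := waitForJobFinish(c, ns, "example-job", 3); err != nil {
		Failf("job example-job did not reach 3 completions: %v", err)
	}
}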
// getClusterUID returns the cluster UID. Rules for UID generation:
// If the user specifies a --cluster-uid param it overwrites everything
// else, check UID config map for a previously recorded uid
// else, check if there are any working Ingresses
//	- remember that "" is the cluster uid
// else, allocate a new uid
func getClusterUID(kubeClient *client.Client, name string) (string, error) {
	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
	if name != "" {
		glog.Infof("Using user provided cluster uid %v", name)
		// Don't save the uid in the vault, so users can rollback through
		// --cluster-uid=""
		return name, nil
	}

	existingUID, found, err := cfgVault.Get()
	if found {
		glog.Infof("Using saved cluster uid %q", existingUID)
		return existingUID, nil
	} else if err != nil {
		// This can fail because of:
		// 1. No such config map - found=false, err=nil
		// 2. No such key in config map - found=false, err=nil
		// 3. Apiserver flake - found=false, err!=nil
		// It is not safe to proceed in 3.
		return "", fmt.Errorf("Failed to retrieve current uid: %v, using %q as name", err, name)
	}

	// Check if the cluster has an Ingress with an ip.
	ings, err := kubeClient.Extensions().Ingress(api.NamespaceAll).List(api.ListOptions{LabelSelector: labels.Everything()})
	if err != nil {
		return "", err
	}
	namer := utils.Namer{}
	for _, ing := range ings.Items {
		if len(ing.Status.LoadBalancer.Ingress) != 0 {
			c := namer.ParseName(loadbalancers.GCEResourceName(ing.Annotations, "forwarding-rule"))
			if c.ClusterName != "" {
				return c.ClusterName, cfgVault.Put(c.ClusterName)
			}
			glog.Infof("Found a working Ingress, assuming uid is empty string")
			return "", cfgVault.Put("")
		}
	}

	// Allocate a new uid.
	f, err := os.Open("/dev/urandom")
	if err != nil {
		return "", err
	}
	defer f.Close()
	b := make([]byte, 8)
	if _, err := f.Read(b); err != nil {
		return "", err
	}
	uid := fmt.Sprintf("%x", b)
	return uid, cfgVault.Put(uid)
}
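// exampleResolveClusterUID is an editor-added sketch of a call site for the
// precedence rules above; it is not part of the original source.
// clusterUIDFlag stands in for the --cluster-uid flag value and may
// legitimately be empty.
func exampleResolveClusterUID(kubeClient *client.Client, clusterUIDFlag string) string {
	uid, err := getClusterUID(kubeClient, clusterUIDFlag)
	if err != nil {
		glog.Fatalf("Could not resolve cluster uid: %v", err)
	}
	// An empty uid here is valid: it means the controller manages resources
	// named without a cluster suffix.
	return uid
}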
// waitForJobFail waits for a job to fail.
func waitForJobFail(c *client.Client, ns, jobName string) error {
	return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
		curr, err := c.Extensions().Jobs(ns).Get(jobName)
		if err != nil {
			return false, err
		}
		for _, c := range curr.Status.Conditions {
			if c.Type == batch.JobFailed && c.Status == api.ConditionTrue {
				return true, nil
			}
		}
		return false, nil
	})
}
func waitForDeployment(c *k8sclient.Client, ns string, name string, sleepMillis time.Duration) error {
	util.Infof("Waiting for deployment %s to be ready...\n", name)
	for {
		deployment, err := c.Extensions().Deployments(ns).Get(name)
		if err != nil {
			return err
		}
		available := deployment.Status.AvailableReplicas
		unavailable := deployment.Status.UnavailableReplicas
		if unavailable == 0 && available > 0 {
			util.Infof("Deployment %s now has %d available replicas\n", name, available)
			return nil
		}
		time.Sleep(sleepMillis)
	}
}
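// waitForDeploymentWithTimeout is an editor-added sketch, not part of the
// original source: the same readiness check expressed with wait.Poll so
// callers get a timeout instead of an unbounded loop. The 10*time.Minute
// budget is an assumed value, and wait refers to the same
// k8s.io/kubernetes/pkg/util/wait package used by the job helpers above.
func waitForDeploymentWithTimeout(c *k8sclient.Client, ns, name string, sleepMillis time.Duration) error {
	return wait.Poll(sleepMillis, 10*time.Minute, func() (bool, error) {
		deployment, err := c.Extensions().Deployments(ns).Get(name)
		if err != nil {
			return false, err
		}
		return deployment.Status.UnavailableReplicas == 0 &&
			deployment.Status.AvailableReplicas > 0, nil
	})
}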
// GetWorkloads returns a list of all workloads in the cluster.
func GetWorkloads(client *k8sClient.Client, heapsterClient client.HeapsterClient,
	nsQuery *common.NamespaceQuery, metricQuery *dataselect.MetricQuery) (*Workloads, error) {

	log.Printf("Getting lists of all workloads")
	// The trailing count on each channel is the number of consumers expected
	// to read from it; the pod and event lists feed several workload views.
	channels := &common.ResourceChannels{
		ReplicationControllerList: common.GetReplicationControllerListChannel(client, nsQuery, 1),
		ReplicaSetList:            common.GetReplicaSetListChannel(client.Extensions(), nsQuery, 1),
		JobList:                   common.GetJobListChannel(client.Batch(), nsQuery, 1),
		DaemonSetList:             common.GetDaemonSetListChannel(client.Extensions(), nsQuery, 1),
		DeploymentList:            common.GetDeploymentListChannel(client.Extensions(), nsQuery, 1),
		PetSetList:                common.GetPetSetListChannel(client.Apps(), nsQuery, 1),
		ServiceList:               common.GetServiceListChannel(client, nsQuery, 1),
		PodList:                   common.GetPodListChannel(client, nsQuery, 7),
		EventList:                 common.GetEventListChannel(client, nsQuery, 6),
	}

	return GetWorkloadsFromChannels(channels, heapsterClient, metricQuery)
}
// createIngress creates an Ingress with num rules. E.g.,
// start = 1, num = 2 will give you a single Ingress with 2 rules:
// Ingress {
//	foo1.bar.com: /foo1
//	foo2.bar.com: /foo2
// }
func createIngress(c *client.Client, ns string, start, num int) extensions.Ingress {
	ing := extensions.Ingress{
		ObjectMeta: api.ObjectMeta{
			Name:      fmt.Sprintf("%v%d", appPrefix, start),
			Namespace: ns,
		},
		Spec: extensions.IngressSpec{
			Backend: &extensions.IngressBackend{
				ServiceName: fmt.Sprintf("%v%d", appPrefix, start),
				ServicePort: intstr.FromInt(httpContainerPort),
			},
			Rules: []extensions.IngressRule{},
		},
	}
	for i := start; i < start+num; i++ {
		ing.Spec.Rules = append(ing.Spec.Rules, ruleByIndex(i))
	}
	Logf("Creating ingress %v", start)
	_, err := c.Extensions().Ingress(ns).Create(&ing)
	Expect(err).NotTo(HaveOccurred())
	return ing
}
func waitForDeployments(c *k8sclient.Client, ns string, waitAll bool, names []string, sleepMillis time.Duration) error {
	if waitAll {
		deployments, err := c.Extensions().Deployments(ns).List(api.ListOptions{})
		if err != nil {
			return err
		}
		for _, deployment := range deployments.Items {
			if err := waitForDeployment(c, ns, deployment.Name, sleepMillis); err != nil {
				return err
			}
		}
	} else {
		for _, name := range names {
			if err := waitForDeployment(c, ns, name, sleepMillis); err != nil {
				return err
			}
		}
	}
	return nil
}
// DoTestStorageClasses tests storage classes for one api version.
func DoTestStorageClasses(t *testing.T, client *client.Client, ns *api.Namespace) {
	// Make a storage class object.
	s := extensions.StorageClass{
		TypeMeta: unversioned.TypeMeta{
			Kind: "StorageClass",
		},
		ObjectMeta: api.ObjectMeta{
			Name: "gold",
		},
		Provisioner: provisionerPluginName,
	}

	if _, err := client.Extensions().StorageClasses().Create(&s); err != nil {
		t.Errorf("unable to create test storage class: %v", err)
	}
	defer deleteStorageClassOrErrorf(t, client, s.Namespace, s.Name)

	// A pvc that references the storage class via the beta annotation.
	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "uses-storageclass",
			Namespace: ns.Name,
			Annotations: map[string]string{
				"volume.beta.kubernetes.io/storage-class": "gold",
			},
		},
		Spec: api.PersistentVolumeClaimSpec{
			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("1G")}},
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
		},
	}

	if _, err := client.PersistentVolumeClaims(ns.Name).Create(pvc); err != nil {
		t.Errorf("Failed to create pvc: %v", err)
	}
	defer deletePersistentVolumeClaimOrErrorf(t, client, ns.Name, pvc.Name)
}
	// gce project, just back off.
	Expect(ingController.Cleanup(false)).NotTo(HaveOccurred())
	responseTimes = []time.Duration{}
	creationTimes = []time.Duration{}
})

AfterEach(func() {
	Logf("Average creation time %+v, health check time %+v", creationTimes, responseTimes)
	if CurrentGinkgoTestDescription().Failed {
		kubectlLogLBController(client, ns)
		Logf("\nOutput of kubectl describe ing:\n")
		desc, _ := runKubectl("describe", "ing", fmt.Sprintf("--namespace=%v", ns))
		Logf(desc)
	}
	// Delete all Ingress, then wait for the controller to cleanup.
	ings, err := client.Extensions().Ingress(ns).List(api.ListOptions{})
	if err != nil {
		Logf("WARNING: Failed to list ingress: %+v", err)
	} else {
		for _, ing := range ings.Items {
			Logf("Deleting ingress %v/%v", ing.Namespace, ing.Name)
			if err := client.Extensions().Ingress(ns).Delete(ing.Name, nil); err != nil {
				Logf("WARNING: Failed to delete ingress %v: %v", ing.Name, err)
			}
		}
	}
	pollErr := wait.Poll(5*time.Second, lbCleanupTimeout, func() (bool, error) {
		if err := ingController.Cleanup(false); err != nil {
			Logf("Still waiting for glbc to cleanup: %v", err)
			return false, nil
		}
func getJob(c *client.Client, ns, name string) (*batch.Job, error) {
	return c.Extensions().Jobs(ns).Get(name)
}
func deleteJob(c *client.Client, ns, name string) error {
	return c.Extensions().Jobs(ns).Delete(name, nil)
}
		}
		// Clean up the goproxyPod.
		cleanup(goproxyPodPath, ns, goproxyPodSelector)
	}
})

It("should support inline execution and attach", func() {
	nsFlag := fmt.Sprintf("--namespace=%v", ns)

	By("executing a command with run and attach with stdin")
	runOutput := newKubectlCommand(nsFlag, "run", "run-test", "--image=busybox", "--restart=Never", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
		withStdinData("abcd1234").
		execOrDie()
	Expect(runOutput).To(ContainSubstring("abcd1234"))
	Expect(runOutput).To(ContainSubstring("stdin closed"))
	Expect(c.Extensions().Jobs(ns).Delete("run-test", api.NewDeleteOptions(0))).To(BeNil())

	By("executing a command with run and attach without stdin")
	runOutput = newKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image=busybox", "--restart=Never", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
		withStdinData("abcd1234").
		execOrDie()
	Expect(runOutput).ToNot(ContainSubstring("abcd1234"))
	Expect(runOutput).To(ContainSubstring("stdin closed"))
	Expect(c.Extensions().Jobs(ns).Delete("run-test-2", api.NewDeleteOptions(0))).To(BeNil())

	By("executing a command with run and attach with stdin with open stdin should remain running")
	runOutput = newKubectlCommand(nsFlag, "run", "run-test-3", "--image=busybox", "--restart=Never", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
		withStdinData("abcd1234\n").
		execOrDie()
	Expect(runOutput).ToNot(ContainSubstring("stdin closed"))

	runTestPod, err := util.GetFirstPod(c, ns, map[string]string{"run": "run-test-3"})
func ingressWatchFunc(c *client.Client, ns string) func(options api.ListOptions) (watch.Interface, error) {
	return func(options api.ListOptions) (watch.Interface, error) {
		return c.Extensions().Ingress(ns).Watch(options)
	}
}
// filled in BeforeEach
var c *client.Client
var ns string

BeforeEach(func() {
	c = f.Client
	ns = f.Namespace.Name
})

framework.KubeDescribe("DynamicProvisioner", func() {
	It("should create and delete persistent volumes [Slow]", func() {
		framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")

		By("creating a StorageClass")
		class := newStorageClass()
		_, err := c.Extensions().StorageClasses().Create(class)
		defer c.Extensions().StorageClasses().Delete(class.Name)
		Expect(err).NotTo(HaveOccurred())

		By("creating a claim with a dynamic provisioning annotation")
		claim := newClaim(ns, false)
		defer func() {
			c.PersistentVolumeClaims(ns).Delete(claim.Name)
		}()
		claim, err = c.PersistentVolumeClaims(ns).Create(claim)
		Expect(err).NotTo(HaveOccurred())

		testDynamicProvisioning(c, claim)
	})
})
func deleteJob(c *client.Client, ns, name string) error {
	// Delete with a zero grace period so the job is removed immediately.
	return c.Extensions().Jobs(ns).Delete(name, api.NewDeleteOptions(0))
}
func ingressListFunc(c *client.Client, ns string) func() (runtime.Object, error) {
	return func() (runtime.Object, error) {
		return c.Extensions().Ingress(ns).List(labels.Everything(), fields.Everything())
	}
}
func updateJob(c *client.Client, ns string, job *batch.Job) (*batch.Job, error) {
	return c.Extensions().Jobs(ns).Update(job)
}
func ingressWatchFunc(c *client.Client) func(rv string) (watch.Interface, error) {
	return func(rv string) (watch.Interface, error) {
		return c.Extensions().Ingress(api.NamespaceAll).Watch(
			labels.Everything(), fields.Everything(), rv)
	}
}
func createIngressForDomain(ns string, domain string, c *k8sclient.Client, fac *cmdutil.Factory) error {
	rapi.AddToScheme(kapi.Scheme)
	rapiv1.AddToScheme(kapi.Scheme)

	ingressClient := c.Extensions().Ingress(ns)
	ingresses, err := ingressClient.List(kapi.ListOptions{})
	if err != nil {
		util.Errorf("Failed to load ingresses in namespace %s with error %v", ns, err)
		return err
	}
	rc, err := c.Services(ns).List(kapi.ListOptions{})
	if err != nil {
		util.Errorf("Failed to load services in namespace %s with error %v", ns, err)
		return err
	}
	var labels = make(map[string]string)
	labels["provider"] = "fabric8"

	items := rc.Items
	for _, service := range items {
		name := service.ObjectMeta.Name
		serviceSpec := service.Spec
		found := false
		// TODO we should probably add an annotation to disable ingress creation
		if name != "jenkinshift" {
			for _, ingress := range ingresses.Items {
				if ingress.GetName() == name {
					found = true
					break
				}
				// TODO look for other ingresses with different names?
				for _, rule := range ingress.Spec.Rules {
					http := rule.HTTP
					if http != nil {
						for _, path := range http.Paths {
							ruleService := path.Backend.ServiceName
							if ruleService == name {
								found = true
								break
							}
						}
					}
				}
			}
			if !found {
				ports := serviceSpec.Ports
				hostName := name + "." + ns + "." + domain
				if len(ports) > 0 {
					rules := []extensions.IngressRule{}
					for _, port := range ports {
						rule := extensions.IngressRule{
							Host: hostName,
							IngressRuleValue: extensions.IngressRuleValue{
								HTTP: &extensions.HTTPIngressRuleValue{
									Paths: []extensions.HTTPIngressPath{
										{
											Backend: extensions.IngressBackend{
												ServiceName: name,
												// we need to use the target port until
												// https://github.com/nginxinc/kubernetes-ingress/issues/41 is fixed
												//ServicePort: intstr.FromInt(port.Port),
												ServicePort: port.TargetPort,
											},
										},
									},
								},
							},
						}
						rules = append(rules, rule)
					}
					ingress := extensions.Ingress{
						ObjectMeta: kapi.ObjectMeta{
							Labels: labels,
							Name:   name,
						},
						Spec: extensions.IngressSpec{
							Rules: rules,
						},
					}
					// lets create the ingress
					_, err = ingressClient.Create(&ingress)
					if err != nil {
						util.Errorf("Failed to create the ingress %s with error %v", name, err)
						return err
					}
				}
			}
		}
	}
	return nil
}
// NewLoadBalancerController creates a controller for nginx loadbalancer
func NewLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Duration, defaultSvc,
	customErrorSvc nginx.Service, namespace string, lbInfo *lbInfo) (*loadBalancerController, error) {

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	lbc := loadBalancerController{
		client: kubeClient,
		stopCh: make(chan struct{}),
		recorder: eventBroadcaster.NewRecorder(
			api.EventSource{Component: "nginx-lb-controller"}),
		lbInfo: lbInfo,
	}
	lbc.ingQueue = NewTaskQueue(lbc.syncIngress)
	lbc.configQueue = NewTaskQueue(lbc.syncConfig)
	lbc.ngx = nginx.NewManager(kubeClient, defaultSvc, customErrorSvc)

	// Ingress watch handlers
	pathHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			addIng := obj.(*extensions.Ingress)
			lbc.recorder.Eventf(addIng, api.EventTypeNormal, "ADD", fmt.Sprintf("Adding ingress %s/%s", addIng.Namespace, addIng.Name))
			lbc.ingQueue.enqueue(obj)
		},
		DeleteFunc: lbc.ingQueue.enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				glog.V(2).Infof("Ingress %v changed, syncing", cur.(*extensions.Ingress).Name)
			}
			lbc.ingQueue.enqueue(cur)
		},
	}
	lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  ingressListFunc(lbc.client, namespace),
			WatchFunc: ingressWatchFunc(lbc.client, namespace),
		},
		&extensions.Ingress{}, resyncPeriod, pathHandlers)

	// Config watch handlers
	configHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			lbc.configQueue.enqueue(obj)
		},
		DeleteFunc: lbc.configQueue.enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				glog.V(2).Infof("nginx rc changed, syncing")
				lbc.configQueue.enqueue(cur)
			}
		},
	}
	lbc.configLister.Store, lbc.configController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(api.ListOptions) (runtime.Object, error) {
				switch lbInfo.DeployType.(type) {
				case *api.ReplicationController:
					rc, err := kubeClient.ReplicationControllers(lbInfo.PodNamespace).Get(lbInfo.ObjectName)
					return &api.ReplicationControllerList{
						Items: []api.ReplicationController{*rc},
					}, err
				case *extensions.DaemonSet:
					ds, err := kubeClient.Extensions().DaemonSets(lbInfo.PodNamespace).Get(lbInfo.ObjectName)
					return &extensions.DaemonSetList{
						Items: []extensions.DaemonSet{*ds},
					}, err
				default:
					return nil, errInvalidKind
				}
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				switch lbInfo.DeployType.(type) {
				case *api.ReplicationController:
					options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": lbInfo.ObjectName})
					return kubeClient.ReplicationControllers(lbInfo.PodNamespace).Watch(options)
				case *extensions.DaemonSet:
					options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": lbInfo.ObjectName})
					return kubeClient.Extensions().DaemonSets(lbInfo.PodNamespace).Watch(options)
				default:
					return nil, errInvalidKind
				}
			},
		},
		&api.ReplicationController{}, resyncPeriod, configHandlers)

	return &lbc, nil
}
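// Construction sketch, added by the editor for illustration; the resync
// period, the helper variables, and the Run method are assumptions based on
// typical controller entrypoints, not the original source:
//
//	kubeClient, err := client.NewInCluster()
//	if err != nil {
//		glog.Fatalf("Failed to create kubernetes client: %v", err)
//	}
//	lbc, err := NewLoadBalancerController(
//		kubeClient, 30*time.Second, defaultSvc, customErrorSvc, api.NamespaceAll, lbInfo)
//	if err != nil {
//		glog.Fatalf("Failed to create loadbalancer controller: %v", err)
//	}
//	lbc.Run()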
func deleteStorageClassOrErrorf(t *testing.T, c *client.Client, ns, name string) {
	if err := c.Extensions().StorageClasses().Delete(name); err != nil {
		t.Errorf("unable to delete storage class %v: %v", name, err)
	}
}
		cleanup(goproxyPodPath, ns, goproxyPodSelector)
	}
})

It("should support inline execution and attach", func() {
	SkipUnlessServerVersionGTE(jobsVersion, c)

	nsFlag := fmt.Sprintf("--namespace=%v", ns)

	By("executing a command with run and attach with stdin")
	runOutput := newKubectlCommand(nsFlag, "run", "run-test", "--image=busybox", "--restart=Never", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
		withStdinData("abcd1234").
		execOrDie()
	Expect(runOutput).To(ContainSubstring("abcd1234"))
	Expect(runOutput).To(ContainSubstring("stdin closed"))
	Expect(c.Extensions().Jobs(ns).Delete("run-test", nil)).To(BeNil())

	By("executing a command with run and attach without stdin")
	runOutput = newKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image=busybox", "--restart=Never", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
		withStdinData("abcd1234").
		execOrDie()
	Expect(runOutput).ToNot(ContainSubstring("abcd1234"))
	Expect(runOutput).To(ContainSubstring("stdin closed"))
	Expect(c.Extensions().Jobs(ns).Delete("run-test-2", nil)).To(BeNil())

	By("executing a command with run and attach with stdin with open stdin should remain running")
	runOutput = newKubectlCommand(nsFlag, "run", "run-test-3", "--image=busybox", "--restart=Never", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
		withStdinData("abcd1234\n").
		execOrDie()
	Expect(runOutput).ToNot(ContainSubstring("stdin closed"))

	runTestPod, err := util.GetFirstPod(c, ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}))
func ingressWatchFunc(c *client.Client, ns string) func(options api.ListOptions) (watch.Interface, error) {
	return func(options api.ListOptions) (watch.Interface, error) {
		return c.Extensions().Ingress(ns).Watch(
			labels.Everything(), fields.Everything(), options)
	}
}
func createJob(c *client.Client, ns string, job *extensions.Job) (*extensions.Job, error) {
	return c.Extensions().Jobs(ns).Create(job)
}
func ingressListFunc(c *client.Client, ns string) func(api.ListOptions) (runtime.Object, error) {
	return func(opts api.ListOptions) (runtime.Object, error) {
		return c.Extensions().Ingress(ns).List(opts)
	}
}
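// Wiring sketch (editor-added, not from the original source): these
// ListOptions-based helpers match the field types of cache.ListWatch, which
// is how NewLoadBalancerController above plugs them into an informer.
//
//	lw := &cache.ListWatch{
//		ListFunc:  ingressListFunc(c, ns),
//		WatchFunc: ingressWatchFunc(c, ns),
//	}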
// foo0.bar.com: /foo0
// foo1.bar.com: /foo1
if numApps < numIng {
	Failf("Need more apps than Ingress")
}
appsPerIngress := numApps / numIng
By(fmt.Sprintf("Creating %d rcs + svc, and %d apps per Ingress", numApps, appsPerIngress))
for appID := 0; appID < numApps; appID = appID + appsPerIngress {
	// Creates appsPerIngress apps, then creates one Ingress with paths to all the apps.
	for j := appID; j < appID+appsPerIngress; j++ {
		createApp(client, ns, j)
	}
	createIngress(client, ns, appID, appsPerIngress)
}

ings, err := client.Extensions().Ingress(ns).List(
	labels.Everything(), fields.Everything())
Expect(err).NotTo(HaveOccurred())
for _, ing := range ings.Items {
	// Wait for the loadbalancer IP.
	start := time.Now()
	address, err := waitForIngressAddress(client, ing.Namespace, ing.Name, lbPollTimeout)
	if err != nil {
		Failf("Ingress failed to acquire an IP address within %v", lbPollTimeout)
	}
	Expect(err).NotTo(HaveOccurred())
	By(fmt.Sprintf("Found address %v for ingress %v, took %v to come online", address, ing.Name, time.Since(start)))
	creationTimes = append(creationTimes, time.Since(start))
func ingressListFunc(c *client.Client) func() (runtime.Object, error) {
	return func() (runtime.Object, error) {
		return c.Extensions().Ingress(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	}
}