// Retrieves metrics information.
func getMetrics(c *client.Client) (string, error) {
	body, err := c.Get().AbsPath("/metrics").DoRaw()
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// Query sends a command to the server and returns the Response.
func Query(c *client.Client, query string) (*influxdb.Response, error) {
	result, err := c.Get().
		Prefix("proxy").
		Namespace("kube-system").
		Resource("services").
		Name(influxdbService + ":api").
		Suffix("query").
		Param("q", query).
		Param("db", influxdbDatabaseName).
		Param("epoch", "s").
		Do().
		Raw()
	if err != nil {
		return nil, err
	}
	var response influxdb.Response
	dec := json.NewDecoder(bytes.NewReader(result))
	dec.UseNumber()
	err = dec.Decode(&response)
	if err != nil {
		return nil, err
	}
	return &response, nil
}
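// Usage sketch (added for illustration, not part of the original source): run a
// simple InfluxDB query through the proxy helper above and report how many
// per-statement result sets came back. The query string here is a hypothetical example.
func countQueryResults(c *client.Client) (int, error) {
	resp, err := Query(c, "SHOW SERIES")
	if err != nil {
		return 0, err
	}
	// influxdb.Response exposes the per-statement results as a slice.
	return len(resp.Results), nil
}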
// Performs a get on a node proxy endpoint given the nodename and rest client.
func nodeProxyRequest(c *client.Client, node, endpoint string) (restclient.Result, error) {
	// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
	// This will leak a goroutine if proxy hangs. #22165
	subResourceProxyAvailable, err := serverVersionGTE(subResourceServiceAndNodeProxyVersion, c)
	if err != nil {
		return restclient.Result{}, err
	}
	var result restclient.Result
	finished := make(chan struct{})
	go func() {
		if subResourceProxyAvailable {
			result = c.Get().
				Resource("nodes").
				SubResource("proxy").
				Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
				Suffix(endpoint).
				Do()
		} else {
			result = c.Get().
				Prefix("proxy").
				Resource("nodes").
				Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
				Suffix(endpoint).
				Do()
		}
		finished <- struct{}{}
	}()
	select {
	case <-finished:
		return result, nil
	case <-time.After(proxyTimeout):
		return restclient.Result{}, nil
	}
}
// serverAPIVersions fetches the server versions available from the groupless API at the given prefix.
func serverAPIVersions(c *kclient.Client, grouplessPrefix string) ([]unversioned.GroupVersion, error) {
	// Get versions doc
	body, err := c.Get().AbsPath(grouplessPrefix).Do().Raw()
	if err != nil {
		return []unversioned.GroupVersion{}, err
	}
	// Unmarshal
	var v unversioned.APIVersions
	err = json.Unmarshal(body, &v)
	if err != nil {
		return []unversioned.GroupVersion{}, fmt.Errorf("got '%s': %v", string(body), err)
	}
	// Convert to GroupVersion structs
	serverAPIVersions := []unversioned.GroupVersion{}
	for _, version := range v.Versions {
		gv, err := unversioned.ParseGroupVersion(version)
		if err != nil {
			return []unversioned.GroupVersion{}, err
		}
		serverAPIVersions = append(serverAPIVersions, gv)
	}
	return serverAPIVersions, nil
}
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
	// It should be OK to list unschedulable Nodes here.
	By("getting list of nodes")
	nodeList, err := c.Nodes().List(api.ListOptions{})
	framework.ExpectNoError(err)
	var errors []error
	retries := maxRetries
	for {
		errors = []error{}
		for _, node := range nodeList.Items {
			// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
			// Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally.
			statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
			By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
			_, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
			if err != nil {
				errors = append(errors, err)
			}
		}
		if len(errors) == 0 {
			return
		}
		if retries--; retries <= 0 {
			break
		}
		framework.Logf("failed to retrieve kubelet stats -\n %v", errors)
		time.Sleep(sleepDuration)
	}
	framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}
// Performs a get on a node proxy endpoint given the nodename and rest client.
func nodeProxyRequest(c *client.Client, node, endpoint string) client.Result {
	return c.Get().
		Prefix("proxy").
		Resource("nodes").
		Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
		Suffix(endpoint).
		Do()
}
// utility function for gomega Eventually
func getPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
	logs, err := c.Get().
		Resource("pods").
		Namespace(namespace).
		Name(podName).
		SubResource("log").
		Param("container", containerName).
		Do().
		Raw()
	if err != nil {
		return "", err
	}
	// err is always nil here, so checking the body is sufficient.
	if strings.Contains(string(logs), "Internal Error") {
		return "", fmt.Errorf("Internal Error")
	}
	return string(logs), nil
}
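// Example of the intended gomega usage (an illustrative sketch, not part of the
// original source): poll a container's log with Eventually until an expected
// marker appears. The namespace, pod/container names, and the "DONE" marker are
// hypothetical placeholders.
func waitForLogMarker(c *client.Client, ns, podName, containerName string) {
	Eventually(func() (string, error) {
		return getPodLogs(c, ns, podName, containerName)
	}, 2*time.Minute, 2*time.Second).Should(ContainSubstring("DONE"))
}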
// Resets latency metrics in apiserver.
func resetMetrics(c *client.Client) error {
	Logf("Resetting latency metrics in apiserver...")
	body, err := c.Get().AbsPath("/resetMetrics").DoRaw()
	if err != nil {
		return err
	}
	if string(body) != "metrics reset\n" {
		return fmt.Errorf("Unexpected response: %q", string(body))
	}
	return nil
}
func makeRequestToGuestbook(c *client.Client, cmd, value string, ns string) (string, error) {
	result, err := c.Get().
		Prefix("proxy").
		Namespace(ns).
		Resource("services").
		Name("frontend").
		Suffix("/guestbook.php").
		Param("cmd", cmd).
		Param("key", "messages").
		Param("value", value).
		Do().
		Raw()
	return string(result), err
}
// availSize returns the available disk space on a given node by querying node stats which
// is in turn obtained internally from cadvisor.
func availSize(c *client.Client, node *api.Node) (uint64, error) {
	statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
	Logf("Querying stats for node %s using url %s", node.Name, statsResource)
	res, err := c.Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw()
	if err != nil {
		return 0, fmt.Errorf("error querying cAdvisor API: %v", err)
	}
	ci := cadvisorapi.ContainerInfo{}
	err = json.Unmarshal(res, &ci)
	if err != nil {
		return 0, fmt.Errorf("couldn't unmarshal container info: %v", err)
	}
	// Guard against an empty stats response before indexing into it.
	if len(ci.Stats) == 0 || len(ci.Stats[len(ci.Stats)-1].Filesystem) == 0 {
		return 0, fmt.Errorf("no filesystem stats reported for node %s", node.Name)
	}
	return ci.Stats[len(ci.Stats)-1].Filesystem[0].Available, nil
}
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
	// It should be OK to list unschedulable Nodes here.
	By("getting list of nodes")
	nodeList, err := c.Nodes().List(api.ListOptions{})
	framework.ExpectNoError(err)
	var errors []error

	// returns maxRetries, sleepDuration
	readConfig := func() (int, time.Duration) {
		// Read in configuration settings, reasonable defaults.
		retry := framework.TestContext.Cadvisor.MaxRetries
		if framework.TestContext.Cadvisor.MaxRetries == 0 {
			retry = 6
			framework.Logf("Overriding default retry value of zero to %d", retry)
		}
		sleepDurationMS := framework.TestContext.Cadvisor.SleepDurationMS
		if sleepDurationMS == 0 {
			sleepDurationMS = 10000
			framework.Logf("Overriding default milliseconds value of zero to %d", sleepDurationMS)
		}
		return retry, time.Duration(sleepDurationMS) * time.Millisecond
	}
	maxRetries, sleepDuration := readConfig()

	for {
		errors = []error{}
		for _, node := range nodeList.Items {
			// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
			// Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally.
			statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
			By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
			_, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
			if err != nil {
				errors = append(errors, err)
			}
		}
		if len(errors) == 0 {
			return
		}
		if maxRetries--; maxRetries <= 0 {
			break
		}
		framework.Logf("failed to retrieve kubelet stats -\n %v", errors)
		time.Sleep(sleepDuration)
	}
	framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}
// readTransactions reads # of transactions from the k8petstore web server endpoint.
// for more details see the source of the k8petstore web server.
func readTransactions(c *client.Client, ns string) (error, int) {
	body, err := c.Get().
		Namespace(ns).
		Prefix("proxy").
		Resource("services").
		Name("frontend").
		Suffix("llen").
		DoRaw()
	if err != nil {
		return err, -1
	}
	totalTrans, err := strconv.Atoi(string(body))
	return err, totalTrans
}
func makeRequestToGuestbook(c *client.Client, cmd, value string, ns string) (string, error) {
	proxyRequest, errProxy := getServicesProxyRequest(c, c.Get())
	if errProxy != nil {
		return "", errProxy
	}
	result, err := proxyRequest.Namespace(ns).
		Name("frontend").
		Suffix("/guestbook.php").
		Param("cmd", cmd).
		Param("key", "messages").
		Param("value", value).
		Do().
		Raw()
	return string(result), err
}
func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client *client.Client) {
	var failed []string
	expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
		failed = []string{}
		for _, fileName := range fileNames {
			if _, err := client.Get().
				Namespace(pod.Namespace).
				Resource("pods").
				SubResource("proxy").
				Name(pod.Name).
				Suffix(fileDir, fileName).
				Do().Raw(); err != nil {
				Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
				failed = append(failed, fileName)
			}
		}
		if len(failed) == 0 {
			return true, nil
		}
		Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
		return false, nil
	}))
	Expect(len(failed)).To(Equal(0))
}
// readTransactions reads # of transactions from the k8petstore web server endpoint.
// for more details see the source of the k8petstore web server.
func readTransactions(c *client.Client, ns string) (error, int) {
	proxyRequest, errProxy := getServicesProxyRequest(c, c.Get())
	if errProxy != nil {
		return errProxy, -1
	}
	body, err := proxyRequest.Namespace(ns).
		Name("frontend").
		Suffix("llen").
		DoRaw()
	if err != nil {
		return err, -1
	}
	totalTrans, err := strconv.Atoi(string(body))
	return err, totalTrans
}
// Retrieves debug information.
func getDebugInfo(c *client.Client) (map[string]string, error) {
	data := make(map[string]string)
	for _, key := range []string{"block", "goroutine", "heap", "threadcreate"} {
		resp, err := http.Get(c.Get().AbsPath(fmt.Sprintf("debug/pprof/%s", key)).URL().String() + "?debug=2")
		if err != nil {
			Logf("Warning: Error trying to fetch %s debug data: %v", key, err)
			continue
		}
		body, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			Logf("Warning: Error trying to read %s debug data: %v", key, err)
		}
		data[key] = string(body)
	}
	return data, nil
}
// serverAPIVersions fetches the server versions available from the groupless API at the given prefix.
func serverAPIVersions(c *kclient.Client, grouplessPrefix string) ([]unversioned.GroupVersion, error) {
	// Get versions doc
	var v unversioned.APIVersions
	if err := c.Get().AbsPath(grouplessPrefix).Do().Into(&v); err != nil {
		return []unversioned.GroupVersion{}, err
	}
	// Convert to GroupVersion structs
	serverAPIVersions := []unversioned.GroupVersion{}
	for _, version := range v.Versions {
		gv, err := unversioned.ParseGroupVersion(version)
		if err != nil {
			return []unversioned.GroupVersion{}, err
		}
		serverAPIVersions = append(serverAPIVersions, gv)
	}
	return serverAPIVersions, nil
}
func TypeOfMaster(c *client.Client) MasterType {
	res, err := c.Get().AbsPath("").DoRaw()
	if err != nil {
		Fatalf("Could not discover the type of your installation: %v", err)
	}
	var rp api.RootPaths
	err = json.Unmarshal(res, &rp)
	if err != nil {
		Fatalf("Could not discover the type of your installation: %v", err)
	}
	for _, p := range rp.Paths {
		if p == "/oapi" {
			return OpenShift
		}
	}
	return Kubernetes
}
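// Usage sketch (illustrative only, not from the original source): branch on the
// master type detected above. The printed messages are hypothetical.
func describeMaster(c *client.Client) {
	switch TypeOfMaster(c) {
	case OpenShift:
		fmt.Println("talking to an OpenShift master (found /oapi)")
	case Kubernetes:
		fmt.Println("talking to a vanilla Kubernetes master")
	}
}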
func makeHttpRequestToService(c *client.Client, ns, service, path string, timeout time.Duration) (string, error) {
	var result []byte
	var err error
	for t := time.Now(); time.Since(t) < timeout; time.Sleep(poll) {
		result, err = c.Get().
			Prefix("proxy").
			Namespace(ns).
			Resource("services").
			Name(service).
			Suffix(path).
			Do().
			Raw()
		if err != nil {
			break
		}
	}
	return string(result), err
}
func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client *client.Client, check bool, expected string) {
	var failed []string
	framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
		failed = []string{}
		subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, client)
		if err != nil {
			return false, err
		}
		var contents []byte
		for _, fileName := range fileNames {
			if subResourceProxyAvailable {
				contents, err = client.Get().
					Namespace(pod.Namespace).
					Resource("pods").
					SubResource("proxy").
					Name(pod.Name).
					Suffix(fileDir, fileName).
					Do().Raw()
			} else {
				contents, err = client.Get().
					Prefix("proxy").
					Resource("pods").
					Namespace(pod.Namespace).
					Name(pod.Name).
					Suffix(fileDir, fileName).
					Do().Raw()
			}
			if err != nil {
				framework.Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
				failed = append(failed, fileName)
			} else if check && strings.TrimSpace(string(contents)) != expected {
				framework.Logf("File %s from pod %s contains '%s' instead of '%s'", fileName, pod.Name, string(contents), expected)
				failed = append(failed, fileName)
			}
		}
		if len(failed) == 0 {
			return true, nil
		}
		framework.Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
		return false, nil
	}))
	Expect(len(failed)).To(Equal(0))
}
func makeHttpRequestToService(c *client.Client, ns, service, path string, timeout time.Duration) (string, error) {
	var result []byte
	var err error
	for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
		proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Get())
		if errProxy != nil {
			break
		}
		result, err = proxyRequest.Namespace(ns).
			Name(service).
			Suffix(path).
			Do().
			Raw()
		if err != nil {
			break
		}
	}
	return string(result), err
}
func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client *client.Client) {
	var failed []string
	framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
		failed = []string{}
		subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, client)
		if err != nil {
			return false, err
		}
		for _, fileName := range fileNames {
			if subResourceProxyAvailable {
				_, err = client.Get().
					Namespace(pod.Namespace).
					Resource("pods").
					SubResource("proxy").
					Name(pod.Name).
					Suffix(fileDir, fileName).
					Do().Raw()
			} else {
				_, err = client.Get().
					Prefix("proxy").
					Resource("pods").
					Namespace(pod.Namespace).
					Name(pod.Name).
					Suffix(fileDir, fileName).
					Do().Raw()
			}
			if err != nil {
				framework.Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
				failed = append(failed, fileName)
			}
		}
		if len(failed) == 0 {
			return true, nil
		}
		framework.Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
		return false, nil
	}))
	Expect(len(failed)).To(Equal(0))
}
// Performs a get on a node proxy endpoint given the nodename and rest client.
func nodeProxyRequest(c *client.Client, node, endpoint string) (client.Result, error) {
	subResourceProxyAvailable, err := serverVersionGTE(subResourceServiceAndNodeProxyVersion, c)
	if err != nil {
		return client.Result{}, err
	}
	var result client.Result
	if subResourceProxyAvailable {
		result = c.Get().
			Resource("nodes").
			SubResource("proxy").
			Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
			Suffix(endpoint).
			Do()
	} else {
		result = c.Get().
			Prefix("proxy").
			Resource("nodes").
			Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
			Suffix(endpoint).
			Do()
	}
	return result, nil
}
func getNodeStatsSummary(c *client.Client, nodeName string) (*stats.Summary, error) {
	subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
	if err != nil {
		return nil, err
	}
	var data []byte
	if subResourceProxyAvailable {
		data, err = c.Get().
			Resource("nodes").
			SubResource("proxy").
			Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
			Suffix("stats/summary").
			SetHeader("Content-Type", "application/json").
			Do().Raw()
	} else {
		data, err = c.Get().
			Prefix("proxy").
			Resource("nodes").
			Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
			Suffix("stats/summary").
			SetHeader("Content-Type", "application/json").
			Do().Raw()
	}
	if err != nil {
		return nil, err
	}
	var summary *stats.Summary
	err = json.Unmarshal(data, &summary)
	if err != nil {
		return nil, err
	}
	return summary, nil
}
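// Usage sketch (illustrative only, not part of the original source): fetch the
// stats summary for a node and log its node-level working set, guarding against
// missing optional fields. Logf mirrors the framework logging used above; the
// node name is supplied by the caller.
func logNodeWorkingSet(c *client.Client, nodeName string) error {
	summary, err := getNodeStatsSummary(c, nodeName)
	if err != nil {
		return err
	}
	if summary.Node.Memory != nil && summary.Node.Memory.WorkingSetBytes != nil {
		Logf("node %s working set: %d bytes", nodeName, *summary.Node.Memory.WorkingSetBytes)
	}
	return nil
}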
func runSelfLinkTestOnNamespace(t *testing.T, c *client.Client, namespace string) {
	podBody := api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "selflinktest",
			Namespace: namespace,
			Labels: map[string]string{
				"name": "selflinktest",
			},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Name: "name", Image: "image"},
			},
		},
	}
	pod, err := c.Pods(namespace).Create(&podBody)
	if err != nil {
		t.Fatalf("Failed creating selflinktest pod: %v", err)
	}
	if err = c.Get().RequestURI(pod.SelfLink).Do().Into(pod); err != nil {
		t.Errorf("Failed listing pod with supplied self link '%v': %v", pod.SelfLink, err)
	}

	podList, err := c.Pods(namespace).List(api.ListOptions{})
	if err != nil {
		t.Errorf("Failed listing pods: %v", err)
	}
	if err = c.Get().RequestURI(podList.SelfLink).Do().Into(podList); err != nil {
		t.Errorf("Failed listing pods with supplied self link '%v': %v", podList.SelfLink, err)
	}

	found := false
	for i := range podList.Items {
		item := &podList.Items[i]
		if item.Name != "selflinktest" {
			continue
		}
		found = true
		err = c.Get().RequestURI(item.SelfLink).Do().Into(pod)
		if err != nil {
			t.Errorf("Failed listing pod with supplied self link '%v': %v", item.SelfLink, err)
		}
		break
	}
	if !found {
		t.Errorf("never found selflinktest pod in namespace %s", namespace)
	}
}
func build(conf *Config, s3Client *s3.S3, kubeClient *client.Client, builderKey, rawGitSha string) error {
	repo := conf.Repository
	gitSha, err := git.NewSha(rawGitSha)
	if err != nil {
		return err
	}
	appName := conf.App()
	repoDir := filepath.Join(conf.GitHome, repo)
	buildDir := filepath.Join(repoDir, "build")
	slugName := fmt.Sprintf("%s:git-%s", appName, gitSha.Short())
	if err := os.MkdirAll(buildDir, os.ModeDir); err != nil {
		return fmt.Errorf("making the build directory %s (%s)", buildDir, err)
	}
	tmpDir, err := ioutil.TempDir(buildDir, "tmp")
	if err != nil {
		return fmt.Errorf("unable to create tmpdir %s (%s)", buildDir, err)
	}
	slugBuilderInfo := storage.NewSlugBuilderInfo(s3Client.Endpoint, appName, slugName, gitSha)

	// Get the application config from the controller, so we can check for a custom buildpack URL
	appConf, err := getAppConfig(conf, builderKey, conf.Username, appName)
	if err != nil {
		return fmt.Errorf("getting app config for %s (%s)", appName, err)
	}
	log.Debug("got the following config back for app %s: %+v", appName, *appConf)
	var buildPackURL string
	if buildPackURLInterface, ok := appConf.Values["BUILDPACK_URL"]; ok {
		if bpStr, ok := buildPackURLInterface.(string); ok {
			log.Debug("found custom buildpack URL %s", bpStr)
			buildPackURL = bpStr
		}
	}

	// build a tarball from the new objects
	appTgz := fmt.Sprintf("%s.tar.gz", appName)
	gitArchiveCmd := repoCmd(repoDir, "git", "archive", "--format=tar.gz", fmt.Sprintf("--output=%s", appTgz), gitSha.Short())
	gitArchiveCmd.Stdout = os.Stdout
	gitArchiveCmd.Stderr = os.Stderr
	if err := run(gitArchiveCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(gitArchiveCmd.Args, " "), err)
	}
	absAppTgz := fmt.Sprintf("%s/%s", repoDir, appTgz)

	// untar the archive into the temp dir
	tarCmd := repoCmd(repoDir, "tar", "-xzf", appTgz, "-C", fmt.Sprintf("%s/", tmpDir))
	tarCmd.Stdout = os.Stdout
	tarCmd.Stderr = os.Stderr
	if err := run(tarCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(tarCmd.Args, " "), err)
	}

	bType := getBuildTypeForDir(tmpDir)
	usingDockerfile := bType == buildTypeDockerfile

	procType := pkg.ProcessType{}
	if bType == buildTypeProcfile {
		rawProcFile, err := ioutil.ReadFile(fmt.Sprintf("%s/Procfile", tmpDir))
		if err != nil {
			return fmt.Errorf("reading %s/Procfile", tmpDir)
		}
		if err := yaml.Unmarshal(rawProcFile, &procType); err != nil {
			return fmt.Errorf("procfile %s/ProcFile is malformed (%s)", tmpDir, err)
		}
	}

	bucketName := "git"
	if err := storage.CreateBucket(s3Client, bucketName); err != nil {
		log.Warn("create bucket error: %+v", err)
	}

	appTgzReader, err := os.Open(absAppTgz)
	if err != nil {
		return fmt.Errorf("opening %s for read (%s)", appTgz, err)
	}

	log.Debug("Uploading tar to %s/%s/%s", s3Client.Endpoint, bucketName, slugBuilderInfo.TarKey())
	if err := storage.UploadObject(s3Client, bucketName, slugBuilderInfo.TarKey(), appTgzReader); err != nil {
		return fmt.Errorf("uploading %s to %s/%s (%v)", absAppTgz, bucketName, slugBuilderInfo.TarKey(), err)
	}

	creds := storage.CredsOK()

	var pod *api.Pod
	var buildPodName string
	if usingDockerfile {
		buildPodName = dockerBuilderPodName(appName, gitSha.Short())
		pod = dockerBuilderPod(
			conf.Debug,
			creds,
			buildPodName,
			conf.PodNamespace,
			appConf.Values,
			slugBuilderInfo.TarURL(),
			slugName,
		)
	} else {
		buildPodName = slugBuilderPodName(appName, gitSha.Short())
		pod = slugbuilderPod(
			conf.Debug,
			creds,
			buildPodName,
			conf.PodNamespace,
			appConf.Values,
			slugBuilderInfo.TarURL(),
			slugBuilderInfo.PushURL(),
			buildPackURL,
		)
	}

	log.Info("Starting build... but first, coffee!")
	log.Debug("Starting pod %s", buildPodName)
	json, err := prettyPrintJSON(pod)
	if err == nil {
		log.Debug("Pod spec: %v", json)
	} else {
		log.Debug("Error creating json representation of pod spec: %v", err)
	}

	podsInterface := kubeClient.Pods(conf.PodNamespace)
	newPod, err := podsInterface.Create(pod)
	if err != nil {
		return fmt.Errorf("creating builder pod (%s)", err)
	}

	if err := waitForPod(kubeClient, newPod.Namespace, newPod.Name, conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("watching events for builder pod startup (%s)", err)
	}

	req := kubeClient.Get().Namespace(newPod.Namespace).Name(newPod.Name).Resource("pods").SubResource("log").VersionedParams(
		&api.PodLogOptions{
			Follow: true,
		}, api.Scheme)

	rc, err := req.Stream()
	if err != nil {
		return fmt.Errorf("attempting to stream logs (%s)", err)
	}
	defer rc.Close()
	size, err := io.Copy(os.Stdout, rc)
	if err != nil {
		return fmt.Errorf("fetching builder logs (%s)", err)
	}
	log.Debug("size of streamed logs %v", size)

	// check the state and exit code of the build pod.
	// if the code is not 0 return error
	if err := waitForPodEnd(kubeClient, newPod.Namespace, newPod.Name, conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}
	buildPod, err := kubeClient.Pods(newPod.Namespace).Get(newPod.Name)
	if err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}

	for _, containerStatus := range buildPod.Status.ContainerStatuses {
		// guard against a nil Terminated state before reading the exit code
		state := containerStatus.State.Terminated
		if state != nil && state.ExitCode != 0 {
			return fmt.Errorf("Stopping build.")
		}
	}

	// poll the s3 server to ensure the slug exists
	err = wait.PollImmediate(conf.ObjectStorageTickDuration(), conf.ObjectStorageWaitDuration(), func() (bool, error) {
		exists, err := storage.ObjectExists(s3Client, bucketName, slugBuilderInfo.PushKey())
		if err != nil {
			return false, fmt.Errorf("Checking if object %s/%s exists (%s)", bucketName, slugBuilderInfo.PushKey(), err)
		}
		return exists, nil
	})
	if err != nil {
		return fmt.Errorf("Timed out waiting for object in storage. Aborting build...")
	}

	log.Info("Build complete.")
	log.Info("Launching app.")
	log.Info("Launching...")

	buildHook := &pkg.BuildHook{
		Sha:         gitSha.Short(),
		ReceiveUser: conf.Username,
		ReceiveRepo: appName,
		Image:       appName,
		Procfile:    procType,
	}
	if !usingDockerfile {
		buildHook.Dockerfile = ""
		// need this to tell the controller what URL to give the slug runner
		buildHook.Image = slugBuilderInfo.PushURL() + "/slug.tgz"
	} else {
		buildHook.Dockerfile = "true"
	}
	buildHookResp, err := publishRelease(conf, builderKey, buildHook)
	if err != nil {
		return fmt.Errorf("publishing release (%s)", err)
	}
	release, ok := buildHookResp.Release["version"]
	if !ok {
		return fmt.Errorf("No release returned from Deis controller")
	}

	log.Info("Done, %s:v%d deployed to Deis\n", appName, release)
	log.Info("Use 'deis open' to view this application in your browser\n")
	log.Info("To learn more, use 'deis help' or visit http://deis.io\n")

	gcCmd := repoCmd(repoDir, "git", "gc")
	if err := run(gcCmd); err != nil {
		return fmt.Errorf("cleaning up the repository with %s (%s)", strings.Join(gcCmd.Args, " "), err)
	}

	return nil
}
func testPreStop(c *client.Client, ns string) {
	// This is the server that will receive the preStop notification
	podDescr := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "server",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "server",
					Image: "gcr.io/google_containers/nettest:1.6",
					Ports: []api.ContainerPort{{ContainerPort: 8080}},
				},
			},
		},
	}
	By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns))
	_, err := c.Pods(ns).Create(podDescr)
	expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))

	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("Deleting the server pod")
		c.Pods(ns).Delete(podDescr.Name, nil)
	}()

	By("Waiting for pods to come up.")
	err = waitForPodRunningInNamespace(c, podDescr.Name, ns)
	expectNoError(err, "waiting for server pod to start")

	val := "{\"Source\": \"prestop\"}"

	podOut, err := c.Pods(ns).Get(podDescr.Name)
	expectNoError(err, "getting pod info")

	preStopDescr := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "tester",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "tester",
					Image:   "gcr.io/google_containers/busybox",
					Command: []string{"sleep", "600"},
					Lifecycle: &api.Lifecycle{
						PreStop: &api.Handler{
							Exec: &api.ExecAction{
								Command: []string{
									"wget", "-O-", "--post-data=" + val,
									fmt.Sprintf("http://%s:8080/write", podOut.Status.PodIP),
								},
							},
						},
					},
				},
			},
		},
	}

	By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns))
	_, err = c.Pods(ns).Create(preStopDescr)
	expectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name))
	deletePreStop := true

	// At the end of the test, clean up by removing the pod.
	defer func() {
		if deletePreStop {
			By("Deleting the tester pod")
			c.Pods(ns).Delete(preStopDescr.Name, nil)
		}
	}()

	err = waitForPodRunningInNamespace(c, preStopDescr.Name, ns)
	expectNoError(err, "waiting for tester pod to start")

	// Delete the pod with the preStop handler.
	By("Deleting pre-stop pod")
	err = c.Pods(ns).Delete(preStopDescr.Name, nil)
	if err == nil {
		deletePreStop = false
	}
	expectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name))

	// Validate that the server received the web poke.
	err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
		if body, err := c.Get().
			Namespace(ns).Prefix("proxy").
			Resource("pods").
			Name(podDescr.Name).
			Suffix("read").
			DoRaw(); err != nil {
			By(fmt.Sprintf("Error validating prestop: %v", err))
		} else {
			Logf("Saw: %s", string(body))
			state := State{}
			err := json.Unmarshal(body, &state)
			if err != nil {
				Logf("Error parsing: %v", err)
				return false, nil
			}
			if state.Received["prestop"] != 0 {
				return true, nil
			}
		}
		return false, nil
	})
	expectNoError(err, "validating pre-stop.")
}
// Retrieves scheduler metrics information.
func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) {
	result := SchedulingLatency{}

	// Check if master Node is registered
	nodes, err := c.Nodes().List(api.ListOptions{})
	expectNoError(err)

	var data string
	var masterRegistered = false
	for _, node := range nodes.Items {
		if strings.HasSuffix(node.Name, "master") {
			masterRegistered = true
		}
	}
	if masterRegistered {
		rawData, err := c.Get().
			Prefix("proxy").
			Namespace(api.NamespaceSystem).
			Resource("pods").
			Name(fmt.Sprintf("kube-scheduler-%v:%v", testContext.CloudConfig.MasterName, ports.SchedulerPort)).
			Suffix("metrics").
			Do().Raw()
		expectNoError(err)
		data = string(rawData)
	} else {
		// If master is not registered fall back to old method of using SSH.
		cmd := "curl http://localhost:10251/metrics"
		sshResult, err := SSH(cmd, getMasterHost()+":22", testContext.Provider)
		if err != nil || sshResult.Code != 0 {
			return result, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
		}
		data = sshResult.Stdout
	}
	samples, err := extractMetricSamples(data)
	if err != nil {
		return result, err
	}
	for _, sample := range samples {
		var metric *LatencyMetric = nil
		switch sample.Metric[model.MetricNameLabel] {
		case "scheduler_scheduling_algorithm_latency_microseconds":
			metric = &result.Scheduling
		case "scheduler_binding_latency_microseconds":
			metric = &result.Binding
		case "scheduler_e2e_scheduling_latency_microseconds":
			metric = &result.Total
		}
		if metric == nil {
			continue
		}
		latency := sample.Value
		quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
		if err != nil {
			return result, err
		}
		setQuantile(metric, quantile, time.Duration(int64(latency))*time.Microsecond)
	}
	return result, nil
}
// Start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees the data from the server pod.
func testVolumeClient(client *client.Client, config VolumeTestConfig, volume api.VolumeSource, expectedContent string) {
	By(fmt.Sprint("starting ", config.prefix, " client"))
	podClient := client.Pods(config.namespace)

	clientPod := &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: api.ObjectMeta{
			Name: config.prefix + "-client",
			Labels: map[string]string{
				"role": config.prefix + "-client",
			},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  config.prefix + "-client",
					Image: "gcr.io/google_containers/nginx:1.7.9",
					Ports: []api.ContainerPort{
						{
							Name:          "web",
							ContainerPort: 80,
							Protocol:      api.ProtocolTCP,
						},
					},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      config.prefix + "-volume",
							MountPath: "/usr/share/nginx/html",
						},
					},
				},
			},
			Volumes: []api.Volume{
				{
					Name:         config.prefix + "-volume",
					VolumeSource: volume,
				},
			},
		},
	}
	if _, err := podClient.Create(clientPod); err != nil {
		Failf("Failed to create %s pod: %v", clientPod.Name, err)
	}
	expectNoError(waitForPodRunningInNamespace(client, clientPod.Name, config.namespace))

	By("reading a web page from the client")
	body, err := client.Get().
		Namespace(config.namespace).
		Prefix("proxy").
		Resource("pods").
		Name(clientPod.Name).
		DoRaw()
	expectNoError(err, "Cannot read web page: %v", err)
	Logf("body: %v", string(body))

	By("checking the page content")
	Expect(body).To(ContainSubstring(expectedContent))
}
func runSelfLinkTestOnNamespace(c *client.Client, namespace string) {
	svcBody := api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      "selflinktest",
			Namespace: namespace,
			Labels: map[string]string{
				"name": "selflinktest",
			},
		},
		Spec: api.ServiceSpec{
			// This is here because validation requires it.
			Selector: map[string]string{
				"foo": "bar",
			},
			Ports: []api.ServicePort{{
				Port:     12345,
				Protocol: "TCP",
			}},
			SessionAffinity: "None",
		},
	}
	services := c.Services(namespace)
	svc, err := services.Create(&svcBody)
	if err != nil {
		glog.Fatalf("Failed creating selflinktest service: %v", err)
	}
	err = c.Get().RequestURI(svc.SelfLink).Do().Into(svc)
	if err != nil {
		glog.Fatalf("Failed listing service with supplied self link '%v': %v", svc.SelfLink, err)
	}

	svcList, err := services.List(labels.Everything())
	if err != nil {
		glog.Fatalf("Failed listing services: %v", err)
	}
	err = c.Get().RequestURI(svcList.SelfLink).Do().Into(svcList)
	if err != nil {
		glog.Fatalf("Failed listing services with supplied self link '%v': %v", svcList.SelfLink, err)
	}

	found := false
	for i := range svcList.Items {
		item := &svcList.Items[i]
		if item.Name != "selflinktest" {
			continue
		}
		found = true
		err = c.Get().RequestURI(item.SelfLink).Do().Into(svc)
		if err != nil {
			glog.Fatalf("Failed listing service with supplied self link '%v': %v", item.SelfLink, err)
		}
		break
	}
	if !found {
		glog.Fatalf("never found selflinktest service in namespace %s", namespace)
	}
	glog.Infof("Self link test passed in namespace %s", namespace)

	// TODO: Should test PUT at some point, too.
}