// Tr translates content to the target language.
func Tr(lang, format string, args ...interface{}) string {
	var section string
	parts := strings.SplitN(format, ".", 2)
	if len(parts) == 2 {
		section = parts[0]
		format = parts[1]
	}
	value, ok := locales.Get(lang, section, format)
	if ok {
		format = value
	}
	if len(args) > 0 {
		params := make([]interface{}, 0, len(args))
		for _, arg := range args {
			if arg != nil {
				val := reflect.ValueOf(arg)
				if val.Kind() == reflect.Slice {
					for i := 0; i < val.Len(); i++ {
						params = append(params, val.Index(i).Interface())
					}
				} else {
					params = append(params, arg)
				}
			}
		}
		return fmt.Sprintf(format, params...)
	}
	// No args: return the (possibly translated) string as-is rather than
	// calling fmt.Sprintf(format), which would mangle literal %-verbs.
	return format
}
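// ExampleTr is a usage sketch; the locale key and translated value below
// are assumptions, not real data. "form.username_error" selects section
// "form" and key "username_error", and the slice argument is flattened
// into the format parameters.
func ExampleTr() {
	// Assuming the en-US locale maps "form.username_error" to
	// "length must be %d to %d".
	fmt.Println(Tr("en-US", "form.username_error", []interface{}{3, 20}))
	// Hypothetical output: length must be 3 to 20
}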
func RenderSpecialLink(rawBytes []byte, urlPrefix string) []byte {
	ms := MentionPattern.FindAll(rawBytes, -1)
	for _, m := range ms {
		rawBytes = bytes.Replace(rawBytes, m,
			[]byte(fmt.Sprintf(`<a href="/user/%s">%s</a>`, m[1:], m)), -1)
	}

	ms = commitPattern.FindAll(rawBytes, -1)
	for _, m := range ms {
		m = bytes.TrimSpace(m)
		i := strings.Index(string(m), "commit/")
		j := strings.Index(string(m), "#")
		if j == -1 {
			j = len(m)
		}
		rawBytes = bytes.Replace(rawBytes, m, []byte(fmt.Sprintf(
			` <code><a href="%s">%s</a></code>`, m, ShortSha(string(m[i+7:j])))), -1)
	}

	ms = issueFullPattern.FindAll(rawBytes, -1)
	for _, m := range ms {
		m = bytes.TrimSpace(m)
		i := strings.Index(string(m), "issues/")
		j := strings.Index(string(m), "#")
		if j == -1 {
			j = len(m)
		}
		rawBytes = bytes.Replace(rawBytes, m, []byte(fmt.Sprintf(
			` <a href="%s">#%s</a>`, m, ShortSha(string(m[i+7:j])))), -1)
	}

	ms = issueIndexPattern.FindAll(rawBytes, -1)
	for _, m := range ms {
		rawBytes = bytes.Replace(rawBytes, m, []byte(fmt.Sprintf(
			`<a href="%s/issues/%s">%s</a>`, urlPrefix, m[1:], m)), -1)
	}
	return rawBytes
}
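// ExampleRenderSpecialLink is a hedged sketch: it assumes MentionPattern
// matches "@name" tokens and issueIndexPattern matches "#123" tokens; the
// actual patterns are defined elsewhere in the package.
func ExampleRenderSpecialLink() {
	out := RenderSpecialLink([]byte("cc @alice, fixes #42"), "/someuser/somerepo")
	fmt.Println(string(out))
	// Hypothetical output:
	// cc <a href="/user/alice">@alice</a>, fixes <a href="/someuser/somerepo/issues/42">#42</a>
}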
func (s *SpecValidator) validateDefaultValueSchemaAgainstSchema(path, in string, schema *spec.Schema) *Result {
	res := new(Result)
	if schema != nil {
		if schema.Default != nil {
			res.Merge(NewSchemaValidator(schema, s.spec.Spec(), path, s.KnownFormats).Validate(schema.Default))
		}
		if schema.Items != nil {
			if schema.Items.Schema != nil {
				res.Merge(s.validateDefaultValueSchemaAgainstSchema(path+".items", in, schema.Items.Schema))
			}
			for i, sch := range schema.Items.Schemas {
				res.Merge(s.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.items[%d]", path, i), in, &sch))
			}
		}
		if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
			res.Merge(s.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema))
		}
		for propName, prop := range schema.Properties {
			res.Merge(s.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop))
		}
		for propName, prop := range schema.PatternProperties {
			res.Merge(s.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop))
		}
		if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
			res.Merge(s.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema))
		}
		for i, aoSch := range schema.AllOf {
			res.Merge(s.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.allOf[%d]", path, i), in, &aoSch))
		}
	}
	return res
}
// Run runs the tunnel session.
func (s *Session) Run() (err error) {
	defer s.recoverPanic("Session.Run")

	go func() {
		defer s.recoverPanic("Session.mux.Wait")
		code, err, debug := s.mux.Wait()
		// s.Info("Session mux shutdown with code %v error %v debug %v", code, err, debug)
		log.Printf("[INFO] Session mux shutdown with code %v error %v debug %v", code, err, debug)
	}()
	defer s.mux.Close()

	// A tunnel session starts with an auth stream.
	if err = s.handleAuth(); err != nil {
		return
	}

	// Then we handle new streams sent from the client.
	for {
		stream, err := s.mux.Accept()
		if err != nil {
			s.Shutdown()
			err = fmt.Errorf("failed to accept stream: %v", err)
			log.Printf("[ERROR] %v", err)
			return err
		}
		go s.handleStream(conn.Wrap(stream, "stream", s.id))
	}
}
func newRepo(name string, simpleDocker SimpleDocker.SimpleDocker) (Repo, error) {
	log.WithFields(log.Fields{
		"name": name,
	}).Info("Creating new repo")

	r := Repo{
		Name:         name,
		SimpleDocker: simpleDocker,
	}

	startTime := time.Now()
	repoName := fmt.Sprintf("ihsw/%s", name)
	if err := r.pullImage(repoName); err != nil {
		log.WithFields(log.Fields{
			"name":     name,
			"err":      err.Error(),
			"repoName": repoName,
		}).Warn("Could not pull image")

		return Repo{}, err
	}

	log.WithFields(log.Fields{
		"name":     name,
		"duration": fmt.Sprintf("%v", time.Since(startTime)),
	}).Info("Repo create success")

	return r, nil
}
func describeServicePorts(spec kapi.ServiceSpec) string {
	switch len(spec.Ports) {
	case 0:
		return " no ports"

	case 1:
		port := portOrNodePort(spec, spec.Ports[0])
		if spec.Ports[0].TargetPort.String() == "0" || spec.ClusterIP == kapi.ClusterIPNone || port == spec.Ports[0].TargetPort.String() {
			return fmt.Sprintf(":%s", port)
		}
		return fmt.Sprintf(":%s -> %s", port, spec.Ports[0].TargetPort.String())

	default:
		pairs := []string{}
		for _, port := range spec.Ports {
			externalPort := portOrNodePort(spec, port)
			if port.TargetPort.String() == "0" || spec.ClusterIP == kapi.ClusterIPNone {
				pairs = append(pairs, externalPort)
				continue
			}
			if port.Port == int(port.TargetPort.IntVal) {
				pairs = append(pairs, port.TargetPort.String())
			} else {
				pairs = append(pairs, fmt.Sprintf("%s->%s", externalPort, port.TargetPort.String()))
			}
		}
		return " ports " + strings.Join(pairs, ", ")
	}
}
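// Illustrative outputs (hypothetical specs, assuming portOrNodePort
// returns the plain port number): a single port 80 targeting 8080 renders
// as ":80 -> 8080"; two ports 80->8080 and 443->8443 render as
// " ports 80->8080, 443->8443".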
// buildURL builds the URL for the operation.
func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
	// Build URL
	var err error
	var path string
	if len(s.name) > 0 {
		path, err = uritemplates.Expand("/_template/{name}", map[string]string{
			"name": strings.Join(s.name, ","),
		})
	} else {
		path = "/_template"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.flatSettings != nil {
		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	return path, params, nil
}
func (f *Field) Dot() string {
	s := "digraph G {\n"

	// nodes
	for _, n := range *f {
		s += fmt.Sprintf(
			"\t%s [shape=box,label=\"%s\"];\n",
			n.Name(),
			NodeLabel(n),
			//n,
		)
	}
	s += "\n"

	// edges
	for _, n := range *f {
		D("Dot: adding edges for %d children of %s", len(n.Children()), n.Name())
		for _, child := range n.Children() {
			s += fmt.Sprintf("\t%s -> %s;\n", n.Name(), child.Name())
		}
	}

	s += "}"
	return s
}
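// Sketch of the DOT output shape for a two-node field (the node names and
// labels below are made up; Name() is assumed to return a bare DOT
// identifier):
//
//	digraph G {
//		parent [shape=box,label="Parent"];
//		child [shape=box,label="Child"];
//
//		parent -> child;
//	}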
func (e *exitError) Error() string {
	if e.cause != nil {
		return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause)
	}
	return fmt.Sprintf("task: non-zero exit (%v)", e.code)
}
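// Hypothetical construction of an exitError (the code/cause field names
// come from Error() above; that they can be set via a literal like this
// is an assumption):
//
//	err := &exitError{code: 127, cause: io.ErrUnexpectedEOF}
//	// err.Error() == "task: non-zero exit (127): unexpected EOF"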
func describeDeployments(f formatter, dcNode *deploygraph.DeploymentConfigNode, activeDeployment *kubegraph.ReplicationControllerNode, inactiveDeployments []*kubegraph.ReplicationControllerNode, count int) []string {
	if dcNode == nil {
		return nil
	}
	out := []string{}
	deploymentsToPrint := append([]*kubegraph.ReplicationControllerNode{}, inactiveDeployments...)

	if activeDeployment == nil {
		on, auto := describeDeploymentConfigTriggers(dcNode.DeploymentConfig)
		if dcNode.DeploymentConfig.Status.LatestVersion == 0 {
			out = append(out, fmt.Sprintf("deployment #1 waiting %s", on))
		} else if auto {
			out = append(out, fmt.Sprintf("deployment #%d pending %s", dcNode.DeploymentConfig.Status.LatestVersion, on))
		}
		// TODO: detect new image available?
	} else {
		deploymentsToPrint = append([]*kubegraph.ReplicationControllerNode{activeDeployment}, inactiveDeployments...)
	}

	for i, deployment := range deploymentsToPrint {
		out = append(out, describeDeploymentStatus(deployment.ReplicationController, i == 0, dcNode.DeploymentConfig.Spec.Test))

		switch {
		case count == -1:
			if deployutil.DeploymentStatusFor(deployment) == deployapi.DeploymentStatusComplete {
				return out
			}
		default:
			if i+1 >= count {
				return out
			}
		}
	}
	return out
}
func TestMountMoreThan42Layers(t *testing.T) {
	d := newDriver(t)
	defer os.RemoveAll(tmp)
	defer d.Cleanup()
	var last string
	var expected int

	for i := 1; i < 127; i++ {
		expected++
		var (
			parent  = fmt.Sprintf("%d", i-1)
			current = fmt.Sprintf("%d", i)
		)

		if parent == "0" {
			parent = ""
		} else {
			parent = hash(parent)
		}
		current = hash(current)

		if err := d.Create(current, parent); err != nil {
			t.Logf("Current layer %d", i)
			t.Fatal(err)
		}
		point, err := d.Get(current)
		if err != nil {
			t.Logf("Current layer %d", i)
			t.Fatal(err)
		}
		f, err := os.Create(path.Join(point, current))
		if err != nil {
			t.Logf("Current layer %d", i)
			t.Fatal(err)
		}
		f.Close()

		if i%10 == 0 {
			if err := os.Remove(path.Join(point, parent)); err != nil {
				t.Logf("Current layer %d", i)
				t.Fatal(err)
			}
			expected--
		}
		last = current
	}

	// Perform the actual mount for the top most image
	point, err := d.Get(last)
	if err != nil {
		t.Fatal(err)
	}
	files, err := ioutil.ReadDir(point)
	if err != nil {
		t.Fatal(err)
	}
	if len(files) != expected {
		t.Fatalf("Expected %d got %d", expected, len(files))
	}
}
func describeRouteInServiceGroup(f formatter, routeNode *routegraph.RouteNode) []string {
	// markers should cover printing information about admission failure
	requested, other, errors := extractRouteInfo(routeNode.Route)
	var lines []string
	if requested {
		lines = append(lines, describeRouteExposed(routeNode.Spec.Host, routeNode.Route, len(errors) > 0))
	}
	for _, s := range other {
		lines = append(lines, describeRouteExposed(s, routeNode.Route, len(errors) > 0))
	}
	if len(lines) == 0 {
		switch {
		case len(errors) >= 1:
			// router rejected the output
			lines = append(lines, fmt.Sprintf("%s not accepted: %s", f.ResourceName(routeNode), errors[0]))
		case len(routeNode.Spec.Host) == 0:
			// no errors or output, likely no router running and no default domain
			lines = append(lines, fmt.Sprintf("%s has no host set", f.ResourceName(routeNode)))
		case len(routeNode.Status.Ingress) == 0:
			// host set, but no ingress, an older legacy router
			lines = append(lines, describeRouteExposed(routeNode.Spec.Host, routeNode.Route, false))
		default:
			// multiple conditions but no host exposed, use the generic legacy output
			lines = append(lines, fmt.Sprintf("exposed as %s by %s", routeNode.Spec.Host, f.ResourceName(routeNode)))
		}
	}
	return lines
}
func describeDeploymentInServiceGroup(f formatter, deploy graphview.DeploymentConfigPipeline) []string {
	local := namespacedFormatter{currentNamespace: deploy.Deployment.Namespace}

	includeLastPass := deploy.ActiveDeployment == nil
	if len(deploy.Images) == 1 {
		format := "%s deploys %s %s"
		if deploy.Deployment.Spec.Test {
			format = "%s test deploys %s %s"
		}
		lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeImageInPipeline(local, deploy.Images[0], deploy.Deployment.Namespace), describeDeploymentConfigTrigger(deploy.Deployment.DeploymentConfig))}
		if len(lines[0]) > 120 && strings.Contains(lines[0], " <- ") {
			segments := strings.SplitN(lines[0], " <- ", 2)
			lines[0] = segments[0] + " <-"
			lines = append(lines, segments[1])
		}
		lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(deploy.Images[0].Build, deploy.Images[0].LastSuccessfulBuild, deploy.Images[0].LastUnsuccessfulBuild, deploy.Images[0].ActiveBuilds, deploy.Images[0].DestinationResolved, includeLastPass)...)...)
		lines = append(lines, describeDeployments(local, deploy.Deployment, deploy.ActiveDeployment, deploy.InactiveDeployments, 3)...)
		return lines
	}

	format := "%s deploys %s"
	if deploy.Deployment.Spec.Test {
		format = "%s test deploys %s"
	}
	lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeDeploymentConfigTrigger(deploy.Deployment.DeploymentConfig))}
	for _, image := range deploy.Images {
		lines = append(lines, describeImageInPipeline(local, image, deploy.Deployment.Namespace))
		lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(image.Build, image.LastSuccessfulBuild, image.LastUnsuccessfulBuild, image.ActiveBuilds, image.DestinationResolved, includeLastPass)...)...)
		lines = append(lines, describeDeployments(local, deploy.Deployment, deploy.ActiveDeployment, deploy.InactiveDeployments, 3)...)
	}
	return lines
}
func describeProjectAndServer(f formatter, project *projectapi.Project, server string) string {
	// When no server is known, omit the server clause instead of printing
	// an empty value.
	if len(server) == 0 {
		return fmt.Sprintf("In project %s\n", projectapi.DisplayNameAndNameForProject(project))
	}
	return fmt.Sprintf("In project %s on server %s\n", projectapi.DisplayNameAndNameForProject(project), server)
}
func (checker *codecEqualChecker) Check(params []interface{}, names []string) (result bool, error string) {
	gotContent, ok := params[0].(string)
	if !ok {
		return false, fmt.Sprintf("expected string, got %T", params[0])
	}
	expectContent := params[1]
	expectContentBytes, err := checker.marshal(expectContent)
	if err != nil {
		return false, fmt.Sprintf("cannot marshal expected contents: %v", err)
	}
	var expectContentVal interface{}
	if err := checker.unmarshal(expectContentBytes, &expectContentVal); err != nil {
		return false, fmt.Sprintf("cannot unmarshal expected contents: %v", err)
	}

	var gotContentVal interface{}
	if err := checker.unmarshal([]byte(gotContent), &gotContentVal); err != nil {
		return false, fmt.Sprintf("cannot unmarshal obtained contents: %v; %q", err, gotContent)
	}

	if ok, err := DeepEqual(gotContentVal, expectContentVal); !ok {
		return false, err.Error()
	}
	return true, ""
}
func (this *Castaway) String() string {
	if this == nil {
		return "nil"
	}
	keysForCastMapValueMessage := make([]int32, 0, len(this.CastMapValueMessage))
	for k := range this.CastMapValueMessage {
		keysForCastMapValueMessage = append(keysForCastMapValueMessage, k)
	}
	github_com_gogo_protobuf_sortkeys.Int32s(keysForCastMapValueMessage)
	mapStringForCastMapValueMessage := "map[int32]MyWilson{"
	for _, k := range keysForCastMapValueMessage {
		mapStringForCastMapValueMessage += fmt.Sprintf("%v: %v,", k, this.CastMapValueMessage[k])
	}
	mapStringForCastMapValueMessage += "}"
	keysForCastMapValueMessageNullable := make([]int32, 0, len(this.CastMapValueMessageNullable))
	for k := range this.CastMapValueMessageNullable {
		keysForCastMapValueMessageNullable = append(keysForCastMapValueMessageNullable, k)
	}
	github_com_gogo_protobuf_sortkeys.Int32s(keysForCastMapValueMessageNullable)
	mapStringForCastMapValueMessageNullable := "map[int32]*MyWilson{"
	for _, k := range keysForCastMapValueMessageNullable {
		mapStringForCastMapValueMessageNullable += fmt.Sprintf("%v: %v,", k, this.CastMapValueMessageNullable[k])
	}
	mapStringForCastMapValueMessageNullable += "}"
	s := strings.Join([]string{`&Castaway{`,
		`CastMapValueMessage:` + mapStringForCastMapValueMessage + `,`,
		`CastMapValueMessageNullable:` + mapStringForCastMapValueMessageNullable + `,`,
		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
		`}`,
	}, "")
	return s
}
// getPlanInfo returns the column metadata and a constructor for a new
// valuesNode for the virtual table. We use deferred construction here
// so as to avoid populating a RowContainer during query preparation,
// where we can't guarantee it will be Close()d in case of error.
func (e virtualTableEntry) getPlanInfo() (ResultColumns, nodeConstructor) {
	var columns ResultColumns
	for _, col := range e.desc.Columns {
		columns = append(columns, ResultColumn{
			Name: col.Name,
			Typ:  col.Type.ToDatumType(),
		})
	}

	constructor := func(p *planner) (planNode, error) {
		v := p.newContainerValuesNode(columns, 0)

		err := e.tableDef.populate(p, func(datums ...parser.Datum) error {
			if r, c := len(datums), len(v.columns); r != c {
				panic(fmt.Sprintf("datum row count and column count differ: %d vs %d", r, c))
			}
			for i, col := range v.columns {
				datum := datums[i]
				if !(datum == parser.DNull || datum.ResolvedType().Equal(col.Typ)) {
					panic(fmt.Sprintf("datum column %q expected to be type %s; found type %s",
						col.Name, col.Typ, datum.ResolvedType()))
				}
			}
			_, err := v.rows.AddRow(datums)
			return err
		})
		if err != nil {
			v.Close()
			return nil, err
		}
		return v, nil
	}

	return columns, constructor
}
// TestLogLevelDEV tests the basic functioning of the logger in DEV mode.
func TestLogLevelDEV(t *testing.T) {
	t.Log("Given the need to log DEV and USER messages.")
	{
		t.Log("\tWhen we set the logging level to DEV.")
		{
			log.Init(&logdest, func() int { return log.DEV })
			resetLog()
			defer displayLog()

			dt := time.Now().Format("2006/01/02 15:04:05")

			log1 := fmt.Sprintf("%s log_test.go:81: DEV : context : FuncName : Message 1 no format\n", dt)
			log2 := fmt.Sprintf("%s log_test.go:82: USER : context : FuncName : Message 2 with format: A, B\n", dt)
			log3 := fmt.Sprintf("%s log_test.go:83: ERROR : context : FuncName : An error : Message 3 with format: C, D\n", dt)

			log.Dev("context", "FuncName", "Message 1 no format")
			log.User("context", "FuncName", "Message 2 with format: %s, %s", "A", "B")
			log.Error("context", "FuncName", errors.New("An error"), "Message 3 with format: %s, %s", "C", "D")

			if logdest.String() == log1+log2+log3 {
				t.Logf("\t\t%v : Should log the expected trace line.", succeed)
			} else {
				t.Log("***>", logdest.String())
				t.Log("***>", log1+log2+log3)
				t.Errorf("\t\t%v : Should log the expected trace line.", failed)
			}
		}
	}
}
func (options *Html) Header(out *bytes.Buffer, text func() bool, level int, id string) {
	marker := out.Len()
	doubleSpace(out)

	if id != "" {
		out.WriteString(fmt.Sprintf("<h%d id=\"%s\">", level, id))
	} else if options.flags&HTML_TOC != 0 {
		// headerCount is incremented in htmlTocHeader
		out.WriteString(fmt.Sprintf("<h%d id=\"toc_%d\">", level, options.headerCount))
	} else {
		out.WriteString(fmt.Sprintf("<h%d>", level))
	}

	tocMarker := out.Len()
	if !text() {
		out.Truncate(marker)
		return
	}

	// are we building a table of contents?
	if options.flags&HTML_TOC != 0 {
		options.TocHeaderWithAnchor(out.Bytes()[tocMarker:], level, id)
	}

	out.WriteString(fmt.Sprintf("</h%d>\n", level))
}
func makeLinkVariables(service api.Service, machine string) []api.EnvVar {
	prefix := makeEnvVariableName(service.ID)
	var port string
	if service.ContainerPort.Kind == util.IntstrString {
		port = service.ContainerPort.StrVal
	} else {
		port = strconv.Itoa(service.ContainerPort.IntVal)
	}
	portPrefix := prefix + "_PORT_" + makeEnvVariableName(port) + "_TCP"
	return []api.EnvVar{
		{
			Name:  prefix + "_PORT",
			Value: fmt.Sprintf("tcp://%s:%d", machine, service.Port),
		},
		{
			Name:  portPrefix,
			Value: fmt.Sprintf("tcp://%s:%d", machine, service.Port),
		},
		{
			Name:  portPrefix + "_PROTO",
			Value: "tcp",
		},
		{
			Name:  portPrefix + "_PORT",
			Value: strconv.Itoa(service.Port),
		},
		{
			Name:  portPrefix + "_ADDR",
			Value: machine,
		},
	}
}
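// Illustrative result for a hypothetical service with ID "redis",
// Port 6379, ContainerPort 6379, on machine "10.0.0.5" (assuming
// makeEnvVariableName upper-cases its input):
//
//	REDIS_PORT=tcp://10.0.0.5:6379
//	REDIS_PORT_6379_TCP=tcp://10.0.0.5:6379
//	REDIS_PORT_6379_TCP_PROTO=tcp
//	REDIS_PORT_6379_TCP_PORT=6379
//	REDIS_PORT_6379_TCP_ADDR=10.0.0.5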
// TestV2Only ensures that a daemon in v2-only mode does not
// attempt to contact any v1 registry endpoints.
func (s *DockerRegistrySuite) TestV2Only(c *check.C) {
	reg, err := newTestRegistry(c)
	c.Assert(err, check.IsNil)

	reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(404)
	})

	reg.registerHandler("/v1/.*", func(w http.ResponseWriter, r *http.Request) {
		c.Fatal("V1 registry contacted")
	})

	repoName := fmt.Sprintf("%s/busybox", reg.hostport)

	err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=true")
	c.Assert(err, check.IsNil)

	dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport))
	c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile"))
	defer cleanup()

	s.d.Cmd("build", "--file", dockerfileName, ".")

	s.d.Cmd("run", repoName)
	s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "*****@*****.**", reg.hostport)
	s.d.Cmd("tag", "busybox", repoName)
	s.d.Cmd("push", repoName)
	s.d.Cmd("pull", repoName)
}
func (ss *StoreServer) createVolumeHandler(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	var volIDIP VolumeIDIP
	if err = json.Unmarshal(body, &volIDIP); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	id := volIDIP.ID
	volIDStr := fmt.Sprintf("%d", id)
	volPath := filepath.Join(ss.volumeDir, volIDStr+".vol")
	needleMapPath := filepath.Join(ss.volumeDir, fmt.Sprintf("needle_map_vol%d", id))
	file, err := os.OpenFile(volPath, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	v, err := storage.NewVolume(id, file, needleMapPath, ss.garbageThreshold)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	ss.localVolIDIPs = append(ss.localVolIDIPs, volIDIP)
	bytes, err := json.Marshal(ss.localVolIDIPs)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if err = ioutil.WriteFile(filepath.Join(ss.volumeDir, "volIDIPs.json"), bytes, 0644); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
	ss.volumeMap[id] = v
}
// TestRegistryResourceLocation tests if the proper resource location URL is returned
// for different build states.
// Note: For this test, the mocked pod is set to "Running" phase, so the test
// is evaluating the outcome based only on build state.
func TestRegistryResourceLocation(t *testing.T) {
	expectedLocations := map[api.BuildPhase]string{
		api.BuildPhaseComplete:  fmt.Sprintf("https://foo-host:12345/containerLogs/%s/running-build/foo-container", kapi.NamespaceDefault),
		api.BuildPhaseFailed:    fmt.Sprintf("https://foo-host:12345/containerLogs/%s/running-build/foo-container", kapi.NamespaceDefault),
		api.BuildPhaseRunning:   fmt.Sprintf("https://foo-host:12345/containerLogs/%s/running-build/foo-container", kapi.NamespaceDefault),
		api.BuildPhaseNew:       "",
		api.BuildPhasePending:   "",
		api.BuildPhaseError:     "",
		api.BuildPhaseCancelled: "",
	}

	ctx := kapi.NewDefaultContext()

	for buildPhase, expectedLocation := range expectedLocations {
		location, err := resourceLocationHelper(buildPhase, "running", ctx)
		switch buildPhase {
		case api.BuildPhaseError, api.BuildPhaseCancelled:
			if err == nil {
				t.Errorf("Expected error when Build is in %s state, got nothing", buildPhase)
			}
		default:
			if err != nil {
				t.Errorf("Unexpected error: %v", err)
			}
		}

		if location != expectedLocation {
			t.Errorf("Status: %s Expected Location: %s, Got %s", buildPhase, expectedLocation, location)
		}
	}
}
func (ss *StoreServer) uploadHandler(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	vars := mux.Vars(r)
	fileIDStr := vars["fileID"]
	volID, needleID, cookie, err := newFileID(fileIDStr)
	if err != nil {
		helper.WriteJson(w, result{Error: err.Error()}, http.StatusInternalServerError)
		return
	}
	if ss.volumeMap[volID] == nil {
		helper.WriteJson(w, result{Error: fmt.Sprintf("no volume %d", volID)}, http.StatusInternalServerError)
		return
	}
	data, name, err := parseUpload(r)
	if err != nil {
		helper.WriteJson(w, result{Error: err.Error()}, http.StatusInternalServerError)
		return
	}
	n := storage.NewNeedle(cookie, needleID, data, name)
	if err = ss.volumeMap[volID].AppendNeedle(n); err != nil {
		helper.WriteJson(w, result{Error: err.Error()}, http.StatusInternalServerError)
		return
	}
	fi, _ := ss.volumeMap[volID].StoreFile.Stat()
	vi := volumeInfo{
		ID:   volID,
		Size: fi.Size(),
	}
	viBytes, _ := json.Marshal(vi)
	for i := range ss.conf.Directories { // send volume information to directory server
		var b bytes.Buffer
		b.Write(viBytes)
		_, err := postAndError("http://"+ss.conf.Directories[i]+"/vol/info", "application/json", &b)
		if err == nil {
			break
		} else {
			log4go.Warn("send volumeInfo to directory get err: %s", err.Error())
		}
	}
	for _, localVolIDIP := range ss.localVolIDIPs {
		if localVolIDIP.ID == volID {
			for _, ip := range localVolIDIP.IP {
				if ip != ss.Addr {
					if err = replicateUpload(fmt.Sprintf("http://%s/replicate/%s", ip, fileIDStr), string(name), data); err != nil {
						helper.WriteJson(w, result{Error: err.Error()}, http.StatusInternalServerError)
						return
					}
				}
			}
			break
		}
	}
	res := result{
		Name: string(name),
		Size: len(data),
	}
	helper.WriteJson(w, res, http.StatusOK)
}
func (slvgs *softLayer_Virtual_Guest_Service) SetTags(instanceId int, tags []string) (bool, error) {
	var tagStringBuffer bytes.Buffer
	for i, tag := range tags {
		tagStringBuffer.WriteString(tag)
		if i != len(tags)-1 {
			tagStringBuffer.WriteString(", ")
		}
	}

	setTagsParameters := datatypes.SoftLayer_Virtual_Guest_SetTags_Parameters{
		Parameters: []string{tagStringBuffer.String()},
	}

	requestBody, err := json.Marshal(setTagsParameters)
	if err != nil {
		return false, err
	}

	response, err := slvgs.client.DoRawHttpRequest(fmt.Sprintf("%s/%d/setTags.json", slvgs.GetName(), instanceId), "POST", bytes.NewBuffer(requestBody))
	if err != nil {
		return false, err
	}

	if res := string(response); res != "true" {
		return false, fmt.Errorf("Failed to setTags for instance with id '%d', got '%s' as response from the API.", instanceId, res)
	}

	return true, nil
}
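// Sketch of the request body produced above for
// SetTags(1234, []string{"web", "prod"}). The "parameters" field name is
// an assumption about the datatypes struct's JSON tag; the single joined
// comma-separated string is what the code guarantees:
//
//	{"parameters": ["web, prod"]}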
func print256Colors() {
	ansi.DisableColors(false)
	stdout := colorable.NewColorableStdout()

	bgColors := []string{""}
	for i := 0; i < 256; i++ {
		key := fmt.Sprintf(":%d", i)
		bgColors = append(bgColors, key)
	}

	keys := []string{}
	for fg := range ansi.Colors {
		n, err := strconv.Atoi(fg)
		if err == nil {
			keys = append(keys, fmt.Sprintf("%3d", n))
		}
	}

	sort.Strings(keys)

	for _, fg := range keys {
		for _, bg := range bgColors {
			fmt.Fprintln(stdout, padColor(fg, []string{"" + bg, "+b" + bg, "+u" + bg}))
			fmt.Fprintln(stdout, padColor(fg, []string{"+B" + bg, "+Bb" + bg}))
		}
	}
}
// TestQueueOneReaderOneWriter confirms the queue is FIFO
func TestQueueOneReaderOneWriter(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 1})
	defer clus.Terminate(t)

	go func() {
		etcdc := recipe.NewEtcdClient(clus.RandConn())
		q := recipe.NewQueue(etcdc, "testq")
		for i := 0; i < 5; i++ {
			if err := q.Enqueue(fmt.Sprintf("%d", i)); err != nil {
				// t.Fatalf must not be called from a non-test goroutine;
				// report the failure and stop producing instead.
				t.Errorf("error enqueuing (%v)", err)
				return
			}
		}
	}()

	etcdc := recipe.NewEtcdClient(clus.RandConn())
	q := recipe.NewQueue(etcdc, "testq")
	for i := 0; i < 5; i++ {
		s, err := q.Dequeue()
		if err != nil {
			t.Fatalf("error dequeueing (%v)", err)
		}
		if want := fmt.Sprintf("%d", i); s != want {
			t.Fatalf("expected dequeue value %v, got %v", want, s)
		}
	}
}
func (self *SourceManager) monitorFlume() {
	for self.isRunning {
		time.Sleep(1 * time.Second)

		monitor := "FLUME_TPS|"
		for k, v := range self.sourceServers {
			succ, fail := v.monitor()
			monitor += fmt.Sprintf("%s|%d/%d \t", k, succ, fail)
		}
		log.Println(monitor)

		mk := make([]string, 0)
		monitor = "FLUME_POOL|\n"
		for k := range self.hp2flumeClientPool {
			mk = append(mk, k.Host+":"+strconv.Itoa(k.Port))
		}
		sort.Strings(mk)

		for _, hp := range mk {
			v, ok := self.hp2flumeClientPool[config.NewHostPort(hp)]
			if !ok {
				continue
			}
			active, core, max := v.FlumePool.MonitorPool()
			monitor += fmt.Sprintf("%s|%d/%d/%d\n", hp, active, core, max)
		}
		log.Println(monitor)
	}
}
func doTest0644FSGroup(f *framework.Framework, image string, medium api.StorageMedium) {
	var (
		volumePath = "/test-volume"
		filePath   = path.Join(volumePath, "test-file")
		source     = &api.EmptyDirVolumeSource{Medium: medium}
		pod        = testPodWithVolume(image, volumePath, source)
	)
	pod.Spec.Containers[0].Args = []string{
		fmt.Sprintf("--fs_type=%v", volumePath),
		fmt.Sprintf("--new_file_0644=%v", filePath),
		fmt.Sprintf("--file_perm=%v", filePath),
	}

	fsGroup := int64(123)
	pod.Spec.SecurityContext.FSGroup = &fsGroup

	msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
	out := []string{
		"perms of file \"/test-volume/test-file\": -rw-r--r--",
		"content of file \"/test-volume/test-file\": mount-tester new file",
	}
	if medium == api.StorageMediumMemory {
		out = append(out, "mount type of \"/test-volume\": tmpfs")
	}
	f.TestContainerOutput(msg, pod, 0, out)
}
func testAuthChallengeNormalization(t *testing.T, host string) {
	scm := NewSimpleChallengeManager()

	url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))
	if err != nil {
		t.Fatal(err)
	}

	resp := &http.Response{
		Request: &http.Request{
			URL: url,
		},
		Header:     make(http.Header),
		StatusCode: http.StatusUnauthorized,
	}

	resp.Header.Add("WWW-Authenticate", fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host))

	err = scm.AddResponse(resp)
	if err != nil {
		t.Fatal(err)
	}

	lowered := *url
	lowered.Host = strings.ToLower(lowered.Host)
	c, err := scm.GetChallenges(lowered)
	if err != nil {
		t.Fatal(err)
	}

	if len(c) == 0 {
		t.Fatal("Expected challenge for lower-cased-host URL")
	}
}