// check if a given version is valid in all the possible containers
func isValidVersion(file string, tags, sources, binaries []string) bool {
	// tip is always a valid version
	if file == "tip" || file == "go" {
		return true
	}
	// look on the sources first that is the smaller collection
	for _, ver := range sources {
		if file == ver {
			return true
		}
	}
	// now look on the binaries collection using binary search
	index := sort.SearchStrings(binaries, file)
	if len(binaries) > index && binaries[index] == file {
		return true
	}
	// now look in the mercurial tags using binary search
	index = sort.SearchStrings(tags, file)
	if len(tags) > index && tags[index] == file {
		return true
	}
	return false
}
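// Hedged sketch, not part of the project above: the bounds-plus-equality test
// that this snippet (and most of the ones below) repeats can be factored into
// a small helper. It assumes "sort" is imported and that the slice was sorted
// with sort.Strings; the name containsSorted is illustrative only.
func containsSorted(ss []string, s string) bool {
	i := sort.SearchStrings(ss, s)
	return i < len(ss) && ss[i] == s
}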
func TestTelegrafParseLine(t *testing.T) {
	s := Telegraf{}
	r, err := s.ParseLine("> mem,host=ubuntu available_percent=78.43483331332489,buffered=199602176i,used=1802661888i,used_percent=21.56516668667511 1469886743")
	require.NoError(t, err)
	assert.Len(t, r.Elements, 4)
	for _, line := range r.Elements {
		assert.Equal(t, line.Plugin, "telegraf.mem")
		validGauges := []string{"mem_available.percent", "mem_buffered", "mem_used", "mem_used.percent"}
		sort.Strings(validGauges)
		i := sort.SearchStrings(validGauges, line.Gauge)
		gaugeFound := i < len(validGauges) && validGauges[i] == line.Gauge
		assert.True(t, gaugeFound, "Valid Gauge Name")
	}

	r, err = s.ParseLine("> system,host=ubuntu load1=0.11,load15=0.06,load5=0.05,n_cpus=4i,n_users=2i,uptime=7252i 1469891972000000000")
	require.NoError(t, err)
	assert.Len(t, r.Elements, 6)
	for _, line := range r.Elements {
		assert.Equal(t, line.Plugin, "telegraf.system")
		validGauges := []string{"system_load1", "system_load15", "system_load5", "system_n.cpus", "system_n.users", "system_uptime"}
		sort.Strings(validGauges)
		i := sort.SearchStrings(validGauges, line.Gauge)
		gaugeFound := i < len(validGauges) && validGauges[i] == line.Gauge
		assert.True(t, gaugeFound, "Valid Gauge Name")
	}
}
func createMaintainerManagersDirectoriesMap(pth, cpth, maintainerEmail, userName string) error {
	names, err := ioutil.ReadDir(pth)
	if err != nil {
		return err
	}

	// Look for the MaintainerManager File
	var (
		foundMaintainerManagersFile      = false
		iAmOneOfTheMaintainerManagers    = false
		belongsToOtherMaintainerManagers = false
	)
	for _, name := range names {
		if strings.EqualFold(name.Name(), MaintainerManagersFileName) {
			foundMaintainerManagersFile = true
			var ids = &[]string{}
			ids, fileMaintainers, err = getMaintainerManagersIds(path.Join(pth, name.Name()))
			maintainersIds = append(maintainersIds, (*ids)...)
			sort.Strings(maintainersIds)
			if err != nil {
				return err
			}
			i := sort.SearchStrings(*ids, maintainerEmail)
			if i < len(*ids) && (*ids)[i] == maintainerEmail {
				iAmOneOfTheMaintainerManagers = true
			} else {
				i := sort.SearchStrings(*ids, userName)
				if i < len(*ids) && (*ids)[i] == userName {
					iAmOneOfTheMaintainerManagers = true
				}
			}
		}
	}

	// Save the maintainers list related to the current directory
	tmpcpth := cpth
	if cpth == "" {
		tmpcpth = "."
	}
	if foundMaintainerManagersFile {
		maintainersDirMap[tmpcpth] = fileMaintainers
	}

	// Check if we need to add the directory to the maintainer's directories mapping tree
	if (!foundMaintainerManagersFile && !belongsToOthers) || iAmOneOfTheMaintainerManagers {
		currentPath := []string{tmpcpth}
		maintainerDirMap.paths = append(maintainerDirMap.paths, currentPath...)
	} else if foundMaintainerManagersFile || belongsToOthers {
		belongsToOtherMaintainerManagers = true
	}

	for _, name := range names {
		if name.IsDir() && name.Name()[0] != '.' {
			tmpcpth := path.Join(cpth, name.Name())
			newPath := path.Join(pth, name.Name())
			belongsToOthers = belongsToOtherMaintainerManagers
			createMaintainerManagersDirectoriesMap(newPath, tmpcpth, maintainerEmail, userName)
		}
	}
	return err
}
func TestHostnamesInheritFromDefault(t *testing.T) {
	assert := assert.Assert(t)
	yml := loadFile("config_hostnames_inherit_from_default.yml")
	setting, err := kasi.ParseConfig(yml)
	assert.Equal(err, nil)
	assert.Equal(len(setting.Services), 1)
	assert.Equal(len(setting.Services[0].Hostnames), 2)
	for _, h := range []string{"my0.github.com", "my1.github.com"} {
		// sort.SearchStrings returns the insertion index, so confirm the
		// hostname is actually present at that position
		hostnames := setting.Services[0].Hostnames
		if i := sort.SearchStrings(hostnames, h); i == len(hostnames) || hostnames[i] != h {
			t.Error("failed to parse hostnames")
		}
	}

	ymlOverride := loadFile("config_hostnames_override_default.yml")
	setting, err = kasi.ParseConfig(ymlOverride)
	assert.Nil(err)
	assert.Equal(len(setting.Services), 1)
	assert.Equal(len(setting.Services[0].Hostnames), 1)
	for _, h := range []string{"my2.github.com"} {
		hostnames := setting.Services[0].Hostnames
		if i := sort.SearchStrings(hostnames, h); i == len(hostnames) || hostnames[i] != h {
			t.Error("failed to parse hostnames")
		}
	}
}
// KeyFromStrings parses a triple of strings to a numerical key.
func KeyFromStrings(s [3]string) Key {
	initFlag.Do(initWords)
	a := sort.SearchStrings(words1, s[0])
	b := sort.SearchStrings(words2, s[1])
	c := sort.SearchStrings(words3, s[2])
	return Key{uint16(a), uint16(b), uint16(c)}
}
// checkRequestDupKeys gives rpctypes.ErrDuplicateKey if the same key is modified twice
func checkRequestDupKeys(reqs []*pb.RequestUnion) error {
	// check put overlap
	keys := make(map[string]struct{})
	for _, requ := range reqs {
		tv, ok := requ.Request.(*pb.RequestUnion_RequestPut)
		if !ok {
			continue
		}
		preq := tv.RequestPut
		if preq == nil {
			continue
		}
		key := string(preq.Key)
		if _, ok := keys[key]; ok {
			return rpctypes.ErrDuplicateKey
		}
		keys[key] = struct{}{}
	}

	// no need to check deletes if no puts; delete overlaps are permitted
	if len(keys) == 0 {
		return nil
	}

	// sort keys for range checking
	sortedKeys := []string{}
	for k := range keys {
		sortedKeys = append(sortedKeys, k)
	}
	sort.Strings(sortedKeys)

	// check put overlap with deletes
	for _, requ := range reqs {
		tv, ok := requ.Request.(*pb.RequestUnion_RequestDeleteRange)
		if !ok {
			continue
		}
		dreq := tv.RequestDeleteRange
		if dreq == nil {
			continue
		}
		key := string(dreq.Key)
		if dreq.RangeEnd == nil {
			if _, found := keys[key]; found {
				return rpctypes.ErrDuplicateKey
			}
		} else {
			lo := sort.SearchStrings(sortedKeys, key)
			hi := sort.SearchStrings(sortedKeys, string(dreq.RangeEnd))
			if lo != hi {
				// element between lo and hi => overlap
				return rpctypes.ErrDuplicateKey
			}
		}
	}
	return nil
}
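// Hedged sketch of the range-overlap idiom used above, detached from the etcd
// types: on a sorted slice, two sort.SearchStrings calls bound the half-open
// interval of keys falling in [start, end), so any difference between the two
// indices means at least one key overlaps the range. Assumes "sort" is
// imported; overlapsSorted is an illustrative name.
func overlapsSorted(sortedKeys []string, start, end string) bool {
	lo := sort.SearchStrings(sortedKeys, start) // first key >= start
	hi := sort.SearchStrings(sortedKeys, end)   // first key >= end
	return lo != hi
}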
func (r *UpsideDownCouchDocIDReader) Advance(docID index.IndexInternalID) (index.IndexInternalID, error) {
	if r.onlyMode {
		r.onlyPos = sort.SearchStrings(r.only, string(docID))
		if r.onlyPos >= len(r.only) {
			// advanced to key after our last only key
			return nil, nil
		}
		r.iterator.Seek(NewBackIndexRow([]byte(r.only[r.onlyPos]), nil, nil).Key())
		key, val, valid := r.iterator.Current()

		var rv index.IndexInternalID
		for valid && r.onlyPos < len(r.only) {
			br, err := NewBackIndexRowKV(key, val)
			if err != nil {
				return nil, err
			}
			if !bytes.Equal(br.doc, []byte(r.only[r.onlyPos])) {
				// the only key we seek'd to didn't exist
				// now look for the closest key that did exist in only
				r.onlyPos = sort.SearchStrings(r.only, string(br.doc))
				if r.onlyPos >= len(r.only) {
					// advanced to key after our last only key
					return nil, nil
				}
				// now seek to this new only key
				r.iterator.Seek(NewBackIndexRow([]byte(r.only[r.onlyPos]), nil, nil).Key())
				key, val, valid = r.iterator.Current()
				continue
			} else {
				rv = append([]byte(nil), br.doc...)
				break
			}
		}
		if valid && r.onlyPos < len(r.only) {
			ok := r.nextOnly()
			if ok {
				r.iterator.Seek(NewBackIndexRow([]byte(r.only[r.onlyPos]), nil, nil).Key())
			}
			return rv, nil
		}
	} else {
		bir := NewBackIndexRow(docID, nil, nil)
		r.iterator.Seek(bir.Key())
		key, val, valid := r.iterator.Current()
		if valid {
			br, err := NewBackIndexRowKV(key, val)
			if err != nil {
				return nil, err
			}
			rv := append([]byte(nil), br.doc...)
			r.iterator.Next()
			return rv, nil
		}
	}
	return nil, nil
}
func TestRandString(t *testing.T) {
	tt := testing2.Wrap(t)
	slice := []string{"1", "2", "3", "4"}
	// sort.SearchStrings never returns a negative index, so a ">= 0" check is
	// vacuous; verify instead that the returned index points at the value
	// RandIn actually produced.
	for i := 0; i < 4; i++ {
		v := RandIn(slice)
		idx := sort.SearchStrings(slice, v)
		tt.True(idx < len(slice) && slice[idx] == v)
	}
	tt.True(RandIn(nil) == "")
	tt.True(RandIn([]string{}) == "")
}
func constructFilters(operator string, field string, value interface{}, logic string) {
	condition := ""
	switch operator {
	case "eq":
		condition = "="
	case "neq":
		condition = "<>"
	case "startswith":
		condition = "like"
	case "contains":
		condition = "like"
	case "endswith":
		condition = "like"
	case "doesnotcontain":
		condition = "not like"
	case "gt":
		condition = ">"
	case "gte":
		condition = ">="
	case "lte":
		condition = "<="
	case "lt":
		condition = "<"
	}

	sort.Strings(FiltersDB)
	sort.Strings(FiltersWO)
	sort.Strings(FiltersMain)
	sort.Strings(FiltersPlant)
	sort.Strings(FieldStr)

	val := tk.ToString(value)
	str := sort.SearchStrings(FieldStr, field)
	if str < len(FieldStr) && FieldStr[str] == field {
		val = "'" + val + "'"
	}

	db := sort.SearchStrings(FiltersDB, field)
	wo := sort.SearchStrings(FiltersWO, field)
	main := sort.SearchStrings(FiltersMain, field)
	plant := sort.SearchStrings(FiltersPlant, field)
	if db < len(FiltersDB) && FiltersDB[db] == field {
		FilDB = append(FilDB, " "+logic+" "+field+" "+condition+" "+val)
	} else if wo < len(FiltersWO) && FiltersWO[wo] == field {
		FilWO = append(FilWO, " "+logic+" "+"RESULT."+field+" "+condition+" "+val+" ")
	} else if main < len(FiltersMain) && FiltersMain[main] == field {
		FilMain = append(FilMain, " "+logic+" "+field+" "+condition+" "+val+" ")
	} else if plant < len(FiltersPlant) && FiltersPlant[plant] == field {
		FilPlant = append(FilPlant, " "+logic+" "+field+" "+condition+" "+val+" ")
	}
}
func validateIDmappings(spec *specs.LinuxSpec, rspec *specs.LinuxRuntimeSpec) error {
	ums := rspec.Linux.UIDMappings
	gms := rspec.Linux.GIDMappings
	if ums != nil {
		out, _ := ioutil.ReadFile("/proc/1/uid_map")
		uidbytes := bytes.Split(out, []byte{'\n'})
		mappings := []string{}
		// convert the content of /proc/1/uid_map to a string slice,
		// one entry per line in the format HostID+ContainerID+Size
		for _, uidbyte := range uidbytes {
			uidstr := strings.Fields(string(uidbyte))
			if len(uidstr) == 3 {
				mapping := uidstr[1] + "+" + uidstr[0] + "+" + uidstr[2]
				mappings = append(mappings, mapping)
			}
		}
		// sort.SearchStrings requires a sorted slice
		sort.Strings(mappings)
		// convert the IDmappings structs from runtime.json to the same string
		// format and check whether each one is set in the container
		for _, um := range ums {
			hostid := strconv.Itoa(int(um.HostID))
			containerid := strconv.Itoa(int(um.ContainerID))
			size := strconv.Itoa(int(um.Size))
			mappingset := hostid + "+" + containerid + "+" + size
			i := sort.SearchStrings(mappings, mappingset)
			if i == len(mappings) || mappings[i] != mappingset {
				return fmt.Errorf("uidmapping failed: %v ", mappingset)
			}
		}
	}
	if gms != nil {
		out, _ := ioutil.ReadFile("/proc/1/gid_map")
		gidbytes := bytes.Split(out, []byte{'\n'})
		mappings := []string{}
		// convert the content of /proc/1/gid_map to a string slice,
		// one entry per line in the format HostID+ContainerID+Size
		for _, gidbyte := range gidbytes {
			gidstr := strings.Fields(string(gidbyte))
			if len(gidstr) == 3 {
				mapping := gidstr[1] + "+" + gidstr[0] + "+" + gidstr[2]
				mappings = append(mappings, mapping)
			}
		}
		// sort.SearchStrings requires a sorted slice
		sort.Strings(mappings)
		// convert the IDmappings structs from runtime.json to the same string
		// format and check whether each one is set in the container
		for _, gm := range gms {
			hostid := strconv.Itoa(int(gm.HostID))
			containerid := strconv.Itoa(int(gm.ContainerID))
			size := strconv.Itoa(int(gm.Size))
			mappingset := hostid + "+" + containerid + "+" + size
			i := sort.SearchStrings(mappings, mappingset)
			if i == len(mappings) || mappings[i] != mappingset {
				return fmt.Errorf("gidmapping failed: %v ", mappingset)
			}
		}
	}
	return nil
}
func TestExplainTrace(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sqlDB, _ := setup(t)
	defer cleanup(s, sqlDB)

	if _, err := sqlDB.Exec(`CREATE DATABASE test; CREATE TABLE test.foo (id INT PRIMARY KEY)`); err != nil {
		t.Fatal(err)
	}
	rows, err := sqlDB.Query(`EXPLAIN (TRACE) INSERT INTO test.foo VALUES (1)`)
	if err != nil {
		t.Fatal(err)
	}
	expParts := []string{"coordinator", "node 1"}
	var parts []string

	pretty := rowsToStrings(rows)
	for _, row := range pretty[1:] {
		part := row[3] // Operation
		if ind := sort.SearchStrings(parts, part); ind == len(parts) || parts[ind] != part {
			parts = append(parts, part)
			sort.Strings(parts)
		}
	}
	sort.Strings(expParts)
	if err := rows.Err(); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expParts, parts) {
		t.Fatalf("expected %v, got %v\n\nResults:\n%v", expParts, parts, prettyPrint(pretty))
	}
}
func (me *Website) Configure(serverBind, wwwRoot, charsetDynamic, charsetStatic, indices string, debug, listDirs bool) {
	debugf("website.Configure(%q, %q, %q, %q, %q, %v, %v)", serverBind, wwwRoot, charsetDynamic, charsetStatic, indices, debug, listDirs)
	me.WwwRoot = wwwRoot
	me.CharsetDynamic = charsetDynamic
	me.CharsetStatic = charsetStatic
	me.ListDirs = listDirs
	me.Debug = debug

	sortedIndices := make([]string, len(me.Indices))
	copy(sortedIndices, me.Indices)
	sort.Strings(sortedIndices)

	for _, part := range strings.Split(indices, ",") {
		trimmed := strings.TrimSpace(part)
		// sort.SearchStrings never returns a negative index; a name is a
		// duplicate only when the returned position holds that exact name
		i := sort.SearchStrings(sortedIndices, trimmed)
		if i < len(sortedIndices) && sortedIndices[i] == trimmed {
			debugf("*NOT* appending duplicate index name %q into %v", trimmed, me.Indices)
		} else {
			debugf("Adding index name %q to %v", trimmed, me.Indices)
			me.Indices = append(me.Indices, trimmed)
		}
	}

	if me.s == nil {
		me.s = newServerContext(me, me.PackageName, serverBind, me.WwwRoot, debug)
	}
	me.configured = true
}
func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
	tx := as.be.BatchTx()
	tx.Lock()
	defer tx.Unlock()

	user := getUser(tx, r.User)
	if user == nil {
		return nil, ErrUserNotFound
	}

	if r.Role != rootRole {
		role := getRole(tx, r.Role)
		if role == nil {
			return nil, ErrRoleNotFound
		}
	}

	idx := sort.SearchStrings(user.Roles, r.Role)
	if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 {
		plog.Warningf("user %s is already granted role %s", r.User, r.Role)
		return &pb.AuthUserGrantRoleResponse{}, nil
	}

	user.Roles = append(user.Roles, r.Role)
	sort.Sort(sort.StringSlice(user.Roles))

	putUser(tx, user)

	as.invalidateCachedPerm(r.User)

	as.commitRevision(tx)

	plog.Noticef("granted role %s to user %s", r.Role, r.User)
	return &pb.AuthUserGrantRoleResponse{}, nil
}
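// Hedged alternative, not the etcd implementation above: because
// sort.SearchStrings already yields the insertion point, the new role could be
// spliced in at that index instead of appending and re-sorting the whole
// slice. Assumes "sort" is imported; insertSorted is an illustrative name.
func insertSorted(ss []string, s string) []string {
	i := sort.SearchStrings(ss, s)
	ss = append(ss, "")    // grow the slice by one element
	copy(ss[i+1:], ss[i:]) // shift the tail one position to the right
	ss[i] = s              // place the new value at its sorted position
	return ss
}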
// disabledSurvey returns true if sid is found in config.Survey.Disabled.
func disabledSurvey(sid string) bool {
	if sid == "" {
		return true
	}
	i := sort.SearchStrings(config.Survey.Disabled, sid)
	return i < len(config.Survey.Disabled) && config.Survey.Disabled[i] == sid
}
// addSessionSurvey adds session sid to the feedback survey list of user uid.
// It returns a list of all sessions the user has submitted a feedback for, including sid.
// If the user has already submitted a feedback for sid, errBadData is returned.
func addSessionSurvey(c context.Context, uid, sid string) ([]string, error) {
	perr := prefixedErr("addSessionSurvey")
	cred, err := getCredentials(c, uid)
	if err != nil {
		return nil, perr(err)
	}

	var data *appFolderData
	for _, fresh := range []bool{false, true} {
		if data, err = getAppFolderData(c, cred, fresh); err != nil {
			break
		}
		// prevent double submission
		sort.Strings(data.Survey)
		i := sort.SearchStrings(data.Survey, sid)
		if i < len(data.Survey) && data.Survey[i] == sid {
			return nil, errBadData
		}
		data.Survey = append(data.Survey, sid)
		err = storeAppFolderData(c, cred, data)
		if err != errConflict {
			break
		}
	}

	if err != nil {
		return nil, perr(err)
	}
	return data.Survey, nil
}
// index returns the index of the tag for the given base, script and region or
// its parent if the tag is not available. If the match is for a parent entry,
// the excess script and region are returned.
func (ts *tagSet) index(base language.Base, scr language.Script, reg language.Region) (int, language.Script, language.Region) {
	lang := base.String()
	index := -1
	if (scr != language.Script{} || reg != language.Region{}) {
		if scr == zzzz {
			scr = language.Script{}
		}
		if reg == zz {
			reg = language.Region{}
		}

		i := sort.SearchStrings(ts.long, lang)
		// All entries have either a script or a region and not both.
		scrStr, regStr := scr.String(), reg.String()
		for ; i < len(ts.long) && strings.HasPrefix(ts.long[i], lang); i++ {
			if s := ts.long[i][len(lang)+1:]; s == scrStr {
				scr = language.Script{}
				index = i + ts.single.len()
				break
			} else if s == regStr {
				reg = language.Region{}
				index = i + ts.single.len()
				break
			}
		}
	}
	if index == -1 {
		index = ts.single.index(lang)
	}
	return index, scr, reg
}
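// Hedged sketch of the prefix-scan pattern used above, detached from the
// golang.org/x/text types: sort.SearchStrings locates the first entry that
// sorts at or after the prefix, and a linear scan then runs only while entries
// still share that prefix. Assumes "sort" and "strings" are imported;
// firstWithPrefix is an illustrative name.
func firstWithPrefix(sorted []string, prefix string) (string, bool) {
	i := sort.SearchStrings(sorted, prefix)
	if i < len(sorted) && strings.HasPrefix(sorted[i], prefix) {
		return sorted[i], true
	}
	return "", false
}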
// DistributeInstances is a common function for implementing the
// state.InstanceDistributor policy based on availability zone spread.
func DistributeInstances(env ZonedEnviron, candidates, group []instance.Id) ([]instance.Id, error) {
	// Determine the best availability zones for the group.
	zoneInstances, err := internalAvailabilityZoneAllocations(env, group)
	if err != nil || len(zoneInstances) == 0 {
		return nil, err
	}

	// Determine which of the candidates are eligible based on whether
	// they are allocated in one of the best availability zones.
	var allEligible []string
	for i := range zoneInstances {
		if i > 0 && len(zoneInstances[i].Instances) > len(zoneInstances[i-1].Instances) {
			break
		}
		for _, id := range zoneInstances[i].Instances {
			allEligible = append(allEligible, string(id))
		}
	}
	sort.Strings(allEligible)
	eligible := make([]instance.Id, 0, len(candidates))
	for _, candidate := range candidates {
		// sort.SearchStrings returns the insertion index, so confirm the
		// candidate is actually present before treating it as eligible
		n := sort.SearchStrings(allEligible, string(candidate))
		if n < len(allEligible) && allEligible[n] == string(candidate) {
			eligible = append(eligible, candidate)
		}
	}
	return eligible, nil
}
func (n *navigation) maintainSelected(name string) {
	i := sort.SearchStrings(n.current.names, name)
	if i == len(n.current.names) {
		i--
	}
	n.current.selected = i
}
func runEnvVars(cmd *Command, envVarKeys []string) {
	w := tabwriter.NewWriter(os.Stdout, 1, 2, 2, ' ', 0)
	defer w.Flush()

	var envVars []cloud66.StackEnvVar
	var err error
	stack := mustStack()
	envVars, err = client.StackEnvVars(stack.Uid)
	must(err)
	sort.Strings(envVarKeys)

	if len(envVarKeys) == 0 {
		printEnvVarsList(w, envVars)
	} else {
		// filter out the unwanted env_vars
		var filteredEnvVars []cloud66.StackEnvVar
		for _, i := range envVars {
			sorted := sort.SearchStrings(envVarKeys, i.Key)
			if sorted < len(envVarKeys) && envVarKeys[sorted] == i.Key {
				filteredEnvVars = append(filteredEnvVars, i)
			}
		}
		printEnvVarsList(w, filteredEnvVars)
	}
}
// contains determines whether the given string is contained in the
// given list of strings, which must have been previously sorted using
// sort.Strings.
func contains(ns []string, n string) bool {
	i := sort.SearchStrings(ns, n)
	if i >= len(ns) {
		return false
	}
	return ns[i] == n
}
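// Hedged usage example for contains; the sort.Strings call is the precondition
// spelled out in its doc comment, and the slice contents here are illustrative
// only.
func exampleContains() bool {
	ns := []string{"banana", "apple", "cherry"}
	sort.Strings(ns)              // contains requires a sorted slice
	return contains(ns, "cherry") // true
}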
// extractTestCases parses the test selection string and assembles the list
// of requested test cases as fully qualified paths.
// NOTE: The selection string is of the form
//   1,2,5:10,55 or testName
//
// Here, each number or range of numbers refers to indexed test cases as
// provided by the -s commandline flag.
func extractTestCases(testDir, selection string, testNames []string) []string {
	var selectedNames []string
	for _, s := range strings.Split(selection, ",") {
		item := strings.TrimSpace(s)
		var items []int
		var err error
		if strings.Contains(item, ":") {
			if items, err = misc.ConvertRangeToList(item); err != nil {
				log.Printf(fmt.Sprint(err))
				continue
			}
			selectedNames = appendTestCases(items, selectedNames, testNames)
		} else if i, err := strconv.Atoi(item); err == nil {
			selectedNames = appendTestCases([]int{i}, selectedNames, testNames)
		} else {
			// item provided corresponds to a test name, make sure it exists
			// (sort.SearchStrings returns len(testNames) when it does not)
			if idx := sort.SearchStrings(testNames, item); idx >= len(testNames) || testNames[idx] != item {
				continue // if we can't find the requested testcase we just skip it
			}
			selectedNames = append(selectedNames, item)
		}
	}
	testPaths := make([]string, len(selectedNames))
	for i, name := range selectedNames {
		testPaths[i] = filepath.Join(testDir, name)
	}
	return testPaths
}
// onBindChanged handles a change in subscription bindings.
func (self *BindExchanger) onBindChanged(topic, groupId string, newbinds []*Binding) {
	if len(groupId) <= 0 {
		delete(self.exchanger, topic)
		return
	}

	// if the topic is not one this server can handle, drop the bindings
	i := sort.SearchStrings(self.topics, topic)
	if i == len(self.topics) || self.topics[i] != topic {
		log.Warn("BindExchanger|onBindChanged|UnAccept Bindings|%s|%s|%s\n", topic, self.topics, newbinds)
		return
	}

	v, ok := self.exchanger[topic]
	if !ok {
		v = make(map[string][]*Binding, 10)
		self.exchanger[topic] = v
	}

	if len(newbinds) > 0 {
		v[groupId] = newbinds
	} else {
		delete(v, groupId)
	}
}
// Remove a string from a slice
func (s Strings) Remove(a string) []string {
	i := sort.SearchStrings(s, a)
	if i >= len(s) || s[i] != a {
		// a is not present; return the slice unchanged
		return s
	}
	return append(s[:i], s[i+1:]...)
}
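// Hedged usage example for Remove; it assumes Strings is a []string-based type
// (implied by the receiver being passed to sort.SearchStrings) and that the
// receiver is already sorted, which SearchStrings requires. The values are
// illustrative only.
func exampleRemove() []string {
	s := Strings{"a", "b", "c"}
	return s.Remove("b") // []string{"a", "c"}
}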
func (self *Storage) DeleteAppID(uaid, appid string, clearOnly bool) (err error) {
	if appid == "" {
		return sperrors.NoChannelError
	}

	appIDArray, err := self.fetchAppIDArray(uaid)
	if err != nil {
		return err
	}

	// sort.SearchStrings never returns -1; the appid is present only when the
	// returned position actually holds it
	pos := sort.SearchStrings(appIDArray, appid)
	if pos < len(appIDArray) && appIDArray[pos] == appid {
		self.storeAppIDArray(uaid, remove(appIDArray, pos))
		pk, err := GenPK(uaid, appid)
		if err != nil {
			return err
		}
		rec, err := self.fetchRec(pk)
		if err == nil {
			rec["s"] = DELETED
			err = self.storeRec(pk, rec)
		} else {
			self.log.Error("storage", fmt.Sprintf("Could not delete %s, %s", pk, err), nil)
		}
	} else {
		err = sperrors.InvalidChannelError
	}
	return err
}
func (d Dictionary) Index(word string) (int, error) {
	i := sort.SearchStrings(d.dict, word)
	if i >= len(d.dict) || d.dict[i] != word {
		return -1, fmt.Errorf("Word %q not found.", word)
	}
	return i, nil
}
func (ds *MemDataStore) GetImages(tags []string, page, pageSize int) []*model.Image {
	images := make([]*model.Image, 0)
	skip := (page - 1) * pageSize

imageLoop:
	for _, img := range ds.fileList {
		for _, tag := range tags {
			cleanTag := strings.ToLower(strings.TrimPrefix(tag, "-"))
			pos := sort.SearchStrings(img.Tags, cleanTag)
			if strings.HasPrefix(tag, "-") {
				// exclusion tag: skip the image if it carries the tag
				if pos < len(img.Tags) && img.Tags[pos] == cleanTag {
					continue imageLoop
				}
			} else {
				// inclusion tag: skip the image if it lacks the tag
				if pos == len(img.Tags) || img.Tags[pos] != cleanTag {
					continue imageLoop
				}
			}
		}
		if skip > 0 {
			skip--
			continue imageLoop
		}
		images = append(images, img)
		if len(images) == pageSize {
			break
		}
	}
	return images
}
func runEnvVars(c *cli.Context) {
	w := tabwriter.NewWriter(os.Stdout, 1, 2, 2, ' ', 0)
	defer w.Flush()

	var envVars []cloud66.StackEnvVar
	var err error
	stack := mustStack(c)
	envVars, err = client.StackEnvVars(stack.Uid)
	must(err)

	envVarKeys := c.Args()
	flagShowHistory := c.Bool("history")
	sort.Strings(envVarKeys)

	if len(envVarKeys) == 0 {
		printEnvVarsList(w, envVars, flagShowHistory)
	} else {
		// filter out the unwanted env_vars
		var filteredEnvVars []cloud66.StackEnvVar
		for _, i := range envVars {
			sorted := sort.SearchStrings(envVarKeys, i.Key)
			if sorted < len(envVarKeys) && envVarKeys[sorted] == i.Key {
				filteredEnvVars = append(filteredEnvVars, i)
			}
		}
		printEnvVarsList(w, filteredEnvVars, flagShowHistory)
	}
}
// 18.6.1. Get relationship types
func TestGetRelationshipTypes(t *testing.T) {
	db := connectTest(t)
	defer cleanup(t, db)
	relTypes := []string{}
	for i := 0; i < 10; i++ {
		relTypes = append(relTypes, rndStr(t))
	}
	// Create relationships
	n0, _ := db.CreateNode(Props{})
	n1, _ := db.CreateNode(Props{})
	rels := []*Relationship{}
	for _, rt := range relTypes {
		aRel, _ := n0.Relate(rt, n1.Id(), Props{})
		rels = append(rels, aRel)
	}
	// Get all relationship types, and confirm the list of types contains at least
	// all those randomly-generated values in relTypes. It cannot be guaranteed
	// that the database will not contain other relationship types beyond these.
	foundRelTypes, err := db.RelTypes()
	if err != nil {
		t.Error(err)
	}
	// sort.SearchStrings requires a sorted slice and returns an insertion
	// index, so sort first and confirm the element at that index matches.
	sort.Strings(foundRelTypes)
	for _, rt := range relTypes {
		i := sort.SearchStrings(foundRelTypes, rt)
		assert.True(t, i < len(foundRelTypes) && foundRelTypes[i] == rt,
			"Could not find expected relationship type: "+rt)
	}
}
// Validate validates the Settings data and returns an error describing
// all problems or nil if there are none.
func (s Settings) Validate() error {
	// TODO: winlogbeat should not try to validate top-level beats config
	validKeys := []string{
		"fields", "fields_under_root", "tags",
		"name", "refresh_topology_freq", "ignore_outgoing", "topology_expire",
		"geoip", "queue_size", "bulk_queue_size", "max_procs",
		"processors", "logging", "output", "path", "winlogbeat",
	}
	sort.Strings(validKeys)

	// Check for invalid top-level keys.
	var errs multierror.Errors
	for k := range s.Raw {
		k = strings.ToLower(k)
		i := sort.SearchStrings(validKeys, k)
		if i >= len(validKeys) || validKeys[i] != k {
			errs = append(errs, fmt.Errorf("Invalid top-level key '%s' "+
				"found. Valid keys are %s", k, strings.Join(validKeys, ", ")))
		}
	}

	err := s.Winlogbeat.Validate()
	if err != nil {
		errs = append(errs, err)
	}

	return errs.Err()
}
func TestTLSSNI02ChallengeCert(t *testing.T) {
	const (
		token = "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA"
		// echo -n evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA | shasum -a 256
		sanA = "7ea0aaa69214e71e02cebb18bb867736.09b730209baabf60e43d4999979ff139.token.acme.invalid"
		// echo -n <token.testKeyECThumbprint> | shasum -a 256
		sanB = "dbbd5eefe7b4d06eb9d1d9f5acb4c7cd.a27d320e4b30332f0b6cb441734ad7b0.ka.acme.invalid"
	)

	client := &Client{Key: testKeyEC}
	tlscert, name, err := client.TLSSNI02ChallengeCert(token)
	if err != nil {
		t.Fatal(err)
	}

	if n := len(tlscert.Certificate); n != 1 {
		t.Fatalf("len(tlscert.Certificate) = %d; want 1", n)
	}
	cert, err := x509.ParseCertificate(tlscert.Certificate[0])
	if err != nil {
		t.Fatal(err)
	}
	names := []string{sanA, sanB}
	if !reflect.DeepEqual(cert.DNSNames, names) {
		t.Fatalf("cert.DNSNames = %v;\nwant %v", cert.DNSNames, names)
	}
	sort.Strings(cert.DNSNames)
	i := sort.SearchStrings(cert.DNSNames, name)
	if i >= len(cert.DNSNames) || cert.DNSNames[i] != name {
		t.Errorf("%v doesn't have %q", cert.DNSNames, name)
	}
}