func (ereporter *EtcdReporter) NewReporter(sc *configuration.ServiceConf) error {
	config := client.Config{
		Endpoints: sc.ReporterHosts,
		Transport: client.DefaultTransport,
	}
	c, err := client.New(config)
	if err != nil {
		glog.Errorf("SRegister: create etcd client failed. Error: %v", err)
		glog.Flush()
		return err
	}
	ereporter.etcdClient = client.NewKeysAPI(c)
	ereporter.path = "/"
	if sc.ReporterPath != "" {
		ereporter.path = sc.ReporterPath
	}
	str, derr := getServiceData(sc)
	if derr != nil {
		glog.Errorf("SRegister: generate service value failed. Error: %v", derr)
		glog.Flush()
		return derr
	}
	ereporter.value = str
	ereporter.key = ""
	return nil
}
func send(tcpconn *net.TCPConn, rsp *Response) (err error) {
	Len := uint32(PkgLenSize) + uint32(len(rsp.Head)) + uint32(len(rsp.Body))
	Hlen := uint16(Uint16Size) + uint16(len(rsp.Head))
	data := make([]byte, 0, int(Len)) // len:0, cap:Len; TODO(zog): cache
	buf := bytes.NewBuffer(data)      // TODO(zog): reuse the buffer
	binary.Write(buf, binary.BigEndian, Len)
	binary.Write(buf, binary.BigEndian, Hlen)
	buf.Write(rsp.Head)
	buf.Write(rsp.Body)
	if debug {
		glog.Infof("sent bytes to %s, len: %d", tcpconn.RemoteAddr().String(), len(buf.Bytes()))
		glog.Flush()
	}
	tcpconn.SetDeadline(time.Now().Add(100 * time.Millisecond))
	if _, err = tcpconn.Write(buf.Bytes()); err != nil {
		return err
	}
	if debug {
		glog.Infof("sent data(len:%d): %v", buf.Len(), buf.Bytes())
		glog.Flush()
	}
	return nil
}
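A minimal usage sketch for send, assuming Response exposes Head and Body as []byte fields (as implied by how send uses them); the address and payload below are placeholders, not values from the original code:

func sendExample() error {
	raddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:9000") // placeholder address
	if err != nil {
		return err
	}
	conn, err := net.DialTCP("tcp", nil, raddr)
	if err != nil {
		return err
	}
	defer conn.Close()
	// Head/Body as []byte fields is an assumption drawn from how send uses them.
	rsp := &Response{Head: []byte("hdr"), Body: []byte("payload")}
	return send(conn, rsp)
}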
// Preprocess before invoking the corresponding handler.
func makeHandler(fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// Recover from panics so a single request cannot crash the process.
		defer func() {
			if e, ok := recover().(error); ok {
				http.Error(w, "internal error.", http.StatusInternalServerError)
				glog.Errorf("WARN: panic in %v. - %v", fn, e)
			}
		}()
		// Log the access.
		glog.V(0).Infof("%s\t%s\t%s\t%s\t%s\t%s", r.Method, r.RemoteAddr, r.URL.Path, r.Proto, r.Referer(), r.UserAgent())
		// Only GET requests are accepted for now; everything else is rejected.
		switch r.Method {
		case "GET":
			m := validPath.FindStringSubmatch(r.URL.Path)
			if m == nil {
				http.NotFound(w, r)
				return
			}
			fn(w, r, r.URL.Path)
			glog.Flush()
			return
		default:
			http.Error(w, "method not supported", http.StatusMethodNotAllowed)
			glog.Flush()
			return
		}
	}
}
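A sketch of how makeHandler might be wired into net/http; the validPath pattern and viewHandler below are hypothetical stand-ins for whatever the package actually defines:

var validPath = regexp.MustCompile("^/(view|edit)/([a-zA-Z0-9]+)$") // hypothetical pattern

// viewHandler is a placeholder handler; makeHandler passes it the request path.
func viewHandler(w http.ResponseWriter, r *http.Request, path string) {
	fmt.Fprintf(w, "requested: %s", path)
}

func main() {
	defer glog.Flush()
	http.HandleFunc("/view/", makeHandler(viewHandler))
	glog.Fatal(http.ListenAndServe(":8080", nil))
}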
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())
	httpClientBuilder := http_client_builder.New().WithoutProxy()
	httpClient := httpClientBuilder.Build()
	requestbuilderProvider := http_requestbuilder.NewHTTPRequestBuilderProvider()
	downloader := debian_url_downloader.New(httpClient.Do, requestbuilderProvider.NewHTTPRequestBuilder)
	lineInspector := debian_line_inspector.New(downloader.DownloadURL)
	hasChanged := debian_apt_source_has_changed.New(lineInspector.HasLineChanged)
	changed, err := do(
		hasChanged,
		*pathPtr,
	)
	if err != nil {
		glog.Exit(err)
	}
	// os.Exit bypasses deferred calls, so flush explicitly before exiting.
	if changed {
		glog.Flush()
		os.Exit(0)
	}
	glog.Flush()
	os.Exit(1)
}
func main() {
	flag.Parse()
	// SetupDB()
	dbConn, err := mssql.NewConn(*mssrv, *msdb, *msuser, *mspass)
	if dbConn == nil || err != nil {
		log.Fatal(err)
	}
	if log.V(2) {
		log.Infof("DOP %d", *dop)
		log.Flush()
	}
	migTables(dbConn, *dop)
	log.Flush()
}
func main() {
	flag.Parse()
	timeout, err := time.ParseDuration(*timeoutDuration)
	if err != nil {
		glog.Fatalf("proxy: failed to parse timeout duration: %s", err)
	}
	proxy, err := mixnet.NewProxyContext(*configPath, *network, *proxyAddr, timeout)
	if err != nil {
		glog.Fatalf("failed to configure proxy: %s", err)
	}
	defer proxy.Close()
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
	go func() {
		sig := <-sigs
		proxy.Close()
		glog.Infof("proxy: closing on signal: %s", sig)
		signo := int(sig.(syscall.Signal))
		os.Exit(0x80 + signo)
	}()
	if err = serveClients(*routerAddr, proxy); err != nil {
		glog.Errorf("proxy: error while serving: %s", err)
	}
	glog.Flush()
}
// test mesos.List
func Test_List(t *testing.T) {
	defer log.Flush()
	md := FakeMasterDetector{}
	httpServer, httpClient, httpTransport := makeHttpMocks()
	defer httpServer.Close()
	cacheTTL := 500 * time.Millisecond
	mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL)
	if err != nil {
		t.Fatalf("createMesosClient should not yield an error: %#v", err)
	}
	mesosCloud := &MesosCloud{client: mesosClient, config: createDefaultConfig()}

	clusters, err := mesosCloud.List(".*") // recognizes the language of all strings
	if err != nil {
		t.Fatalf("List should not yield an error: %#v", err)
	}
	if len(clusters) != 3 {
		t.Fatalf("List with a catch-all filter should return a list of size 3: (actual: %#v)", clusters)
	}

	clusters, err = mesosCloud.List("$^") // end-of-string followed by start-of-string: recognizes the empty language
	if err != nil {
		t.Fatalf("List should not yield an error: %#v", err)
	}
	if len(clusters) != 0 {
		t.Fatalf("List with a reject-all filter should return a list of size 0: (actual: %#v)", clusters)
	}
}
func main() {
	var (
		config *Config
		zk     *Zookeeper
		p      *Pitchfork
		err    error
	)
	flag.Parse()
	defer log.Flush()
	log.Infof("bfs pitchfork start")
	if config, err = NewConfig(configFile); err != nil {
		log.Errorf("NewConfig(\"%s\") error(%v)", configFile, err)
		return
	}
	log.Infof("init zookeeper...")
	if zk, err = NewZookeeper(config.ZookeeperAddrs, config.ZookeeperTimeout, config.ZookeeperPitchforkRoot, config.ZookeeperStoreRoot, config.ZookeeperVolumeRoot); err != nil {
		log.Errorf("NewZookeeper() failed, Quit now")
		return
	}
	log.Infof("register pitchfork...")
	if p, err = NewPitchfork(zk, config); err != nil {
		log.Errorf("pitchfork NewPitchfork() failed, Quit now")
		return
	}
	log.Infof("start probing stores...")
	go p.Probe()
	StartSignal()
	return
}
func main() { glog.Info("Web Server:Loading...") b := flag.Bool("benchmark", false, "") runtime.GOMAXPROCS(runtime.NumCPU()) flag.Parse() if *b { glog.Info("Benchmark Mode") // Creat a file f, err := os.Create("pprof") if err != nil { glog.Fatal(err) } if err = pprof.StartCPUProfile(f); err != nil { glog.Fatal(err) } defer pprof.StopCPUProfile() } defer glog.Flush() glog.Info("Read the config.") if err := InitConf(); err != nil { glog.Fatal(err) } glog.Info("[Web]: listener start.") go start() signal.HandleSignal(signal.InitSignal()) }
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())
	httpClientBuilder := http_client_builder.New()
	httpClient := httpClientBuilder.Build()
	latestInformations := atlassian_utils_latest_information.New(bitbucket.JSON_URL, httpClient.Get)
	latestUrl := atlassian_utils_latest_tar_gz_url.New(latestInformations.VersionInformations)
	latestVersion := atlassian_utils_latest_version.New(latestInformations.VersionInformations)
	commandListProvider := func() command_list.CommandList { return command_list.New() }
	config_parser := debian_config_parser.New()
	copier := debian_copier.New()
	zipExtractor := debian_zip_extractor.New()
	tarGzExtractor := debian_tar_gz_extractor.New()
	requestbuilderProvider := http_requestbuilder.NewHTTPRequestBuilderProvider()
	debianPackageCreator := debian_package_creator.New(commandListProvider, copier, tarGzExtractor.ExtractTarGz, zipExtractor.ExtractZip, httpClient.Do, requestbuilderProvider.NewHTTPRequestBuilder)
	creatorByReader := debian_package_creator_by_reader.New(commandListProvider, debianPackageCreator, tarGzExtractor.ExtractTarGz)
	latestDebianPackageCreator := debian_latest_package_creator.New(httpClient.Get, latestUrl.LatestConfluenceTarGzUrl, latestVersion.LatestVersion, creatorByReader.CreatePackage)
	err := do(
		latestDebianPackageCreator.CreateLatestDebianPackage,
		config_parser,
		*configPtr,
		latestVersion.LatestVersion,
		*targetDirPtr,
	)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	defer glog.Flush()
	flag.Parse()
	if *inputFile == "" {
		glog.Fatal("--input flag cannot be empty")
	}
	b, err := ioutil.ReadFile(*inputFile)
	if err != nil {
		glog.Fatal(err)
	}
	var userAgents []*userAgent
	err = json.Unmarshal(b, &userAgents)
	if err != nil {
		glog.Fatal(err)
	}
	var all, success int
	for _, client := range userAgents {
		all++
		if testClient(client) {
			success++
		}
	}
	glog.Infof("done %d/%d %.2f%%", success, all, (float64(success)/float64(all))*100.0)
}
func setupLogging(duration time.Duration) {
	go func() {
		for range time.Tick(duration) {
			glog.Flush()
		}
	}()
}
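A minimal usage sketch for the periodic flusher above; the 10-second interval and the surrounding main are assumptions for illustration:

func main() {
	flag.Parse()
	defer glog.Flush()
	setupLogging(10 * time.Second) // flush buffered glog output every 10 seconds
	glog.Info("service started")
	select {} // block forever; a real program would run its server loop here
}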
func main() {
	flag.Parse()
	defer glog.Flush()

	reg := descriptor.NewRegistry()

	glog.V(1).Info("Processing code generator request")
	f := os.Stdin
	if *file != "stdin" {
		var err error
		if f, err = os.Open(*file); err != nil {
			glog.Fatal(err)
		}
	}
	req, err := parseReq(f)
	if err != nil {
		glog.Fatal(err)
	}
	if req.Parameter != nil {
		for _, p := range strings.Split(req.GetParameter(), ",") {
			spec := strings.SplitN(p, "=", 2)
			if len(spec) == 1 {
				if err := flag.CommandLine.Set(spec[0], ""); err != nil {
					glog.Fatalf("Cannot set flag %s", p)
				}
				continue
			}
			name, value := spec[0], spec[1]
			if strings.HasPrefix(name, "M") {
				reg.AddPkgMap(name[1:], value)
				continue
			}
			if err := flag.CommandLine.Set(name, value); err != nil {
				glog.Fatalf("Cannot set flag %s", p)
			}
		}
	}

	g := genswagger.New(reg)

	reg.SetPrefix(*importPrefix)
	if err := reg.Load(req); err != nil {
		emitError(err)
		return
	}

	var targets []*descriptor.File
	for _, target := range req.FileToGenerate {
		f, err := reg.LookupFile(target)
		if err != nil {
			glog.Fatal(err)
		}
		targets = append(targets, f)
	}

	out, err := g.Generate(targets)
	glog.V(1).Info("Processed code generator request")
	if err != nil {
		emitError(err)
		return
	}
	emitFiles(out)
}
// pushImage pushes a docker image to the registry specified in its tag.
// The method will retry to push the image when the following scenarios occur:
// - Docker registry is down temporarily or permanently
// - other image is being pushed to the registry
// In any other scenario the push will fail, without retries.
func pushImage(client DockerClient, name string, authConfig docker.AuthConfiguration) error {
	repository, tag := docker.ParseRepositoryTag(name)
	opts := docker.PushImageOptions{
		Name: repository,
		Tag:  tag,
	}
	if glog.V(5) {
		opts.OutputStream = os.Stderr
	}
	var err error
	for retries := 0; retries <= DefaultPushRetryCount; retries++ {
		err = client.PushImage(opts, authConfig)
		if err == nil {
			return nil
		}
		// Retry only on known transient registry errors.
		errMsg := fmt.Sprintf("%s", err)
		if !strings.Contains(errMsg, "ping attempt failed with error") && !strings.Contains(errMsg, "is already in progress") {
			return err
		}
		util.HandleError(fmt.Errorf("push for image %s failed, will retry in %s ...", name, DefaultPushRetryDelay))
		glog.Flush()
		time.Sleep(DefaultPushRetryDelay)
	}
	return err
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())
	httpClient := http_client_builder.New().WithoutProxy().Build()
	requestbuilder_executor := aptly_requestbuilder_executor.New(httpClient.Do)
	requestbuilder := http_requestbuilder.NewHTTPRequestBuilderProvider()
	repo_publisher := aptly_repo_publisher.New(requestbuilder_executor, requestbuilder)
	repo_creater := aptly_repo_creater.New(requestbuilder_executor, requestbuilder, repo_publisher.PublishNewRepo)
	if len(*repoURLPtr) == 0 {
		*repoURLPtr = *apiURLPtr
	}
	err := do(
		repo_creater,
		*repoURLPtr,
		*apiURLPtr,
		*apiUserPtr,
		*apiPasswordPtr,
		*apiPasswordFilePtr,
		*repoPtr,
		*distributionPtr,
		strings.Split(*architecturePtr, ","),
	)
	if err != nil {
		glog.Exit(err)
	}
}
// test mesos.GetNodeResources
func Test_GetNodeResources(t *testing.T) {
	defer log.Flush()
	md := FakeMasterDetector{}
	httpServer, httpClient, httpTransport := makeHttpMocks()
	defer httpServer.Close()
	cacheTTL := 500 * time.Millisecond
	mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL)
	if err != nil {
		t.Fatalf("createMesosClient should not yield an error: %#v", err)
	}
	mesosCloud := &MesosCloud{client: mesosClient, config: createDefaultConfig()}

	resources, err := mesosCloud.GetNodeResources("mesos1.internal.company.com")
	if err != nil {
		t.Fatalf("GetNodeResources should not yield an error: %#v", err)
	}

	expectedCpu := inf.NewDec(8, 0)
	expectedMem := inf.NewDec(15360, 0)
	actualCpu := resources.Capacity["cpu"].Amount
	actualMem := resources.Capacity["memory"].Amount

	if actualCpu.Cmp(expectedCpu) != 0 {
		t.Fatalf("GetNodeResources should return the expected amount of cpu: (expected: %#v, actual: %#v)", expectedCpu, actualCpu)
	}
	if actualMem.Cmp(expectedMem) != 0 {
		t.Fatalf("GetNodeResources should return the expected amount of memory: (expected: %#v, actual: %#v)", expectedMem, actualMem)
	}
}
func main() {
	flag.Parse()
	signal.Notify(quitchan, syscall.SIGINT, syscall.SIGKILL, syscall.SIGHUP, syscall.SIGTERM)
	go dummy()
	grid.StartClientApi()
	glog.Flush()
}
// parseServiceJson parses the service JSON file and builds the configured checkers.
func parseServiceJson(filePath string, sc *configuration.ServiceConf) ([]serviceChecker, error) {
	data, err := ioutil.ReadFile(filePath)
	if err != nil {
		glog.Warningf("SRegister: service file %s doesn't exist. Error: %v.", filePath, err)
		return nil, err
	}
	jsonErr := json.Unmarshal(data, sc)
	if jsonErr != nil {
		glog.Errorf("SRegister: parse service file %s failed. Error: %v.", filePath, jsonErr)
		glog.Flush()
		return nil, jsonErr
	}
	checksSize := len(sc.Checks)
	checks := make([]serviceChecker, 0, checksSize)
	for i := 0; i < checksSize; i++ {
		checkData := sc.Checks[i].(map[string]interface{})
		switch checkData["type"].(string) {
		case "tcp":
			ts := new(tcpService)
			ts.newService(sc.Name, sc.Host, sc.Port, checkData)
			checks = append(checks, ts)
		case "http":
			hs := new(httpService)
			hs.newService(sc.Name, sc.Host, sc.Port, checkData)
			checks = append(checks, hs)
		}
	}
	return checks, nil
}
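A usage sketch for parseServiceJson; the JSON layout below (lowercase keys, the shape of the "checks" entries) is an assumption inferred from the fields accessed above, not a documented format:

func loadChecksExample() {
	const exampleConf = `{
		"name": "demo-service",
		"host": "127.0.0.1",
		"port": 8080,
		"checks": [
			{"type": "tcp", "interval": "10s"},
			{"type": "http", "path": "/health", "interval": "10s"}
		]
	}`
	if err := ioutil.WriteFile("/tmp/service.json", []byte(exampleConf), 0644); err != nil {
		glog.Fatal(err)
	}
	var sc configuration.ServiceConf
	checkers, err := parseServiceJson("/tmp/service.json", &sc)
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("loaded %d checkers for service %s", len(checkers), sc.Name)
}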
// pushImage pushes a docker image to the registry specified in its tag.
// The method will retry to push the image when the following scenarios occur:
// - Docker registry is down temporarily or permanently
// - other image is being pushed to the registry
// In any other scenario the push will fail, without retries.
func pushImage(client DockerClient, name string, authConfig docker.AuthConfiguration) error {
	repository, tag := docker.ParseRepositoryTag(name)
	opts := docker.PushImageOptions{
		Name: repository,
		Tag:  tag,
	}
	if glog.V(5) {
		opts.OutputStream = os.Stderr
	}
	var err error
	for retries := 0; retries <= DefaultPushRetryCount; retries++ {
		err = client.PushImage(opts, authConfig)
		if err == nil {
			return nil
		}
		// Retry only when the error message matches one of the known retriable errors.
		errMsg := fmt.Sprintf("%s", err)
		retriableError := false
		for _, errorString := range RetriableErrors {
			if strings.Contains(errMsg, errorString) {
				retriableError = true
				break
			}
		}
		if !retriableError {
			return err
		}
		utilruntime.HandleError(fmt.Errorf("push for image %s failed, will retry in %s ...", name, DefaultPushRetryDelay))
		glog.Flush()
		time.Sleep(DefaultPushRetryDelay)
	}
	return err
}
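RetriableErrors is referenced above but not defined in this snippet; a plausible definition, mirroring the substrings checked in the earlier pushImage variant, might look like this (the exact entries are an assumption):

// RetriableErrors lists error-message substrings that indicate a transient
// registry failure worth retrying. The two entries mirror the checks in the
// earlier pushImage variant; anything beyond that would be an assumption.
var RetriableErrors = []string{
	"ping attempt failed with error",
	"is already in progress",
}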
func main() {
	var (
		c   *Config
		zk  *Zookeeper
		d   *Directory
		err error
	)
	flag.Parse()
	defer log.Flush()
	runtime.GOMAXPROCS(runtime.NumCPU())
	log.Infof("bfs directory start")
	if c, err = NewConfig(configFile); err != nil {
		log.Errorf("NewConfig(\"%s\") error(%v)", configFile, err)
		return
	}
	log.Infof("init zookeeper...")
	if zk, err = NewZookeeper(c.ZkAddrs, c.ZkTimeout, c.ZkStoreRoot, c.ZkVolumeRoot, c.ZkGroupRoot); err != nil {
		log.Errorf("NewZookeeper() failed, Quit now")
		return
	}
	log.Infof("new directory...")
	if d, err = NewDirectory(c, zk); err != nil {
		log.Errorf("NewDirectory() failed, Quit now")
		return
	}
	log.Infof("init http api...")
	StartApi(c.ApiListen, d)
	if c.PprofEnable {
		log.Infof("init http pprof...")
		StartPprof(c.PprofListen)
	}
	StartSignal()
	return
}
// test mesos.readConfig
func Test_readConfig(t *testing.T) {
	defer log.Flush()
	configString := `
[mesos-cloud]
	mesos-master        = leader.mesos:5050
	http-client-timeout = 500ms
	state-cache-ttl     = 1h`

	reader := bytes.NewBufferString(configString)

	config, err := readConfig(reader)
	if err != nil {
		t.Fatalf("Reading configuration should not yield an error: %#v", err)
	}

	if config.MesosMaster != "leader.mesos:5050" {
		t.Fatalf("Parsed config does not have the expected MesosMaster value: %q", config.MesosMaster)
	}
	if config.MesosHttpClientTimeout.Duration != time.Duration(500)*time.Millisecond {
		t.Fatalf("Parsed config does not have the expected MesosHttpClientTimeout value: %v", config.MesosHttpClientTimeout.Duration)
	}
	if config.StateCacheTTL.Duration != time.Duration(1)*time.Hour {
		t.Fatalf("Parsed config does not have the expected StateCacheTTL value: %v", config.StateCacheTTL.Duration)
	}
}
func Start(port string, onStart func()) {
	// Logging init
	flag.Set("log_dir", utils.GetRuntimeDir(config.GetString("log_dir")))
	flag.Set("alsologtostderr", "true")
	flag.Parse()
	defer glog.Flush()

	m := martini.Classic()
	m.Use(render.Renderer(render.Options{
		Charset:   "UTF-8", // Sets encoding for json and html content-types. Default is "UTF-8".
		Delims:    render.Delims{"${", "}"},
		Directory: utils.GetRuntimeDir("resources/views"),
	}))
	m.Use(martini.Static(utils.GetRuntimeDir("public")))
	controller.MappingController(m)

	http.Handle("/rpc", rpc.GetServer())
	http.Handle("/", m)

	if db.IsConnected() {
		defer db.Close()
	}

	onStart()
	for _, fn := range methods {
		go fn()
	}
	http.ListenAndServe(":"+port, nil)
}
func (c *ModifyUserAuthController) DoModify() {
	// Fetch request parameters.
	glog.Infoln("--modify request start--")
	user := c.GetString("user")
	glog.Infof("account is %s \n", user)
	name := c.GetString("name")
	glog.Infof("image name is %s \n", name)
	mtype, err := c.GetInt("mtype")
	if err != nil {
		ret := map[string]interface{}{"success": false, "msg": "invalid mtype parameter"}
		c.Data["json"] = ret
		glog.Errorln("invalid mtype!!")
		c.ServeJson()
		return
	}
	glog.Infof("mtype is %d", mtype)
	ispull, err := c.GetBool("ispull")
	if err != nil {
		ret := map[string]interface{}{"success": false, "msg": "invalid ispull parameter"}
		c.Data["json"] = ret
		glog.Errorln("invalid ispull!!")
		c.ServeJson()
		return
	}
	glog.Infof("ispull is %t \n", ispull)
	success := models.ACManager.Update(user, &name, mtype, ispull)
	ret := map[string]interface{}{"success": success}
	c.Data["json"] = ret
	c.ServeJson() // respond with JSON directly
	glog.Flush()
}
func main() {
	flag.Parse()
	timeout, err := time.ParseDuration(*timeoutDuration)
	if err != nil {
		glog.Fatalf("router: failed to parse timeout duration: %s", err)
	}
	hp, err := mixnet.NewRouterContext(*configPath, *routerNetwork, *routerAddr, *batchSize, timeout, &x509Identity, tao.Parent())
	if err != nil {
		glog.Fatalf("failed to configure router: %s", err)
	}
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
	go func() {
		sig := <-sigs
		hp.Close()
		glog.Infof("router: closing on signal: %s", sig)
		signo := int(sig.(syscall.Signal))
		os.Exit(0x80 + signo)
	}()
	if err := serveMixnetProxies(hp); err != nil {
		glog.Errorf("router: error while serving: %s", err)
	}
	glog.Flush()
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())
	commandListProvider := func() command_list.CommandList { return command_list.New() }
	config_parser := debian_config_parser.New()
	copier := debian_copier.New()
	zipExtractor := debian_zip_extractor.New()
	tarGzExtractor := debian_tar_gz_extractor.New()
	httpClientBuilder := http_client_builder.New().WithoutProxy()
	httpClient := httpClientBuilder.Build()
	requestbuilderProvider := http_requestbuilder.NewHTTPRequestBuilderProvider()
	debianPackageCreator := debian_package_creator.New(commandListProvider, copier, tarGzExtractor.ExtractTarGz, zipExtractor.ExtractZip, httpClient.Do, requestbuilderProvider.NewHTTPRequestBuilder)
	creatorByReader := debian_package_creator_by_reader.New(commandListProvider, debianPackageCreator, tarGzExtractor.ExtractTarGz)
	debianPackageCreatorArchive := debian_package_creator_archive.New(creatorByReader.CreatePackage)
	err := do(
		debianPackageCreatorArchive,
		config_parser,
		*tarGzPathPtr,
		*configPtr,
		*versionPtr,
		*targetDirPtr,
	)
	if err != nil {
		glog.Exit(err)
	}
}
func TestLogDir(t *testing.T) {
	logDir := "hehe"
	args := []string{
		"--log_dir=hehe",
	}
	cfg := NewConfigure()
	cfg.ParseConfigure(args)
	if _, err := os.Stat(logDir); !os.IsNotExist(err) {
		derr := os.RemoveAll(logDir)
		if derr != nil {
			t.Fatalf("remove dir failed, Error: %v", derr)
		}
	}
	derr := os.Mkdir(logDir, os.ModePerm)
	if derr != nil {
		t.Fatalf("create dir failed, Error: %v", derr)
	}
	glog.Infof("test...")
	glog.Flush()
	if files, err := ioutil.ReadDir(logDir); err != nil || len(files) == 0 {
		t.Fatalf("write log failed")
	}
	if _, err := os.Stat(logDir); !os.IsNotExist(err) {
		derr := os.RemoveAll(logDir)
		if derr != nil {
			t.Fatalf("remove dir failed, Error: %v", derr)
		}
	}
}
func main() {
	var err error
	flag.Parse()
	defer glog.Flush()
	if version {
		showVersion()
		return
	}
	if testMode {
		fmt.Println("config test ok")
		return
	}
	server = NewServer()
	if err = server.Init(configFile); err != nil {
		glog.Errorf("[Pusher]Init server failed, %s", err.Error())
		return
	}
	glog.V(2).Info("[Pusher]Init server success")
	if err = server.Run(); err != nil {
		glog.Errorf("[Pusher]Run server failed, %s", err.Error())
		return
	}
}
func main() {
	var (
		c   *Config
		z   *Zookeeper
		s   *Store
		err error
	)
	flag.Parse()
	defer log.Flush()
	log.Infof("bfs store[%s] start", Ver)
	if c, err = NewConfig(configFile); err != nil {
		log.Errorf("NewConfig(\"%s\") error(%v)", configFile, err)
		return
	}
	if c.Pprof.Enable {
		StartPprof(c.Pprof.Addr)
	}
	if z, err = NewZookeeper(c.Zookeeper.Addrs, c.Zookeeper.Timeout, path.Join(c.Zookeeper.Root, c.ServerId)); err != nil {
		return
	}
	if s, err = NewStore(z, c.Index); err != nil {
		log.Errorf("store init error(%v)", err)
		return
	}
	StartStat(s, c.Stat)
	StartApi(s, c.Api)
	StartAdmin(s, c.Admin)
	if err = z.SetStore(c.Stat, c.Admin, c.Api); err != nil {
		log.Errorf("zk.SetStore() error(%v)", err)
		return
	}
	StartSignal()
	return
}
func main() {
	flag.Parse()

	storageDriver, err := NewStorageDriver(*argDbDriver)
	if err != nil {
		glog.Fatalf("Failed to connect to database: %s", err)
	}

	containerManager, err := manager.New(storageDriver)
	if err != nil {
		glog.Fatalf("Failed to create a Container Manager: %s", err)
	}

	// Register Docker.
	if err := docker.Register(containerManager); err != nil {
		glog.Errorf("Docker registration failed: %v.", err)
	}

	// Register the raw driver.
	if err := raw.Register(containerManager); err != nil {
		glog.Fatalf("raw registration failed: %v.", err)
	}

	// Handler for static content.
	http.HandleFunc(static.StaticResource, func(w http.ResponseWriter, r *http.Request) {
		err := static.HandleRequest(w, r.URL)
		if err != nil {
			fmt.Fprintf(w, "%s", err)
		}
	})

	// Register API handler.
	if err := api.RegisterHandlers(containerManager); err != nil {
		glog.Fatalf("failed to register API handlers: %s", err)
	}

	// Redirect / to containers page.
	http.Handle("/", http.RedirectHandler(pages.ContainersPage, http.StatusTemporaryRedirect))

	// Register the handler for the containers page.
	http.HandleFunc(pages.ContainersPage, func(w http.ResponseWriter, r *http.Request) {
		err := pages.ServerContainersPage(containerManager, w, r.URL)
		if err != nil {
			fmt.Fprintf(w, "%s", err)
		}
	})

	defer glog.Flush()

	go func() {
		glog.Fatal(containerManager.Start())
	}()

	glog.Infof("Starting cAdvisor version: %q", info.VERSION)
	glog.Infof("About to serve on port %v", *argPort)

	addr := fmt.Sprintf(":%v", *argPort)
	glog.Fatal(http.ListenAndServe(addr, nil))
}
// test mesos.ListClusters
func Test_ListClusters(t *testing.T) {
	defer log.Flush()
	md := FakeMasterDetector{}
	httpServer, httpClient, httpTransport := makeHttpMocks()
	defer httpServer.Close()
	cacheTTL := 500 * time.Millisecond
	mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL)
	if err != nil {
		t.Fatalf("createMesosClient should not yield an error: %#v", err)
	}
	mesosCloud := &MesosCloud{client: mesosClient, config: createDefaultConfig()}

	clusters, err := mesosCloud.ListClusters()
	if err != nil {
		t.Fatalf("ListClusters should not yield an error: %#v", err)
	}
	if len(clusters) != 1 {
		t.Fatalf("ListClusters should return a list of size 1: (actual: %#v)", clusters)
	}

	expectedClusterNames := []string{"mesos"}
	if !reflect.DeepEqual(clusters, expectedClusterNames) {
		t.Fatalf("ListClusters should return the expected list of names: (expected: %#v, actual: %#v)", expectedClusterNames, clusters)
	}
}