// The main function sets up the connection to the storage backend for
// aggregated events (e.g. MongoDB) and fires up an HTTPS server which acts as
// an endpoint for docker notifications.
func main() {
	flag.Parse()
	rand.Seed(time.Now().UnixNano())
	glog.CopyStandardLogTo("INFO")

	// Create our application context
	ctx, _ := NewAppContext()

	// Load config file given by first argument
	configFilePath := flag.Arg(0)
	if configFilePath == "" {
		glog.Exit("Config file not specified")
	}
	c, err := LoadConfig(configFilePath)
	if err != nil {
		glog.Exit(err)
	}
	ctx.Config = c

	// Connect to MongoDB
	session, err := createMongoDbSession(c)
	if err != nil {
		glog.Exit(err)
	}
	defer session.Close()
	ctx.Session = session

	// Wait for errors on inserts and updates and for flushing changes to disk
	session.SetSafe(&mgo.Safe{FSync: true})

	collection := ctx.Session.DB(ctx.Config.DialInfo.DialInfo.Database).C(ctx.Config.Collection)

	// The repository structure shall have a unique key on the repository's
	// name field
	index := mgo.Index{
		Key:        []string{"repositoryname"},
		Unique:     true,
		DropDups:   true,
		Background: true,
		Sparse:     true,
	}
	if err = collection.EnsureIndex(index); err != nil {
		glog.Exitf("It looks like your mongo database is inconsistent. " +
			"Make sure you have no duplicate entries for repository names.")
	}

	// Setup HTTPS endpoint
	var httpConnectionString = ctx.Config.GetEndpointConnectionString()
	glog.Infof("About to listen on \"%s%s\".", httpConnectionString, ctx.Config.Server.Route)
	mux := http.NewServeMux()
	appHandler := &appHandler{ctx: ctx}
	mux.Handle(ctx.Config.Server.Route, appHandler)
	err = http.ListenAndServeTLS(httpConnectionString, ctx.Config.Server.Ssl.Cert, ctx.Config.Server.Ssl.CertKey, mux)
	if err != nil {
		glog.Exit(err)
	}

	glog.Info("Exiting.")
}
// Set implements the flag.Value interface required by flag.Var: it treats the
// argument as the path of a JSON config file and unmarshals it into config.
func (config *Config) Set(runonceConfig string) error {
	_, err := os.Stat(runonceConfig)
	if err != nil {
		log.Exit("missing config file: ", err.Error())
	}
	configData, err := ioutil.ReadFile(runonceConfig)
	if err != nil {
		log.Exit("failed to read config file: ", err.Error())
	}
	return json.Unmarshal(configData, config)
}
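// Usage sketch (illustrative, not from the original source): because Set,
// together with a String method, satisfies flag.Value, a Config can be
// registered with flag.Var and populated from a path given on the command
// line. The flag name "runonce_config" and the package-level variable below
// are assumptions.
var config Config

func init() {
	// flag.Var calls config.Set with the value passed on the command line.
	flag.Var(&config, "runonce_config", "path to a JSON run-once config file")
}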
func main() {
	flag.Parse()
	log.Infof("Simulating %v clients.", *count)
	for i := 0; i < *count; i++ {
		id := uuid.New()
		log.Infof("client %v with id %v", i, id)
		client, err := doorman.NewWithID(*addr, id, doorman.DialOpts(grpc.WithInsecure()))
		if err != nil {
			log.Exit(err)
		}
		defer client.Close()

		res, err := client.Resource(*resource, *initialCapacity)
		if err != nil {
			log.Exit(err)
		}

		go manipulateCapacity(res, *initialCapacity, id)

		conn, err := grpc.Dial(*target, grpc.WithInsecure())
		if err != nil {
			log.Exitf("did not connect: %v", err)
		}
		defer conn.Close()

		c := pb.NewGreeterClient(conn)
		rl := ratelimiter.NewQPS(res)
		for i := 0; i < *workers; i++ {
			go func() {
				ctx := context.Background()
				for {
					if err := rl.Wait(ctx); err != nil {
						log.Exitf("rl.Wait: %v", err)
					}
					ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
					if _, err := c.SayHello(ctx, &pb.HelloRequest{Name: *resource}); err != nil {
						log.Error(err)
					}
					cancel()
				}
			}()
		}
	}
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(fmt.Sprintf(":%v", *port), nil)
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	httpClient := http_client_builder.New().WithoutProxy().Build()
	httpRequestBuilderProvider := http_requestbuilder.NewHTTPRequestBuilderProvider()
	package_lister := aptly_package_lister.New(httpClient.Do, httpRequestBuilderProvider.NewHTTPRequestBuilder)

	if len(*repoURLPtr) == 0 {
		*repoURLPtr = *apiURLPtr
	}

	writer := os.Stdout
	err := do(
		writer,
		package_lister,
		*repoURLPtr,
		*apiURLPtr,
		*apiUserPtr,
		*apiPasswordPtr,
		*apiPasswordFilePtr,
		*repoPtr,
	)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	httpClientBuilder := http_client_builder.New()
	httpClient := httpClientBuilder.Build()
	latestVersion := sonar_latest_version.New(httpClient.Get)
	urlByVersion := url_by_version.New()
	latestUrl := sonar_latest_zip_url.New(latestVersion.LatestSonarVersion, urlByVersion.SonarZipUrlForVersion)
	commandListProvider := func() command_list.CommandList { return command_list.New() }
	config_parser := debian_config_parser.New()
	copier := debian_copier.New()
	zipExtractor := debian_zip_extractor.New()
	tarGzExtractor := debian_tar_gz_extractor.New()
	requestbuilderProvider := http_requestbuilder.NewHTTPRequestBuilderProvider()
	debianPackageCreator := debian_package_creator.New(commandListProvider, copier, tarGzExtractor.ExtractTarGz, zipExtractor.ExtractZip, httpClient.Do, requestbuilderProvider.NewHTTPRequestBuilder)
	creatorByReader := debian_package_creator_by_reader.New(commandListProvider, debianPackageCreator, zipExtractor.ExtractZip)
	latestDebianPackageCreator := debian_latest_package_creator.New(httpClient.Get, latestUrl.LatestSonarZipUrl, latestVersion.LatestSonarVersion, creatorByReader.CreatePackage)

	writer := os.Stdout
	err := do(writer, latestDebianPackageCreator.CreateLatestDebianPackage, config_parser.ParseFileToConfig, latestVersion.LatestSonarVersion, *configPtr)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	flag.Parse()
	domain, err := tao.LoadDomain(*configPath, []byte(*domainPass))
	if err != nil {
		glog.Exitf("Couldn't load the config path %s: %s\n", *configPath, err)
		return
	}

	sock, err := net.Listen(*network, *addr)
	if err != nil {
		glog.Exit("Couldn't bind socket to address:", err)
		return
	}

	fmt.Println("tcca: accepting connections")
	for {
		conn, err := sock.Accept()
		if err != nil {
			glog.Exitf("Couldn't accept a connection on %s: %s", *addr, err)
			return
		}
		go tao.HandleCARequest(conn, domain.Keys.SigningKey, domain.Guard)
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	httpClient := http_client_builder.New().WithoutProxy().Build()
	httpRequestBuilderProvider := http_requestbuilder.NewHTTPRequestBuilderProvider()
	packageLister := aptly_package_lister.New(httpClient.Do, httpRequestBuilderProvider.NewHTTPRequestBuilder)
	requestbuilder_executor := aptly_requestbuilder_executor.New(httpClient.Do)
	repoPublisher := aptly_repo_publisher.New(requestbuilder_executor, httpRequestBuilderProvider)
	packageDeleter := aptly_package_deleter.New(httpClient.Do, httpRequestBuilderProvider.NewHTTPRequestBuilder, repoPublisher.PublishRepo)
	repoCleaner := aptly_repo_cleaner.New(packageDeleter.DeletePackagesByKey, packageLister.ListPackages)

	if len(*repoURLPtr) == 0 {
		*repoURLPtr = *apiURLPtr
	}

	err := do(
		repoCleaner,
		*repoURLPtr,
		*apiURLPtr,
		*apiUserPtr,
		*apiPasswordPtr,
		*apiPasswordFilePtr,
		*repoPtr,
		*distributionPtr,
	)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	httpClientBuilder := http_client_builder.New().WithoutProxy()
	httpClient := httpClientBuilder.Build()
	requestbuilderProvider := http_requestbuilder.NewHTTPRequestBuilderProvider()
	downloader := debian_url_downloader.New(httpClient.Do, requestbuilderProvider.NewHTTPRequestBuilder)
	lineInspector := debian_line_inspector.New(downloader.DownloadURL)
	hasChanged := debian_apt_source_has_changed.New(lineInspector.HasLineChanged)

	changed, err := do(
		hasChanged,
		*pathPtr,
	)
	if err != nil {
		glog.Exit(err)
	}
	// Flush explicitly before os.Exit, since deferred calls do not run then.
	if changed {
		glog.Flush()
		os.Exit(0)
	}
	glog.Flush()
	os.Exit(1)
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	httpClient := http_client_builder.New().WithoutProxy().Build()
	requestbuilder_executor := aptly_requestbuilder_executor.New(httpClient.Do)
	requestbuilder := http_requestbuilder.NewHTTPRequestBuilderProvider()
	repo_publisher := aptly_repo_publisher.New(requestbuilder_executor, requestbuilder)
	repo_creater := aptly_repo_creater.New(requestbuilder_executor, requestbuilder, repo_publisher.PublishNewRepo)

	if len(*repoURLPtr) == 0 {
		*repoURLPtr = *apiURLPtr
	}

	err := do(
		repo_creater,
		*repoURLPtr,
		*apiURLPtr,
		*apiUserPtr,
		*apiPasswordPtr,
		*apiPasswordFilePtr,
		*repoPtr,
		*distributionPtr,
		strings.Split(*architecturePtr, ","),
	)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	httpClientBuilder := http_client_builder.New()
	httpClient := httpClientBuilder.Build()
	latestInformations := atlassian_utils_latest_information.New(bitbucket.JSON_URL, httpClient.Get)
	latestUrl := atlassian_utils_latest_tar_gz_url.New(latestInformations.VersionInformations)
	latestVersion := atlassian_utils_latest_version.New(latestInformations.VersionInformations)
	commandListProvider := func() command_list.CommandList { return command_list.New() }
	config_parser := debian_config_parser.New()
	copier := debian_copier.New()
	zipExtractor := debian_zip_extractor.New()
	tarGzExtractor := debian_tar_gz_extractor.New()
	requestbuilderProvider := http_requestbuilder.NewHTTPRequestBuilderProvider()
	debianPackageCreator := debian_package_creator.New(commandListProvider, copier, tarGzExtractor.ExtractTarGz, zipExtractor.ExtractZip, httpClient.Do, requestbuilderProvider.NewHTTPRequestBuilder)
	creatorByReader := debian_package_creator_by_reader.New(commandListProvider, debianPackageCreator, tarGzExtractor.ExtractTarGz)
	latestDebianPackageCreator := debian_latest_package_creator.New(httpClient.Get, latestUrl.LatestConfluenceTarGzUrl, latestVersion.LatestVersion, creatorByReader.CreatePackage)

	err := do(
		latestDebianPackageCreator.CreateLatestDebianPackage,
		config_parser,
		*configPtr,
		latestVersion.LatestVersion,
		*targetDirPtr,
	)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	commandListProvider := func() debian_command_list.CommandList { return debian_command_list.New() }
	configBuilderWithConfig := func(config *debian_config.Config) debian_config_builder.ConfigBuilder {
		return debian_config_builder.NewWithConfig(config)
	}
	config_parser := debian_config_parser.New()
	copier := debian_copier.New()
	zipExtractor := debian_zip_extractor.New()
	tarGzExtractor := debian_tar_gz_extractor.New()
	httpClientBuilder := http_client_builder.New().WithoutProxy()
	httpClient := httpClientBuilder.Build()
	requestbuilderProvider := http_requestbuilder.NewHTTPRequestBuilderProvider()
	debianPackageCreator := debian_package_creator.New(commandListProvider, copier, tarGzExtractor.ExtractTarGz, zipExtractor.ExtractZip, httpClient.Do, requestbuilderProvider.NewHTTPRequestBuilder)

	err := do(config_parser, configBuilderWithConfig, debianPackageCreator, *configPtr, *namePtr, *versionPtr, *sourcePtr, *targetPtr)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	commandListProvider := func() command_list.CommandList { return command_list.New() }
	config_parser := debian_config_parser.New()
	copier := debian_copier.New()
	zipExtractor := debian_zip_extractor.New()
	tarGzExtractor := debian_tar_gz_extractor.New()
	httpClientBuilder := http_client_builder.New().WithoutProxy()
	httpClient := httpClientBuilder.Build()
	requestbuilderProvider := http_requestbuilder.NewHTTPRequestBuilderProvider()
	debianPackageCreator := debian_package_creator.New(commandListProvider, copier, tarGzExtractor.ExtractTarGz, zipExtractor.ExtractZip, httpClient.Do, requestbuilderProvider.NewHTTPRequestBuilder)
	creatorByReader := debian_package_creator_by_reader.New(commandListProvider, debianPackageCreator, tarGzExtractor.ExtractTarGz)
	debianPackageCreatorArchive := debian_package_creator_archive.New(creatorByReader.CreatePackage)

	err := do(
		debianPackageCreatorArchive,
		config_parser,
		*tarGzPathPtr,
		*configPtr,
		*versionPtr,
		*targetDirPtr,
	)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	if err := do(); err != nil {
		glog.Exit(err)
	}
}
// parseIP resolves the given address via DNS and returns the first IP found,
// exiting if the lookup fails or yields no results.
func parseIP(address string) net.IP {
	addr, err := net.LookupIP(address)
	if err != nil {
		log.Exit(err)
	}
	if len(addr) < 1 {
		log.Exitf("failed to parse IP from address '%v'", address)
	}
	return addr[0]
}
func main() {
	flag.Parse()

	if *server == "" || *resource == "" {
		log.Exit("both --server and --resource must be specified")
	}
	if *clientID == "" {
		log.Exit("--client_id must be set")
	}

	var opts []grpc.DialOption
	if len(*caFile) != 0 {
		var creds credentials.TransportAuthenticator
		var err error
		creds, err = credentials.NewClientTLSFromFile(*caFile, "")
		if err != nil {
			log.Exitf("Failed to create TLS credentials %v", err)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}

	client, err := doorman.NewWithID(*server, *clientID, doorman.DialOpts(opts...))
	if err != nil {
		log.Exitf("could not create client: %v", err)
	}
	defer client.Close()

	resource, err := client.Resource(*resource, *wants)
	if err != nil {
		log.Exitf("could not acquire resource: %v", err)
	}

	fmt.Println(<-resource.Capacity())
}
func main() {
	flag.Parse()
	domain, err := tao.LoadDomain(*configPath, []byte(*domainPass))
	if err != nil {
		glog.Exitf("Couldn't load the config path %s: %s\n", *configPath, err)
		return
	}

	// Set up temporary keys for the connection, since the only thing that
	// matters to the remote client is that they receive a correctly-signed new
	// attestation from the policy key.
	keys, err := tao.NewTemporaryKeys(tao.Signing)
	if err != nil {
		glog.Exit("Couldn't set up temporary keys for the connection:", err)
		return
	}
	keys.Cert, err = keys.SigningKey.CreateSelfSignedX509(&pkix.Name{
		Organization: []string{"Google Tao Demo"}})
	if err != nil {
		glog.Exit("Couldn't set up a self-signed cert:", err)
		return
	}

	sock, err := net.Listen(*network, *addr)
	if err != nil {
		glog.Exit("Couldn't bind socket to address:", err)
		return
	}

	fmt.Println("tcca: accepting connections")
	for {
		conn, err := sock.Accept()
		if err != nil {
			glog.Exitf("Couldn't accept a connection on %s: %s", *addr, err)
			return
		}
		go tao.HandleCARequest(conn, domain.Keys.SigningKey, domain.Guard)
	}
}
func cred(fwinfo *mesos.FrameworkInfo) *mesos.Credential {
	cred := (*mesos.Credential)(nil)
	mesosAuthPrincipal := config.Runonce.AuthPrincipal
	if mesosAuthPrincipal != "" {
		fwinfo.Principal = proto.String(mesosAuthPrincipal)
		cred = &mesos.Credential{
			Principal: proto.String(mesosAuthPrincipal),
		}
		mesosAuthSecretFile := config.Runonce.AuthSecretFile
		if mesosAuthSecretFile != "" {
			_, err := os.Stat(mesosAuthSecretFile)
			if err != nil {
				log.Exit("missing secret file: ", err.Error())
			}
			secret, err := ioutil.ReadFile(mesosAuthSecretFile)
			if err != nil {
				log.Exit("failed to read secret file: ", err.Error())
			}
			cred.Secret = proto.String(strings.TrimSuffix(string(secret), "\n"))
		}
	}
	return cred
}
// No polling: run once and exit.
func runOnce() {
	if success, newProfiles := loadNewProfiles(); !success {
		if len(newProfiles) > 0 {
			glog.Exitf("Not all profiles were successfully loaded. Loaded: %v", newProfiles)
		} else {
			glog.Exit("Error loading profiles.")
		}
	} else {
		if len(newProfiles) > 0 {
			glog.Infof("Successfully loaded profiles: %v", newProfiles)
		} else {
			glog.Warning("No new profiles found.")
		}
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	writer := os.Stdout
	httpClientBuilder := http_client_builder.New()
	httpClient := httpClientBuilder.Build()
	latestVersion := latest_version.New(httpClient.Get)

	err := do(writer, latestVersion.LatestGolangVersion)
	if err != nil {
		glog.Exit(err)
	}
}
// RunOneShot performs the work of the one_shot commandline flag: after
// compiling the programs, mtail reads each of the log files in full once,
// dumps the metric results, and then exits.
func (m *Mtail) RunOneShot() {
	fmt.Println("Oneshot results:")
	for _, pathname := range m.o.LogPaths {
		err := m.OneShot(pathname)
		if err != nil {
			glog.Exitf("Failed one shot mode for %q: %s\n", pathname, err)
		}
	}
	if m.o.OneShotMetrics {
		fmt.Println("Metrics store:")
		if err := m.WriteMetrics(os.Stdout); err != nil {
			glog.Exit(err)
		}
	}
	m.Close()
}
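// Usage sketch (illustrative; the run helper and the oneShot argument are
// assumptions, not part of the original source): a caller can select between
// the one-shot mode above and the long-running Serve mode shown further below.
func run(m *Mtail, oneShot bool) {
	if oneShot {
		// Read all configured logs once, dump metrics, and return.
		m.RunOneShot()
		return
	}
	// Otherwise tail the logs and serve the HTTP endpoints until shutdown.
	m.Serve()
}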
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	z := zip_extractor.New()
	err := do(
		z.ExtractZipFile,
		*zipPtr,
		*targetPtr,
	)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	writer := os.Stdout
	httpClientBuilder := http_client_builder.New()
	httpClient := httpClientBuilder.Build()
	latestVersion := latest_version.New(httpClient.Get)
	urlByVersion := url_by_version.New()
	latestZipUrl := latest_zip_url.New(latestVersion.LatestSonarVersion, urlByVersion.SonarZipUrlForVersion)

	err := do(writer, latestZipUrl.LatestSonarZipUrl)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	httpClientBuilder := http_client_builder.New()
	httpClient := httpClientBuilder.Build()

	bambooLatestInformations := atlassian_utils_latest_information.New(bamboo.JSON_URL, httpClient.Get)
	bambooLatestVersion := atlassian_utils_latest_version.New(bambooLatestInformations.VersionInformations)
	confluenceLatestInformations := atlassian_utils_latest_information.New(confluence.JSON_URL, httpClient.Get)
	confluenceLatestVersion := atlassian_utils_latest_version.New(confluenceLatestInformations.VersionInformations)
	jiraCorelatestInformations := atlassian_utils_latest_information.New(jira_core.JSON_URL, httpClient.Get)
	jiraCoreLatestVersion := atlassian_utils_latest_version.New(jiraCorelatestInformations.VersionInformations)
	jiraServiceDeskLatestInformations := atlassian_utils_latest_information.New(jira_servicedesk.JSON_URL, httpClient.Get)
	jiraServiceDeskLatestVersion := atlassian_utils_latest_version.New(jiraServiceDeskLatestInformations.VersionInformations)
	jiraSoftwareLatestInformations := atlassian_utils_latest_information.New(jira_software.JSON_URL, httpClient.Get)
	jiraSoftwareLatestVersion := atlassian_utils_latest_version.New(jiraSoftwareLatestInformations.VersionInformations)
	bitbucketLatestInformations := atlassian_utils_latest_information.New(bitbucket.JSON_URL, httpClient.Get)
	bitbucketLatestVersion := atlassian_utils_latest_version.New(bitbucketLatestInformations.VersionInformations)
	crowdLatestInformations := atlassian_utils_latest_information.New(crowd.JSON_URL, httpClient.Get)
	crowdLatestVersion := atlassian_utils_latest_version.New(crowdLatestInformations.VersionInformations)

	writer := os.Stdout
	err := do(
		writer,
		bambooLatestVersion.LatestVersion,
		confluenceLatestVersion.LatestVersion,
		jiraCoreLatestVersion.LatestVersion,
		jiraServiceDeskLatestVersion.LatestVersion,
		jiraSoftwareLatestVersion.LatestVersion,
		bitbucketLatestVersion.LatestVersion,
		crowdLatestVersion.LatestVersion,
	)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	httpClient := http_client_builder.New().WithoutProxy().Build()
	httpRequestBuilderProvider := http_requestbuilder.NewHTTPRequestBuilderProvider()
	requestbuilder_executor := aptly_requestbuilder_executor.New(httpClient.Do)
	requestbuilder := http_requestbuilder.NewHTTPRequestBuilderProvider()
	repo_publisher := aptly_repo_publisher.New(requestbuilder_executor, requestbuilder)
	packageUploader := aptly_package_uploader.New(requestbuilder_executor, requestbuilder, repo_publisher.PublishRepo)
	packageCopier := aptly_package_copier.New(packageUploader, requestbuilder, httpClient.Do)
	packageLister := aptly_package_lister.New(httpClient.Do, httpRequestBuilderProvider.NewHTTPRequestBuilder)
	packageDetailLister := aptly_model_lister.New(packageLister.ListPackages)
	packageVersion := aptly_package_versions.New(packageDetailLister.ListPackageDetails)
	packageLatestVersion := aptly_package_latest_version.New(packageVersion.PackageVersions)
	packageDetailLatestLister := aptly_model_latest_lister.New(packageDetailLister.ListPackageDetails)

	if len(*repoURLPtr) == 0 {
		*repoURLPtr = *apiURLPtr
	}

	err := do(
		packageCopier,
		packageLatestVersion,
		packageDetailLatestLister,
		*repoURLPtr,
		*apiURLPtr,
		*apiUserPtr,
		*apiPasswordPtr,
		*apiPasswordFilePtr,
		*sourcePtr,
		*targetPtr,
		*targetDistributionPtr,
		*namePtr,
		*versionPtr,
	)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	httpClientBuilder := http_client_builder.New()
	httpClient := httpClientBuilder.Build()
	latestInformations := atlassian_utils_latest_information.New(crowd.JSON_URL, httpClient.Get)
	latestVersion := atlassian_utils_latest_version.New(latestInformations.VersionInformations)

	writer := os.Stdout
	err := do(
		writer,
		latestVersion.LatestVersion,
	)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	defer glog.Flush()
	glog.CopyStandardLogTo("info")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	httpClientBuilder := http_client_builder.New()
	httpClient := httpClientBuilder.Build()
	latestInformations := atlassian_utils_latest_information.New(bitbucket.JSON_URL, httpClient.Get)
	latestUrl := atlassian_utils_latest_tar_gz_url.New(latestInformations.VersionInformations)

	writer := os.Stdout
	err := do(
		writer,
		latestUrl.LatestConfluenceTarGzUrl,
	)
	if err != nil {
		glog.Exit(err)
	}
}
func main() {
	flag.Parse()
	if *progs == "" {
		glog.Exitf("No mtail program directory specified; use -progs")
	}
	if *logs == "" && *logFds == "" {
		glog.Exitf("No logs specified to tail; use -logs or -logfds")
	}

	var logPathnames []string
	for _, pathname := range strings.Split(*logs, ",") {
		if pathname != "" {
			logPathnames = append(logPathnames, pathname)
		}
	}
	var logDescriptors []int
	for _, fdStr := range strings.Split(*logFds, ",") {
		fdNum, err := strconv.Atoi(fdStr)
		if err == nil {
			logDescriptors = append(logDescriptors, fdNum)
		}
	}
	if len(logPathnames) == 0 && len(logDescriptors) == 0 {
		glog.Exit("No logs to tail.")
	}

	o := mtail.Options{
		Progs:                *progs,
		LogPaths:             logPathnames,
		LogFds:               logDescriptors,
		Port:                 *port,
		OneShot:              *oneShot,
		OneShotMetrics:       *oneShotMetrics,
		CompileOnly:          *compileOnly,
		DumpBytecode:         *dumpBytecode,
		SyslogUseCurrentYear: *syslogUseCurrentYear,
	}
	m, err := mtail.New(o)
	if err != nil {
		glog.Fatalf("couldn't start: %s", err)
	}
	m.Run()
}
// Serve begins the long-running mode of mtail, in which it watches the log
// files for changes and sends any new lines found into the lines channel for
// pick up by the virtual machines. It will continue to do so until it is
// signalled to exit.
func (m *Mtail) Serve() {
	err := m.StartTailing()
	if err != nil {
		glog.Exitf("tailing failed: %s", err)
	}

	http.Handle("/", m)
	http.HandleFunc("/json", http.HandlerFunc(m.e.HandleJSON))
	http.HandleFunc("/metrics", http.HandlerFunc(m.e.HandlePrometheusMetrics))
	http.HandleFunc("/varz", http.HandlerFunc(m.e.HandleVarz))
	http.HandleFunc("/quitquitquit", http.HandlerFunc(m.handleQuit))
	m.e.StartMetricPush()

	go func() {
		glog.Infof("Listening on port %s", m.o.Port)
		err := http.ListenAndServe(":"+m.o.Port, nil)
		if err != nil {
			glog.Exit(err)
		}
	}()
	m.shutdownHandler()
}
func main() {
	flag.Parse()
	if *progs == "" {
		glog.Exitf("No mtail program directory specified; use -progs")
	}
	if *logs == "" {
		glog.Exitf("No logs specified to tail; use -logs")
	}

	var logPathnames []string
	for _, pathname := range strings.Split(*logs, ",") {
		if pathname != "" {
			logPathnames = append(logPathnames, pathname)
		}
	}
	if len(logPathnames) == 0 {
		glog.Exit("No logs to tail.")
	}

	o := mtail.Options{
		Progs:    *progs,
		LogPaths: logPathnames,
		Port:     *port,
	}
	m, err := mtail.New(o)
	if err != nil {
		glog.Fatalf("couldn't start: %s", err)
	}

	c := newMtailCollector(m)
	prometheus.MustRegister(c)

	go monitor()

	http.Handle("/metrics", prometheus.Handler())
	log.Fatal(http.ListenAndServe(":"+*port, nil))
}
func main() {
	flag.Parse()

	cfg, err := conf.ReadConfigFile(*configFile)
	if err != nil {
		log.Exitf("Failed to read configuration file: %v", err)
	}

	clusterName := cfgOpt(cfg, "cluster", "name")
	if clusterName == "" {
		log.Exit("Unable to get cluster name")
	}

	anycastEnabled := config.DefaultEngineConfig().AnycastEnabled
	if opt := cfgOpt(cfg, "cluster", "anycast_enabled"); opt != "" {
		if anycastEnabled, err = cfg.GetBool("cluster", "anycast_enabled"); err != nil {
			log.Exitf("Unable to parse cluster anycast_enabled: %v", err)
		}
	}

	clusterVIPv4, err := cfgIP(cfg, "cluster", "vip_ipv4")
	if err != nil {
		log.Exitf("Unable to get cluster vip_ipv4: %v", err)
	}
	clusterVIPv6, err := cfgIP(cfg, "cluster", "vip_ipv6")
	if err != nil {
		log.Exitf("Unable to get cluster vip_ipv6: %v", err)
	}
	nodeIPv4, err := cfgIP(cfg, "cluster", "node_ipv4")
	if err != nil {
		log.Exitf("Unable to get cluster node_ipv4: %v", err)
	}
	nodeIPv6, err := cfgIP(cfg, "cluster", "node_ipv6")
	if err != nil {
		log.Exitf("Unable to get cluster node_ipv6: %v", err)
	}
	peerIPv4, err := cfgIP(cfg, "cluster", "peer_ipv4")
	if err != nil {
		log.Exitf("Unable to get cluster peer_ipv4: %v", err)
	}
	peerIPv6, err := cfgIP(cfg, "cluster", "peer_ipv6")
	if err != nil {
		log.Exitf("Unable to get cluster peer_ipv6: %v", err)
	}

	// The default VRID may be overridden via the config file.
	vrid := config.DefaultEngineConfig().VRID
	if cfg.HasOption("cluster", "vrid") {
		id, err := cfg.GetInt("cluster", "vrid")
		if err != nil {
			log.Exitf("Unable to get VRID: %v", err)
		}
		if id < 1 || id > 255 {
			log.Exitf("Invalid VRID %d - must be between 1 and 255 inclusive", id)
		}
		vrid = uint8(id)
	}

	// Optional primary, secondary and tertiary configuration servers.
	configServers := make([]string, 0)
	for _, level := range []string{"primary", "secondary", "tertiary"} {
		if server := cfgOpt(cfg, "config_server", level); server != "" {
			configServers = append(configServers, server)
		}
	}
	if len(configServers) == 0 {
		configServers = config.DefaultEngineConfig().ConfigServers
	}

	nodeInterface := config.DefaultEngineConfig().NodeInterface
	if opt := cfgOpt(cfg, "interface", "node"); opt != "" {
		nodeInterface = opt
	}
	lbInterface := config.DefaultEngineConfig().LBInterface
	if opt := cfgOpt(cfg, "interface", "lb"); opt != "" {
		lbInterface = opt
	}

	// Additional anycast addresses.
	serviceAnycastIPv4 := config.DefaultEngineConfig().ServiceAnycastIPv4
	serviceAnycastIPv6 := config.DefaultEngineConfig().ServiceAnycastIPv6
	if cfg.HasSection("extra_service_anycast") {
		opts, err := cfg.GetOptions("extra_service_anycast")
		if err != nil {
			log.Exitf("Unable to get extra_service_anycast options: %v", err)
		}
		for _, opt := range opts {
			ip, err := cfgIP(cfg, "extra_service_anycast", opt)
			if err != nil {
				log.Exitf("Unable to get extra_service_anycast option %q: %v", opt, err)
			}
			if !seesaw.IsAnycast(ip) {
				log.Exitf("%q is not an anycast address", ip)
			}
			if ip.To4() != nil {
				serviceAnycastIPv4 = append(serviceAnycastIPv4, ip)
			} else {
				serviceAnycastIPv6 = append(serviceAnycastIPv6, ip)
			}
		}
	}

	// Override some of the defaults.
	engineCfg := config.DefaultEngineConfig()
	engineCfg.AnycastEnabled = anycastEnabled
	engineCfg.ConfigFile = *configFile
	engineCfg.ConfigServers = configServers
	engineCfg.ClusterFile = *clusterFile
	engineCfg.ClusterName = clusterName
	engineCfg.ClusterVIP.IPv4Addr = clusterVIPv4
	engineCfg.ClusterVIP.IPv6Addr = clusterVIPv6
	engineCfg.LBInterface = lbInterface
	engineCfg.NCCSocket = *nccSocket
	engineCfg.Node.IPv4Addr = nodeIPv4
	engineCfg.Node.IPv6Addr = nodeIPv6
	engineCfg.NodeInterface = nodeInterface
	engineCfg.Peer.IPv4Addr = peerIPv4
	engineCfg.Peer.IPv6Addr = peerIPv6
	engineCfg.ServiceAnycastIPv4 = serviceAnycastIPv4
	engineCfg.ServiceAnycastIPv6 = serviceAnycastIPv6
	engineCfg.SocketPath = *socketPath
	engineCfg.VRID = vrid

	// Gentlemen, start your engines...
	engine := engine.NewEngine(&engineCfg)
	server.ShutdownHandler(engine)
	server.ServerRunDirectory("engine", 0, 0)

	// TODO(jsing): Drop privileges before starting engine.
	engine.Run()
}