// materializeFile takes a physical file or stream (named pipe, user input, // ...) and returns an io.Reader and the number of bytes that can be read // from it. func materializeFile(log *logging.Logger, f *os.File) (io.Reader, int64, error) { fi, err := f.Stat() if err != nil { return nil, 0, err } // If the file is actually a char device (like user typed input) // or a named pipe (like a streamed in file), buffer it up. // // When uploading a file, you need to either explicitly set the // Content-Length header or send a chunked request. Since the // github upload server doesn't accept chunked encoding, we have // to set the size of the file manually. Since a stream doesn't have a // predefined length, it's read entirely into a byte buffer. if fi.Mode()&(os.ModeCharDevice|os.ModeNamedPipe) == 1 { log.Debug("input was a stream, buffering up") var buf bytes.Buffer n, err := buf.ReadFrom(f) if err != nil { return nil, 0, errors.New("req: could not buffer up input stream: " + err.Error()) } return &buf, n, err } // We know the os.File is most likely an actual file now. n, err := GetFileSize(f) return f, n, err }
func LatestRelease(log *logging.Logger, user, repo, token string) (*Release, error) { // If latestReleaseApi DOESN'T give an error, return the release. if latestRelease, err := latestReleaseApi(log, user, repo, token); err == nil { return latestRelease, nil } // The enterprise api doesnt support the latest release endpoint. Get // all releases and compare the published date to get the latest. releases, err := Releases(log, user, repo, token) if err != nil { return nil, err } var latestRelIndex = -1 maxDate := time.Time{} for i, release := range releases { if relDate := *release.Published; relDate.After(maxDate) { maxDate = relDate latestRelIndex = i } } if latestRelIndex == -1 { return nil, fmt.Errorf("could not find the latest release") } log.Debugf("Scanning %d releases, latest release is '%s'", len(releases), releases[latestRelIndex]) return &releases[latestRelIndex], nil }
func initMakefile(log *log.Logger, projectDir, projectType string) error { if projectType != ProjectTypeGo { return nil } path := filepath.Join(projectDir, makefilePath) if info, err := os.Stat(path); os.IsNotExist(err) { log.Infof("Creating %s", makefilePath) options := struct { GoVersion string }{ GoVersion: "1.7.3-alpine", } t, err := template.New("makefile").Parse(initialMakefile) if err != nil { return maskAny(err) } buffer := &bytes.Buffer{} if err := t.Execute(buffer, options); err != nil { return maskAny(err) } if err := ioutil.WriteFile(path, buffer.Bytes(), 0644); err != nil { return maskAny(err) } return nil } else if err != nil { return maskAny(err) } else if info.IsDir() { return maskAny(fmt.Errorf("%s must be a file", path)) } else { log.Debugf("%s already initialized in %s", gitIgnorePath, projectDir) return nil } }
func NewForwardInput(logger *logging.Logger, bind string, port Port) (*ForwardInput, error) { _codec := codec.MsgpackHandle{} _codec.MapType = reflect.TypeOf(map[string]interface{}(nil)) _codec.RawToString = false addr, err := net.ResolveTCPAddr("tcp", bind) if err != nil { logger.Error("%s", err.Error()) return nil, err } listener, err := net.ListenTCP("tcp", addr) if err != nil { logger.Error("%s", err.Error()) return nil, err } return &ForwardInput{ port: port, logger: logger, bind: bind, listener: listener, codec: &_codec, clients: make(map[*net.TCPConn]*forwardClient), clientsMtx: sync.Mutex{}, entries: 0, wg: sync.WaitGroup{}, acceptChan: make(chan *net.TCPConn), shutdownChan: make(chan struct{}), isShuttingDown: uintptr(0), }, nil }
/* create a new request that sends the auth token */ func NewAuthRequest(log *logging.Logger, method, url, bodyType, token string, headers map[string]string, body io.Reader) (*http.Request, error) { log.Debugf("creating request: %s %s %s %s", method, url, bodyType, token) var n int64 // content length var err error if f, ok := body.(*os.File); ok { // Retrieve the content-length and buffer up if necessary. body, n, err = materializeFile(log, f) if err != nil { return nil, err } } req, err := http.NewRequest(method, url, body) if err != nil { return nil, err } if n != 0 { log.Debugf("setting content-length to '%d'", n) req.ContentLength = n } if bodyType != "" { req.Header.Set("Content-Type", bodyType) } req.Header.Set("Authorization", fmt.Sprintf("token %s", token)) for k, v := range headers { req.Header.Set(k, v) } return req, nil }
func UpdateAllMachines(flags *UpdateFlags, log *logging.Logger) error { // Get all members members, err := flags.GetClusterMembers(log) if err != nil { return maskAny(err) } // Pull image on all machines log.Infof("Pulling gluon image on %d machines", len(members)) var pullGroup errgroup.Group for _, m := range members { m := m pullGroup.Go(func() error { return maskAny(pullImage(m, *flags, log)) }) } if err := pullGroup.Wait(); err != nil { return maskAny(err) } // Update all machines, one at a time for index, m := range members { if index > 0 { log.Infof("Waiting %s...", flags.MachineDelay) time.Sleep(flags.MachineDelay) } if err := updateMachine(m, *flags, log); err != nil { return maskAny(err) } } return nil }
// HTTP logging handler func LoggingHandler(inner http.Handler, log *logging.Logger) http.Handler{ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request){ sw := MakeLogger(w) inner.ServeHTTP(sw, r) log.Info(buildCommonLogLine(r, *r.URL, time.Now(), sw.Status(), sw.Size())) }) }
// Get executes a `go get` with a cache support. func Get(log *log.Logger, flags *GetFlags) error { // Check GOPATH if gopath == "" { return maskAny(errors.New("Specify GOPATH")) } gopathDir := strings.Split(gopath, string(os.PathListSeparator))[0] // Get cache dir cachedir, cacheIsValid, err := cache.Dir(flags.Package, cacheValid) if err != nil { return maskAny(err) } if !cacheIsValid { // Cache has become invalid log.Info(updating("Refreshing cache of %s"), flags.Package) // Execute `go get` towards the cache directory if err := runGoGet(log, flags.Package, cachedir); err != nil { return maskAny(err) } } // Sync with local gopath if err := os.MkdirAll(gopathDir, 0777); err != nil { return maskAny(err) } if err := util.ExecPrintError(nil, "rsync", "-a", filepath.Join(cachedir, srcDir), gopathDir); err != nil { return maskAny(err) } return nil }
// Execute a given command without waiting for its result. func ExecDetached(log *log.Logger, cmdName string, arguments ...string) error { if log != nil { log.Debug("Running %s %v", cmdName, arguments) } cmd := exec.Command(cmdName, arguments...) return cmd.Start() }
func NewRateLimiter(cfg config.RateLimiting, red *redis.Pool, logger *logging.Logger) (RateLimitingMiddleware, error) { t := new(RedisSimpleRateThrottler) t.burstSize = int64(cfg.Burst) t.requestsPerSecond = int64(cfg.RequestsPerSecond) t.redisPool = red t.logger = logger if w, err := time.ParseDuration(cfg.Window); err != nil { return nil, err } else { t.window = w } logger.Info("Initialize rate limiter (burst size %d)", t.burstSize) return t, nil }
func initGitIgnore(log *log.Logger, projectDir string) error { path := filepath.Join(projectDir, gitIgnorePath) if info, err := os.Stat(path); os.IsNotExist(err) { log.Infof("Creating %s", gitIgnorePath) if err := ioutil.WriteFile(path, []byte(initialGitIgnore), 0644); err != nil { return maskAny(err) } return nil } else if err != nil { return maskAny(err) } else if info.IsDir() { return maskAny(fmt.Errorf("%s must be a file", path)) } else { log.Debugf("%s already initialized in %s", gitIgnorePath, projectDir) return nil } }
// SetupLOG sets up logger with the correct parameters for the whole cilium architecture. func SetupLOG(logger *l.Logger, logLevel string) { hostname, _ := os.Hostname() fileFormat := l.MustStringFormatter( `%{time:` + RFC3339Milli + `} ` + hostname + ` %{level:.4s} %{id:03x} %{shortfunc} > %{message}`, ) level, err := l.LogLevel(logLevel) if err != nil { logger.Fatal(err) } backend := l.NewLogBackend(os.Stderr, "", 0) oBF := l.NewBackendFormatter(backend, fileFormat) backendLeveled := l.SetBackend(oBF) backendLeveled.SetLevel(level, "") logger.SetBackend(backendLeveled) }
func runRemoteCommand(member service.ClusterMember, userName string, log *logging.Logger, command, stdin string, quiet bool) (string, error) { hostAddress := member.ClusterIP cmd := exec.Command("ssh", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", userName+"@"+hostAddress, command) var stdOut, stdErr bytes.Buffer cmd.Stdout = &stdOut cmd.Stderr = &stdErr if stdin != "" { cmd.Stdin = strings.NewReader(stdin) } if err := cmd.Run(); err != nil { if !quiet { log.Errorf("SSH failed: %s %s", cmd.Path, strings.Join(cmd.Args, " ")) } return "", errgo.NoteMask(err, stdErr.String()) } out := stdOut.String() out = strings.TrimSuffix(out, "\n") return out, nil }
func GithubGet(log *logging.Logger, uri string, v interface{}) error { resp, err := http.Get(ApiURL() + uri) if resp != nil { defer resp.Body.Close() } if err != nil { return fmt.Errorf("could not fetch releases, %v", err) } log.Debugf("GET %s -> %v", ApiURL()+uri, resp) if resp.StatusCode != http.StatusOK { return fmt.Errorf("github did not response with 200 OK but with %v", resp.Status) } r := resp.Body if err = json.NewDecoder(r).Decode(v); err != nil { return fmt.Errorf("could not unmarshall JSON into Release struct, %v", err) } return nil }
func initGit(log *log.Logger, projectDir string) error { path := filepath.Join(projectDir, gitDirPath) if info, err := os.Stat(path); os.IsNotExist(err) { if err := util.ExecuteInDir(projectDir, func() error { output, err := util.Exec(log, "git", "init") if err != nil { log.Error(output) return maskAny(err) } return nil }); err != nil { return maskAny(err) } } else if err != nil { return maskAny(err) } else if !info.IsDir() { return maskAny(fmt.Errorf("%s must be a directory", path)) } else { log.Debugf("Git already initialized in %s", projectDir) } return nil }
func migrate(from, to migration.Backend, baseKey string, log *logging.Logger) error { keys, err := from.List(baseKey) if err != nil { return maskAny(err) } for _, key := range keys { key = path.Join(baseKey, key) log.Debugf("Migrating %s", key) value, err := from.Get(key) if err != nil { return maskAny(err) } if value != nil { if err := to.Set(key, value); err != nil { return maskAny(err) } } if err := migrate(from, to, key, log); err != nil { return maskAny(err) } } return nil }
// Diff executes `git diff a b` and returns the trimmed output.
// (The previous comment incorrectly described this as `git status a b`.)
// On failure the command output is logged (or printed when log is nil) and
// an empty string is returned together with the masked error.
func Diff(log *log.Logger, a, b string) (string, error) {
	args := []string{"diff",
		a,
		b,
	}
	if msg, err := util.Exec(log, cmdName, args...); err != nil {
		if log != nil {
			log.Error(msg)
		} else {
			fmt.Printf("%s\n", msg)
		}
		return "", maskAny(err)
	} else {
		return strings.TrimSpace(msg), nil
	}
}
// Execute a `git status` func Status(log *log.Logger, porcelain bool) (string, error) { args := []string{"status"} if porcelain { args = append(args, "--porcelain") } if msg, err := util.Exec(log, cmdName, args...); err != nil { if log != nil { log.Error(msg) } else { fmt.Printf("%s\n", msg) } return "", maskAny(err) } else { return strings.TrimSpace(msg), nil } }
// updateMachine updates the gluon installation on a single cluster member:
// it extracts the gluon binary from the configured image, records the image
// name on disk, restarts the gluon service, and optionally reboots the
// machine and waits for it to come back up. For non-etcd-proxy (core)
// machines that were rebooted, the operator is asked for confirmation
// before continuing.
func updateMachine(member service.ClusterMember, flags UpdateFlags, log *logging.Logger) error {
	askConfirmation := flags.AskConfirmation
	log.Infof("Updating %s...", member.ClusterIP)

	// Extract gluon binary (the image copies it into /destination on run).
	cmd := fmt.Sprintf("docker run --rm -v /home/core/bin/:/destination/ %s", flags.GluonImage)
	if _, err := runRemoteCommand(member, flags.UserName, log, cmd, "", false); err != nil {
		return maskAny(err)
	}

	// Update image version on disk (image name is piped into tee via stdin).
	if _, err := runRemoteCommand(member, flags.UserName, log, "sudo tee /etc/pulcy/gluon-image", flags.GluonImage, false); err != nil {
		return maskAny(err)
	}

	// Setup new gluon version.
	if _, err := runRemoteCommand(member, flags.UserName, log, "sudo systemctl restart gluon", "", false); err != nil {
		return maskAny(err)
	}

	// Reboot if needed.
	if flags.Reboot {
		log.Infof("Rebooting %s...", member.ClusterIP)
		// The SSH session is expected to die during `reboot -f`, so the
		// error is deliberately ignored (quiet=true suppresses logging).
		runRemoteCommand(member, flags.UserName, log, "sudo reboot -f", "", true)
		time.Sleep(time.Second * 15)
		if err := waitUntilMachineUp(member, flags, log); err != nil {
			return maskAny(err)
		}
		if !member.EtcdProxy {
			log.Warningf("Core machine %s is back up, check services", member.ClusterIP)
			askConfirmation = true
		} else {
			log.Infof("Machine %s is back up", member.ClusterIP)
		}
	}
	if askConfirmation {
		confirm("Can we continue?")
	}
	return nil
}
// Vendor fetches flags.Package into the cache and rsyncs the result into
// flags.VendorDir, excluding .git metadata.
// (The previous comment was copy-pasted from Get.)
func Vendor(log *log.Logger, flags *VendorFlags) error {
	// Get cache dir. NOTE(review): the 1ms validity presumably forces the
	// cache to always be considered stale — confirm cache.Dir semantics.
	cachedir, _, err := cache.Dir(flags.Package, time.Millisecond)
	if err != nil {
		return maskAny(err)
	}

	// Cache has become invalid
	log.Info(updating("Fetching %s"), flags.Package)

	// Execute `go get` towards the cache directory
	if err := runGoGet(log, flags.Package, cachedir); err != nil {
		return maskAny(err)
	}

	// Sync with vendor dir
	if err := os.MkdirAll(flags.VendorDir, 0777); err != nil {
		return maskAny(err)
	}
	if err := util.ExecPrintError(nil, "rsync", "--exclude", ".git", "-a", filepath.Join(cachedir, srcDir)+"/", flags.VendorDir); err != nil {
		return maskAny(err)
	}
	return nil
}
// Get ensures that flags.Folder contains an up to date copy of flags.RepoUrl checked out to flags.Version.
//
// Behavior overview (from the code below):
//   - If AllowLink is set and a sibling directory with the same remote
//     origin exists, a relative symlink is created instead of a clone.
//   - Otherwise the repo is cloned into a cache dir (when the cache is
//     stale) and rsynced into flags.Folder if that folder does not exist.
//   - With an empty Version the default branch is pulled when the local
//     commit lags the remote; with a Version set, that tag is fetched and
//     checked out, and a newer remote tag only produces a warning.
//
// NOTE(review): this function chdirs into flags.Folder and does not restore
// the working directory before returning.
func Get(log *log.Logger, flags *Flags) error {
	// Get cache dir
	cachedir, cacheIsValid, err := cache.Dir(flags.RepoUrl, 0)
	if err != nil {
		return maskAny(err)
	}

	// Expand folder
	flags.Folder, err = filepath.Abs(flags.Folder)
	if err != nil {
		return maskAny(err)
	}

	// Get current folder (error deliberately ignored; wd is only used for
	// sibling lookup and for making log paths relative)
	wd, _ := os.Getwd()

	linked := false
	if flags.AllowLink {
		if info, err := util.ParseVCSURL(flags.RepoUrl); err == nil {
			siblingPath := filepath.Join(filepath.Dir(wd), info.Name)
			if _, err := os.Stat(siblingPath); err == nil {
				//log.Infof("Sibling folder %s exists", siblingPath)
				// NOTE(review): the error returned by ExecuteInDir (and any
				// failure inside the closure, e.g. a failed symlink) is
				// ignored here; linking is best-effort and falls through to
				// the normal clone path below.
				util.ExecuteInDir(siblingPath, func() error {
					remote, err := git.GetRemoteOriginUrl(nil)
					if err != nil {
						return maskAny(err)
					}
					if remote == flags.RepoUrl {
						if relPath, err := filepath.Rel(filepath.Dir(flags.Folder), siblingPath); err == nil {
							if err := os.Symlink(relPath, flags.Folder); err == nil {
								log.Infof("Linked -> %s", siblingPath)
								linked = true
								if vendorDir, err := golang.GetVendorDir(siblingPath); err != nil {
									return maskAny(err)
								} else {
									// Flatten sibling in copy-only mode
									if err := golang.Flatten(log, &golang.FlattenFlags{
										VendorDir: vendorDir,
										NoRemove:  true,
									}); err != nil {
										return maskAny(err)
									}
								}
							}
						}
					}
					return nil
				})
			}
		}
	}
	if linked {
		return nil
	}

	// Fill cache if needed
	cloned := false
	if !cacheIsValid {
		// Clone repo into cachedir
		if err := git.Clone(log, flags.RepoUrl, cachedir); err != nil {
			return maskAny(err)
		}
		cloned = true
	}

	// Make sure a clone exists
	_, err = os.Stat(flags.Folder)
	if os.IsNotExist(err) {
		// Sync into target folder
		if err := os.MkdirAll(flags.Folder, 0777); err != nil {
			return maskAny(err)
		}
		if err := util.ExecPrintError(nil, "rsync", "-a", appendDirSep(cachedir), appendDirSep(flags.Folder)); err != nil {
			return maskAny(err)
		}
	}

	// Change dir to folder
	if err := os.Chdir(flags.Folder); err != nil {
		return maskAny(err)
	}

	// Specific version needed?
	if flags.Version == "" {
		// Get latest version. If we just cloned, the cache is already at
		// the latest commit and nothing more is needed.
		if !cloned {
			localCommit, err := git.GetLatestLocalCommit(nil, flags.Folder, defaultGetBranch, false)
			if err != nil {
				return maskAny(err)
			}
			remoteCommit, err := git.GetLatestRemoteCommit(nil, flags.RepoUrl, defaultGetBranch)
			if err != nil {
				return maskAny(err)
			}
			if localCommit != remoteCommit {
				if err := git.Pull(log, "origin"); err != nil {
					return maskAny(err)
				}
			} else {
				log.Info(allGood("%s is up to date\n"), makeRel(wd, flags.Folder))
			}
		}
	} else {
		// Get latest (local) version
		localVersion, err := git.GetLatestTag(nil, flags.Folder)
		if err != nil {
			return maskAny(err)
		}
		if localVersion != flags.Version {
			// Checkout requested version
			if cloned {
				log.Info(updating("Checking out version %s in %s.\n"), flags.Version, makeRel(wd, flags.Folder))
			} else {
				log.Info(updating("Found version %s, wanted %s. Updating %s now.\n"), localVersion, flags.Version, makeRel(wd, flags.Folder))
			}
			// Fetch latest changes
			if err := git.Fetch(log, "origin"); err != nil {
				return maskAny(err)
			}
			if err := git.FetchTags(log, "origin"); err != nil {
				return maskAny(err)
			}
			// Checkout intended version
			if err := git.Checkout(log, flags.Version); err != nil {
				return maskAny(err)
			}
		} else {
			log.Info(allGood("Found correct version. No changes needed in %s\n"), makeRel(wd, flags.Folder))
		}
		// Get latest remote version; only warn (do not update) when the
		// remote has a newer tag than the one requested.
		remoteVersion, err := git.GetLatestRemoteTag(nil, flags.RepoUrl)
		if err != nil {
			return maskAny(err)
		}
		if remoteVersion != flags.Version {
			log.Warning(attention("Update available for %s: '%s' => '%s'\n"), makeRel(wd, flags.Folder), flags.Version, remoteVersion)
		}
	}
	return nil
}
func scanJournals(logger *logging.Logger, pathPrefix string, pathSuffix string) (map[string]*FileJournal, error) { journals := make(map[string]*FileJournal) dirname, basename := filepath.Split(pathPrefix) if dirname == "" { dirname = "." } d, err := os.OpenFile(dirname, os.O_RDONLY, 0) if err != nil { return nil, err } defer d.Close() finfo, err := d.Stat() if err != nil { return nil, err } if !finfo.IsDir() { return nil, errors.New(fmt.Sprintf("%s is not a directory", dirname)) } for { files_, err := d.Readdir(100) if err == io.EOF { break } else if err != nil { return nil, err } for _, finfo := range files_ { file := finfo.Name() if !strings.HasSuffix(file, pathSuffix) { continue } variablePortion := file[len(basename) : len(file)-len(pathSuffix)] info, err := DecodeJournalPath(variablePortion) if err != nil { logger.Warning("Unexpected file under the designated directory space (%s) - %s", dirname, file) continue } journalProto, ok := journals[info.Key] if !ok { journalProto = &FileJournal{ key: info.Key, chunks: FileJournalChunkDequeue{nil, nil, 0, sync.Mutex{}}, writer: nil, } journals[info.Key] = journalProto } chunk := &FileJournalChunk{ head: FileJournalChunkDequeueHead{nil, journalProto.chunks.last}, container: &journalProto.chunks, Type: info.Type, Path: pathPrefix + info.VariablePortion + pathSuffix, TSuffix: info.TSuffix, Timestamp: info.Timestamp, UniqueId: info.UniqueId, Size: finfo.Size(), refcount: 1, } if journalProto.chunks.last == nil { journalProto.chunks.first = chunk } else { journalProto.chunks.last.head.next = chunk } journalProto.chunks.last = chunk journalProto.chunks.count += 1 } } for _, journalProto := range journals { sortChunksByTimestamp(&journalProto.chunks) err := validateChunks(&journalProto.chunks) if err != nil { return nil, err } } return journals, nil }
func buildDispatcher( startup *StartupConfig, cfg *config.Configuration, consul *api.Client, handler *proxy.ProxyHandler, rpool *redis.Pool, logger *logging.Logger, lastIndex uint64, ) (dispatcher.Dispatcher, uint64, error) { var disp dispatcher.Dispatcher var err error var meta *api.QueryMeta var configs api.KVPairs var localCfg config.Configuration = *cfg var appCfgs map[string]config.Application = make(map[string]config.Application) dispLogger := logging.MustGetLogger("dispatch") switch startup.DispatchingMode { case "path": disp, err = dispatcher.NewPathBasedDispatcher(&localCfg, dispLogger, handler) case "host": disp, err = dispatcher.NewHostBasedDispatcher(&localCfg, dispLogger, handler) default: err = fmt.Errorf("unsupported dispatching mode: '%s'", startup.DispatchingMode) } if err != nil { return nil, 0, fmt.Errorf("error while creating proxy builder: %s", err) } applicationConfigBase := startup.ConsulBaseKey + "/applications" queryOpts := api.QueryOptions{ WaitIndex: lastIndex, WaitTime: 30 * time.Minute, } logger.Info("loading gateway config from KV %s", startup.ConsulBaseKey) configs, meta, err = consul.KV().List(startup.ConsulBaseKey, &queryOpts) if err != nil { return nil, 0, err } for _, cfgKVPair := range configs { logger.Debug("found KV pair with key '%s'", cfgKVPair.Key) switch strings.TrimPrefix(startup.ConsulBaseKey+"/", cfgKVPair.Key) { case "authentication": if err := json.Unmarshal(cfgKVPair.Value, &localCfg.Authentication); err != nil { return nil, meta.LastIndex, fmt.Errorf("JSON error on consul KV pair '%s': %s", cfgKVPair.Key, err) } case "rate_limiting": if err := json.Unmarshal(cfgKVPair.Value, &localCfg.RateLimiting); err != nil { return nil, meta.LastIndex, fmt.Errorf("JSON error on consul KV pair '%s': %s", cfgKVPair.Key, err) } } if strings.HasPrefix(cfgKVPair.Key, applicationConfigBase) { var appCfg config.Application if err := json.Unmarshal(cfgKVPair.Value, &appCfg); err != nil { return nil, meta.LastIndex, fmt.Errorf("JSON error 
on consul KV pair '%s': %s", cfgKVPair.Key, err) } name := strings.TrimPrefix(cfgKVPair.Key, applicationConfigBase+"/") appCfgs[name] = appCfg } } authHandler, err := auth.NewAuthDecorator(&localCfg.Authentication, rpool, logging.MustGetLogger("auth"), startup.UiDir) if err != nil { return nil, meta.LastIndex, err } rlim, err := ratelimit.NewRateLimiter(localCfg.RateLimiting, rpool, logging.MustGetLogger("ratelimiter")) if err != nil { logger.Fatal(fmt.Sprintf("error while configuring rate limiting: %s", err)) } cch := cache.NewCache(4096) // Order is important here! Behaviours will be called in LIFO order; // behaviours that are added last will be called first! disp.AddBehaviour(dispatcher.NewCachingBehaviour(cch)) disp.AddBehaviour(dispatcher.NewAuthenticationBehaviour(authHandler)) disp.AddBehaviour(dispatcher.NewRatelimitBehaviour(rlim)) for name, appCfg := range appCfgs { logger.Info("registering application '%s' from Consul", name) if err := disp.RegisterApplication(name, appCfg); err != nil { return nil, meta.LastIndex, err } } for name, appCfg := range localCfg.Applications { logger.Info("registering application '%s' from local config", name) if err := disp.RegisterApplication(name, appCfg); err != nil { return nil, meta.LastIndex, err } } if err = disp.Initialize(); err != nil { return nil, meta.LastIndex, err } return disp, meta.LastIndex, nil }
// Release runs the full release pipeline for the project in the current
// directory: it detects build tooling (Makefile/Gruntfile/Dockerfile),
// bumps the semver version according to flags.ReleaseType (major, minor,
// patch, or dev which leaves the version untouched), builds the project,
// builds/tags/pushes docker images, writes and commits the new version,
// tags it in git, creates a github release, and finally pushes commits and
// tags. "dev" releases skip all repo-state checks, version writes, tagging
// and pushing.
func Release(log *log.Logger, flags *Flags) error {
	// Detect environment
	hasMakefile := false
	isDev := flags.ReleaseType == "dev"
	if _, err := os.Stat(makefileFile); err == nil {
		hasMakefile = true
		log.Info("Found %s", makefileFile)
	}
	hasGruntfile := false
	if _, err := os.Stat(gruntfileFile); err == nil {
		hasGruntfile = true
		log.Info("Found %s", gruntfileFile)
	}
	hasDockerfile := false
	if _, err := os.Stat(dockerfileFile); err == nil {
		hasDockerfile = true
		log.Info("Found %s", dockerfileFile)
	}

	// Read the current version and name
	info, err := GetProjectInfo()
	if err != nil {
		return maskAny(err)
	}
	log.Info("Found old version %s", info.Version)
	version, err := semver.NewVersion(info.Version)
	if err != nil {
		return maskAny(err)
	}

	// Check repository state (must be clean for real releases)
	if !isDev {
		if err := checkRepoClean(log, info.GitBranch); err != nil {
			return maskAny(err)
		}
	}

	// Bump version
	switch flags.ReleaseType {
	case "major":
		version.Major++
		version.Minor = 0
		version.Patch = 0
	case "minor":
		version.Minor++
		version.Patch = 0
	case "patch":
		version.Patch++
	case "dev":
		// Do not change version
	default:
		return errgo.Newf("Unknown release type %s", flags.ReleaseType)
	}
	// Strip any "+metadata" (e.g. the "+git" working marker) for the release.
	version.Metadata = ""

	// Write new release version
	if !isDev {
		if err := writeVersion(log, version.String(), info.Manifests, info.GradleConfigFile, false); err != nil {
			return maskAny(err)
		}
	}

	// Build project
	if hasGruntfile && !info.NoGrunt {
		if _, err := os.Stat(nodeModulesFolder); os.IsNotExist(err) {
			log.Info("Folder %s not found", nodeModulesFolder)
			if err := util.ExecPrintError(log, "npm", "install"); err != nil {
				return maskAny(err)
			}
		}
		if err := util.ExecPrintError(log, "grunt", "build-release"); err != nil {
			return maskAny(err)
		}
	}
	if hasMakefile {
		// Clean first
		if !isDev {
			if err := util.ExecPrintError(log, "make", info.Targets.CleanTarget); err != nil {
				return maskAny(err)
			}
		}

		// Now build
		makeArgs := []string{}
		if info.Targets.ReleaseTarget != "" {
			makeArgs = append(makeArgs, info.Targets.ReleaseTarget)
		}
		if err := util.ExecPrintError(log, "make", makeArgs...); err != nil {
			return maskAny(err)
		}
	}
	if hasDockerfile {
		// Build docker images. Dev builds use a timestamp tag instead of
		// the semver version.
		tagVersion := version.String()
		if isDev {
			tagVersion = strings.Replace(time.Now().Format("2006-01-02-15-04-05"), "-", "", -1)
		}
		imageAndVersion := fmt.Sprintf("%s:%s", info.Image, tagVersion)
		imageAndMajorVersion := fmt.Sprintf("%s:%d", info.Image, version.Major)
		imageAndMinorVersion := fmt.Sprintf("%s:%d.%d", info.Image, version.Major, version.Minor)
		imageAndLatest := fmt.Sprintf("%s:latest", info.Image)
		buildTag := path.Join(info.Namespace, imageAndVersion)
		buildLatestTag := path.Join(info.Namespace, imageAndLatest)
		buildMajorVersionTag := path.Join(info.Namespace, imageAndMajorVersion)
		buildMinorVersionTag := path.Join(info.Namespace, imageAndMinorVersion)
		if err := util.ExecPrintError(log, "docker", "build", "--tag", buildTag, "."); err != nil {
			return maskAny(err)
		}
		if info.TagLatest {
			// Best-effort removal of the old tag; failure is ignored.
			util.ExecSilent(log, "docker", "rmi", buildLatestTag)
			if err := util.ExecPrintError(log, "docker", "tag", buildTag, buildLatestTag); err != nil {
				return maskAny(err)
			}
		}
		if info.TagMajorVersion && !isDev {
			// Best-effort removal of the old tag; failure is ignored.
			util.ExecSilent(log, "docker", "rmi", buildMajorVersionTag)
			if err := util.ExecPrintError(log, "docker", "tag", buildTag, buildMajorVersionTag); err != nil {
				return maskAny(err)
			}
		}
		if info.TagMinorVersion && !isDev {
			// Best-effort removal of the old tag; failure is ignored.
			util.ExecSilent(log, "docker", "rmi", buildMinorVersionTag)
			if err := util.ExecPrintError(log, "docker", "tag", buildTag, buildMinorVersionTag); err != nil {
				return maskAny(err)
			}
		}
		// Project-level registry overrides the command-line registry.
		registry := flags.DockerRegistry
		if info.Registry != "" {
			registry = info.Registry
		}
		namespace := info.Namespace
		if registry != "" || namespace != "" {
			// Push image to registry
			if err := docker.Push(log, imageAndVersion, registry, namespace); err != nil {
				return maskAny(err)
			}
			if info.TagLatest {
				// Push latest image to registry
				if err := docker.Push(log, imageAndLatest, registry, namespace); err != nil {
					return maskAny(err)
				}
			}
			if info.TagMajorVersion && !isDev {
				// Push major version image to registry
				if err := docker.Push(log, imageAndMajorVersion, registry, namespace); err != nil {
					return maskAny(err)
				}
			}
			if info.TagMinorVersion && !isDev {
				// Push minor version image to registry
				if err := docker.Push(log, imageAndMinorVersion, registry, namespace); err != nil {
					return maskAny(err)
				}
			}
		}
	}

	// Build succeeded, re-write new release version and commit
	if !isDev {
		if err := writeVersion(log, version.String(), info.Manifests, info.GradleConfigFile, true); err != nil {
			return maskAny(err)
		}

		// Tag version
		if err := git.Tag(log, version.String()); err != nil {
			return maskAny(err)
		}

		// Create github release (if needed)
		if err := createGithubRelease(log, version.String(), *info); err != nil {
			return maskAny(err)
		}

		// Update version to "+git" working version
		version.Metadata = "git"

		// Write new release version
		if err := writeVersion(log, version.String(), info.Manifests, info.GradleConfigFile, true); err != nil {
			return maskAny(err)
		}

		// Push changes
		if err := git.Push(log, "origin", false); err != nil {
			return maskAny(err)
		}

		// Push tags
		if err := git.Push(log, "origin", true); err != nil {
			return maskAny(err)
		}
	}
	return nil
}
// createGithubRelease creates a github release named v<version> for the
// current repository and uploads all assets listed in info.GithubAssets.
// It is a no-op when no assets are configured, and errors when the repo is
// not a git repository hosted on github. Local tags are refreshed from
// origin afterwards.
func createGithubRelease(log *log.Logger, version string, info ProjectInfo) error {
	// Are assets specified?
	if len(info.GithubAssets) == 0 {
		log.Debugf("No github-assets specified, no github release is created")
		return nil
	}
	// Check existence of all assets before doing anything remote.
	for _, asset := range info.GithubAssets {
		if _, err := os.Stat(asset.RelPath); err != nil {
			return maskAny(fmt.Errorf("Cannot stat asset '%s': %v", asset.RelPath, err))
		}
	}
	// Is the repository URL suitable for github releases?
	url, err := git.GetRemoteOriginUrl(log)
	if err != nil {
		return maskAny(err)
	}
	repoInfo, err := vcsurl.Parse(url)
	if err != nil {
		return maskAny(err)
	}
	if repoInfo.RepoHost != vcsurl.GitHub || repoInfo.VCS != vcsurl.Git {
		return maskAny(fmt.Errorf("Cannot create github-release because repository is not a git repo or not hosted on github"))
	}
	// Load github token
	token, err := github.LoadGithubToken()
	if err != nil {
		return maskAny(err)
	}
	gs := github.GithubService{
		Logger:     log,
		Token:      token,
		User:       repoInfo.Username,
		Repository: repoInfo.Name,
	}
	// Create github release
	relOpt := github.ReleaseCreate{
		TagName: version,
		Name:    fmt.Sprintf("v%s", version),
	}
	if err := gs.CreateRelease(relOpt); err != nil {
		return maskAny(err)
	}
	// Attach assets
	for _, asset := range info.GithubAssets {
		opt := github.UploadAssetOptions{
			TagName:  version,
			FileName: filepath.Base(asset.RelPath),
			Label:    asset.Label,
			Path:     asset.RelPath,
		}
		if err := gs.UploadAsset(opt); err != nil {
			return maskAny(err)
		}
	}
	// Update tags
	if err := git.FetchTags(log, "origin"); err != nil {
		return maskAny(err)
	}
	return nil
}