Example #1
File: runner.go Project: gwatts/dyndump
// actionRunner handles running an action which may take a while to complete
// providing progress bars and signal handling.
func actionRunner(cmd *cli.Cmd, action action) func() {
	cmd.Spec = "[--silent] [--no-progress] " + cmd.Spec
	silent := cmd.BoolOpt("silent", false, "Set to true to disable all non-error output")
	noProgress := cmd.BoolOpt("no-progress", false, "Set to true to disable the progress bar")

	return func() {
		var infoWriter io.Writer = os.Stderr
		var ticker <-chan time.Time

		if err := action.init(); err != nil {
			fail("Initialization failed: %v", err)
		}

		done, err := action.start(infoWriter)
		if err != nil {
			fail("Startup failed: %v", err)
		}

		var bar *pb.ProgressBar
		if !*silent && !*noProgress {
			ticker = time.Tick(statsFrequency)
			bar = action.newProgressBar()
			if bar != nil {
				bar.Output = os.Stderr
				bar.ShowSpeed = true
				bar.ManualUpdate = true
				bar.SetMaxWidth(78)
				bar.Start()
				bar.Update()
			}
		}
		if *silent {
			infoWriter = ioutil.Discard
		}

		sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGTERM, syscall.SIGINT) // SIGKILL cannot be caught, so registering it has no effect

	LOOP:
		for {
			select {
		case <-ticker:
			action.updateProgress(bar)
			if bar != nil { // newProgressBar may return nil
				bar.Update()
			}

			case <-sigchan:
			if bar != nil { // bar is nil in silent or no-progress mode
				bar.Finish()
			}
				fmt.Fprintf(os.Stderr, "\nAborting..")
				action.abort()
				<-done
				fmt.Fprintf(os.Stderr, "Aborted.\n")
				break LOOP

			case err := <-done:
				if err != nil {
					fail("Processing failed: %v", err)
				}
				break LOOP
			}
		}
		if bar != nil {
			bar.Finish()
		}

		if !*silent {
			action.printFinalStats(infoWriter)
		}
	}
}
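Note: the example above relies on pb's manual-update mode, in which the bar is only redrawn when Update() is called, here driven from a time.Tick channel. A minimal, self-contained sketch of that pattern follows; it assumes the pb package is gopkg.in/cheggaaa/pb.v1 (the import is not shown in the snippet) and the work loop is illustrative.

package main

import (
	"time"

	pb "gopkg.in/cheggaaa/pb.v1"
)

func main() {
	bar := pb.New(100)
	bar.ManualUpdate = true // pb will not redraw on its own in this mode
	bar.Start()

	tick := time.NewTicker(100 * time.Millisecond)
	defer tick.Stop()

	for i := 0; i < 100; i++ {
		bar.Increment()
		select {
		case <-tick.C:
			bar.Update() // redraw at most once per tick
		default:
		}
		time.Sleep(10 * time.Millisecond) // stand-in for real work
	}
	bar.Update()
	bar.Finish()
}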
Example #2
func HostMerge(List []Host, ShowBar bool) []string {
	count := 0
	filterList := []string{""}
	length := len(List)
	var bar *pb.ProgressBar
	if ShowBar {
		bar = pb.StartNew(length)
		bar.SetMaxWidth(80)
	}
	for _, Host := range List {
		length = len(filterList[count])
		if length == 0 {
			filterList[count] = Host.Hostname
		} else if length+Host.length() <= 255 { // length != 0 here; the zero case is handled above
			filterList[count] += "|"
			filterList[count] += Host.Hostname
		} else {
			count++
			filterList = append(filterList, Host.Hostname)
		}
		if ShowBar {
			bar.Increment()
			time.Sleep(time.Millisecond * 50)
		}
	}
	if ShowBar {
		bar.Finish()
	}
	return filterList
}
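The Host type used by HostMerge is not part of this snippet. A minimal definition consistent with the calls above (a Hostname field plus a length() method that accounts for the '|' separator inserted between merged hostnames) might look like the following; treat it as an assumption for illustration, not the project's actual type.

type Host struct {
	Hostname string
}

// length reports how many characters this host would add to an
// existing filter expression, including the '|' separator.
func (h Host) length() int {
	return len(h.Hostname) + 1
}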
Example #3
// CheckMetadata downloads the metadata about all of the files currently
// stored on Drive and compares it with the local cache.
func (gd *GDrive) CheckMetadata(filename string, report func(string)) error {
	idToFile, err := gd.getIdToFile(filename)
	if err != nil {
		return err
	}

	// This will almost certainly take a while, so put up a progress bar.
	var bar *pb.ProgressBar
	if !gd.quiet {
		bar = pb.New(len(idToFile))
		bar.ShowBar = true
		bar.ShowCounters = false
		bar.Output = os.Stderr
		bar.Prefix("Checking metadata cache: ")
		bar.Start()
	}

	err = gd.runQuery("trashed=false", func(f *drive.File) {
		if file, ok := idToFile[f.Id]; ok {
			df := newFile(f.Title, f)
			if !filesEqual(df, file) {
				report(fmt.Sprintf("%s: metadata mismatch.\nLocal: %+v\nDrive: %+v",
					file.Path, file, df))
			}
			if bar != nil {
				bar.Increment()
			}
			delete(idToFile, f.Id)
		} else {
			// It'd be preferable to have "sharedWithMe=false" included in
			// the query string above, but the combination of that with
			// "trashed=false" seems to lead to no results being returned.
			if !f.Shared {
				report(fmt.Sprintf("%s: found on Drive, not in local cache [%+v]",
					f.Title, f))
			}
		}
	})

	for _, f := range idToFile {
		report(fmt.Sprintf("%s: found in local cache, not on Drive [%+v]",
			f.Path, f))
	}

	if bar != nil {
		bar.Finish()
	}
	return nil
}
Example #4
// download a file with the HTTP/HTTPS protocol showing a progress bar. The destination file is
// always overwritten.
func download(rawurl string, destinationPath string) {
	tempDestinationPath := destinationPath + ".tmp"

	destination, err := os.Create(tempDestinationPath)
	if err != nil {
		log.Fatalf("Unable to open the destination file: %s", tempDestinationPath)
	}
	defer destination.Close()

	response, err := customGet(rawurl)
	if err != nil {
		log.Fatalf("Unable to open a connection to %s", rawurl)
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusOK {
		log.Fatalf("Unexpected HTTP response code. Wanted 200 but got %d", response.StatusCode)
	}

	var progressBar *pb.ProgressBar

	contentLength, err := strconv.Atoi(response.Header.Get("Content-Length"))
	if err == nil {
		progressBar = pb.New(contentLength) // Atoi already returns int
	} else {
		progressBar = pb.New(0)
	}
	defer progressBar.Finish()

	progressBar.ShowSpeed = true
	progressBar.SetRefreshRate(time.Millisecond * 1000)
	progressBar.SetUnits(pb.U_BYTES)
	progressBar.Start()

	writer := io.MultiWriter(destination, progressBar)

	if _, err := io.Copy(writer, response.Body); err != nil {
		log.Fatalf("Unable to download %s: %v", rawurl, err)
	}
	destination.Close()
	if err := os.Rename(tempDestinationPath, destinationPath); err != nil {
		log.Fatalf("Unable to move %s to %s: %v", tempDestinationPath, destinationPath, err)
	}
}
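Here io.MultiWriter counts bytes on the write side, which works because *pb.ProgressBar implements io.Writer. The same accounting can be done on the read side with bar.NewProxyReader, as Example #7 below does. A hedged sketch of the reader-side variant, assuming gopkg.in/cheggaaa/pb.v1 and illustrative parameter names:

import (
	"io"

	pb "gopkg.in/cheggaaa/pb.v1"
)

// copyWithProgress copies body to dst while a byte-unit bar advances
// on every read from the proxy reader.
func copyWithProgress(dst io.Writer, body io.Reader, size int64) error {
	bar := pb.New64(size).SetUnits(pb.U_BYTES)
	bar.Start()
	defer bar.Finish()
	_, err := io.Copy(dst, bar.NewProxyReader(body))
	return err
}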
Example #5
func runClusterBackup(args *docopt.Args) error {
	client, err := getClusterClient()
	if err != nil {
		return err
	}

	var bar *pb.ProgressBar
	var progress backup.ProgressBar
	if term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.ShowBar = false
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		progress = bar
	}

	var dest io.Writer = os.Stdout
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Create(filename)
		if err != nil {
			return err
		}
		defer f.Close()
		dest = f
	}

	fmt.Fprintln(os.Stderr, "Creating cluster backup...")

	if err := backup.Run(client, dest, progress); err != nil {
		return err
	}
	if bar != nil {
		bar.Finish()
	}
	fmt.Fprintln(os.Stderr, "Backup complete.")

	return nil
}
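The terminal check above keeps progress output out of redirected streams: the bar is only created when stderr is a TTY, and every later use is nil-guarded. A small sketch of that guard, using golang.org/x/term as a stand-in for whichever term package the project actually imports (an assumption, since the import is not shown):

import (
	"os"

	"golang.org/x/term"
	pb "gopkg.in/cheggaaa/pb.v1"
)

// newStderrBar returns a started byte-unit bar, or nil when stderr is
// not a terminal (for example, when output is piped to a file).
// Callers must nil-check before calling Increment or Finish.
func newStderrBar() *pb.ProgressBar {
	if !term.IsTerminal(int(os.Stderr.Fd())) {
		return nil
	}
	bar := pb.New(0)
	bar.SetUnits(pb.U_BYTES)
	bar.ShowSpeed = true
	bar.Output = os.Stderr
	bar.Start()
	return bar
}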
Example #6
// processSystemArchives processes archives for given system
func (h *Harvester) processSystemArchives(s *system.System, archives []string) error {
	var bar *pb.ProgressBar

	nb := len(archives)

	// extract archives
	if !s.Options.Quiet {
		fmt.Printf("[%s] Extracting %v archive(s)\n", s.Infos.Name, nb)

		if !s.Options.Debug {
			bar = pb.StartNew(nb)
			bar.ShowCounters = true
			bar.ShowPercent = false
			bar.ShowTimeLeft = true
			bar.SetMaxWidth(80)
		}
	}

	for _, archive := range archives {
		if !s.Options.Quiet && !s.Options.Debug {
			bar.Increment()
		}

		if err := s.ProcessArchive(archive, h.Options.Output); err != nil {
			return err
		}
	}

	if !s.Options.Quiet && !s.Options.Debug {
		bar.Finish()
		fmt.Printf("[%s] Processed %v files (skipped: %v)\n", s.Infos.Name, s.Processed, s.Skipped)
	}

	fmt.Printf("[%s] Selected %v games\n", s.Infos.Name, len(s.Games))

	return nil
}
Example #7
File: utils.go Project: cactus/rollcage
// Fetch http file url to destination dest, with or without progress.
func FetchHTTPFile(url string, dest string, progress bool) (err error) {
	gologit.Debugf("Creating file: %s\n", dest)
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()

	var r io.Reader

	gologit.Debugf("Fetching url: %s\n", url)
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("Server return non-200 status: %v", resp.Status)
	}

	msgPrefix := fmt.Sprintf("%s: ", path.Base(dest))
	var bar *pb.ProgressBar
	i, _ := strconv.Atoi(resp.Header.Get("Content-Length"))
	if i > 0 && progress {
		bar = pb.New(i).Prefix(msgPrefix).SetUnits(pb.U_BYTES)
		bar.ShowSpeed = true
		bar.RefreshRate = time.Millisecond * 700
		bar.ShowFinalTime = false
		bar.ShowTimeLeft = false
		bar.Start()
		defer bar.Finish()
		r = bar.NewProxyReader(resp.Body)
	} else {
		r = resp.Body
	}
	_, err = io.Copy(out, r)
	return err
}
Example #8
func (ctx *Context) Load() error {
	var bar *pb.ProgressBar
	if Verbose {
		log.Println("loading database")
	}
	db, err := LoadDB(ctx.DatabaseName, *decompress)
	if os.IsNotExist(err) {
		log.Printf("database not found")
		return nil
	} else if err != nil {
		log.Printf("error loading database: %s", err)
		return err
	}

	if Verbose {
		log.Println("loading teams")
		bar = pb.StartNew(len(db.Teams))
	}
	for _, team := range db.Teams {
		ctx.AddTeam(team)
		if Verbose {
			bar.Increment()
		}
	}
	if Verbose {
		bar.Finish()
	}

	if Verbose {
		log.Println("loading match history")
		bar = pb.StartNew(len(db.Outcomes))
	}
	loadedOutcomes := map[string]*Outcome{}
	for _, outcome := range db.Outcomes {
		loadedOutcomes[outcome.ID] = outcome
		if Verbose {
			bar.Increment()
		}
	}
	if Verbose {
		bar.Finish()
	}

	if Verbose {
		bar = pb.StartNew(len(db.Matches))
	}
	for _, match := range db.Matches {
		outcome, ok := loadedOutcomes[match.OutcomeID]
		if !ok {
			log.Panicf("corrupted history %q", match.ID)
		}
		ctx.AddMatch(match, outcome)
		if Verbose {
			bar.Increment()
		}
	}
	if Verbose {
		bar.Finish()
	}
	return nil
}
Example #9
func main() {
	clientID := flag.String("id", "", "Github client ID")
	clientSecret := flag.String("secret", "", "Github client secret")
	file := flag.String("file", "", "File containing the list of packages")
	output := flag.String("output", "gddofork.out", "Output file")
	progress := flag.Bool("progress", false, "Show a progress bar")
	flag.Parse()

	var auth *gddoexp.GithubAuth
	if (clientID != nil && *clientID != "") || (clientSecret != nil && *clientSecret != "") {
		if *clientID == "" || *clientSecret == "" {
			fmt.Println("to enable Gthub authentication, you need to inform the id and secret")
			flag.PrintDefaults()
			return
		}

		auth = &gddoexp.GithubAuth{
			ID:     *clientID,
			Secret: *clientSecret,
		}
	}

	var pkgs []database.Package
	var err error

	if file != nil && *file != "" {
		pkgs, err = readFromFile(*file)
	} else {
		pkgs, err = readFromStdin()
	}

	if err != nil {
		fmt.Println(err)
		return
	}

	o, err := os.OpenFile(*output, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		fmt.Println("error creating output file:", err)
		return
	}
	defer o.Close()

	log.SetOutput(o)
	log.Println("BEGIN")
	log.Printf("%d packages will be analyzed", len(pkgs))

	var progressBar *pb.ProgressBar
	if progress != nil && *progress {
		progressBar = pb.StartNew(len(pkgs))
	}

	var cache int

	for response := range gddoexp.AreFastForkPackages(pkgs, auth) {
		if progress != nil && *progress {
			progressBar.Increment()
		}

		if response.Cache {
			cache++
		}

		if response.Error != nil {
			log.Println(response.Error)
		} else if response.FastFork {
			log.Printf("package “%s” is a fast fork\n", response.Path)
			if progress != nil && !*progress {
				fmt.Println(response.Path)
			}
		} else {
			log.Printf("package “%s” is not a fast fork\n", response.Path)
		}
	}

	if progress != nil && *progress {
		progressBar.Finish()
	}

	log.Println("Cache hits:", cache)
	log.Println("END")
}
Example #10
// Synchronize a local directory hierarchy with Google Drive.
// localPath is the file or directory to start with, driveRoot is
// the directory into which the file/directory will be sent
func syncHierarchyUp(localPath string, driveRoot string, encrypt bool, trustTimes bool,
	maxSymlinkDepth int) int {
	if encrypt && key == nil {
		key = decryptEncryptionKey()
	}

	fileMappings, nUploadErrors := compileUploadFileTree(localPath, driveRoot,
		encrypt, trustTimes, maxSymlinkDepth)
	if len(fileMappings) == 0 {
		message("No files to be uploaded.")
		return 0
	}

	nBytesToUpload := int64(0)
	for _, info := range fileMappings {
		if !info.LocalFileInfo.IsDir() {
			nBytesToUpload += info.LocalFileInfo.Size()
		}
	}

	// Given the list of files to sync, first find all of the directories and
	// then either get or create a Drive folder for each one.
	directoryMappingMap := make(map[string]localToRemoteFileMapping)
	var directoryNames []string
	for _, localfile := range fileMappings {
		if localfile.LocalFileInfo.IsDir() {
			directoryNames = append(directoryNames, localfile.DrivePath)
			directoryMappingMap[localfile.DrivePath] = localfile
		}
	}

	// Now sort the directories by name, which ensures that the parent of each
	// directory has already been created if we need to create its children.
	sort.Strings(directoryNames)

	if len(directoryNames) > 0 {
		// Actually create/update the directories.
		var dirProgressBar *pb.ProgressBar
		if !quiet {
			dirProgressBar = pb.New(len(directoryNames))
			dirProgressBar.Output = os.Stderr
			dirProgressBar.Prefix("Directories: ")
			dirProgressBar.Start()
		}

		// Sync each of the directories, which serves to create any missing ones.
		for _, dirName := range directoryNames {
			file := directoryMappingMap[dirName]
			err := syncFileUp(file.LocalPath, file.LocalFileInfo, file.DrivePath,
				encrypt, dirProgressBar)
			if err != nil {
				// Errors creating directories are basically unrecoverable,
				// as they'll prevent us from later uploading any files in
				// them.
				printErrorAndExit(err)
			}
		}
		if dirProgressBar != nil {
			dirProgressBar.Finish()
		}
	}

	var fileProgressBar *pb.ProgressBar
	if !quiet {
		fileProgressBar = pb.New64(nBytesToUpload).SetUnits(pb.U_BYTES)
		fileProgressBar.Output = os.Stderr
		fileProgressBar.Prefix("Files: ")
		fileProgressBar.Start()
	}

	// Sort the files by size, small to large.
	sort.Sort(localToRemoteBySize(fileMappings))

	// The two indices uploadFrontIndex and uploadBackIndex point to the
	// range of elements in the fileMappings array that haven't yet been
	// uploaded.
	uploadFrontIndex := 0
	uploadBackIndex := len(fileMappings) - 1

	// First, upload any large files that will use the resumable upload
	// protocol using a single thread; more threads here doesn't generally
	// help improve bandwidth utilization and seems to make rate limit
	// errors from the Drive API more frequent...
	for ; uploadBackIndex >= 0; uploadBackIndex-- {
		if fileMappings[uploadBackIndex].LocalFileInfo.Size() < resumableUploadMinSize {
			break
		}

		fm := fileMappings[uploadBackIndex]
		if fm.LocalFileInfo.IsDir() {
			continue
		}

		if err := syncFileUp(fm.LocalPath, fm.LocalFileInfo, fm.DrivePath, encrypt,
			fileProgressBar); err != nil {
			addErrorAndPrintMessage(&nUploadErrors, fm.LocalPath, err)
		}
	}

	// Upload worker threads send a value over this channel when
	// they're done; the code that launches them waits for all of them
	// to do so before returning.
	doneChan := make(chan int, nWorkers)

	// Now that multiple threads are running, we need a mutex to protect
	// access to uploadFrontIndex and uploadBackIndex.
	var uploadIndexMutex sync.Mutex

	// All but one of the upload threads will grab files to upload starting
	// from the beginning of the fileMappings array, thus doing the smallest
	// files first; one thread starts from the back of the array, doing the
	// largest files first.  In this way, the large files help saturate the
	// available upload bandwidth and hide the fixed overhead of creating
	// the smaller files.
	uploadWorker := func(startFromFront bool) {
		for {
			uploadIndexMutex.Lock()
			if uploadFrontIndex > uploadBackIndex {
				// All files have been uploaded.
				debug.Printf("All files uploaded [%d,%d]; exiting",
					uploadFrontIndex, uploadBackIndex)
				uploadIndexMutex.Unlock()
				doneChan <- 1
				break
			}

			// Get the index into fileMappings for the next file this
			// worker should upload.
			var index int
			if startFromFront {
				index = uploadFrontIndex
				uploadFrontIndex++
			} else {
				index = uploadBackIndex
				uploadBackIndex--
			}
			uploadIndexMutex.Unlock()

			fm := fileMappings[index]
			if fm.LocalFileInfo.IsDir() {
				// Directories have already been taken care of.
				continue
			}

			err := syncFileUp(fm.LocalPath, fm.LocalFileInfo, fm.DrivePath, encrypt,
				fileProgressBar)
			if err != nil {
				atomic.AddInt32(&nUploadErrors, 1)
				fmt.Fprintf(os.Stderr, "\nskicka: %s: %v\n", fm.LocalPath, err)
			}
		}
	}

	// Launch the workers.
	for i := 0; i < nWorkers; i++ {
		// All workers except the first one start from the front of
		// the array.
		go uploadWorker(i != 0)
	}

	// Wait for all of the workers to finish.
	for i := 0; i < nWorkers; i++ {
		<-doneChan
	}
	if fileProgressBar != nil {
		fileProgressBar.Finish()
	}

	if nUploadErrors > 0 {
		fmt.Fprintf(os.Stderr, "skicka: %d files not uploaded due to errors. "+
			"This may be a transient failure; try uploading again.\n", nUploadErrors)
	}
	return int(nUploadErrors)
}
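The front/back index scheme above is the load-balancing core of this function: most workers drain small files from the front of the size-sorted slice while one worker drains large files from the back. A stripped-down sketch of just that queue, independent of Drive (names are illustrative):

import "sync"

// twoEndedQueue hands out indices over a size-sorted slice from both
// ends: fromFront consumers get the smallest remaining item, the rest
// get the largest.
type twoEndedQueue struct {
	mu          sync.Mutex
	front, back int
}

func newTwoEndedQueue(n int) *twoEndedQueue {
	return &twoEndedQueue{front: 0, back: n - 1}
}

// next returns the next index to process, or -1 once the two cursors
// have crossed and the queue is exhausted.
func (q *twoEndedQueue) next(fromFront bool) int {
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.front > q.back {
		return -1
	}
	if fromFront {
		i := q.front
		q.front++
		return i
	}
	i := q.back
	q.back--
	return i
}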
Example #11
File: export.go Project: imjorge/flynn
func runImport(args *docopt.Args, client controller.Client) error {
	jobs, err := strconv.Atoi(args.String["--jobs"])
	if err != nil {
		return err
	}
	var src io.Reader = os.Stdin
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf("error opening export file: %s", err)
		}
		defer f.Close()
		src = f
	}
	tr := tar.NewReader(src)

	var (
		app         *ct.App
		release     *ct.Release
		artifacts   []*ct.Artifact
		formation   *ct.Formation
		routes      []router.Route
		legacySlug  io.Reader
		dockerImage struct {
			config struct {
				Tag string `json:"tag"`
			}
			archive io.Reader
		}
		pgDump     io.Reader
		mysqlDump  io.Reader
		uploadSize int64
	)
	numResources := 0
	numRoutes := 1
	layers := make(map[string]io.Reader)

	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("error reading export tar: %s", err)
		}

		filename := path.Base(header.Name)
		if strings.HasSuffix(filename, ".layer") {
			f, err := ioutil.TempFile("", "flynn-layer-")
			if err != nil {
				return fmt.Errorf("error creating layer tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading %s: %s", header.Name, err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking layer tempfile: %s", err)
			}
			layers[strings.TrimSuffix(filename, ".layer")] = f
			uploadSize += header.Size
			continue
		}

		switch filename {
		case "app.json":
			app = &ct.App{}
			if err := json.NewDecoder(tr).Decode(app); err != nil {
				return fmt.Errorf("error decoding app: %s", err)
			}
			app.ID = ""
		case "release.json":
			release = &ct.Release{}
			if err := json.NewDecoder(tr).Decode(release); err != nil {
				return fmt.Errorf("error decoding release: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = nil
		case "artifacts.json":
			if err := json.NewDecoder(tr).Decode(&artifacts); err != nil {
				return fmt.Errorf("error decoding artifacts: %s", err)
			}
		case "formation.json":
			formation = &ct.Formation{}
			if err := json.NewDecoder(tr).Decode(formation); err != nil {
				return fmt.Errorf("error decoding formation: %s", err)
			}
			formation.AppID = ""
			formation.ReleaseID = ""
		case "routes.json":
			if err := json.NewDecoder(tr).Decode(&routes); err != nil {
				return fmt.Errorf("error decoding routes: %s", err)
			}
			for _, route := range routes {
				route.ID = ""
				route.ParentRef = ""
			}
		case "slug.tar.gz":
			f, err := ioutil.TempFile("", "slug.tar.gz")
			if err != nil {
				return fmt.Errorf("error creating slug tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading slug: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking slug tempfile: %s", err)
			}
			legacySlug = f
			uploadSize += header.Size
		case "docker-image.json":
			if err := json.NewDecoder(tr).Decode(&dockerImage.config); err != nil {
				return fmt.Errorf("error decoding docker image json: %s", err)
			}
		case "docker-image.tar":
			f, err := ioutil.TempFile("", "docker-image.tar")
			if err != nil {
				return fmt.Errorf("error creating docker image tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading docker image: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking docker image tempfile: %s", err)
			}
			dockerImage.archive = f
			uploadSize += header.Size
		case "postgres.dump":
			f, err := ioutil.TempFile("", "postgres.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			pgDump = f
			uploadSize += header.Size
		case "mysql.dump":
			f, err := ioutil.TempFile("", "mysql.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			mysqlDump = f
			uploadSize += header.Size
		}
	}

	if app == nil {
		return fmt.Errorf("missing app.json")
	}
	oldName := app.Name
	if name := args.String["--name"]; name != "" {
		app.Name = name
	}
	if err := client.CreateApp(app); err != nil {
		return fmt.Errorf("error creating app: %s", err)
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.Total = uploadSize
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if pgDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "postgres",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning postgres resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getPgRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting postgres config: %s", err)
		}
		config.Stdin = pgDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := pgRestore(client, config, jobs); err != nil {
			return fmt.Errorf("error restoring postgres database: %s", err)
		}
	}

	if mysqlDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "mysql",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning mysql resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getMysqlRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting mysql config: %s", err)
		}
		config.Stdin = mysqlDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := mysqlRestore(client, config); err != nil {
			return fmt.Errorf("error restoring mysql database: %s", err)
		}
	}

	if release != nil && release.Env["FLYNN_REDIS"] != "" {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "redis",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning redis resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}
	}

	var uploadLegacySlug bool

	if legacySlug != nil {
		if err := func() error {
			gitreceiveRelease, err := client.GetAppRelease("gitreceive")
			if err != nil {
				return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
			}

			// handle legacy clusters which reference Docker image URIs
			if uri, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_URI"]; ok {
				artifact := &ct.Artifact{
					Type: ct.DeprecatedArtifactTypeDocker,
					URI:  uri,
				}
				if err := client.CreateArtifact(artifact); err != nil {
					return fmt.Errorf("error creating image artifact: %s", err)
				}
				uploadLegacySlug = true
				release.ArtifactIDs = []string{artifact.ID}
				return nil
			}

			slugBuilderID, ok := gitreceiveRelease.Env["SLUGBUILDER_IMAGE_ID"]
			if !ok {
				return fmt.Errorf("gitreceive env missing slugbuilder image")
			}
			slugRunnerID, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]
			if !ok {
				return fmt.Errorf("gitreceive env missing slugrunner image")
			}

			// handle legacy tarball based slugbuilders (which are Docker based)
			slugBuilderImage, err := client.GetArtifact(slugBuilderID)
			if err != nil {
				return fmt.Errorf("unable to get slugbuilder image artifact: %s", err)
			}
			if slugBuilderImage.Type == ct.DeprecatedArtifactTypeDocker {
				uploadLegacySlug = true
				release.ArtifactIDs = []string{slugRunnerID}
				return nil
			}

			// Use slugbuilder to convert the legacy slug to a
			// Flynn squashfs image
			slugImageID := random.UUID()
			config := runConfig{
				App:        app.ID,
				Release:    gitreceiveRelease.ID,
				ReleaseEnv: true,
				Artifacts:  []string{slugBuilderID},
				DisableLog: true,
				Args:       []string{"/bin/convert-legacy-slug.sh"},
				Stdin:      legacySlug,
				Stdout:     ioutil.Discard,
				Stderr:     ioutil.Discard,
				Env:        map[string]string{"SLUG_IMAGE_ID": slugImageID},
			}
			if bar != nil {
				config.Stdin = bar.NewProxyReader(config.Stdin)
			}
			if err := runJob(client, config); err != nil {
				return fmt.Errorf("error uploading slug: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = []string{slugRunnerID, slugImageID}
			if release.Meta == nil {
				release.Meta = make(map[string]string, 1)
			}
			release.Meta["git"] = "true"
			return nil
		}(); err != nil {
			return err
		}
	} else if dockerImage.config.Tag != "" && dockerImage.archive != nil {
		// load the docker image into the Docker daemon
		cmd := exec.Command("docker", "load")
		cmd.Stdin = dockerImage.archive
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("error running docker load: %s: %q", err, out)
		}

		// use the tag from the config (which will now be applied to
		// the loaded image) to push the image to docker-receive
		cluster, err := getCluster()
		if err != nil {
			return err
		}
		host, err := cluster.DockerPushHost()
		if err != nil {
			return err
		}
		tag := fmt.Sprintf("%s/%s:flynn-import-%s", host, app.Name, random.String(8))
		if out, err := exec.Command("docker", "tag", dockerImage.config.Tag, tag).CombinedOutput(); err != nil {
			return fmt.Errorf("error tagging docker image: %s: %q", err, out)
		}

		artifact, err := dockerPush(client, app.Name, tag)
		if err != nil {
			return fmt.Errorf("error pushing docker image: %s", err)
		}

		release.ArtifactIDs = []string{artifact.ID}
	} else if len(artifacts) > 0 {
		// import blobstore Flynn artifacts
		blobstoreRelease, err := client.GetAppRelease("blobstore")
		if err != nil {
			return fmt.Errorf("unable to retrieve blobstore release: %s", err)
		}
		upload := func(id, url string) error {
			layer, ok := layers[id]
			if !ok {
				return fmt.Errorf("missing layer in export: %s", id)
			}
			config := runConfig{
				App:        app.ID,
				Release:    blobstoreRelease.ID,
				DisableLog: true,
				Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", url},
				Stdin:      layer,
				Stdout:     ioutil.Discard,
				Stderr:     ioutil.Discard,
			}
			if bar != nil {
				config.Stdin = bar.NewProxyReader(config.Stdin)
			}
			if err := runJob(client, config); err != nil {
				return fmt.Errorf("error uploading layer: %s", err)
			}
			return nil
		}

		release.ArtifactIDs = make([]string, len(artifacts))
		for i, artifact := range artifacts {
			if artifact.Type != ct.ArtifactTypeFlynn {
				continue
			}
			if !artifact.Blobstore() {
				continue
			}
			for _, rootfs := range artifact.Manifest().Rootfs {
				for _, layer := range rootfs.Layers {
					if err := upload(layer.ID, artifact.LayerURL(layer)); err != nil {
						return err
					}
				}
			}
			artifact.ID = ""
			if err := client.CreateArtifact(artifact); err != nil {
				return fmt.Errorf("error creating artifact: %s", err)
			}
			release.ArtifactIDs[i] = artifact.ID
		}

		// use the current slugrunner image for slug releases
		if release.IsGitDeploy() {
			gitreceiveRelease, err := client.GetAppRelease("gitreceive")
			if err != nil {
				return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
			}
			slugRunnerID, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]
			if !ok {
				return fmt.Errorf("gitreceive env missing slugrunner image")
			}
			release.ArtifactIDs[0] = slugRunnerID
		}
	}

	if release != nil {
		for t, proc := range release.Processes {
			// update legacy slug releases to use Args rather than the
			// deprecated Entrypoint and Cmd fields
			if release.IsGitDeploy() && len(proc.Args) == 0 {
				proc.Args = append([]string{"/runner/init"}, proc.DeprecatedCmd...)
				proc.DeprecatedCmd = nil
			}
			for i, port := range proc.Ports {
				if port.Service != nil && strings.HasPrefix(port.Service.Name, oldName) {
					proc.Ports[i].Service.Name = strings.Replace(port.Service.Name, oldName, app.Name, 1)
				}
			}
			release.Processes[t] = proc
		}
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if uploadLegacySlug {
		slugURI := fmt.Sprintf("http://blobstore.discoverd/%s/slug.tgz", random.UUID())
		config := runConfig{
			App:        app.ID,
			Release:    release.ID,
			DisableLog: true,
			Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", slugURI},
			Stdin:      legacySlug,
			Stdout:     ioutil.Discard,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		if err := runJob(client, config); err != nil {
			return fmt.Errorf("error uploading slug: %s", err)
		}
		slugArtifact := &ct.Artifact{
			Type: ct.DeprecatedArtifactTypeFile,
			URI:  slugURI,
		}
		if err := client.CreateArtifact(slugArtifact); err != nil {
			return fmt.Errorf("error creating slug artifact: %s", err)
		}
		release.ID = ""
		release.ArtifactIDs = append(release.ArtifactIDs, slugArtifact.ID)
		if release.Meta == nil {
			release.Meta = make(map[string]string, 1)
		}
		release.Meta["git"] = "true"
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if formation != nil && release != nil {
		formation.ReleaseID = release.ID
		formation.AppID = app.ID
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("error creating formation: %s", err)
		}
	}

	if args.Bool["--routes"] {
		for _, route := range routes {
			if err := client.CreateRoute(app.ID, &route); err != nil {
				if e, ok := err.(hh.JSONError); ok && e.Code == hh.ConflictErrorCode {
					// If the cluster domain matches then the default route
					// exported will conflict with the one created automatically.
					continue
				}
				return fmt.Errorf("error creating route: %s", err)
			}
			numRoutes++
		}
	}

	fmt.Printf("Imported %s (added %d routes, provisioned %d resources)\n", app.Name, numRoutes, numResources)

	return nil
}
Example #12
func (gd *GDrive) getMetadataChanges(svc *drive.Service, startChangeId int64,
	changeChan chan<- []*drive.Change, errorChan chan<- error) {
	var about *drive.About
	var err error

	// Get the Drive About information in order to figure out how many
	// changes we need to download to get up to date.
	for try := 0; ; try++ {
		about, err = svc.About.Get().Do()
		if err == nil {
			break
		} else {
			err = gd.tryToHandleDriveAPIError(err, try)
		}
		if err != nil {
			errorChan <- err
			return
		}
	}

	// Don't clutter the output with a progress bar unless it looks like
	// downloading changes may take a while.
	// TODO: consider using timer.AfterFunc to put up the progress bar if
	// we're not done after a few seconds?  It's not clear if this is worth
	// the trouble.
	var bar *pb.ProgressBar
	numChanges := about.LargestChangeId - startChangeId
	if numChanges > 1000 && !gd.quiet {
		bar = pb.New64(numChanges)
		bar.ShowBar = true
		bar.ShowCounters = false
		bar.Output = os.Stderr
		bar.Prefix("Updating metadata cache: ")
		bar.Start()
	}

	pageToken := ""
	try := 0
	// Keep asking Drive for more changes until we get through them all.
	for {
		// Only ask for the fields in the drive.Change structure that we
		// actually need to be filled in, to save some bandwidth...
		fields := []googleapi.Field{"nextPageToken",
			"items/id", "items/fileId", "items/deleted",
			"items/file/id", "items/file/parents", "items/file/title",
			"items/file/fileSize", "items/file/mimeType", "items/file/properties",
			"items/file/modifiedDate", "items/file/md5Checksum", "items/file/labels"}
		q := svc.Changes.List().MaxResults(1000).IncludeSubscribed(false).Fields(fields...)
		if startChangeId >= 0 {
			q = q.StartChangeId(startChangeId + 1)
		}
		if pageToken != "" {
			q = q.PageToken(pageToken)
		}

		r, err := q.Do()
		if err != nil {
			err = gd.tryToHandleDriveAPIError(err, try)
			if err != nil {
				errorChan <- err
				return
			}
			try++
			continue
		}

		// Success. Reset the try counter in case we had errors leading up
		// to this.
		try = 0

		if len(r.Items) > 0 {
			// Send the changes along to the goroutine that's updating the
			// local cache.
			changeChan <- r.Items

			if bar != nil {
				bar.Set(int(r.Items[len(r.Items)-1].Id - startChangeId))
			}
		}

		pageToken = string(r.NextPageToken)
		if pageToken == "" {
			break
		}
	}
	// Signal that no more changes are coming.
	close(changeChan)

	if bar != nil {
		bar.Finish()
	}

	gd.debug("Done updating metadata from Drive")
}
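Unlike the Increment-based examples, this one calls bar.Set with an absolute offset computed from the last change ID in each page. Set records a position rather than a delta, so a retried or partially overlapping page cannot overcount the way repeated Increment calls would. A tiny sketch of the idea, assuming gopkg.in/cheggaaa/pb.v1 as elsewhere and illustrative int64 change IDs:

// setAbsoluteProgress positions the bar from absolute IDs rather than
// incrementally, so retries leave the bar correct.
func setAbsoluteProgress(bar *pb.ProgressBar, lastID, startID int64) {
	if bar != nil {
		bar.Set(int(lastID - startID))
	}
}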
Example #13
// finishProgressBar fills the bar to its total before stopping it, so
// it never ends short of 100%.
func finishProgressBar(bar *pb.ProgressBar) {
	bar.Set64(bar.Total)
	bar.Finish()
}
Example #14
File: client.go Project: gmelika/rack
// PostMultipartP posts a multipart message in the MIME internet format with a callback function with a string stating the upload Progress.
func (c *Client) PostMultipartP(path string, files map[string][]byte, params Params, out interface{}, callback func(s string)) error {
	body := &bytes.Buffer{}

	writer := multipart.NewWriter(body)

	for name, source := range files {
		part, err := writer.CreateFormFile(name, "source.tgz")
		if err != nil {
			return err
		}

		_, err = io.Copy(part, bytes.NewReader(source))
		if err != nil {
			return err
		}
	}

	for name, value := range params {
		writer.WriteField(name, value)
	}

	err := writer.Close()
	if err != nil {
		return err
	}

	var bodyReader io.Reader = body

	var bar *pb.ProgressBar

	if callback != nil {
		bar = pb.New(body.Len()).SetUnits(pb.U_BYTES)
		bar.NotPrint = true
		bar.ShowBar = false
		bar.Callback = callback

		bar.Start()
		bodyReader = bar.NewProxyReader(body)
	}

	req, err := c.request("POST", path, bodyReader)

	if err != nil {
		return err
	}

	req.SetBasicAuth("convox", string(c.Password))

	req.Header.Set("Content-Type", writer.FormDataContentType())

	res, err := c.client().Do(req)

	if err != nil {
		return err
	}

	defer res.Body.Close()

	if err := responseError(res); err != nil {
		return err
	}

	data, err := ioutil.ReadAll(res.Body)

	if err != nil {
		return err
	}

	if out != nil {
		err = json.Unmarshal(data, out)

		if err != nil {
			return err
		}
	}

	if callback != nil {
		bar.Finish()
	}

	return nil
}
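With NotPrint set, the bar draws nothing itself; each refresh instead invokes the Callback with the rendered progress string, which is how this method reports upload progress to its caller. A hedged usage sketch (the path, files, and params values are illustrative, not from the rack project):

import "log"

// Sketch: receive upload progress as log lines instead of terminal
// output. c is a *Client as defined above.
func uploadWithProgress(c *Client, files map[string][]byte, params Params) error {
	var out map[string]interface{}
	return c.PostMultipartP("/builds", files, params, &out, func(s string) {
		log.Printf("upload progress: %s", s)
	})
}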
Example #15
func importCSV(filename string, connStr string, schema string, tableName string, ignoreErrors bool, skipHeader bool, fields string, delimiter string) error {

	db, err := connect(connStr, schema)
	if err != nil {
		return err
	}
	defer db.Close()

	var reader *csv.Reader
	var bar *pb.ProgressBar
	if filename != "" {
		file, err := os.Open(filename)
		if err != nil {
			return err
		}
		defer file.Close()

		bar = NewProgressBar(file)
		reader = csv.NewReader(io.TeeReader(file, bar))
	} else {
		reader = csv.NewReader(os.Stdin)
	}

	reader.Comma, _ = utf8.DecodeRuneInString(delimiter)
	reader.LazyQuotes = true

	columns, err := parseColumns(reader, skipHeader, fields)
	if err != nil {
		return err
	}

	reader.FieldsPerRecord = len(columns)

	i, err := NewCSVImport(db, schema, tableName, columns)
	if err != nil {
		return err
	}

	var success, failed int
	if filename != "" {
		bar.Start()
		err, success, failed = copyCSVRows(i, reader, ignoreErrors, delimiter, columns)
		bar.Finish()
	} else {
		err, success, failed = copyCSVRows(i, reader, ignoreErrors, delimiter, columns)
	}

	if err != nil {
		lineNumber := success + failed
		if !skipHeader {
			lineNumber++
		}
		return fmt.Errorf("line %d: %s", lineNumber, err)
	} else {
		fmt.Println(fmt.Sprintf("%d rows imported into %s.%s", success, schema, tableName))

		if ignoreErrors && failed > 0 {
			fmt.Println(fmt.Sprintf("%d rows could not be imported into %s.%s and have been written to stderr.", failed, schema, tableName))
		}

		return i.Commit()
	}
}
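NewProgressBar here is a helper from this project, not part of pb; the portable part is the io.TeeReader wiring, which mirrors every byte the CSV reader consumes into the bar (possible because *pb.ProgressBar implements io.Writer). A sketch of such a helper under that assumption, sizing the bar from the file's length:

import (
	"os"

	pb "gopkg.in/cheggaaa/pb.v1"
)

// newFileProgressBar builds a byte-unit bar sized to the file, meant
// to be paired with io.TeeReader(file, bar) so reads advance it.
func newFileProgressBar(f *os.File) (*pb.ProgressBar, error) {
	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	return pb.New64(fi.Size()).SetUnits(pb.U_BYTES), nil
}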
Example #16
func stat(cmd *cobra.Command, args []string) error {

	if len(args) != 1 {
		cmd.Help()
		return nil
	}

	token, err := getToken()
	if err != nil {
		return err
	}

	con, err := grpc.Dial(metaAddr, grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer con.Close()

	c := pb.NewMetaClient(con)

	benchStart := time.Now()

	total := 0
	errorProbes := 0

	errChan := make(chan error)
	resChan := make(chan string)
	doneChan := make(chan bool)
	limitChan := make(chan int, concurrencyFlag)

	for i := 0; i < concurrencyFlag; i++ {
		limitChan <- 1
	}

	var bar *br.ProgressBar
	if progressBar {
		bar = br.StartNew(probesFlag)
	}

	for i := 0; i < probesFlag; i++ {
		go func() {
			<-limitChan
			defer func() {
				limitChan <- 1
			}()
			in := &pb.StatReq{}
			in.AccessToken = token
			in.Path = args[0]
			in.Children = childrenFlag
			ctx := context.Background()
			_, err := c.Stat(ctx, in)
			if err != nil {
				errChan <- err
				return
			}
			doneChan <- true
			resChan <- ""
		}()
	}

	for {
		select {
		case <-doneChan:
			total++
			if progressBar {
				bar.Increment()
			}
		case <-resChan:
		case err := <-errChan:
			log.Error(err)
			errorProbes++
			total++
			if progressBar {
				bar.Increment()
			}
		}
		if total == probesFlag {
			break
		}
	}

	if progressBar {
		bar.Finish()
	}

	numberRequests := probesFlag
	concurrency := concurrencyFlag
	totalTime := time.Since(benchStart).Seconds()
	failedRequests := errorProbes
	frequency := float64(numberRequests-failedRequests) / totalTime
	period := 1 / frequency

	data := [][]string{
		{"#NUMBER", "CONCURRENCY", "TIME", "FAILED", "FREQ", "PERIOD"},
		{fmt.Sprintf("%d", numberRequests), fmt.Sprintf("%d", concurrency), fmt.Sprintf("%f", totalTime), fmt.Sprintf("%d", failedRequests), fmt.Sprintf("%f", frequency), fmt.Sprintf("%f", period)},
	}
	w := csv.NewWriter(output)
	w.Comma = ' '
	for _, d := range data {
		if err := w.Write(d); err != nil {
			return err
		}
	}
	w.Flush()

	if err := w.Error(); err != nil {
		return err
	}
	return nil
}
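limitChan above is a counting semaphore: it is pre-filled with concurrencyFlag tokens, and each goroutine takes one before issuing its RPC and hands it back when done, bounding the number of in-flight requests. A distilled, self-contained sketch of the pattern:

// runBounded runs n jobs with at most `workers` executing at once,
// using a buffered channel as a counting semaphore.
func runBounded(n, workers int, job func(i int)) {
	sem := make(chan struct{}, workers)
	done := make(chan struct{})
	for i := 0; i < n; i++ {
		go func(i int) {
			sem <- struct{}{}        // acquire a slot
			defer func() { <-sem }() // release it when finished
			job(i)
			done <- struct{}{}
		}(i)
	}
	for i := 0; i < n; i++ {
		<-done
	}
}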
Example #17
File: update.go Project: kildevaeld/lang
Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.`,
	Run: func(cmd *cobra.Command, args []string) {
		// TODO: Work your own magic here
		var bar *pb.ProgressBar

		err := service.Update(func(step lang.Step, p, t int64) {
			if bar == nil {
				bar = createBar("Updating manifests", t)
			}
			bar.Set64(p)
		})

		if bar != nil {
			bar.Finish()
		}

		if err != nil {
			printError(err)
		}

	},
}

func init() {
	RootCmd.AddCommand(updateCmd)

	// Here you will define your flags and configuration settings.

	// Cobra supports Persistent Flags which will work for this command
Example #18
func main() {
	kingpin.CommandLine.HelpFlag.Short('h')
	kingpin.Parse()

	*downloadFlag = !*downloadFlag

	w := new(tabwriter.Writer)
	var output io.Writer
	if !*confirmFlag {
		var err error
		// log.SetFlags(0)
		LogFileLocation := flag.String("log", "BotSifter.log", "Specifies path of the log file")
		output, err = os.OpenFile(*LogFileLocation, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
		if err != nil {
			log.Println(err)
			return
		}
	} else {
		log.SetFlags(0)
	}

	if output == nil {
		output = os.Stdout
	}
	w.Init(output, 4, 4, 2, ' ', 0)
	log.SetOutput(w)

	//Read config file
	var GooFig GoogleConfig
	data, err := ioutil.ReadFile(*configFile)

	if err != nil {
		fmt.Println("Unable to open configuration file: " + *configFile)
		return
	}
	//Load config data from file into struct
	err = yaml.Unmarshal(data, &GooFig)
	if err != nil {
		log.Println(err)
		return
	}
	fmt.Println("\nConfig File: \t\t[" + *configFile + "]")
	fmt.Println("Include Refferers File: [" + GooFig.RefWhite + "]")
	fmt.Println("Exclude Refferers File: [" + GooFig.RefBlack + "]")
	fmt.Println("Include UA File: \t[" + GooFig.UAWhite + "]")
	fmt.Println("Exclude UA File: \t[" + GooFig.UABlack + "]")
	fmt.Println("Exclude Hostname File: \t[" + GooFig.HostInc + "]")

	//Loading variables from config struct
	WebPropertyId := GooFig.UACode
	AccountID := WebPropertyId[3:11]

	//Authentication settings
	conf := &oauth2.Config{
		ClientID:     GooFig.ClientID,
		ClientSecret: GooFig.ClientSecret,
		RedirectURL:  GooFig.RedirectURL,
		Scopes: []string{
			"https://www.googleapis.com/auth/analytics",
			"https://www.googleapis.com/auth/analytics.edit",
			"https://www.googleapis.com/auth/analytics.manage.users",
		},
		Endpoint: google.Endpoint,
	}

	//If the config data doesn't contain Auth Token we need to get one
	fmt.Println("")
	if GooFig.Token == nil {
		fmt.Print("Authenticating user...")
		GooFig.Token = auth(conf, GooFig.Port)
		fmt.Println("\t\t\t\t\tCompleted")
	}

	//Load new client and service to talk with Google api
	fmt.Print("Setting up Google client...")
	client := conf.Client(oauth2.NoContext, GooFig.Token)
	service, err := analytics.New(client)
	if err != nil {
		log.Fatalln(err)
		// GooFig.Token = nil
	}

	fmt.Println("\t\t\t\t\tCompleted")
	//Retrieve BotSifter list from server if the cleanFlag is false
	var resp Response
	var respDisplay Response
	//Load csv files into appropriate structs
	fmt.Print("Loading includes, excludes and hostname lists...")
	uainc := ReadReferrerList(GooFig.UAWhite)
	uaexc := ReadReferrerList(GooFig.UABlack)
	refs := ReadReferrerList(GooFig.RefWhite)
	excs := ReadReferrerList(GooFig.RefBlack)
	hosts := ReadHostList(GooFig.HostInc)

	fmt.Println("\t\tCompleted")

	if !*cleanFlag {
		if *downloadFlag {
			fmt.Print("Downloading BotSifter Referrer List...")
			resp = retreiveList(GooFig.Person)
			respDisplay = resp
			fmt.Println("\t\t\t\tCompleted")
		}

		if resp.Status == "Unauthorized" {
			fmt.Println("Download failed: Invalid username/password")
			return
		}

		//Append contents from includeList.csv onto the ReferrerList struct and remove duplicate entries
		fmt.Print("Merging local include data with BotSifter data...")
		resp.ReferrerList = append(resp.ReferrerList, refs...)
		resp.UserAgentList = append(resp.UserAgentList, uainc...)
		RemoveDuplicates(&resp.UserAgentList)
		RemoveDuplicates(&resp.ReferrerList)
		fmt.Println("\t\tCompleted")

		//Remove contents from ReferrerList which were found on the excludeList.csv
		fmt.Print("Removing local exclude data from BotSifter data...")
		resultsRef := []Referrer{}
		for _, compFilter := range resp.ReferrerList {
			found := false
			for _, exc := range excs {
				if compFilter.Referrer == exc.Referrer {
					found = true
				}
			}
			if !found {
				resultsRef = append(resultsRef, compFilter)
			}
		}
		resp.ReferrerList = resultsRef

		resultsUA := []Referrer{}
		for _, compFilter := range resp.UserAgentList {
			found := false
			for _, exc := range uaexc {
				if compFilter.Referrer == exc.Referrer {
					found = true
				}
			}
			if !found {
				resultsUA = append(resultsUA, compFilter)
			}
		}
		resp.UserAgentList = resultsUA
		fmt.Println("\t\tCompleted")
	}

	fmt.Print("Download current BotSifter filters to build comparison lists...")
	//List current Botsifter filters in GA account
	filters, err := service.Management.Filters.List(AccountID).Do()
	if err != nil {
		log.Fatalln(err)
	}

	var oldFilterListUA []Referrer
	var oldFilterListRef []Referrer
	var oldFilterListHost []Host

	for _, oldFilter := range filters.Items {
		if strings.Contains(oldFilter.Name, "BotSifter UA") == true {
			if filterExpression := oldFilter.ExcludeDetails; filterExpression != nil {
				filterExpression.ExpressionValue = (strings.Replace(filterExpression.ExpressionValue, "\\.", ".", -1))
				filterExpression.ExpressionValue = (strings.Replace(filterExpression.ExpressionValue, "\\+", "+", -1))
				for _, ref := range strings.Split(filterExpression.ExpressionValue, "|") {
					oldFilterListUA = append(oldFilterListUA, Referrer{ref, "", ""})
				}
			}
		}
		if strings.Contains(oldFilter.Name, "BotSifter Ref") == true {
			if filterExpression := oldFilter.ExcludeDetails; filterExpression != nil {
				filterExpression.ExpressionValue = (strings.Replace(filterExpression.ExpressionValue, "\\.", ".", -1))
				filterExpression.ExpressionValue = (strings.Replace(filterExpression.ExpressionValue, "\\+", "+", -1))
				for _, ref := range strings.Split(filterExpression.ExpressionValue, "|") {
					oldFilterListRef = append(oldFilterListRef, Referrer{ref, "", ""})
				}
			}
		}
		if strings.Contains(oldFilter.Name, "BotSifter Hostname") == true {
			if filterExpression := oldFilter.IncludeDetails; filterExpression != nil {
				filterExpression.ExpressionValue = (strings.Replace(filterExpression.ExpressionValue, "\\.", ".", -1))
				filterExpression.ExpressionValue = (strings.Replace(filterExpression.ExpressionValue, "\\+", "+", -1))
				for _, ref := range strings.Split(filterExpression.ExpressionValue, "|") {
					oldFilterListHost = append(oldFilterListHost, Host{ref})
				}
			}
		}

	}
	onlyInNewListRefs := Referrers(resp.ReferrerList).findEntriesOnlyInLeftSide(oldFilterListRef)
	onlyInOldListRefs := Referrers(oldFilterListRef).findEntriesOnlyInLeftSide(resp.ReferrerList)
	inBothListsRefs := Referrers(resp.ReferrerList).findInBoth(oldFilterListRef)

	onlyInNewListUAs := Referrers(resp.UserAgentList).findEntriesOnlyInLeftSide(oldFilterListUA)
	onlyInOldListUAs := Referrers(oldFilterListUA).findEntriesOnlyInLeftSide(resp.UserAgentList)
	inBothListsUAs := Referrers(resp.UserAgentList).findInBoth(oldFilterListUA)

	onlyInNewListHosts := Hosts(hosts).findEntriesOnlyInLeftSide(oldFilterListHost)
	onlyInOldListHosts := Hosts(oldFilterListHost).findEntriesOnlyInLeftSide(hosts)
	inBothListsHosts := Hosts(hosts).findInBoth(oldFilterListHost)

	var Ref Referrer
	resultsRef := []Referrer{}

	for _, Ref = range resp.ReferrerList {
		Ref.Referrer = (strings.Replace(Ref.Referrer, ".", "\\.", -1))
		Ref.Referrer = (strings.Replace(Ref.Referrer, "+", "\\+", -1))
		resultsRef = append(resultsRef, Ref)

	}
	resp.ReferrerList = resultsRef

	resultsUA := []Referrer{}
	for _, Ref = range resp.UserAgentList {
		Ref.Referrer = (strings.Replace(Ref.Referrer, ".", "\\.", -1))
		Ref.Referrer = (strings.Replace(Ref.Referrer, "+", "\\+", -1))
		resultsUA = append(resultsUA, Ref)

	}
	resp.UserAgentList = resultsUA

	resultsHost := []Host{}
	for _, h := range hosts {
		h.Hostname = (strings.Replace(h.Hostname, ".", "\\.", -1))
		h.Hostname = (strings.Replace(h.Hostname, "+", "\\+", -1))
		resultsHost = append(resultsHost, h)

	}
	hosts = resultsHost

	fmt.Println("\tCompleted")
	fmt.Println("")
	log.Println("Current Botsifter Bots:")
	log.Println("\n#################### CURRENT BotSifter BOTS ####################")
	log.Println("Referrers:\n")
	log.Println("\tRANK\tNAME\tSCORE")
	for _, Ref = range respDisplay.ReferrerList {
		log.Println(Ref)
	}
	log.Println("")
	log.Println("User Agents:\n")
	log.Println("\tRANK\tNAME\tSCORE")
	for _, Ref = range respDisplay.UserAgentList {
		log.Println(Ref)
	}
	log.Println("")
	log.Println("\nBotSifter will make the following changes to your GA Account[" + GooFig.UACode + "]:")
	log.Println("\n#################### HOST CHANGES ####################")
	log.Println("Added Hosts:\n")
	if onlyInNewListHosts != nil {
		sort.Sort(onlyInNewListHosts)
		for _, h := range onlyInNewListHosts {
			log.Println(h)
		}
	} else {
		log.Println("\tNONE")
	}
	log.Println("")
	log.Println("Removed Hosts:\n")
	if onlyInOldListHosts != nil {
		sort.Sort(onlyInOldListHosts)
		for _, h := range onlyInOldListHosts {
			log.Println(h)
		}
	} else {
		log.Println("\tNONE")
	}
	// log.Println(strings.Trim(fmt.Sprint(onlyInOldListRefs), "[]"))
	log.Println("")
	log.Println("Hosts unchange:\n")
	if inBothListsHosts != nil {
		sort.Sort(inBothListsHosts)
		for _, h := range inBothListsHosts {
			log.Println(h)
		}
	} else {
		log.Println("\tNONE")
	}

	log.Println("\n#################### REFERRER CHANGES ####################")
	log.Println("Added Referrers:\n")
	if onlyInNewListRefs != nil {
		log.Println("\tRANK\tNAME\tSCORE")
		sort.Sort(onlyInNewListRefs)
		for _, Ref = range onlyInNewListRefs {
			log.Println(Ref)
		}
	} else {
		log.Println("\tNONE")
	}
	log.Println("")
	log.Println("Removed Referrers:\n")
	if onlyInOldListRefs != nil {
		log.Println("\tRANK\tNAME\tSCORE")
		sort.Sort(onlyInOldListRefs)
		for _, Ref = range onlyInOldListRefs {
			log.Println(Ref)
		}
	} else {
		log.Println("\tNONE")
	}
	// log.Println(strings.Trim(fmt.Sprint(onlyInOldListRefs), "[]"))
	log.Println("")
	log.Println("Referrers unchange:\n")
	if inBothListsRefs != nil {
		log.Println("\tRANK\tNAME\tSCORE")
		sort.Sort(inBothListsRefs)
		for _, Ref = range inBothListsRefs {
			log.Println(Ref)
		}
	} else {
		log.Println("\tNONE")
	}
	log.Println("\n#################### USER AGENTS CHANGES ####################")
	log.Println("Added User Agents:\n")
	if onlyInNewListUAs != nil {
		log.Println("\tRANK\tNAME\tSCORE")
		sort.Sort(onlyInNewListUAs)
		for _, Ref = range onlyInNewListUAs {
			log.Println(Ref)
		}
	} else {
		log.Println("\tNONE")
	}
	log.Println("")
	log.Println("Removed User Agents:\n")
	if onlyInOldListUAs != nil {
		log.Println("\tRANK\tNAME\tSCORE")
		sort.Sort(onlyInOldListUAs)
		for _, Ref = range onlyInOldListUAs {
			log.Println(Ref)
		}
	} else {
		log.Println("\tNONE")
	}
	// log.Println(strings.Trim(fmt.Sprint(onlyInOldListRefs), "[]"))
	log.Println("")
	log.Println("User Agents unchange:\n")
	if inBothListsUAs != nil {
		log.Println("\tRANK\tNAME\tSCORE")
		sort.Sort(inBothListsUAs)
		for _, Ref = range inBothListsUAs {
			log.Println(Ref)
		}
	} else {
		log.Println("\tNONE")
	}
	w.Flush()
	// log.Println(strings.Trim(fmt.Sprint(inBothListsRefs), "[]"))
	log.Println("")

	if !*confirmFlag {

		length := len(filters.Items)
		var bar *pb.ProgressBar
		if length != 0 {
			bar = pb.StartNew(length)
			bar.SetMaxWidth(80)
			fmt.Println("Deleting old BotSifter filters ")
			for _, eachFilter := range filters.Items {

				if strings.Contains(eachFilter.Name, "BotSifter") {
					service.Management.Filters.Delete(AccountID, eachFilter.Id).Do()
				}
				bar.Increment()
				time.Sleep(time.Millisecond * 250)
			}
			bar.Finish()
		} else {
			fmt.Println("No filters to delete")
		}

		//If cleanFlag entered then end program here
		if *cleanFlag {
			return
		}

		//If view is not defined in config file then ask user which one to apply filters to
		if GooFig.View == "" {
			//List all views
			profiles, err := service.Management.Profiles.List(AccountID, WebPropertyId).Do()
			if err != nil {
				log.Println(err)
			}

			for i, profile := range profiles.Items {
				fmt.Printf("%d. %s\n", i, profile.Name)
			}

			reader := bufio.NewReader(os.Stdin)
			fmt.Printf("Please select a profile to apply filters too: ")
			index := 0
			for {
				selectedProfileIndex, _ := reader.ReadString('\n')
				index, err = strconv.Atoi(strings.TrimSuffix(selectedProfileIndex, "\n"))
				if err == nil && index < len(profiles.Items) {
					break
				} else {
					fmt.Println("Invalid input", index, err)
				}
			}
			GooFig.View = profiles.Items[index].Id
		}
		//Prepare filters
		fmt.Println("Preparing Filter - combining multiple Referrers")
		filterList := RefMerge(resp.ReferrerList, true)

		//Build new filters from ReferrerList struct
		fmt.Println("Creating referral filters")
		var FilterIds []string
		length = len(filterList)
		bar = pb.StartNew(length)
		bar.SetMaxWidth(80)
		for i, newFilter := range filterList {
			counter := strconv.Itoa(i + 1)
			filter := &analytics.Filter{
				Name: "BotSifter Ref Spam" + counter,
				Type: "EXCLUDE",
				ExcludeDetails: &analytics.FilterExpression{
					Field:           "REFERRAL",
					ExpressionValue: newFilter,
					CaseSensitive:   false,
				},
			}
			filter, err = service.Management.Filters.Insert(AccountID, filter).Do()
			if err != nil {
				fmt.Print("\n")
				fmt.Println(err)
				return
			}
			//Save filter Ids for later
			FilterIds = append(FilterIds, filter.Id)
			bar.Increment()
			time.Sleep(time.Millisecond * 250)
		}
		bar.Finish()

		//Prepare filters
		fmt.Println("Preparing Filter - combining multiple User Agents")
		filterListua := RefMerge(resp.UserAgentList, true)

		//Build new filters from ReferrerList struct
		fmt.Println("Creating User Agent filters")

		length = len(filterListua)
		bar = pb.StartNew(length)
		bar.SetMaxWidth(80)
		for i, newFilter := range filterListua {
			counter := strconv.Itoa(i + 1)
			filter := &analytics.Filter{
				Name: "BotSifter UA Spam" + counter,
				Type: "EXCLUDE",
				ExcludeDetails: &analytics.FilterExpression{
					Field:           "USER_DEFINED_VALUE",
					ExpressionValue: newFilter,
					CaseSensitive:   false,
				},
			}
			filter, err = service.Management.Filters.Insert(AccountID, filter).Do()

			if err != nil {
				fmt.Print("\n")
				fmt.Println(err)
				return
			}
			//Save filter Ids for later
			FilterIds = append(FilterIds, filter.Id)
			bar.Increment()
			time.Sleep(time.Millisecond * 250)
		}
		bar.Finish()

		if len(hosts) != 0 {
			hostList := HostMerge(hosts, false)

			//If there's hosts build "include Hostname" rule(s)
			fmt.Println("Creating Hostname filter(s)")
			length = len(hostList)
			bar = pb.StartNew(length)
			bar.SetMaxWidth(80)
			for i, newHost := range hostList {
				counter := strconv.Itoa(i)
				filter := &analytics.Filter{
					Name: "BotSifter Hostname Spam" + counter,
					Type: "INCLUDE",
					IncludeDetails: &analytics.FilterExpression{
						Field:           "PAGE_HOSTNAME",
						ExpressionValue: newHost,
						CaseSensitive:   false,
					},
				}

				filter, err = service.Management.Filters.Insert(AccountID, filter).Do()
				if err != nil {
					log.Println(err)
					return
				}

				//Save filter Ids for later
				FilterIds = append(FilterIds, filter.Id)
				bar.Increment()
				time.Sleep(time.Millisecond * 250)
			}
			bar.Finish()
		}

		//connecting built filters to profile user selected
		fmt.Println("Connecting filters to profile")
		length = len(FilterIds)
		bar = pb.StartNew(length)
		bar.SetMaxWidth(80)
		for _, newLink := range FilterIds {
			profilefilterlink := &analytics.ProfileFilterLink{
				FilterRef: &analytics.FilterRef{Id: newLink},
			}

			_, err := service.Management.ProfileFilterLinks.Insert(AccountID, WebPropertyId, GooFig.View, profilefilterlink).Do()
			if err != nil {
				log.Println("Error Connecting Filter to View\n")
			}
			bar.Increment()
			time.Sleep(time.Millisecond * 250)
		}
		bar.Finish()
	}
	fmt.Println("Saving configuration data to " + *configFile)
	//Marshal data to save into config file
	data, err = yaml.Marshal(&GooFig)
	if err != nil {
		log.Println(err)
		return
	}

	//Write config file
	err = ioutil.WriteFile(*configFile, data, 0644)

	if err != nil {
		log.Println(err)
		return
	}
	fmt.Println("Completed")

}
Example #19
func main() {
	file := flag.String("file", "", "File containing the list of packages")
	output := flag.String("output", "gddoscore.out", "Output file")
	progress := flag.Bool("progress", false, "Show a progress bar")
	flag.Parse()

	var pkgs map[string]bool
	var err error

	if file != nil && *file != "" {
		pkgs, err = readFromFile(*file)
	} else {
		pkgs, err = readFromStdin()
	}

	if err != nil {
		fmt.Println(err)
		return
	}

	db, err := database.New()
	if err != nil {
		fmt.Println("error connecting to database:", err)
		return
	}

	o, err := os.OpenFile(*output, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		fmt.Println("error creating output file:", err)
		return
	}
	defer o.Close()

	log.SetOutput(o)
	log.Println("BEGIN")
	log.Printf("%d packages will be analyzed\n", len(pkgs))

	var progressBar *pb.ProgressBar
	if progress != nil && *progress {
		progressBar = pb.StartNew(len(pkgs))
	}

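	// db.Do visits each package in the database; the callback ignores
	// packages that are not in the requested set.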
	db.Do(func(pkg *database.PackageInfo) error {
		if _, ok := pkgs[pkg.PDoc.ImportPath]; !ok {
			// we aren't analyzing this package
			return nil
		}

		if pkg.Score == 0 {
			log.Printf("package “%s” has no score", pkg.PDoc.ImportPath)
		} else {
			log.Printf("package “%s” has score", pkg.PDoc.ImportPath)
			if progress != nil && !*progress {
				fmt.Println(pkg.PDoc.ImportPath)
			}
		}

		if progress != nil && *progress {
			progressBar.Increment()
		}

		return nil
	})

	if progress != nil && *progress {
		progressBar.Finish()
	}

	log.Println("END")
}
Example #20
0
func upload(cmd *cobra.Command, args []string) error {
	if len(args) != 1 {
		cmd.Help()
		return nil
	}

	if concurrencyFlag > probesFlag {
		concurrencyFlag = probesFlag
	}
	if concurrencyFlag == 0 {
		concurrencyFlag++
	}

	token, err := getToken()
	if err != nil {
		log.Error(err)
		return err
	}

	var fns []string
	if cernDistributionFlag {
		vals, err := createCERNDistribution()
		if err != nil {
			return err
		}
		fns = vals
	} else {
		fd, err := createFile(fmt.Sprintf("testfile-manual-count-%d-bs-%d", countFlag, bsFlag), "1", countFlag, bsFlag)
		if err != nil {
			return err
		}
		fns = []string{fd.Name()}
		fd.Close()
	}

	defer func() {
		for _, v := range fns {
			os.RemoveAll(v)
		}
	}()

	benchStart := time.Now()

	total := 0
	errorProbes := 0

	errChan := make(chan error)
	doneChan := make(chan bool)
	limitChan := make(chan int, concurrencyFlag)

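	// limitChan acts as a counting semaphore: pre-fill it with one token
	// per allowed concurrent upload; each probe takes a token to start
	// and returns it when finished.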
	for i := 0; i < concurrencyFlag; i++ {
		limitChan <- 1
	}

	var bar *pb.ProgressBar
	if progressBar {
		fmt.Printf("There are %d possible files to upload\n", len(fns))
		bar = pb.StartNew(probesFlag)
	}

	for i := 0; i < probesFlag; i++ {
		rand.Seed(time.Now().UnixNano())
		filename := fns[rand.Intn(len(fns))]
		go func(fn string) {
			<-limitChan
			defer func() {
				limitChan <- 1
			}()

			// open again the file
			lfd, err := os.Open(fn)
			if err != nil {
				errChan <- err
				return
			}
			defer lfd.Close()

			c := &http.Client{} // connections are reused if we reuse the client
			// PUT will close the fd
			// is it possible that the HTTP client is reusing connections so is being blocked?
			target := args[0]
			if randomTargetFlag {
				rawUUID, err := uuid.NewV4()
				if err != nil {
					errChan <- err
					return
				}
				target += rawUUID.String()
			}
			req, err := http.NewRequest("PUT", dataAddr+target, lfd)
			if err != nil {
				errChan <- err
				return
			}

			req.Header.Add("Content-Type", "application/octet-stream")
			req.Header.Add("Authorization", "Bearer "+token)
			req.Header.Add("CIO-Checksum", checksumFlag)

			res, err := c.Do(req)
			if err != nil {
				errChan <- err
				return
			}

			err = res.Body.Close()
			if err != nil {
				errChan <- err
				return
			}

			if res.StatusCode != 201 {
				err := fmt.Errorf("Request failed with status code %d", res.StatusCode)
				errChan <- err
				return
			}

			doneChan <- true
			resChan <- ""
			return
		}(filename)
	}

	for {
		select {
		case _ = <-doneChan:
			total++
			if progressBar {
				bar.Increment()
			}
		case _ = <-resChan:
		case err := <-errChan:
			log.Error(err)
			errorProbes++
			total++
			if progressBar {
				bar.Increment()
			}
		}

		if total == probesFlag {
			break
		}
	}

	if progressBar {
		bar.Finish()
	}

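	// Summary metrics: frequency is successful requests per second,
	// period is seconds per successful request, volume is the total MB
	// sent across all probes, and throughput is MB per second overall.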
	numberRequests := probesFlag
	concurrency := concurrencyFlag
	totalTime := time.Since(benchStart).Seconds()
	failedRequests := errorProbes
	frequency := float64(numberRequests-failedRequests) / totalTime
	period := 1 / frequency
	volume := numberRequests * countFlag * bsFlag / 1024 / 1024
	throughput := float64(volume) / totalTime
	data := [][]string{
		{"#NUMBER", "CONCURRENCY", "TIME", "FAILED", "FREQ", "PERIOD", "VOLUME", "THROUGHPUT"},
		{fmt.Sprintf("%d", numberRequests), fmt.Sprintf("%d", concurrency), fmt.Sprintf("%f", totalTime), fmt.Sprintf("%d", failedRequests), fmt.Sprintf("%f", frequency), fmt.Sprintf("%f", period), fmt.Sprintf("%d", volume), fmt.Sprintf("%f", throughput)},
	}
	w := csv.NewWriter(output)
	w.Comma = ' '
	for _, d := range data {
		if err := w.Write(d); err != nil {
			return err
		}
	}
	w.Flush()

	if err := w.Error(); err != nil {
		return err
	}
	return nil
}
Example #21
0
func main() {
	flag.Usage = usage

	help := flag.Bool("help", false, "show this message")
	version := flag.Bool("version", false, "show version")

	failpath := flag.String("faildir", "", "dir where failed torrentzips should be copied")

	flag.Parse()

	if *help {
		flag.Usage()
		os.Exit(0)
	}

	if *version {
		fmt.Fprintf(os.Stdout, "%s version %s, Copyright (c) 2013 Uwe Hoffmann. All rights reserved.\n", os.Args[0], versionStr)
		os.Exit(0)
	}

	if *failpath == "" {
		flag.Usage()
		os.Exit(0)
	}

	cv := new(countVisitor)

	for _, name := range flag.Args() {
		fmt.Fprintf(os.Stdout, "initial scan of %s to determine amount of work\n", name)

		err := filepath.Walk(name, cv.visit)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to count in dir %s: %v\n", name, err)
			os.Exit(1)
		}
	}

	mg := int(cv.numBytes / megabyte)

	fmt.Fprintf(os.Stdout, "found %d files and %d MB to do. starting work...\n", cv.numFiles, mg)

	var byteProgress *pb.ProgressBar

	if mg > 10 {
		pb.BarStart = "MB ["

		byteProgress = pb.New(mg)
		byteProgress.RefreshRate = 5 * time.Second
		byteProgress.ShowCounters = true
		byteProgress.Start()
	}

	inwork := make(chan *workUnit)

	sv := &scanVisitor{
		inwork: inwork,
	}

	wg := new(sync.WaitGroup)
	wg.Add(cv.numFiles)

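	// Start a fixed pool of eight workers to drain the inwork channel;
	// each worker shares the progress bar and the failure directory.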
	for i := 0; i < 8; i++ {
		worker := &testWorker{
			byteProgress: byteProgress,
			failpath:     *failpath,
			inwork:       inwork,
			wg:           wg,
		}

		go worker.run()
	}

	for _, name := range flag.Args() {
		err := filepath.Walk(name, sv.visit)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to scan dir %s: %v\n", name, err)
			os.Exit(1)
		}
	}

	wg.Wait()
	close(inwork)

	if byteProgress != nil {
		byteProgress.Set(int(byteProgress.Total))
		byteProgress.Finish()
	}

	fmt.Fprintf(os.Stdout, "Done.\n")
}
Example #22
0
func install(l, version string) error {
	var currentStep lang.Step
	var bar *pb.ProgressBar
	var process *Process
	fmt.Printf("Installing %s@%s\n", l, version)
	err := service.Install(l, version, binaryFlag, func(step lang.Step, progress, total int64) {

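		// The callback fires on every progress update. When the step
		// changes, tear down the previous indicator first: a determinate
		// bar is used when a total is known, a spinner otherwise.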
		if currentStep != step {
			if bar != nil {
				bar.NotPrint = true
				bar.Finish()
				fmt.Print(ascii2.EraseLine)
				bar = nil
			}

			if process != nil {
				process.Done("")
				process = nil
			}

			if total > 0 {
				bar = pb.New64(total).Prefix("  " + stepToMsg(step) + "\t\t")
				bar.SetWidth(40)
				bar.ShowCounters = false
				//fmt.Printf("%s\n", step)
				//bar.NotPrint = true
				bar.Start()
				currentStep = step

			} else {
				process := &Process{Msg: stepToMsg(step) + "\t\t"}
				process.Start()
			}

		}
		if bar != nil {
			bar.Set64(progress)
		}

	})

	if bar != nil {
		bar.NotPrint = true
		bar.Finish()

		fmt.Printf("%s%s  %s installed", ascii2.EraseLines(2), ascii2.EraseLine, l)
	}

	if process != nil {
		process.Done("\n")
	}
	//fmt.Printf(ascii2.EraseLine + ascii2.CursorUp(1) + ascii2.EraseLine)

	if err != nil {
		fmt.Printf("Could not install %s@%s: \n  %s\n", l, version, err.Error())
	} else {
		fmt.Printf("  %s@%s installed!\n\n", l, version)
	}

	return err
}
Example #23
0
func runImport(args *docopt.Args, client controller.Client) error {
	var src io.Reader = os.Stdin
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf("error opening export file: %s", err)
		}
		defer f.Close()
		src = f
	}
	tr := tar.NewReader(src)

	var (
		app           *ct.App
		release       *ct.Release
		imageArtifact *ct.Artifact
		formation     *ct.Formation
		routes        []router.Route
		slug          io.Reader
		dockerImage   struct {
			config struct {
				Tag string `json:"tag"`
			}
			archive io.Reader
		}
		pgDump     io.Reader
		mysqlDump  io.Reader
		uploadSize int64
	)
	numResources := 0
	numRoutes := 1

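	// Single pass over the tar stream: JSON entries are decoded in place,
	// while large payloads (slug, docker image, database dumps) are
	// spooled to temp files so they can be re-read later; their sizes
	// accumulate into the progress bar's total.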
	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("error reading export tar: %s", err)
		}

		switch path.Base(header.Name) {
		case "app.json":
			app = &ct.App{}
			if err := json.NewDecoder(tr).Decode(app); err != nil {
				return fmt.Errorf("error decoding app: %s", err)
			}
			app.ID = ""
		case "release.json":
			release = &ct.Release{}
			if err := json.NewDecoder(tr).Decode(release); err != nil {
				return fmt.Errorf("error decoding release: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = nil
		case "artifact.json":
			imageArtifact = &ct.Artifact{}
			if err := json.NewDecoder(tr).Decode(imageArtifact); err != nil {
				return fmt.Errorf("error decoding image artifact: %s", err)
			}
			imageArtifact.ID = ""
		case "formation.json":
			formation = &ct.Formation{}
			if err := json.NewDecoder(tr).Decode(formation); err != nil {
				return fmt.Errorf("error decoding formation: %s", err)
			}
			formation.AppID = ""
			formation.ReleaseID = ""
		case "routes.json":
			if err := json.NewDecoder(tr).Decode(&routes); err != nil {
				return fmt.Errorf("error decoding routes: %s", err)
			}
			// Clear by index; ranging over the values would only mutate
			// copies and the changes would be lost.
			for i := range routes {
				routes[i].ID = ""
				routes[i].ParentRef = ""
			}
		case "slug.tar.gz":
			f, err := ioutil.TempFile("", "slug.tar.gz")
			if err != nil {
				return fmt.Errorf("error creating slug tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading slug: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking slug tempfile: %s", err)
			}
			slug = f
			uploadSize += header.Size
		case "docker-image.json":
			if err := json.NewDecoder(tr).Decode(&dockerImage.config); err != nil {
				return fmt.Errorf("error decoding docker image json: %s", err)
			}
		case "docker-image.tar":
			f, err := ioutil.TempFile("", "docker-image.tar")
			if err != nil {
				return fmt.Errorf("error creating docker image tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading docker image: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking docker image tempfile: %s", err)
			}
			dockerImage.archive = f
			uploadSize += header.Size
		case "postgres.dump":
			f, err := ioutil.TempFile("", "postgres.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			pgDump = f
			uploadSize += header.Size
		case "mysql.dump":
			f, err := ioutil.TempFile("", "mysql.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			mysqlDump = f
			uploadSize += header.Size
		}
	}

	if app == nil {
		return fmt.Errorf("missing app.json")
	}
	oldName := app.Name
	if name := args.String["--name"]; name != "" {
		app.Name = name
	}
	if err := client.CreateApp(app); err != nil {
		return fmt.Errorf("error creating app: %s", err)
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.Total = uploadSize
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if pgDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "postgres",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning postgres resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getPgRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting postgres config: %s", err)
		}
		config.Stdin = pgDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := pgRestore(client, config); err != nil {
			return fmt.Errorf("error restoring postgres database: %s", err)
		}
	}

	if mysqlDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "mysql",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning mysql resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getMysqlRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting mysql config: %s", err)
		}
		config.Stdin = mysqlDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := mysqlRestore(client, config); err != nil {
			return fmt.Errorf("error restoring mysql database: %s", err)
		}
	}

	if release != nil && release.Env["FLYNN_REDIS"] != "" {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "redis",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning redis resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}
	}

	uploadSlug := release != nil && imageArtifact != nil && slug != nil

	if uploadSlug {
		// Use current slugrunner as the artifact
		gitreceiveRelease, err := client.GetAppRelease("gitreceive")
		if err != nil {
			return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
		}
		if id, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]; ok {
			imageArtifact, err = client.GetArtifact(id)
			if err != nil {
				return fmt.Errorf("unable to get slugrunner image artifact: %s", err)
			}
		} else if uri, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_URI"]; ok {
			imageArtifact = &ct.Artifact{
				Type: host.ArtifactTypeDocker,
				URI:  uri,
			}
		} else {
			return fmt.Errorf("gitreceive env missing slug runner image")
		}
	}

	if dockerImage.config.Tag != "" && dockerImage.archive != nil {
		// load the docker image into the Docker daemon
		cmd := exec.Command("docker", "load")
		cmd.Stdin = dockerImage.archive
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("error running docker load: %s: %q", err, out)
		}

		// use the tag from the config (which will now be applied to
		// the loaded image) to push the image to docker-receive
		cluster, err := getCluster()
		if err != nil {
			return err
		}
		host, err := cluster.DockerPushHost()
		if err != nil {
			return err
		}
		tag := fmt.Sprintf("%s/%s:latest", host, app.Name)
		if out, err := exec.Command("docker", "tag", "--force", dockerImage.config.Tag, tag).CombinedOutput(); err != nil {
			return fmt.Errorf("error tagging docker image: %s: %q", err, out)
		}

		artifact, err := dockerPush(client, app.Name, tag)
		if err != nil {
			return fmt.Errorf("error pushing docker image: %s", err)
		}

		release.ArtifactIDs = []string{artifact.ID}
	} else if imageArtifact != nil {
		if imageArtifact.ID == "" {
			if err := client.CreateArtifact(imageArtifact); err != nil {
				return fmt.Errorf("error creating image artifact: %s", err)
			}
		}
		release.ArtifactIDs = []string{imageArtifact.ID}
	}

	if release != nil {
		for t, proc := range release.Processes {
			for i, port := range proc.Ports {
				if port.Service != nil && strings.HasPrefix(port.Service.Name, oldName) {
					proc.Ports[i].Service.Name = strings.Replace(port.Service.Name, oldName, app.Name, 1)
				}
			}
			release.Processes[t] = proc
		}
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

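	// Upload the slug by running a one-off job in the app that PUTs its
	// stdin to the blobstore, then register the slug as a file artifact
	// and cut a second release that references it.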
	if uploadSlug {
		slugURI := fmt.Sprintf("http://blobstore.discoverd/%s/slug.tgz", random.UUID())
		config := runConfig{
			App:        app.ID,
			Release:    release.ID,
			DisableLog: true,
			Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", slugURI},
			Stdin:      slug,
			Stdout:     ioutil.Discard,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		if err := runJob(client, config); err != nil {
			return fmt.Errorf("error uploading slug: %s", err)
		}
		slugArtifact := &ct.Artifact{
			Type: host.ArtifactTypeFile,
			URI:  slugURI,
		}
		if err := client.CreateArtifact(slugArtifact); err != nil {
			return fmt.Errorf("error creating slug artifact: %s", err)
		}
		release.ID = ""
		release.ArtifactIDs = append(release.ArtifactIDs, slugArtifact.ID)
		if release.Meta == nil {
			release.Meta = make(map[string]string, 1)
		}
		release.Meta["git"] = "true"
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if formation != nil && release != nil {
		formation.ReleaseID = release.ID
		formation.AppID = app.ID
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("error creating formation: %s", err)
		}
	}

	if args.Bool["--routes"] {
		for _, route := range routes {
			if err := client.CreateRoute(app.ID, &route); err != nil {
				if e, ok := err.(hh.JSONError); ok && e.Code == hh.ConflictErrorCode {
					// If the cluster domain matches then the default route
					// exported will conflict with the one created automatically.
					continue
				}
				return fmt.Errorf("error creating route: %s", err)
			}
			numRoutes++
		}
	}

	fmt.Printf("Imported %s (added %d routes, provisioned %d resources)\n", app.Name, numRoutes, numResources)

	return nil
}
Example #24
0
func main() {
	clientID := flag.String("id", "", "Github client ID")
	clientSecret := flag.String("secret", "", "Github client secret")
	output := flag.String("output", "gddoexp.out", "Output file")
	progress := flag.Bool("progress", false, "Show a progress bar")
	flag.Parse()

	var auth *gddoexp.GithubAuth
	if *clientID != "" || *clientSecret != "" {
		if *clientID == "" || *clientSecret == "" {
			fmt.Println("to enable Gthub authentication, you need to inform the id and secret")
			flag.PrintDefaults()
			return
		}

		auth = &gddoexp.GithubAuth{
			ID:     *clientID,
			Secret: *clientSecret,
		}
	}

	// add cache to avoid repeated requests to Github
	gddoexp.HTTPClient = &http.Client{
		Transport: httpcache.NewTransport(
			diskcache.New(path.Join(os.Getenv("HOME"), ".gddoexp")),
		),
	}

	db, err := database.New()
	if err != nil {
		fmt.Println("error connecting to database:", err)
		return
	}

	pkgs, err := db.AllPackages()
	if err != nil {
		fmt.Println("error retrieving all packages:", err)
		return
	}

	file, err := os.OpenFile(*output, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		fmt.Println("error creating output file:", err)
		return
	}
	defer file.Close()

	log.SetOutput(file)
	log.Println("BEGIN")
	log.Printf("%d packages will be analyzed", len(pkgs))

	var progressBar *pb.ProgressBar
	if progress != nil && *progress {
		progressBar = pb.StartNew(len(pkgs))
	}

	var cache int

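	// ShouldSuppressPackages streams one response per package over a
	// channel; Cache marks responses answered from the local HTTP cache.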
	for response := range gddoexp.ShouldSuppressPackages(pkgs, db, auth) {
		if progress != nil && *progress {
			progressBar.Increment()
		}

		if response.Cache {
			cache++
		}

		if response.Error != nil {
			log.Println(response.Error)
		} else if response.Suppress {
			log.Printf("package “%s” should be suppressed\n", response.Package.Path)
			if progress != nil && !*progress {
				fmt.Println(response.Package.Path)
			}
		}
	}

	if progress != nil && *progress {
		progressBar.Finish()
	}

	log.Println("Cache hits:", cache)
	log.Println("END")
}