Example #1
// mainDiff is the main entry point for the 'diff' command.
func mainDiff(ctx *cli.Context) {
	checkDiffSyntax(ctx)

	// Additional command-specific theme customization.
	console.SetColor("DiffMessage", color.New(color.FgGreen, color.Bold))
	console.SetColor("DiffOnlyInFirst", color.New(color.FgRed, color.Bold))
	console.SetColor("DiffType", color.New(color.FgYellow, color.Bold))
	console.SetColor("DiffSize", color.New(color.FgMagenta, color.Bold))

	config := mustGetMcConfig()
	firstArg := ctx.Args().First()
	secondArg := ctx.Args().Last()

	firstURL := getAliasURL(firstArg, config.Aliases)
	secondURL := getAliasURL(secondArg, config.Aliases)

	newFirstURL := stripRecursiveURL(firstURL)
	for diff := range doDiffMain(newFirstURL, secondURL, isURLRecursive(firstURL)) {
		if diff.Error != nil {
			// Print on a new line so that we don't print over the ongoing scan bar.
			if !globalQuietFlag && !globalJSONFlag {
				console.Eraseline()
			}
		}
		fatalIf(diff.Error.Trace(newFirstURL, secondURL), "Failed to diff ‘"+firstURL+"’ and ‘"+secondURL+"’.")
		printMsg(diff)
	}
	// Print on a new line so that we don't print over the ongoing scan bar.
	if !globalQuietFlag && !globalJSONFlag {
		console.Eraseline()
	}
	console.Println(console.Colorize("DiffMessage", "Done."))
}
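Every Eraseline() call in these examples is wrapped in the same quiet/JSON guard. A minimal sketch of a helper that factors the pattern out, assuming the package-level globalQuietFlag, globalJSONFlag and console from the examples (the helper name is hypothetical):

// eraseScanBar erases the in-progress scan/progress bar, but only in
// interactive mode, where one is actually being drawn.
func eraseScanBar() {
	if !globalQuietFlag && !globalJSONFlag {
		console.Eraseline()
	}
}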
Example #2
// doPrepareMirrorURLs scans the source URL and prepares a list of objects for mirroring.
func doPrepareMirrorURLs(session *sessionV2, trapCh <-chan bool) {
	sourceURL := session.Header.CommandArgs[0] // first one is source.
	targetURLs := session.Header.CommandArgs[1:]
	var totalBytes int64
	var totalObjects int

	// Create a session data file to store the processed URLs.
	dataFP := session.NewDataWriter()

	var scanBar scanBarFunc
	if !globalQuietFlag && !globalJSONFlag { // set up progress bar
		scanBar = scanBarFactory()
	}

	URLsCh := prepareMirrorURLs(sourceURL, targetURLs)
	done := false
	for !done {
		select {
		case sURLs, ok := <-URLsCh:
			if !ok { // Done with URL preparation.
				done = true
				break
			}
			if sURLs.Error != nil {
				// Print on a new line so that we don't print over the ongoing scan bar.
				if !globalQuietFlag && !globalJSONFlag {
					console.Eraseline()
				}
				errorIf(sURLs.Error.Trace(), "Unable to prepare URLs for mirroring.")
				break
			}
			if sURLs.isEmpty() {
				break
			}
			jsonData, err := json.Marshal(sURLs)
			if err != nil {
				session.Delete()
				fatalIf(probe.NewError(err), "Unable to marshal URLs into JSON.")
			}
			fmt.Fprintln(dataFP, string(jsonData))
			if !globalQuietFlag && !globalJSONFlag {
				scanBar(sURLs.SourceContent.Name)
			}

			totalBytes += sURLs.SourceContent.Size
			totalObjects++
		case <-trapCh:
			// Print on a new line so that we don't print over the ongoing scan bar.
			if !globalQuietFlag && !globalJSONFlag {
				console.Eraseline()
			}
			session.Delete() // If we are interrupted during the URL scanning, we drop the session.
			os.Exit(0)
		}
	}
	session.Header.TotalBytes = totalBytes
	session.Header.TotalObjects = totalObjects
	session.Save()
}
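A break inside a select case only leaves the select, which is why the loop above needs a done flag. A sketch of the same loop shape using a labeled break instead; the function name is hypothetical and the channel types are assumed from the example:

func drainURLs(URLsCh <-chan mirrorURLs, trapCh <-chan bool) {
loop:
	for {
		select {
		case sURLs, ok := <-URLsCh:
			if !ok {
				break loop // leaves the for loop, not just the select
			}
			_ = sURLs // process the prepared URLs as in the example
		case <-trapCh:
			return
		}
	}
}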
Example #3
func (ps *ProgressStatus) fatalIf(err *probe.Error, msg string) {
	// Remove the progress bar before printing the fatal message.
	console.Eraseline()
	fatalIf(err, msg)

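	// fatalIf does not return on a non-nil error, so the bar below is
	// only redrawn when err is nil.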
	ps.progressBar.Update()
}
Example #4
func mainRm(ctx *cli.Context) {
	checkRmSyntax(ctx)
	var incomplete bool
	var force bool

	setRmPalette(ctx.GlobalString("colors"))

	args := ctx.Args()
	if len(args) != 1 {
		if len(args) == 2 && ((args.Get(0) == "force" && args.Get(1) == "incomplete") ||
			(args.Get(0) == "incomplete" && args.Get(1) == "force")) {
			// Keep both words: together they are treated as object URLs,
			// not as keywords.
			args = args[:]
		} else {
			if args.Last() == "force" {
				force = true
				args = args[:len(args)-1]
			}
			if args.Last() == "incomplete" {
				incomplete = true
				args = args[:len(args)-1]
			}
		}
	}

	URLs, err := args2URLs(args)
	fatalIf(err.Trace(ctx.Args()...), "Unable to parse arguments.")

	rmPrint := rmPrinterFuncGenerate()

	// execute for incomplete
	if incomplete {
		for _, url := range URLs {
			if isURLRecursive(url) && force {
				rmAllIncompleteUploads(stripRecursiveURL(url), rmPrint)
			} else {
				rmIncompleteUpload(url, rmPrint)
			}
		}
		return
	}
	for _, url := range URLs {
		if isURLRecursive(url) && force {
			rmAll(stripRecursiveURL(url), rmPrint)
		} else {
			rmSingle(url, rmPrint)
		}
	}
	if !globalJSONFlag && !globalQuietFlag {
		console.Eraseline()
	}
}
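A self-contained sketch of the trailing-keyword parsing above: "force" and "incomplete" are peeled off the end of the argument list, except when they are the only two arguments, in which case both words are kept and treated as object URLs. The function name is hypothetical and this is illustrative, not the mc implementation:

package main

import "fmt"

func parseRmArgs(args []string) (urls []string, force, incomplete bool) {
	if len(args) == 2 && ((args[0] == "force" && args[1] == "incomplete") ||
		(args[0] == "incomplete" && args[1] == "force")) {
		// Both words together are treated as object URLs, not keywords.
		return args, false, false
	}
	if len(args) > 1 && args[len(args)-1] == "force" {
		force = true
		args = args[:len(args)-1]
	}
	if len(args) > 1 && args[len(args)-1] == "incomplete" {
		incomplete = true
		args = args[:len(args)-1]
	}
	return args, force, incomplete
}

func main() {
	fmt.Println(parseRmArgs([]string{"s3/bucket...", "incomplete", "force"}))
	// Prints: [s3/bucket...] true true
}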
Example #5
func doCopySession(session *sessionV2) {
	trapCh := signalTrap(os.Interrupt, os.Kill)

	if !session.HasData() {
		doPrepareCopyURLs(session, trapCh)
	}

	var progressReader interface{}
	if !globalQuietFlag && !globalJSONFlag { // set up progress bar
		progressReader = newProgressBar(session.Header.TotalBytes)
	} else {
		progressReader = newAccounter(session.Header.TotalBytes)
	}

	// Prepare URL scanner from session data file.
	scanner := bufio.NewScanner(session.NewDataReader())
	// isCopied reports whether an object has already been copied; useful
	// when we resume from a previous session.
	isCopied := isCopiedFactory(session.Header.LastCopied)

	wg := new(sync.WaitGroup)
	// Limit number of copy routines based on available CPU resources.
	cpQueue := make(chan bool, int(math.Max(float64(runtime.NumCPU())-1, 1)))
	defer close(cpQueue)

	// Status channel for receiving copy return status.
	statusCh := make(chan copyURLs)

	// Go routine to monitor doCopy status and signal traps.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case cpURLs, ok := <-statusCh: // Receive status.
				if !ok { // We are done here. Top level function has returned.
					if !globalQuietFlag && !globalJSONFlag {
						progressReader.(*barSend).Finish()
					} else {
						console.Println(console.Colorize("Copy", progressReader.(*accounter).Finish()))
					}
					return
				}
				if cpURLs.Error == nil {
					session.Header.LastCopied = cpURLs.SourceContent.Name
					session.Save()
				} else {
					// Print on a new line so that we don't print over the ongoing progress bar.
					if !globalQuietFlag && !globalJSONFlag {
						console.Eraseline()
					}
					errorIf(cpURLs.Error.Trace(), fmt.Sprintf("Failed to copy ‘%s’.", cpURLs.SourceContent.Name))
					// The cases where the session should be saved are
					// contained in the following type switch; we shouldn't
					// save the session for every error, since some errors
					// need to be reported to the user instead.
					//
					// Network errors are handled gracefully by saving the
					// session so that the copy can be resumed later.
					switch cpURLs.Error.ToGoError().(type) {
					case *net.OpError:
						gracefulSessionSave(session)
					case net.Error:
						gracefulSessionSave(session)
					}
				}
			case <-trapCh: // Receive interrupt notification.
				if !globalQuietFlag && !globalJSONFlag {
					console.Eraseline()
				}
				gracefulSessionSave(session)
			}
		}
	}()

	// Go routine to perform copying concurrently.
	wg.Add(1)
	go func() {
		defer wg.Done()
		copyWg := new(sync.WaitGroup)
		defer close(statusCh)

		for scanner.Scan() {
			var cpURLs copyURLs
			json.Unmarshal([]byte(scanner.Text()), &cpURLs)
			if isCopied(cpURLs.SourceContent.Name) {
				doCopyFake(cpURLs, progressReader)
			} else {
				// Wait for other copy routines to
				// complete. We only have limited CPU
				// and network resources.
				cpQueue <- true
				// Account for each copy routine we start.
				copyWg.Add(1)
				// Do copying in background concurrently.
				go doCopy(cpURLs, progressReader, cpQueue, copyWg, statusCh)
			}
		}
		copyWg.Wait()
	}()
	wg.Wait()
}
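The cpQueue channel above is a buffered-channel semaphore: its capacity bounds the number of concurrent copy goroutines to roughly NumCPU-1. A self-contained sketch of the same pattern in isolation:

package main

import (
	"fmt"
	"math"
	"runtime"
	"sync"
)

func main() {
	// One slot per allowed worker; at least one even on a single-CPU box.
	sem := make(chan struct{}, int(math.Max(float64(runtime.NumCPU())-1, 1)))
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		sem <- struct{}{} // acquire a slot; blocks while all slots are busy
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot when done
			fmt.Println("copying item", i)
		}(i)
	}
	wg.Wait()
}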
Example #6
// doPrepareCopyURLs scans the source URL and prepares a list of objects for copying.
func doPrepareCopyURLs(session *sessionV2, trapCh <-chan bool) {
	// Separate source and target. 'cp' can take only one target,
	// but any number of sources, even recursive URLs mixed in between.
	sourceURLs := session.Header.CommandArgs[:len(session.Header.CommandArgs)-1]
	targetURL := session.Header.CommandArgs[len(session.Header.CommandArgs)-1] // Last one is target

	var totalBytes int64
	var totalObjects int

	// Create a session data file to store the processed URLs.
	dataFP := session.NewDataWriter()

	var scanBar scanBarFunc
	if !globalQuietFlag && !globalJSONFlag { // set up progress bar
		scanBar = scanBarFactory()
	}

	URLsCh := prepareCopyURLs(sourceURLs, targetURL)
	done := false

	for !done {
		select {
		case cpURLs, ok := <-URLsCh:
			if !ok { // Done with URL preparation.
				done = true
				break
			}
			if cpURLs.Error != nil {
				// Print on a new line so that we don't print over the ongoing scan bar.
				if !globalQuietFlag && !globalJSONFlag {
					console.Eraseline()
				}
				errorIf(cpURLs.Error.Trace(), "Unable to prepare URL for copying.")
				break
			}

			jsonData, err := json.Marshal(cpURLs)
			if err != nil {
				session.Delete()
				fatalIf(probe.NewError(err), "Unable to prepare URL for copying. Error in JSON marshaling.")
			}
			fmt.Fprintln(dataFP, string(jsonData))
			if !globalQuietFlag && !globalJSONFlag {
				scanBar(cpURLs.SourceContent.Name)
			}

			totalBytes += cpURLs.SourceContent.Size
			totalObjects++
		case <-trapCh:
			// Print on a new line so that we don't print over the ongoing scan bar.
			if !globalQuietFlag && !globalJSONFlag {
				console.Eraseline()
			}
			session.Delete() // If we are interrupted during the URL scanning, we drop the session.
			os.Exit(0)
		}
	}
	session.Header.TotalBytes = totalBytes
	session.Header.TotalObjects = totalObjects
	session.Save()
}
Example #7
// Println prints a line; ignored for quietstatus.
func (ps *ProgressStatus) Println(data ...interface{}) {
	console.Eraseline()
	console.Println(data...)
}
Example #8
// Implements a jitter backoff loop for formatting all disks during
// initialization of the server.
func retryFormattingDisks(firstDisk bool, endpoints []*url.URL, storageDisks []StorageAPI) error {
	if len(endpoints) == 0 {
		return errInvalidArgument
	}
	if storageDisks == nil {
		return errInvalidArgument
	}

	// Create a done channel to control the retry timer go routine.
	doneCh := make(chan struct{}, 1)

	// Indicate to our routine to exit cleanly upon return.
	defer close(doneCh)

	// Prepare getElapsedTime() to calculate the time elapsed since we
	// started formatting disks. All times are rounded to the second to
	// avoid showing milli-, micro- and nanoseconds.
	formatStartTime := time.Now().Round(time.Second)
	getElapsedTime := func() string {
		return time.Now().Round(time.Second).Sub(formatStartTime).String()
	}

	// Wait on the jitter retry loop.
	retryTimerCh := newRetryTimer(time.Second, time.Second*30, MaxJitter, doneCh)
	for {
		select {
		case retryCount := <-retryTimerCh:
			// Attempt to load all `format.json` from all disks.
			formatConfigs, sErrs := loadAllFormats(storageDisks)
			if retryCount > 5 {
				// After 5 retry attempts we start printing actual errors
				// for disks that are unavailable.
				printRetryMsg(sErrs, storageDisks)
			}
			if len(formatConfigs) == 1 {
				err := genericFormatCheckFS(formatConfigs[0], sErrs[0])
				if err != nil {
					// For a new directory or existing data.
					if err == errUnformattedDisk || err == errCorruptedFormat {
						return initFormatFS(storageDisks[0])
					}
					return err
				}
				return nil
			}
			// Check if this is an XL or distributed XL setup; anything > 1
			// disk is considered an XL backend.
			// Pre-emptively check if one of the formatted disks
			// is invalid. This function returns success for the
			// most part unless one of the formats is not consistent
			// with the expected XL format. For example, if a user is
			// trying to pool an FS backend.
			if err := checkFormatXLValues(formatConfigs); err != nil {
				return err
			}
			switch prepForInitXL(firstDisk, sErrs, len(storageDisks)) {
			case Abort:
				return errCorruptedFormat
			case FormatDisks:
				console.Eraseline()
				printFormatMsg(endpoints, storageDisks, printOnceFn())
				return initFormatXL(storageDisks)
			case InitObjectLayer:
				console.Eraseline()
				// Validate the loaded formats before proceeding.
				err := genericFormatCheckXL(formatConfigs, sErrs)
				if err == nil {
					printRegularMsg(endpoints, storageDisks, printOnceFn())
				}
				return err
			case WaitForHeal:
				// Validate the loaded formats before proceeding.
				err := genericFormatCheckXL(formatConfigs, sErrs)
				if err == nil {
					printHealMsg(endpoints, storageDisks, printOnceFn())
				}
				return err
			case WaitForQuorum:
				console.Printf(
					"Initializing data volume. Waiting for minimum %d servers to come online. (elapsed %s)\n",
					len(storageDisks)/2+1, getElapsedTime(),
				)
			case WaitForConfig:
				// Print configuration errors.
				printConfigErrMsg(storageDisks, sErrs, printOnceFn())
			case WaitForAll:
				console.Printf("Initializing data volume for first time. Waiting for other servers to come online (elapsed %s)\n", getElapsedTime())
			case WaitForFormatting:
				console.Printf("Initializing data volume for first time. Waiting for first server to come online (elapsed %s)\n", getElapsedTime())
			}
		case <-globalServiceDoneCh:
			return errors.New("Initializing data volumes gracefully stopped")
		}
	}
}
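newRetryTimer above delivers an increasing retry count over a channel, backing off between deliveries. A sketch of what such a timer could look like, with capped exponential backoff plus jitter; this is an illustrative stand-in (using only the time and math/rand packages), not the actual minio implementation:

func retryTimer(unit, maxWait time.Duration, doneCh <-chan struct{}) <-chan int {
	attemptCh := make(chan int)
	go func() {
		defer close(attemptCh)
		for i := 0; ; i++ {
			select {
			case attemptCh <- i: // deliver the current retry count
			case <-doneCh:
				return
			}
			backoff := maxWait
			if i < 16 { // avoid shift overflow; the cap kicks in quickly
				if d := unit * time.Duration(1<<uint(i)); d < maxWait {
					backoff = d
				}
			}
			backoff += time.Duration(rand.Int63n(int64(unit))) // jitter
			time.Sleep(backoff)
		}
	}()
	return attemptCh
}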
Example #9
func deltaSourceTargets(sourceClnt client.Client, targetClnts []client.Client) <-chan mirrorURLs {
	mirrorURLsCh := make(chan mirrorURLs)

	go func() {
		defer close(mirrorURLsCh)
		id := newRandomID(8)

		doneCh := make(chan bool)
		defer close(doneCh)
		go func(doneCh <-chan bool) {
			cursorCh := cursorAnimate()
			// Use a single ticker; calling time.Tick inside the loop would
			// allocate a new ticker on every iteration and leak it.
			ticker := time.NewTicker(100 * time.Millisecond)
			defer ticker.Stop()
			for {
				select {
				case <-ticker.C:
					if !globalQuietFlag && !globalJSONFlag {
						console.PrintC("\r" + "Scanning.. " + string(<-cursorCh))
					}
				case <-doneCh:
					return
				}
			}
		}(doneCh)

		sourceSortedList := sortedList{}
		targetSortedList := make([]*sortedList, len(targetClnts))

		surldelimited := sourceClnt.URL().String()
		err := sourceSortedList.Create(sourceClnt, id+".src")
		if err != nil {
			mirrorURLsCh <- mirrorURLs{
				Error: err.Trace(),
			}
			return
		}

		turldelimited := make([]string, len(targetClnts))
		for i := range targetClnts {
			turldelimited[i] = targetClnts[i].URL().String()
			targetSortedList[i] = &sortedList{}
			err := targetSortedList[i].Create(targetClnts[i], id+"."+strconv.Itoa(i))
			if err != nil {
				// FIXME: do cleanup by calling Delete()
				mirrorURLsCh <- mirrorURLs{
					Error: err.Trace(),
				}
				return
			}
		}
		for source := range sourceSortedList.List(true) {
			if source.Content.Type.IsDir() {
				continue
			}
			targetContents := make([]*client.Content, 0, len(targetClnts))
			for i, t := range targetSortedList {
				match, err := t.Match(source.Content)
				if err != nil || match {
				// Continue on io.EOF or if the keys match.
					// FIXME: handle other errors and ignore this target for future calls
					continue
				}
				targetContents = append(targetContents, &client.Content{Name: turldelimited[i] + source.Content.Name})
			}
			source.Content.Name = surldelimited + source.Content.Name
			if len(targetContents) > 0 {
				mirrorURLsCh <- mirrorURLs{
					SourceContent:  source.Content,
					TargetContents: targetContents,
				}
			}
		}
		if err := sourceSortedList.Delete(); err != nil {
			mirrorURLsCh <- mirrorURLs{
				Error: err.Trace(),
			}
		}
		for _, t := range targetSortedList {
			if err := t.Delete(); err != nil {
				mirrorURLsCh <- mirrorURLs{
					Error: err.Trace(),
				}
			}
		}
		doneCh <- true
		if !globalQuietFlag && !globalJSONFlag {
			console.Eraseline()
		}
	}()
	return mirrorURLsCh
}
Example #10
func doCopySession(session *sessionV6) {
	trapCh := signalTrap(os.Interrupt, syscall.SIGTERM)

	if !session.HasData() {
		doPrepareCopyURLs(session, trapCh)
	}

	// Enable accounting reader by default.
	accntReader := newAccounter(session.Header.TotalBytes)

	// Enable progress bar reader only during default mode.
	var progressReader *barSend
	if !globalQuiet && !globalJSON { // set up progress bar
		progressReader = newProgressBar(session.Header.TotalBytes)
	}

	// Prepare URL scanner from session data file.
	scanner := bufio.NewScanner(session.NewDataReader())
	// isCopied reports whether an object has already been copied; useful
	// when we resume from a previous session.
	isCopied := isCopiedFactory(session.Header.LastCopied)

	wg := new(sync.WaitGroup)
	// Limit number of copy routines based on available CPU resources.
	cpQueue := make(chan bool, int(math.Max(float64(runtime.NumCPU())-1, 1)))
	defer close(cpQueue)

	// Status channel for receiving copy return status.
	statusCh := make(chan copyURLs)

	// Go routine to monitor doCopy status and signal traps.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case cpURLs, ok := <-statusCh: // Receive status.
				if !ok { // We are done here. Top level function has returned.
					if !globalQuiet && !globalJSON {
						progressReader.Finish()
					}
					if globalQuiet {
						accntStat := accntReader.Stat()
						cpStatMessage := copyStatMessage{
							Total:       accntStat.Total,
							Transferred: accntStat.Transferred,
							Speed:       accntStat.Speed,
						}
						console.Println(console.Colorize("Copy", cpStatMessage.String()))
					}
					return
				}
				if cpURLs.Error == nil {
					session.Header.LastCopied = cpURLs.SourceContent.URL.String()
					session.Save()
				} else {
					// Print on a new line so that we don't print over the ongoing progress bar.
					if !globalQuiet && !globalJSON {
						console.Eraseline()
					}
					errorIf(cpURLs.Error.Trace(cpURLs.SourceContent.URL.String()),
						fmt.Sprintf("Failed to copy ‘%s’.", cpURLs.SourceContent.URL.String()))
					// For all non-critical errors we can continue with the remaining files.
					switch cpURLs.Error.ToGoError().(type) {
					// Handle these specifically for filesystem-related errors.
					case client.BrokenSymlink:
						continue
					case client.TooManyLevelsSymlink:
						continue
					case client.PathNotFound:
						continue
					case client.PathInsufficientPermission:
						continue
					}
					// For critical errors we should exit. The session can be resumed after the user figures out the problem.
					session.CloseAndDie()
				}
			case <-trapCh: // Receive interrupt notification.
				if !globalQuiet && !globalJSON {
					console.Eraseline()
				}
				session.CloseAndDie()
			}
		}
	}()

	// Go routine to perform copying concurrently.
	wg.Add(1)
	go func() {
		defer wg.Done()
		copyWg := new(sync.WaitGroup)
		defer close(statusCh)

		for scanner.Scan() {
			var cpURLs copyURLs
			json.Unmarshal([]byte(scanner.Text()), &cpURLs)
			if isCopied(cpURLs.SourceContent.URL.String()) {
				doCopyFake(cpURLs, progressReader)
			} else {
				// Wait for other copy routines to
				// complete. We only have limited CPU
				// and network resources.
				cpQueue <- true
				// Account for each copy routine we start.
				copyWg.Add(1)
				// Do copying in background concurrently.
				go doCopy(cpURLs, progressReader, accntReader, cpQueue, copyWg, statusCh)
			}
		}
		copyWg.Wait()
	}()
	wg.Wait()
}
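The error-type switch above is what Example #14 later collapses into an isErrIgnored helper. A sketch of such a predicate, assuming the client error types from this example (the helper name is hypothetical):

// isIgnorable reports whether a copy error is non-critical, so the copy
// loop may continue with the remaining files.
func isIgnorable(err *probe.Error) bool {
	switch err.ToGoError().(type) {
	case client.BrokenSymlink, client.TooManyLevelsSymlink,
		client.PathNotFound, client.PathInsufficientPermission:
		return true
	}
	return false
}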
Example #11
// doPrepareCopyURLs scans the source URL and prepares a list of objects for copying.
func doPrepareCopyURLs(session *sessionV6, trapCh <-chan bool) {
	// Separate source and target. 'cp' can take only one target,
	// but any number of sources.
	sourceURLs := session.Header.CommandArgs[:len(session.Header.CommandArgs)-1]
	targetURL := session.Header.CommandArgs[len(session.Header.CommandArgs)-1] // Last one is target

	var totalBytes int64
	var totalObjects int

	// Access recursive flag inside the session header.
	isRecursive := session.Header.CommandBoolFlags["recursive"]

	// Create a session data file to store the processed URLs.
	dataFP := session.NewDataWriter()

	var scanBar scanBarFunc
	if !globalQuiet && !globalJSON { // set up progress bar
		scanBar = scanBarFactory()
	}

	URLsCh := prepareCopyURLs(sourceURLs, targetURL, isRecursive)
	done := false

	for !done {
		select {
		case cpURLs, ok := <-URLsCh:
			if !ok { // Done with URL preparation.
				done = true
				break
			}
			if cpURLs.Error != nil {
				// Print on a new line so that we don't print over the ongoing scan bar.
				if !globalQuiet && !globalJSON {
					console.Eraseline()
				}
				if strings.Contains(cpURLs.Error.ToGoError().Error(), " is a folder.") {
					errorIf(cpURLs.Error.Trace(), "Folder cannot be copied. Please use ‘...’ suffix.")
				} else {
					errorIf(cpURLs.Error.Trace(), "Unable to prepare URL for copying.")
				}
				break
			}

			jsonData, err := json.Marshal(cpURLs)
			if err != nil {
				session.Delete()
				fatalIf(probe.NewError(err), "Unable to prepare URL for copying. Error in JSON marshaling.")
			}
			fmt.Fprintln(dataFP, string(jsonData))
			if !globalQuiet && !globalJSON {
				scanBar(cpURLs.SourceContent.URL.String())
			}

			totalBytes += cpURLs.SourceContent.Size
			totalObjects++
		case <-trapCh:
			// Print on a new line so that we don't print over the ongoing scan bar.
			if !globalQuiet && !globalJSON {
				console.Eraseline()
			}
			session.Delete() // If we are interrupted during the URL scanning, we drop the session.
			os.Exit(0)
		}
	}
	session.Header.TotalBytes = totalBytes
	session.Header.TotalObjects = totalObjects
	session.Save()
}
Example #12
func dodiffRecursive(firstClnt, secondClnt client.Client, ch chan DiffMessage) {
	firstTrie := patricia.NewTrie()
	secondTrie := patricia.NewTrie()
	wg := new(sync.WaitGroup)

	type urlAttr struct {
		Size int64
		Type os.FileMode
	}

	wg.Add(1)
	go func(ch chan<- DiffMessage) {
		defer wg.Done()
		for firstContentCh := range firstClnt.List(true) {
			if firstContentCh.Err != nil {
				ch <- DiffMessage{
					Error: firstContentCh.Err.Trace(firstClnt.URL().String()),
				}
				return
			}
			firstTrie.Insert(patricia.Prefix(firstContentCh.Content.Name), urlAttr{firstContentCh.Content.Size, firstContentCh.Content.Type})
		}
	}(ch)
	wg.Add(1)
	go func(ch chan<- DiffMessage) {
		defer wg.Done()
		for secondContentCh := range secondClnt.List(true) {
			if secondContentCh.Err != nil {
				ch <- DiffMessage{
					Error: secondContentCh.Err.Trace(secondClnt.URL().String()),
				}
				return
			}
			secondTrie.Insert(patricia.Prefix(secondContentCh.Content.Name), urlAttr{secondContentCh.Content.Size, secondContentCh.Content.Type})
		}
	}(ch)

	doneCh := make(chan struct{})
	defer close(doneCh)
	go func(doneCh <-chan struct{}) {
		cursorCh := cursorAnimate()
		// Use a single ticker; calling time.Tick inside the loop would
		// allocate a new ticker on every iteration and leak it.
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				if !globalQuietFlag && !globalJSONFlag {
					console.PrintC("\r" + "Scanning.. " + string(<-cursorCh))
				}
			case <-doneCh:
				return
			}
		}
	}(doneCh)
	wg.Wait()
	doneCh <- struct{}{}
	if !globalQuietFlag && !globalJSONFlag {
		console.Eraseline()
	}

	matchNameCh := make(chan string, 10000)
	go func(matchNameCh chan<- string) {
		itemFunc := func(prefix patricia.Prefix, item patricia.Item) error {
			matchNameCh <- string(prefix)
			return nil
		}
		defer close(matchNameCh)
		firstTrie.Visit(itemFunc)
	}(matchNameCh)
	for matchName := range matchNameCh {
		firstURLDelimited := firstClnt.URL().String()[:strings.LastIndex(firstClnt.URL().String(), string(firstClnt.URL().Separator))+1]
		secondURLDelimited := secondClnt.URL().String()[:strings.LastIndex(secondClnt.URL().String(), string(secondClnt.URL().Separator))+1]
		firstURL := firstURLDelimited + matchName
		secondURL := secondURLDelimited + matchName
		if !secondTrie.Match(patricia.Prefix(matchName)) {
			ch <- DiffMessage{
				FirstURL:  firstURL,
				SecondURL: secondURL,
				Diff:      "only-in-first",
			}
		} else {
			firstURLAttr := firstTrie.Get(patricia.Prefix(matchName)).(urlAttr)
			secondURLAttr := secondTrie.Get(patricia.Prefix(matchName)).(urlAttr)

			if firstURLAttr.Type.IsRegular() {
				if !secondURLAttr.Type.IsRegular() {
					ch <- DiffMessage{
						FirstURL:  firstURL,
						SecondURL: secondURL,
						Diff:      "type",
					}
					continue
				}
			}

			if firstURLAttr.Type.IsDir() {
				if !secondURLAttr.Type.IsDir() {
					ch <- DiffMessage{
						FirstURL:  firstURL,
						SecondURL: secondURL,
						Diff:      "type",
					}
					continue
				}
			}

			if firstURLAttr.Size != secondURLAttr.Size {
				ch <- DiffMessage{
					FirstURL:  firstURL,
					SecondURL: secondURL,
					Diff:      "size",
				}
			}
		}
	}
}
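The diff above relies on four go-patricia calls: Insert, Match, Get and Visit. A small self-contained sketch of that API in isolation:

package main

import (
	"fmt"

	"github.com/tchap/go-patricia/patricia"
)

func main() {
	trie := patricia.NewTrie()
	trie.Insert(patricia.Prefix("a/object1"), int64(1024))
	trie.Insert(patricia.Prefix("a/object2"), int64(2048))

	// Match reports whether the exact key was inserted.
	fmt.Println(trie.Match(patricia.Prefix("a/object1"))) // true

	// Get returns the item stored under a key.
	fmt.Println(trie.Get(patricia.Prefix("a/object2")).(int64)) // 2048

	// Visit walks all inserted keys.
	trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
		fmt.Printf("%s -> %d bytes\n", prefix, item.(int64))
		return nil
	})
}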
Example #13
// Implements a jitter backoff loop for formatting all disks during
// initialization of the server.
func retryFormattingDisks(firstDisk bool, endpoints []*url.URL, storageDisks []StorageAPI) error {
	if len(endpoints) == 0 {
		return errInvalidArgument
	}
	if storageDisks == nil {
		return errInvalidArgument
	}

	// Create a done channel to control the retry timer go routine.
	doneCh := make(chan struct{}, 1)

	// Indicate to our routine to exit cleanly upon return.
	defer close(doneCh)

	// Wait on the jitter retry loop.
	retryTimerCh := newRetryTimer(time.Second, time.Second*30, MaxJitter, doneCh)
	for {
		select {
		case retryCount := <-retryTimerCh:
			// Attempt to load all `format.json` from all disks.
			formatConfigs, sErrs := loadAllFormats(storageDisks)
			if retryCount > 5 {
				// After 5 retry attempts we start printing actual errors
				// for disks that are unavailable.
				printRetryMsg(sErrs, storageDisks)
			}
			// Check if this is an XL or distributed XL; anything > 1 is considered an XL backend.
			if len(formatConfigs) > 1 {
				switch prepForInitXL(firstDisk, sErrs, len(storageDisks)) {
				case Abort:
					return errCorruptedFormat
				case FormatDisks:
					console.Eraseline()
					printFormatMsg(endpoints, storageDisks, printOnceFn())
					return initFormatXL(storageDisks)
				case InitObjectLayer:
					console.Eraseline()
					// Validate the loaded formats before proceeding.
					err := genericFormatCheck(formatConfigs, sErrs)
					if err == nil {
						printRegularMsg(endpoints, storageDisks, printOnceFn())
					}
					return err
				case WaitForHeal:
					// Validate the loaded formats before proceeding.
					err := genericFormatCheck(formatConfigs, sErrs)
					if err == nil {
						printHealMsg(endpoints, storageDisks, printOnceFn())
					}
					return err
				case WaitForQuorum:
					console.Printf(
						"Initializing data volume. Waiting for minimum %d servers to come online.\n",
						len(storageDisks)/2+1,
					)
				case WaitForConfig:
					// Print configuration errors.
					printConfigErrMsg(storageDisks, sErrs, printOnceFn())
				case WaitForAll:
					console.Println("Initializing data volume for first time. Waiting for other servers to come online")
				case WaitForFormatting:
					console.Println("Initializing data volume for first time. Waiting for first server to come online")
				}
				continue
			}
			// Otherwise we have an FS backend; check the FS format as well.
			if isFormatFound(formatConfigs) {
				console.Eraseline()
				// Validate the loaded formats before proceeding.
				return genericFormatCheck(formatConfigs, sErrs)
			}
			// Otherwise initialize the format for FS.
			return initFormatFS(storageDisks[0])
		case <-globalServiceDoneCh:
			return errors.New("Initializing data volumes gracefully stopped")
		}
	}
}
Example #14
func doCopySession(session *sessionV8) {
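	// Note: SIGKILL cannot be caught or ignored; listing it in a signal
	// trap has no effect on POSIX systems.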
	trapCh := signalTrap(os.Interrupt, syscall.SIGTERM, syscall.SIGKILL)

	if !session.HasData() {
		doPrepareCopyURLs(session, trapCh)
	}

	// Enable accounting reader by default.
	accntReader := newAccounter(session.Header.TotalBytes)

	// Prepare URL scanner from session data file.
	urlScanner := bufio.NewScanner(session.NewDataReader())
	// isCopied reports whether an object has already been copied; useful
	// when we resume from a previous session.
	isCopied := isLastFactory(session.Header.LastCopied)

	// Enable progress bar reader only during default mode.
	var progressReader *progressBar
	if !globalQuiet && !globalJSON { // set up progress bar
		progressReader = newProgressBar(session.Header.TotalBytes)
	}

	// Wait on status of doCopy() operation.
	var statusCh = make(chan URLs)

	// Add a wait group.
	var wg = new(sync.WaitGroup)
	wg.Add(1)

	// Go routine to monitor signal traps if any.
	go func() {
		defer wg.Done()
		for {
			select {
			case <-trapCh:
				// Receive interrupt notification.
				if !globalQuiet && !globalJSON {
					console.Eraseline()
				}
				session.CloseAndDie()
			case cpURLs, ok := <-statusCh:
				// Status channel is closed, we should return.
				if !ok {
					return
				}
				if cpURLs.Error == nil {
					session.Header.LastCopied = cpURLs.SourceContent.URL.String()
					session.Save()
				} else {
					// Print on a new line so that we don't
					// print over the ongoing progress bar.
					if !globalQuiet && !globalJSON {
						console.Eraseline()
					}
					errorIf(cpURLs.Error.Trace(cpURLs.SourceContent.URL.String()),
						fmt.Sprintf("Failed to copy ‘%s’.", cpURLs.SourceContent.URL.String()))
					if isErrIgnored(cpURLs.Error) {
						continue
					}
					// For critical errors we should exit. The session
					// can be resumed after the user figures out
					// the problem.
					session.CloseAndDie()
				}
			}
		}
	}()

	// Loop through all urls.
	for urlScanner.Scan() {
		var cpURLs URLs
		// Unmarshal copyURLs from each line.
		json.Unmarshal([]byte(urlScanner.Text()), &cpURLs)

		// Save total count.
		cpURLs.TotalCount = session.Header.TotalObjects

		// Save totalSize.
		cpURLs.TotalSize = session.Header.TotalBytes

		// Verify if previously copied, notify progress bar.
		if isCopied(cpURLs.SourceContent.URL.String()) {
			statusCh <- doCopyFake(cpURLs, progressReader)
		} else {
			statusCh <- doCopy(cpURLs, progressReader, accntReader)
		}
	}

	// Close the status channel so the monitor goroutine returns.
	close(statusCh)

	// Wait for the goroutines to finish.
	wg.Wait()

	if !globalQuiet && !globalJSON {
		if progressReader.ProgressBar.Get() > 0 {
			progressReader.ProgressBar.Finish()
		}
	} else if globalQuiet && !globalJSON {
		accntStat := accntReader.Stat()
		cpStatMessage := copyStatMessage{
			Total:       accntStat.Total,
			Transferred: accntStat.Transferred,
			Speed:       accntStat.Speed,
		}
		console.Println(console.Colorize("Copy", cpStatMessage.String()))
	}
}