Example 1
func (c *Context) readComposeFile() error {
	if c.ComposeBytes != nil {
		return nil
	}

	logrus.Debugf("Opening compose file: %s", c.ComposeFile)

	if c.ComposeFile == "-" {
		composeBytes, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			logrus.Errorf("Failed to read compose file from stdin: %v", err)
			return err
		}
		c.ComposeBytes = composeBytes
	} else if c.ComposeFile != "" {
		if composeBytes, err := ioutil.ReadFile(c.ComposeFile); os.IsNotExist(err) {
			if c.IgnoreMissingConfig {
				return nil
			}
			logrus.Errorf("Failed to find %s", c.ComposeFile)
			return err
		} else if err != nil {
			logrus.Errorf("Failed to open %s", c.ComposeFile)
			return err
		} else {
			c.ComposeBytes = composeBytes
		}
	}

	return nil
}
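The function above reads the compose content from standard input when the path is "-" and from disk otherwise. Below is a minimal, self-contained sketch of that stdin-or-file pattern; readConfig and the hard-coded file name are illustrative and not part of libcompose.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// readConfig reads from standard input when path is "-", otherwise from the
// named file.
func readConfig(path string) ([]byte, error) {
	if path == "-" {
		return ioutil.ReadAll(os.Stdin)
	}
	return ioutil.ReadFile(path)
}

func main() {
	data, err := readConfig("docker-compose.yml")
	if err != nil {
		fmt.Fprintln(os.Stderr, "read failed:", err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes\n", len(data))
}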
Example 2
func (p *Project) traverse(start bool, selected map[string]bool, wrappers map[string]*serviceWrapper, action wrapperAction, cycleAction serviceAction) error {
	restart := false
	wrapperList := []string{}

	if start {
		for name := range p.Configs {
			wrapperList = append(wrapperList, name)
		}
	} else {
		for _, wrapper := range wrappers {
			if err := wrapper.Reset(); err != nil {
				return err
			}
		}
		wrapperList = p.reload
	}

	p.loadWrappers(wrappers, wrapperList)
	p.reload = []string{}

	// check service name
	for s := range selected {
		if wrappers[s] == nil {
			return errors.New("No such service: " + s)
		}
	}

	launched := map[string]bool{}

	for _, wrapper := range wrappers {
		p.startService(wrappers, []string{}, selected, launched, wrapper, action, cycleAction)
	}

	var firstError error

	for _, wrapper := range wrappers {
		if !isSelected(wrapper, selected) {
			continue
		}
		if err := wrapper.Wait(); err == ErrRestart {
			restart = true
		} else if err != nil {
			log.Errorf("Failed to start: %s : %v", wrapper.name, err)
			if firstError == nil {
				firstError = err
			}
		}
	}

	if restart {
		if p.ReloadCallback != nil {
			if err := p.ReloadCallback(); err != nil {
				log.Errorf("Failed calling callback: %v", err)
			}
		}
		return p.traverse(false, selected, wrappers, action, cycleAction)
	}
	return firstError
}
Example 3
func mergeProject(p *Project, bytes []byte) (map[string]*ServiceConfig, error) {
	configs := make(map[string]*ServiceConfig)

	datas := make(rawServiceMap)
	if err := yaml.Unmarshal(bytes, &datas); err != nil {
		return nil, err
	}

	if err := interpolate(p.context.EnvironmentLookup, &datas); err != nil {
		return nil, err
	}

	for name, data := range datas {
		data, err := parse(p.context.ConfigLookup, p.context.EnvironmentLookup, p.File, data, datas)
		if err != nil {
			logrus.Errorf("Failed to parse service %s: %v", name, err)
			return nil, err
		}

		datas[name] = data
	}

	if err := utils.Convert(datas, &configs); err != nil {
		return nil, err
	}

	adjustValues(configs)
	return configs, nil
}
Example 4
func (c *Context) lookupProjectName() (string, error) {
	if c.ProjectName != "" {
		return c.ProjectName, nil
	}

	if envProject := os.Getenv("COMPOSE_PROJECT_NAME"); envProject != "" {
		return envProject, nil
	}

	f, err := filepath.Abs(c.ComposeFile)
	if err != nil {
		logrus.Errorf("Failed to get absolute directory for: %s", c.ComposeFile)
		return "", err
	}

	f = toUnixPath(f)

	parent := path.Base(path.Dir(f))
	if parent != "" && parent != "." {
		return parent, nil
	} else if wd, err := os.Getwd(); err != nil {
		return "", err
	} else {
		return path.Base(toUnixPath(wd)), nil
	}
}
Example 5
// getStoredHashStates returns a slice of hashStateEntries for this upload.
func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) {
	uploadHashStatePathPrefix, err := bw.blobStore.pm.path(uploadHashStatePathSpec{
		name: bw.blobStore.repository.Name(),
		id:   bw.id,
		alg:  bw.digester.Digest().Algorithm(),
		list: true,
	})
	if err != nil {
		return nil, err
	}

	paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix)
	if err != nil {
		if _, ok := err.(storagedriver.PathNotFoundError); !ok {
			return nil, err
		}
		// Treat PathNotFoundError as no entries.
		paths = nil
	}

	hashStateEntries := make([]hashStateEntry, 0, len(paths))

	for _, p := range paths {
		pathSuffix := path.Base(p)
		// The suffix should be the offset.
		offset, err := strconv.ParseInt(pathSuffix, 0, 64)
		if err != nil {
			logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err)
		}

		hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p})
	}

	return hashStateEntries, nil
}
Example 6
// Write attempts to flush the events to the downstream sink until it succeeds
// or the sink is closed.
func (rs *retryingSink) Write(events ...Event) error {
	rs.mu.Lock()
	defer rs.mu.Unlock()

retry:

	if rs.closed {
		return ErrSinkClosed
	}

	if !rs.proceed() {
		logrus.Warnf("%v encountered too many errors, backing off", rs.sink)
		rs.wait(rs.failures.backoff)
		goto retry
	}

	if err := rs.write(events...); err != nil {
		if err == ErrSinkClosed {
			// terminal!
			return err
		}

		logrus.Errorf("retryingsink: error writing events: %v, retrying", err)
		goto retry
	}

	return nil
}
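Write above loops via the retry label: it backs off while proceed() reports too many recent failures, retries transient write errors, and stops only on success or ErrSinkClosed. Below is a minimal, self-contained sketch of that retry-until-terminal idea; errClosed and writeWithRetry are illustrative names, not the package's API.

package main

import (
	"errors"
	"fmt"
	"time"
)

var errClosed = errors.New("sink closed")

// writeWithRetry keeps attempting fn, sleeping between failures, and stops
// only on success or on the terminal errClosed error.
func writeWithRetry(fn func() error, backoff time.Duration) error {
	for {
		err := fn()
		if err == nil || err == errClosed {
			return err
		}
		fmt.Println("write failed, retrying:", err)
		time.Sleep(backoff)
	}
}

func main() {
	attempts := 0
	err := writeWithRetry(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}, 10*time.Millisecond)
	fmt.Println("done after", attempts, "attempts, err =", err)
}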
Example 7
func (s *serviceWrapper) Do(wrappers map[string]*serviceWrapper, start, done EventType, action func(service Service) error) {
	defer s.done.Done()

	if s.state == StateExecuted {
		return
	}

	if wrappers != nil && !s.waitForDeps(wrappers) {
		return
	}

	s.state = StateExecuted

	s.project.Notify(start, s.service.Name(), nil)

	s.err = action(s.service)
	if s.err == ErrRestart {
		s.project.Notify(done, s.service.Name(), nil)
		s.project.Notify(EventProjectReloadTrigger, s.service.Name(), nil)
	} else if s.err != nil {
		log.Errorf("Failed %s %s : %v", start, s.name, s.err)
	} else {
		s.project.Notify(done, s.service.Name(), nil)
	}
}
Example 8
func (c *Container) pull(image string) error {
	taglessRemote, tag := parsers.ParseRepositoryTag(image)
	if tag == "" {
		image = utils.ImageReference(taglessRemote, DefaultTag)
	}

	repoInfo, err := registry.ParseRepositoryInfo(taglessRemote)
	if err != nil {
		return err
	}

	authConfig := cliconfig.AuthConfig{}
	if c.service.context.ConfigFile != nil && repoInfo != nil && repoInfo.Index != nil {
		authConfig = registry.ResolveAuthConfig(c.service.context.ConfigFile, repoInfo.Index)
	}

	err = c.client.PullImage(
		dockerclient.PullImageOptions{
			Repository:   image,
			OutputStream: os.Stderr, // TODO maybe get the stream from some configured place
		},
		dockerclient.AuthConfiguration{
			Username: authConfig.Username,
			Password: authConfig.Password,
			Email:    authConfig.Email,
		},
	)

	if err != nil {
		logrus.Errorf("Failed to pull image %s: %v", image, err)
	}

	return err
}
Example 9
// GetTotalUsedFds returns the number of used file descriptors by
// reading them from the /proc filesystem.
func GetTotalUsedFds() int {
	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
		logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
	} else {
		return len(fds)
	}
	return -1
}
Example 10
func (msl *manifestServiceListener) Get(dgst digest.Digest) (*manifest.SignedManifest, error) {
	sm, err := msl.ManifestService.Get(dgst)
	if err == nil {
		if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil {
			logrus.Errorf("error dispatching manifest pull to listener: %v", err)
		}
	}

	return sm, err
}
Example 11
func (msl *manifestServiceListener) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) {
	sm, err := msl.ManifestService.GetByTag(tag, options...)
	if err == nil {
		if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil {
			logrus.Errorf("error dispatching manifest pull to listener: %v", err)
		}
	}

	return sm, err
}
Example 12
func (msl *manifestServiceListener) Put(sm *manifest.SignedManifest) error {
	err := msl.ManifestService.Put(sm)

	if err == nil {
		if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Name(), sm); err != nil {
			logrus.Errorf("error dispatching manifest push to listener: %v", err)
		}
	}

	return err
}
Example 13
// Verify attempts to verify this token using the given options.
// Returns a nil error if the token is valid.
func (t *Token) Verify(verifyOpts VerifyOptions) error {
	// Verify that the Issuer claim is a trusted authority.
	if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) {
		log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer)
		return ErrInvalidToken
	}

	// Verify that the Audience claim is allowed.
	if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) {
		log.Errorf("token intended for another audience: %q", t.Claims.Audience)
		return ErrInvalidToken
	}

	// Verify that the token is currently usable and not expired.
	currentUnixTime := time.Now().Unix()
	if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) {
		log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime)
		return ErrInvalidToken
	}

	// Verify the token signature.
	if len(t.Signature) == 0 {
		log.Error("token has no signature")
		return ErrInvalidToken
	}

	// Verify that the signing key is trusted.
	signingKey, err := t.VerifySigningKey(verifyOpts)
	if err != nil {
		log.Error(err)
		return ErrInvalidToken
	}

	// Finally, verify the signature of the token using the key which signed it.
	if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil {
		log.Errorf("unable to verify token signature: %s", err)
		return ErrInvalidToken
	}

	return nil
}
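The expiry check above only accepts a token while NotBefore <= now <= Expiration, all in Unix seconds. Below is a minimal sketch of just that window check; withinWindow is an illustrative helper, not part of the token package.

package main

import (
	"fmt"
	"time"
)

// withinWindow reports whether the current time falls inside the
// [notBefore, expiration] interval (Unix timestamps in seconds).
func withinWindow(notBefore, expiration int64) bool {
	now := time.Now().Unix()
	return notBefore <= now && now <= expiration
}

func main() {
	now := time.Now().Unix()
	fmt.Println(withinWindow(now-60, now+60))  // true: inside the window
	fmt.Println(withinWindow(now+60, now+120)) // false: not valid yet
}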
Example 14
// Convert converts a struct (src) to another one (target) using yaml marshalling/unmarshalling.
// If the structures are not compatible, this will return an error, as the unmarshalling will fail.
func Convert(src, target interface{}) error {
	newBytes, err := yaml.Marshal(src)
	if err != nil {
		return err
	}

	err = yaml.Unmarshal(newBytes, target)
	if err != nil {
		logrus.Errorf("Failed to unmarshal: %v\n%s", err, string(newBytes))
	}
	return err
}
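Below is a small, self-contained sketch of the same marshal/unmarshal round-trip with throwaway types; the local convert helper mirrors the technique above rather than importing the package, and any fields the two types share (by yaml tag) are copied across.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type source struct {
	Image string   `yaml:"image"`
	Ports []string `yaml:"ports"`
}

type target struct {
	Image string   `yaml:"image"`
	Ports []string `yaml:"ports"`
}

// convert renders src to YAML and decodes it into dst, which must be a
// pointer; incompatible structures surface as an unmarshalling error.
func convert(src, dst interface{}) error {
	data, err := yaml.Marshal(src)
	if err != nil {
		return err
	}
	return yaml.Unmarshal(data, dst)
}

func main() {
	var t target
	if err := convert(source{Image: "nginx", Ports: []string{"80:80"}}, &t); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", t)
}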
Example 15
func (p *Project) startService(wrappers map[string]*serviceWrapper, history []string, selected, launched map[string]bool, wrapper *serviceWrapper, action wrapperAction, cycleAction serviceAction) error {
	if launched[wrapper.name] {
		return nil
	}

	launched[wrapper.name] = true
	history = append(history, wrapper.name)

	for _, dep := range wrapper.service.DependentServices() {
		target := wrappers[dep.Target]
		if target == nil {
			log.Errorf("Failed to find %s", dep.Target)
			continue
		}

		if utils.Contains(history, dep.Target) {
			cycle := strings.Join(append(history, dep.Target), "->")
			if dep.Optional {
				log.Debugf("Ignoring cycle for %s", cycle)
				wrapper.IgnoreDep(dep.Target)
				if cycleAction != nil {
					var err error
					log.Debugf("Running cycle action for %s", cycle)
					err = cycleAction(target.service)
					if err != nil {
						return err
					}
				}
			} else {
				return fmt.Errorf("Cycle detected in path %s", cycle)
			}

			continue
		}

		err := p.startService(wrappers, history, selected, launched, target, action, cycleAction)
		if err != nil {
			return err
		}
	}

	if isSelected(wrapper, selected) {
		log.Debugf("Launching action for %s", wrapper.name)
		go action(wrapper, wrappers)
	} else {
		wrapper.Ignore()
	}

	return nil
}
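startService walks dependencies depth-first, carrying the path taken so far in history and reporting a cycle when a dependency already appears on that path (optional dependencies merely log and skip). Below is a minimal, self-contained sketch of that cycle check; the deps map and the visit/contains helpers are illustrative.

package main

import (
	"fmt"
	"strings"
)

// visit follows the deps map depth-first, carrying the current path, and
// returns an error when a dependency already appears on that path.
func visit(deps map[string][]string, name string, path []string, launched map[string]bool) error {
	if launched[name] {
		return nil
	}
	launched[name] = true
	path = append(path, name)

	for _, dep := range deps[name] {
		if contains(path, dep) {
			return fmt.Errorf("cycle detected in path %s", strings.Join(append(path, dep), "->"))
		}
		if err := visit(deps, dep, path, launched); err != nil {
			return err
		}
	}
	return nil
}

func contains(list []string, s string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	deps := map[string][]string{
		"web": {"db"},
		"db":  {"web"}, // deliberate cycle
	}
	fmt.Println(visit(deps, "web", nil, map[string]bool{}))
}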
Example 16
// run is the main broadcast loop, started when the broadcaster is created.
// Under normal conditions, it waits for events on the event channel. After
// Close is called, this goroutine will exit.
func (b *Broadcaster) run() {
	for {
		select {
		case block := <-b.events:
			for _, sink := range b.sinks {
				if err := sink.Write(block...); err != nil {
					logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err)
				}
			}
		case closing := <-b.closed:

			// close all the underlying sinks
			for _, sink := range b.sinks {
				if err := sink.Close(); err != nil {
					logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err)
				}
			}
			closing <- struct{}{}

			logrus.Debugf("broadcaster: closed")
			return
		}
	}
}
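Below is a minimal, self-contained sketch of the same run-loop shape: one goroutine that either fans events out to its sinks or, when a close request arrives, shuts the sinks down and acknowledges on the channel it was handed. Plain channels stand in for the package's Sink interface.

package main

import "fmt"

type broadcaster struct {
	events chan string
	closed chan chan struct{}
	sinks  []chan string
}

// run waits for events and forwards them to every sink; a value on closed
// makes it close the sinks, acknowledge, and exit.
func (b *broadcaster) run() {
	for {
		select {
		case ev := <-b.events:
			for _, sink := range b.sinks {
				sink <- ev
			}
		case closing := <-b.closed:
			for _, sink := range b.sinks {
				close(sink)
			}
			closing <- struct{}{}
			return
		}
	}
}

func main() {
	sink := make(chan string, 1)
	b := &broadcaster{
		events: make(chan string),
		closed: make(chan chan struct{}),
		sinks:  []chan string{sink},
	}
	go b.run()

	b.events <- "manifest pushed"
	fmt.Println(<-sink)

	ack := make(chan struct{})
	b.closed <- ack
	<-ack
}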
Example 17
// NewToken parses the given raw token string
// and constructs an unverified JSON Web Token.
func NewToken(rawToken string) (*Token, error) {
	parts := strings.Split(rawToken, TokenSeparator)
	if len(parts) != 3 {
		return nil, ErrMalformedToken
	}

	var (
		rawHeader, rawClaims   = parts[0], parts[1]
		headerJSON, claimsJSON []byte
		err                    error
	)

	defer func() {
		if err != nil {
			log.Errorf("error while unmarshalling raw token: %s", err)
		}
	}()

	if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil {
		err = fmt.Errorf("unable to decode header: %s", err)
		return nil, ErrMalformedToken
	}

	if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil {
		err = fmt.Errorf("unable to decode claims: %s", err)
		return nil, ErrMalformedToken
	}

	token := new(Token)
	token.Header = new(Header)
	token.Claims = new(ClaimSet)

	token.Raw = strings.Join(parts[:2], TokenSeparator)
	if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil {
		err = fmt.Errorf("unable to decode signature: %s", err)
		return nil, ErrMalformedToken
	}

	if err = json.Unmarshal(headerJSON, token.Header); err != nil {
		return nil, ErrMalformedToken
	}

	if err = json.Unmarshal(claimsJSON, token.Claims); err != nil {
		return nil, ErrMalformedToken
	}

	return token, nil
}
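Each dot-separated JWT segment is unpadded base64url, and the header and claims segments hold JSON. Below is a minimal, self-contained sketch of that decoding step using the standard library; decodeSegment and the inline token are illustrative, and not necessarily how joseBase64UrlDecode is implemented.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// decodeSegment decodes one unpadded base64url JWT segment.
func decodeSegment(seg string) ([]byte, error) {
	return base64.RawURLEncoding.DecodeString(seg)
}

func main() {
	// A throwaway, unsigned token built inline for illustration only.
	header := base64.RawURLEncoding.EncodeToString([]byte(`{"typ":"JWT","alg":"none"}`))
	claims := base64.RawURLEncoding.EncodeToString([]byte(`{"iss":"auth.example.com","aud":"registry.example.com"}`))
	raw := strings.Join([]string{header, claims, ""}, ".")

	parts := strings.Split(raw, ".")
	headerJSON, err := decodeSegment(parts[0])
	if err != nil {
		panic(err)
	}

	var decoded map[string]interface{}
	if err := json.Unmarshal(headerJSON, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded["alg"])
}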
Example 18
// Load loads the specified byte array (the compose file content) and adds the
// service configuration to the project.
func (p *Project) Load(bytes []byte) error {
	configs, err := mergeProject(p, bytes)
	if err != nil {
		log.Errorf("Could not parse config for project %s : %v", p.Name, err)
		return err
	}

	for name, config := range configs {
		err := p.AddConfig(name, config)
		if err != nil {
			return err
		}
	}

	return nil
}
Example 19
func (s *serviceWrapper) Reset() error {
	if s.state != StateExecuted {
		service, err := s.project.CreateService(s.name)
		if err != nil {
			log.Errorf("Failed to create service for %s : %v", s.name, err)
			return err
		}

		s.service = service
	}

	if s.err == ErrRestart {
		s.err = nil
	}
	s.done.Add(1)

	return nil
}
Example 20
// NewProject creates a Project with the specified context.
func NewProject(context *Context) (*project.Project, error) {
	if context.ConfigLookup == nil {
		context.ConfigLookup = &lookup.FileConfigLookup{}
	}

	if context.EnvironmentLookup == nil {
		context.EnvironmentLookup = &lookup.OsEnvLookup{}
	}

	if context.ServiceFactory == nil {
		context.ServiceFactory = &ServiceFactory{
			context: context,
		}
	}

	if context.Builder == nil {
		context.Builder = NewDaemonBuilder(context)
	}

	if context.ClientFactory == nil {
		factory, err := NewDefaultClientFactory(ClientOpts{})
		if err != nil {
			return nil, err
		}
		context.ClientFactory = factory
	}

	p := project.NewProject(&context.Context)

	err := p.Parse()
	if err != nil {
		return nil, err
	}

	if err = context.open(); err != nil {
		logrus.Errorf("Failed to open project %s: %v", p.Name, err)
		return nil, err
	}

	return p, err
}
Example 21
func (s *serviceWrapper) waitForDeps(wrappers map[string]*serviceWrapper) bool {
	if s.noWait {
		return true
	}

	for _, dep := range s.service.DependentServices() {
		if s.ignored[dep.Target] {
			continue
		}

		if wrapper, ok := wrappers[dep.Target]; ok {
			if wrapper.Wait() == ErrRestart {
				s.project.Notify(EventProjectReload, wrapper.service.Name(), nil)
				s.err = ErrRestart
				return false
			}
		} else {
			log.Errorf("Failed to find %s", dep.Target)
		}
	}

	return true
}
Example 22
// WriteStream stores the contents of the provided io.Reader at a
// location designated by the given path. The driver will know it has
// received the full contents when the reader returns io.EOF. The number
// of successfully READ bytes will be returned, even if an error is
// returned. May be used to resume writing a stream by providing a nonzero
// offset. Offsets past the current size will write from the position
// beyond the end of the file.
func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
	partNumber := 1
	bytesRead := 0
	var putErrChan chan error
	parts := []s3.Part{}
	var part s3.Part
	done := make(chan struct{}) // stopgap to free up waiting goroutines

	multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions())
	if err != nil {
		return 0, err
	}

	buf := d.getbuf()

	// We never want to leave a dangling multipart upload, our only consistent state is
	// when there is a whole object at path. This is in order to remain consistent with
	// the stat call.
	//
	// Note that if the machine dies before executing the defer, we will be left with a dangling
	// multipart upload, which will eventually be cleaned up, but we will lose all of the progress
	// made prior to the machine crashing.
	defer func() {
		if putErrChan != nil {
			if putErr := <-putErrChan; putErr != nil {
				err = putErr
			}
		}

		if len(parts) > 0 {
			if multi == nil {
				// Parts should be empty if the multi is not initialized
				panic("Unreachable")
			} else {
				if multi.Complete(parts) != nil {
					multi.Abort()
				}
			}
		}

		d.putbuf(buf) // needs to be here to pick up new buf value
		close(done)   // free up any waiting goroutines
	}()

	// Fills from 0 to total from current
	fromSmallCurrent := func(total int64) error {
		current, err := d.ReadStream(ctx, path, 0)
		if err != nil {
			return err
		}

		bytesRead = 0
		for int64(bytesRead) < total {
			//The loop should very rarely enter a second iteration
			nn, err := current.Read(buf[bytesRead:total])
			bytesRead += nn
			if err != nil {
				if err != io.EOF {
					return err
				}

				break
			}

		}
		return nil
	}

	// Fills from parameter to chunkSize from reader
	fromReader := func(from int64) error {
		bytesRead = 0
		for from+int64(bytesRead) < d.ChunkSize {
			nn, err := reader.Read(buf[from+int64(bytesRead):])
			totalRead += int64(nn)
			bytesRead += nn

			if err != nil {
				if err != io.EOF {
					return err
				}

				break
			}
		}

		if putErrChan == nil {
			putErrChan = make(chan error)
		} else {
			if putErr := <-putErrChan; putErr != nil {
				putErrChan = nil
				return putErr
			}
		}

		go func(bytesRead int, from int64, buf []byte) {
			defer d.putbuf(buf) // this buffer gets dropped after this call

			// DRAGONS(stevvooe): There are few things one might want to know
			// about this section. First, the putErrChan is expecting an error
			// and a nil or just a nil to come through the channel. This is
			// covered by the silly defer below. The other aspect is the s3
			// retry backoff to deal with RequestTimeout errors. Even though
			// the underlying s3 library should handle it, it doesn't seem to
			// be part of the shouldRetry function (see AdRoll/goamz/s3).
			defer func() {
				select {
				case putErrChan <- nil: // for some reason, we do this no matter what.
				case <-done:
					return // ensure we don't leak the goroutine
				}
			}()

			if bytesRead <= 0 {
				return
			}

			var err error
			var part s3.Part

		loop:
			for retries := 0; retries < 5; retries++ {
				part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]))
				if err == nil {
					break // success!
				}

				// NOTE(stevvooe): This retry code tries to only retry under
				// conditions where the s3 package does not. We may add s3
				// error codes to the below if we see others bubble up in the
				// application. Right now, the most troubling is
				// RequestTimeout, which seems to only triggered when a tcp
				// connection to s3 slows to a crawl. If the RequestTimeout
				// ends up getting added to the s3 library and we don't see
				// other errors, this retry loop can be removed.
				switch err := err.(type) {
				case *s3.Error:
					switch err.Code {
					case "RequestTimeout":
						// allow retries on only this error.
					default:
						break loop
					}
				}

				backoff := 100 * time.Millisecond * time.Duration(retries+1)
				logrus.Errorf("error putting part, retrying after %v: %v", backoff.String(), err)
				time.Sleep(backoff)
			}

			if err != nil {
				logrus.Errorf("error putting part, aborting: %v", err)
				select {
				case putErrChan <- err:
				case <-done:
					return // don't leak the goroutine
				}
			}

			// parts and partNumber are safe, because this function is the
			// only one modifying them and we force it to be executed
			// serially.
			parts = append(parts, part)
			partNumber++
		}(bytesRead, from, buf)

		buf = d.getbuf() // use a new buffer for the next call
		return nil
	}

	if offset > 0 {
		resp, err := d.Bucket.Head(d.s3Path(path), nil)
		if err != nil {
			if s3Err, ok := err.(*s3.Error); !ok || s3Err.Code != "NoSuchKey" {
				return 0, err
			}
		}

		currentLength := int64(0)
		if err == nil {
			currentLength = resp.ContentLength
		}

		if currentLength >= offset {
			if offset < d.ChunkSize {
				// chunkSize > currentLength >= offset
				if err = fromSmallCurrent(offset); err != nil {
					return totalRead, err
				}

				if err = fromReader(offset); err != nil {
					return totalRead, err
				}

				if totalRead+offset < d.ChunkSize {
					return totalRead, nil
				}
			} else {
				// currentLength >= offset >= chunkSize
				_, part, err = multi.PutPartCopy(partNumber,
					s3.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)},
					d.Bucket.Name+"/"+d.s3Path(path))
				if err != nil {
					return 0, err
				}

				parts = append(parts, part)
				partNumber++
			}
		} else {
			// Fills between parameters with 0s but only when to - from <= chunkSize
			fromZeroFillSmall := func(from, to int64) error {
				bytesRead = 0
				for from+int64(bytesRead) < to {
					nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to])
					bytesRead += nn
					if err != nil {
						return err
					}
				}

				return nil
			}

			// Fills between parameters with 0s, making new parts
			fromZeroFillLarge := func(from, to int64) error {
				bytesRead64 := int64(0)
				for to-(from+bytesRead64) >= d.ChunkSize {
					part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros))
					if err != nil {
						return err
					}
					bytesRead64 += d.ChunkSize

					parts = append(parts, part)
					partNumber++
				}

				return fromZeroFillSmall(0, (to-from)%d.ChunkSize)
			}

			// currentLength < offset
			if currentLength < d.ChunkSize {
				if offset < d.ChunkSize {
					// chunkSize > offset > currentLength
					if err = fromSmallCurrent(currentLength); err != nil {
						return totalRead, err
					}

					if err = fromZeroFillSmall(currentLength, offset); err != nil {
						return totalRead, err
					}

					if err = fromReader(offset); err != nil {
						return totalRead, err
					}

					if totalRead+offset < d.ChunkSize {
						return totalRead, nil
					}
				} else {
					// offset >= chunkSize > currentLength
					if err = fromSmallCurrent(currentLength); err != nil {
						return totalRead, err
					}

					if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil {
						return totalRead, err
					}

					part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf))
					if err != nil {
						return totalRead, err
					}

					parts = append(parts, part)
					partNumber++

					//Zero fill from chunkSize up to offset, then some reader
					if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil {
						return totalRead, err
					}

					if err = fromReader(offset % d.ChunkSize); err != nil {
						return totalRead, err
					}

					if totalRead+(offset%d.ChunkSize) < d.ChunkSize {
						return totalRead, nil
					}
				}
			} else {
				// offset > currentLength >= chunkSize
				_, part, err = multi.PutPartCopy(partNumber,
					s3.CopyOptions{},
					d.Bucket.Name+"/"+d.s3Path(path))
				if err != nil {
					return 0, err
				}

				parts = append(parts, part)
				partNumber++

				//Zero fill from currentLength up to offset, then some reader
				if err = fromZeroFillLarge(currentLength, offset); err != nil {
					return totalRead, err
				}

				if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil {
					return totalRead, err
				}

				if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize {
					return totalRead, nil
				}
			}

		}
	}

	for {
		if err = fromReader(0); err != nil {
			return totalRead, err
		}

		if int64(bytesRead) < d.ChunkSize {
			break
		}
	}

	return totalRead, nil
}
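The inner upload loop above retries only the error it treats as transient ("RequestTimeout"), with a linearly growing backoff and at most five attempts. Below is a minimal, self-contained sketch of that bounded-retry shape; putPartWithRetry and errRequestTimeout are illustrative, not part of the driver.

package main

import (
	"errors"
	"fmt"
	"time"
)

var errRequestTimeout = errors.New("RequestTimeout")

// putPartWithRetry retries put on the transient error only, backing off
// linearly, and gives up after five attempts.
func putPartWithRetry(put func() error) error {
	var err error
	for retries := 0; retries < 5; retries++ {
		err = put()
		if err == nil {
			return nil
		}
		if err != errRequestTimeout {
			return err // not considered retryable
		}
		backoff := 100 * time.Millisecond * time.Duration(retries+1)
		fmt.Printf("error putting part, retrying after %v: %v\n", backoff, err)
		time.Sleep(backoff)
	}
	return err
}

func main() {
	attempts := 0
	err := putPartWithRetry(func() error {
		attempts++
		if attempts < 3 {
			return errRequestTimeout
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}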
Example 23
// resumeDigestAt attempts to restore the state of the internal hash function
// by loading the most recent saved hash state less than or equal to the given
// offset. Any unhashed bytes remaining less than the given offset are hashed
// from the content uploaded so far.
func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error {
	if offset < 0 {
		return fmt.Errorf("cannot resume hash at negative offset: %d", offset)
	}

	h, ok := bw.digester.Hash().(resumable.Hash)
	if !ok {
		return errResumableDigestNotAvailable
	}

	if offset == int64(h.Len()) {
		// State of digester is already at the requested offset.
		return nil
	}

	// List hash states from storage backend.
	var hashStateMatch hashStateEntry
	hashStates, err := bw.getStoredHashStates(ctx)
	if err != nil {
		return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err)
	}

	// Find the highest stored hashState with offset less than or equal to
	// the requested offset.
	for _, hashState := range hashStates {
		if hashState.offset == offset {
			hashStateMatch = hashState
			break // Found an exact offset match.
		} else if hashState.offset < offset && hashState.offset > hashStateMatch.offset {
			// This offset is closer to the requested offset.
			hashStateMatch = hashState
		} else if hashState.offset > offset {
			// Remove any stored hash state with offsets higher than this one
			// as writes to this resumed hasher will make those invalid. This
			// is probably okay to skip for now since we don't expect anyone to
			// use the API in this way. For that reason, we don't treat an
			// error here as a fatal error, but only log it.
			if err := bw.driver.Delete(ctx, hashState.path); err != nil {
				logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err)
			}
		}
	}

	if hashStateMatch.offset == 0 {
		// No need to load any state, just reset the hasher.
		h.Reset()
	} else {
		storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path)
		if err != nil {
			return err
		}

		if err = h.Restore(storedState); err != nil {
			return err
		}
	}

	// Mind the gap.
	if gapLen := offset - int64(h.Len()); gapLen > 0 {
		// Need to read content from the upload to catch up to the desired offset.
		fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size)
		if err != nil {
			return err
		}

		if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil {
			return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err)
		}

		if _, err := io.CopyN(h, fr, gapLen); err != nil {
			return err
		}
	}

	return nil
}
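The "mind the gap" step above seeks the stored upload to the already-hashed length and feeds the remaining bytes into the hash with io.CopyN. Below is a minimal, self-contained sketch of that idea, with sha256 and an in-memory reader standing in for the resumable hash and the upload.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	content := strings.NewReader("hello, resumable world")

	// Pretend the first 7 bytes were already hashed in an earlier session.
	h := sha256.New()
	hashedSoFar := int64(7)
	if _, err := io.CopyN(h, strings.NewReader("hello, "), hashedSoFar); err != nil {
		panic(err)
	}

	// Mind the gap: seek to the hashed length and hash the rest.
	offset := content.Size()
	if _, err := content.Seek(hashedSoFar, io.SeekStart); err != nil {
		panic(err)
	}
	if _, err := io.CopyN(h, content, offset-hashedSoFar); err != nil {
		panic(err)
	}

	// The digest now matches hashing the whole content in one pass.
	fmt.Printf("%x\n", h.Sum(nil))
}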
Example 24
// loginV1 tries to register/login to the v1 registry server.
func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) {
	var (
		status        string
		reqBody       []byte
		err           error
		reqStatusCode = 0
		serverAddress = authConfig.ServerAddress
	)

	logrus.Debugf("attempting v1 login to registry endpoint %s", registryEndpoint)

	if serverAddress == "" {
		return "", fmt.Errorf("Server Error: Server Address not set.")
	}

	loginAgainstOfficialIndex := serverAddress == IndexServer

	// to avoid sending the server address to the server it should be removed before being marshalled
	authCopy := *authConfig
	authCopy.ServerAddress = ""

	jsonBody, err := json.Marshal(authCopy)
	if err != nil {
		return "", fmt.Errorf("Config Error: %s", err)
	}

	// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
	b := strings.NewReader(string(jsonBody))
	req1, err := registryEndpoint.client.Post(serverAddress+"users/", "application/json; charset=utf-8", b)
	if err != nil {
		return "", fmt.Errorf("Server Error: %s", err)
	}
	reqStatusCode = req1.StatusCode
	defer req1.Body.Close()
	reqBody, err = ioutil.ReadAll(req1.Body)
	if err != nil {
		return "", fmt.Errorf("Server Error: [%#v] %s", reqStatusCode, err)
	}

	if reqStatusCode == 201 {
		if loginAgainstOfficialIndex {
			status = "Account created. Please use the confirmation link we sent" +
				" to your e-mail to activate it."
		} else {
			// *TODO: Use registry configuration to determine what this says, if anything?
			status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it."
		}
	} else if reqStatusCode == 400 {
		if string(reqBody) == "\"Username or email already exists\"" {
			req, err := http.NewRequest("GET", serverAddress+"users/", nil)
			req.SetBasicAuth(authConfig.Username, authConfig.Password)
			resp, err := registryEndpoint.client.Do(req)
			if err != nil {
				return "", err
			}
			defer resp.Body.Close()
			body, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return "", err
			}
			if resp.StatusCode == 200 {
				return "Login Succeeded", nil
			} else if resp.StatusCode == 401 {
				return "", fmt.Errorf("Wrong login/password, please try again")
			} else if resp.StatusCode == 403 {
				if loginAgainstOfficialIndex {
					return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.")
				}
				// *TODO: Use registry configuration to determine what this says, if anything?
				return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)
			} else if resp.StatusCode == 500 { // Issue #14326
				logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body)
				return "", fmt.Errorf("Internal Server Error")
			}
			return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header)
		}
		return "", fmt.Errorf("Registration: %s", reqBody)

	} else if reqStatusCode == 401 {
		// This case would happen with private registries where /v1/users is
		// protected, so people can use `docker login` as an auth check.
		req, err := http.NewRequest("GET", serverAddress+"users/", nil)
		req.SetBasicAuth(authConfig.Username, authConfig.Password)
		resp, err := registryEndpoint.client.Do(req)
		if err != nil {
			return "", err
		}
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return "", err
		}
		if resp.StatusCode == 200 {
			return "Login Succeeded", nil
		} else if resp.StatusCode == 401 {
			return "", fmt.Errorf("Wrong login/password, please try again")
		} else {
			return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
				resp.StatusCode, resp.Header)
		}
	} else {
		return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody)
	}
	return status, nil
}
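Both the 400 and 401 branches above fall back to the same probe: a GET with basic auth whose status code decides the outcome. Below is a minimal, self-contained sketch of that probe against a throwaway httptest server; checkLogin and the handler are illustrative, not the registry's real /v1/users endpoint.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// checkLogin issues a GET with basic auth and maps the status code to a
// login result, mirroring the status handling above.
func checkLogin(client *http.Client, url, user, pass string) (string, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return "", err
	}
	req.SetBasicAuth(user, pass)

	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		return "Login Succeeded", nil
	case http.StatusUnauthorized:
		return "", fmt.Errorf("wrong login/password, please try again")
	default:
		return "", fmt.Errorf("unexpected status code %d", resp.StatusCode)
	}
}

func main() {
	// A throwaway server standing in for the registry.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if user, pass, ok := r.BasicAuth(); ok && user == "alice" && pass == "secret" {
			w.WriteHeader(http.StatusOK)
			return
		}
		w.WriteHeader(http.StatusUnauthorized)
	}))
	defer srv.Close()

	status, err := checkLogin(srv.Client(), srv.URL+"/v1/users/", "alice", "secret")
	fmt.Println(status, err)
}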
Example 25
func parse(configLookup ConfigLookup, environmentLookup EnvironmentLookup, inFile string, serviceData rawService, datas rawServiceMap) (rawService, error) {
	serviceData, err := readEnvFile(configLookup, inFile, serviceData)
	if err != nil {
		return nil, err
	}

	serviceData, err = resolveBuild(inFile, serviceData)
	if err != nil {
		return nil, err
	}

	value, ok := serviceData["extends"]
	if !ok {
		return serviceData, nil
	}

	mapValue, ok := value.(map[interface{}]interface{})
	if !ok {
		return serviceData, nil
	}

	if configLookup == nil {
		return nil, fmt.Errorf("Cannot use extends in file %s: no mechanism provided to look up files", inFile)
	}

	file := asString(mapValue["file"])
	service := asString(mapValue["service"])

	if service == "" {
		return serviceData, nil
	}

	var baseService rawService

	if file == "" {
		if serviceData, ok := datas[service]; ok {
			baseService, err = parse(configLookup, environmentLookup, inFile, serviceData, datas)
		} else {
			return nil, fmt.Errorf("Failed to find service %s to extend", service)
		}
	} else {
		bytes, resolved, err := configLookup.Lookup(file, inFile)
		if err != nil {
			logrus.Errorf("Failed to lookup file %s: %v", file, err)
			return nil, err
		}

		var baseRawServices rawServiceMap
		if err := yaml.Unmarshal(bytes, &baseRawServices); err != nil {
			return nil, err
		}

		err = interpolate(environmentLookup, &baseRawServices)
		if err != nil {
			return nil, err
		}

		baseService, ok = baseRawServices[service]
		if !ok {
			return nil, fmt.Errorf("Failed to find service %s in file %s", service, file)
		}

		baseService, err = parse(configLookup, environmentLookup, resolved, baseService, baseRawServices)
	}

	if err != nil {
		return nil, err
	}

	baseService = clone(baseService)

	logrus.Debugf("Merging %#v, %#v", baseService, serviceData)

	for _, k := range noMerge {
		if _, ok := baseService[k]; ok {
			source := file
			if source == "" {
				source = inFile
			}
			return nil, fmt.Errorf("Cannot extend service '%s' in %s: services with '%s' cannot be extended", service, source, k)
		}
	}

	for k, v := range serviceData {
		// Image and build are mutually exclusive in merge
		if k == "image" {
			delete(baseService, "build")
		} else if k == "build" {
			delete(baseService, "image")
		}
		existing, ok := baseService[k]
		if ok {
			baseService[k] = merge(existing, v)
		} else {
			baseService[k] = v
		}
	}

	logrus.Debugf("Merged result %#v", baseService)

	return baseService, nil
}
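The final loop above copies the extending service's keys over the base and treats "image" and "build" as mutually exclusive; the real code also deep-merges existing values via merge(), which this sketch skips. Below is a minimal, self-contained sketch of that override step with plain maps.

package main

import "fmt"

// extendService overlays override onto base, dropping "build" when the
// override sets "image" and vice versa.
func extendService(base, override map[string]interface{}) map[string]interface{} {
	merged := map[string]interface{}{}
	for k, v := range base {
		merged[k] = v
	}
	for k, v := range override {
		if k == "image" {
			delete(merged, "build")
		} else if k == "build" {
			delete(merged, "image")
		}
		merged[k] = v
	}
	return merged
}

func main() {
	base := map[string]interface{}{"build": ".", "ports": []string{"80:80"}}
	override := map[string]interface{}{"image": "nginx:latest"}
	fmt.Println(extendService(base, override))
}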