Example #1
File: create.go Project: zlzlnet/docker
// Checks whether the client set configurations for more than one network while creating a container.
// Also checks that the IPAMConfig is valid.
func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error {
	if nwConfig == nil || len(nwConfig.EndpointsConfig) == 0 {
		return nil
	}
	if len(nwConfig.EndpointsConfig) == 1 {
		for _, v := range nwConfig.EndpointsConfig {
			if v != nil && v.IPAMConfig != nil {
				if v.IPAMConfig.IPv4Address != "" && net.ParseIP(v.IPAMConfig.IPv4Address).To4() == nil {
					return errors.NewBadRequestError(fmt.Errorf("invalid IPv4 address: %s", v.IPAMConfig.IPv4Address))
				}
				if v.IPAMConfig.IPv6Address != "" {
					n := net.ParseIP(v.IPAMConfig.IPv6Address)
					// if the address is an invalid network address (ParseIP == nil) or if it is
					// an IPv4 address (To4() != nil), then it is an invalid IPv6 address
					if n == nil || n.To4() != nil {
						return errors.NewBadRequestError(fmt.Errorf("invalid IPv6 address: %s", v.IPAMConfig.IPv6Address))
					}
				}
			}
		}
		return nil
	}
	l := make([]string, 0, len(nwConfig.EndpointsConfig))
	for k := range nwConfig.EndpointsConfig {
		l = append(l, k)
	}
	err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", "))
	return errors.NewBadRequestError(err)
}
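For reference, a minimal standalone sketch (not part of the original file) of the net.ParseIP/To4 distinction the validator above relies on: ParseIP returns nil for malformed input, and To4 returns non-nil only for IPv4 addresses, so a parsed address whose To4() is nil must be IPv6.

package main

import (
	"fmt"
	"net"
)

// classify reports whether s is a valid IPv4 address, a valid IPv6
// address, or not an IP at all, using the same checks as
// verifyNetworkingConfig.
func classify(s string) string {
	ip := net.ParseIP(s)
	switch {
	case ip == nil:
		return "invalid"
	case ip.To4() != nil:
		return "IPv4"
	default:
		return "IPv6"
	}
}

func main() {
	for _, s := range []string{"10.0.0.1", "fe80::1", "not-an-ip"} {
		fmt.Println(s, "=>", classify(s))
	}
}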
Example #2
// UpdateNode updates an existing node's properties.
func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return c.errNoManager(state)
	}

	nodeSpec, err := convert.NodeSpecToGRPC(spec)
	if err != nil {
		return apierrors.NewBadRequestError(err)
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	currentNode, err := getNode(ctx, state.controlClient, input)
	if err != nil {
		return err
	}

	_, err = state.controlClient.UpdateNode(
		ctx,
		&swarmapi.UpdateNodeRequest{
			NodeID: currentNode.ID,
			Spec:   &nodeSpec,
			NodeVersion: &swarmapi.Version{
				Index: version,
			},
		},
	)
	return err
}
Example #3
// GetContainer looks for a container using the provided information, which could be
// one of the following inputs from the caller:
//  - A full container ID, which will exact match a container in daemon's list
//  - A container name, which will only exact match via the GetByName() function
//  - A partial container ID prefix (e.g. short ID) of any length that is
//    unique enough to only return a single container object
//  If none of these searches succeed, an error is returned
func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) {
	if len(prefixOrName) == 0 {
		return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied"))
	}

	if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
		// prefix is an exact match to a full container ID
		return containerByID, nil
	}

	// GetByName will match only an exact name provided; we ignore errors
	if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil {
		// prefix is an exact match to a full container Name
		return containerByName, nil
	}

	containerID, indexError := daemon.idIndex.Get(prefixOrName)
	if indexError != nil {
		// When truncindex defines an error type, use that instead
		if indexError == truncindex.ErrNotExist {
			err := fmt.Errorf("No such container: %s", prefixOrName)
			return nil, errors.NewRequestNotFoundError(err)
		}
		return nil, indexError
	}
	return daemon.containers.Get(containerID), nil
}
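A toy reduction of the same three-stage lookup, with hypothetical in-memory maps standing in for the daemon's container store, name index, and truncindex:

package main

import (
	"fmt"
	"strings"
)

var (
	byID   = map[string]string{"4f66ad9b4c6e": "web"} // full ID -> name
	byName = map[string]string{"web": "4f66ad9b4c6e"} // name -> full ID
)

// resolve mirrors GetContainer's cascade: exact ID match, then exact
// name match, then a prefix match that must be unambiguous.
func resolve(prefixOrName string) (string, error) {
	if _, ok := byID[prefixOrName]; ok {
		return prefixOrName, nil
	}
	if id, ok := byName[prefixOrName]; ok {
		return id, nil
	}
	var matches []string
	for id := range byID {
		if strings.HasPrefix(id, prefixOrName) {
			matches = append(matches, id)
		}
	}
	switch len(matches) {
	case 1:
		return matches[0], nil
	case 0:
		return "", fmt.Errorf("No such container: %s", prefixOrName)
	default:
		return "", fmt.Errorf("ambiguous prefix: %s", prefixOrName)
	}
}

func main() {
	fmt.Println(resolve("4f66")) // unique prefix resolves to the full ID
}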
Example #4
File: delete.go Project: harche/docker
// ContainerRm removes the container id from the filesystem. An error
// is returned if the container is not found, or if the remove
// fails. If the remove succeeds, the container name is released, and
// network links are removed.
func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error {
	start := time.Now()
	container, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}

	// Container state RemovalInProgress should be used to avoid races.
	if inProgress := container.SetRemovalInProgress(); inProgress {
		err := fmt.Errorf("removal of container %s is already in progress", name)
		return errors.NewBadRequestError(err)
	}
	defer container.ResetRemovalInProgress()

	// check that the container wasn't deregistered by a previous rm since the Get above
	if c := daemon.containers.Get(container.ID); c == nil {
		return nil
	}

	if config.RemoveLink {
		return daemon.rmLink(container, name)
	}

	err = daemon.cleanupContainer(container, config.ForceRemove, config.RemoveVolume)
	containerActions.WithValues("delete").UpdateSince(start)

	return err
}
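SetRemovalInProgress above is a test-and-set guard. A minimal sketch of the same pattern with sync/atomic; the resource type and method names are illustrative, not the real container implementation:

package main

import (
	"fmt"
	"sync/atomic"
)

type resource struct {
	removing atomic.Bool
}

// setRemovalInProgress returns true if a removal was already in
// progress, mirroring the guard ContainerRm checks before proceeding.
func (r *resource) setRemovalInProgress() bool {
	return !r.removing.CompareAndSwap(false, true)
}

func (r *resource) resetRemovalInProgress() {
	r.removing.Store(false)
}

func main() {
	var r resource
	fmt.Println(r.setRemovalInProgress()) // false: we won the race
	fmt.Println(r.setRemovalInProgress()) // true: already in progress
	r.resetRemovalInProgress()
}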
Example #5
File: cluster.go Project: docker/docker
// Join makes the current Cluster part of an existing swarm cluster.
func (c *Cluster) Join(req types.JoinRequest) error {
	c.controlMutex.Lock()
	defer c.controlMutex.Unlock()
	c.mu.Lock()
	if c.nr != nil {
		c.mu.Unlock()
		return errSwarmExists
	}
	c.mu.Unlock()

	if err := validateAndSanitizeJoinRequest(&req); err != nil {
		return apierrors.NewBadRequestError(err)
	}

	listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
	if err != nil {
		return err
	}

	var advertiseAddr string
	if req.AdvertiseAddr != "" {
		advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
		// For joining, we don't need to provide an advertise address,
		// since the remote side can detect it.
		if err == nil {
			advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort)
		}
	}

	clearPersistentState(c.root)

	nr, err := c.newNodeRunner(nodeStartConfig{
		RemoteAddr:    req.RemoteAddrs[0],
		ListenAddr:    net.JoinHostPort(listenHost, listenPort),
		AdvertiseAddr: advertiseAddr,
		joinAddr:      req.RemoteAddrs[0],
		joinToken:     req.JoinToken,
		availability:  req.Availability,
	})
	if err != nil {
		return err
	}

	c.mu.Lock()
	c.nr = nr
	c.mu.Unlock()

	select {
	case <-time.After(swarmConnectTimeout):
		return errSwarmJoinTimeoutReached
	case err := <-nr.Ready():
		if err != nil {
			c.mu.Lock()
			c.nr = nil
			c.mu.Unlock()
		}
		return err
	}
}
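The final select in Join is a ready-or-timeout wait. Reduced to a runnable sketch, with a plain channel and duration standing in for nr.Ready() and swarmConnectTimeout:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitReady blocks until the ready channel delivers a result or the
// timeout elapses, the same select shape Join uses.
func waitReady(ready <-chan error, timeout time.Duration) error {
	select {
	case <-time.After(timeout):
		return errors.New("join timeout reached")
	case err := <-ready:
		return err
	}
}

func main() {
	ready := make(chan error, 1)
	go func() { ready <- nil }() // simulate the node becoming ready
	fmt.Println(waitReady(ready, 2*time.Second))
}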
Example #6
func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	var secret types.SecretSpec
	if err := json.NewDecoder(r.Body).Decode(&secret); err != nil {
		return errors.NewBadRequestError(err)
	}

	rawVersion := r.URL.Query().Get("version")
	version, err := strconv.ParseUint(rawVersion, 10, 64)
	if err != nil {
		return errors.NewBadRequestError(fmt.Errorf("invalid secret version"))
	}

	id := vars["id"]
	if err := sr.backend.UpdateSecret(id, version, secret); err != nil {
		return err
	}

	return nil
}
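A standalone sketch of the version handling in isolation; updateSecret rejects anything strconv.ParseUint cannot read as a base-10, 64-bit unsigned integer:

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// parseVersion extracts the object version from a raw query string
// the way updateSecret reads r.URL.Query().Get("version").
func parseVersion(rawQuery string) (uint64, error) {
	q, err := url.ParseQuery(rawQuery)
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(q.Get("version"), 10, 64)
}

func main() {
	fmt.Println(parseVersion("version=42"))  // 42 <nil>
	fmt.Println(parseVersion("version=abc")) // 0 and a parse error
}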
Example #7
// WrapHandler returns a new handler function wrapping the previous one in the request chain.
func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		apiVersion := vars["version"]
		if apiVersion == "" {
			apiVersion = v.defaultVersion
		}

		if versions.GreaterThan(apiVersion, v.defaultVersion) {
			return errors.NewBadRequestError(fmt.Errorf("client is newer than server (client API version: %s, server API version: %s)", apiVersion, v.defaultVersion))
		}
		if versions.LessThan(apiVersion, v.minVersion) {
			return errors.NewBadRequestError(fmt.Errorf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, v.minVersion))
		}

		header := fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS)
		w.Header().Set("Server", header)
		ctx = context.WithValue(ctx, "api-version", apiVersion)
		return handler(ctx, w, r, vars)
	}
}
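WrapHandler follows the usual middleware shape: take a handler, return a handler. A self-contained sketch of composing handlers of this shape; handlerFunc, middleware, and loggingMiddleware are illustrative names, not Docker's:

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type handlerFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error

type middleware interface {
	WrapHandler(next handlerFunc) handlerFunc
}

type loggingMiddleware struct{}

func (loggingMiddleware) WrapHandler(next handlerFunc) handlerFunc {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		fmt.Println("before:", r.URL.Path)
		err := next(ctx, w, r, vars)
		fmt.Println("after:", r.URL.Path)
		return err
	}
}

// chain wraps h so that the first middleware in the list runs first.
func chain(h handlerFunc, mws ...middleware) handlerFunc {
	for i := len(mws) - 1; i >= 0; i-- {
		h = mws[i].WrapHandler(h)
	}
	return h
}

func main() {
	h := chain(func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		fmt.Println("handler ran")
		return nil
	}, loggingMiddleware{})

	req := httptest.NewRequest(http.MethodGet, "/ping", nil)
	_ = h(context.Background(), httptest.NewRecorder(), req, nil)
}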
Example #8
// Update updates configuration of a managed swarm cluster.
func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return c.errNoManager(state)
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	swarm, err := getSwarm(ctx, state.controlClient)
	if err != nil {
		return err
	}

	// In update, the client should provide the complete spec of the swarm, including
	// Name and Labels. If a field is specified as 0 or nil, then the default value
	// will be used by swarmkit.
	clusterSpec, err := convert.SwarmSpecToGRPC(spec)
	if err != nil {
		return apierrors.NewBadRequestError(err)
	}

	_, err = state.controlClient.UpdateCluster(
		ctx,
		&swarmapi.UpdateClusterRequest{
			ClusterID: swarm.ID,
			Spec:      &clusterSpec,
			ClusterVersion: &swarmapi.Version{
				Index: version,
			},
			Rotation: swarmapi.KeyRotation{
				WorkerJoinToken:  flags.RotateWorkerToken,
				ManagerJoinToken: flags.RotateManagerToken,
				ManagerUnlockKey: flags.RotateManagerUnlockKey,
			},
		},
	)
	return err
}
Example #9
// BuildFromContext builds a new image from a given context.
func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) {
	if buildOptions.Squash && !bm.backend.HasExperimental() {
		return "", apierrors.NewBadRequestError(errors.New("squash is only supported with experimental mode"))
	}
	buildContext, dockerfileName, err := builder.DetectContextFromRemoteURL(src, remote, pg.ProgressReaderFunc)
	if err != nil {
		return "", err
	}
	defer func() {
		if err := buildContext.Close(); err != nil {
			logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
		}
	}()

	if len(dockerfileName) > 0 {
		buildOptions.Dockerfile = dockerfileName
	}
	b, err := NewBuilder(ctx, buildOptions, bm.backend, builder.DockerIgnoreContext{ModifiableContext: buildContext}, nil)
	if err != nil {
		return "", err
	}
	return b.build(pg.StdoutFormatter, pg.StderrFormatter, pg.Output)
}
Example #10
File: volumes.go Project: Mic92/docker
// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
// It follows this sequence to decide what to mount in each final destination:
//
// 1. Select the previously configured mount points for the container, if any.
// 2. Select the volumes mounted from other containers. These override previously configured mount point destinations.
// 3. Select the bind mounts set by the client. These override previously configured mount point destinations.
// 4. Clean up old volumes that are about to be reassigned.
func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
	binds := map[string]bool{}
	mountPoints := map[string]*volume.MountPoint{}
	defer func() {
		// clean up the container mount points if we return with an error
		if retErr != nil {
			for _, m := range mountPoints {
				if m.Volume == nil {
					continue
				}
				daemon.volumes.Dereference(m.Volume, container.ID)
			}
		}
	}()

	// 1. Read already configured mount points.
	for destination, point := range container.MountPoints {
		mountPoints[destination] = point
	}

	// 2. Read volumes from other containers.
	for _, v := range hostConfig.VolumesFrom {
		containerID, mode, err := volume.ParseVolumesFrom(v)
		if err != nil {
			return err
		}

		c, err := daemon.GetContainer(containerID)
		if err != nil {
			return err
		}

		for _, m := range c.MountPoints {
			cp := &volume.MountPoint{
				Name:        m.Name,
				Source:      m.Source,
				RW:          m.RW && volume.ReadWrite(mode),
				Driver:      m.Driver,
				Destination: m.Destination,
				Propagation: m.Propagation,
				Spec:        m.Spec,
				CopyData:    false,
			}

			if len(cp.Source) == 0 {
				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
				if err != nil {
					return err
				}
				cp.Volume = v
			}

			mountPoints[cp.Destination] = cp
		}
	}

	// 3. Read bind mounts
	for _, b := range hostConfig.Binds {
		bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver)
		if err != nil {
			return err
		}

		// #10618
		_, tmpfsExists := hostConfig.Tmpfs[bind.Destination]
		if binds[bind.Destination] || tmpfsExists {
			return fmt.Errorf("Duplicate mount point '%s'", bind.Destination)
		}

		if bind.Type == mounttypes.TypeVolume {
			// create the volume
			v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil)
			if err != nil {
				return err
			}
			bind.Volume = v
			bind.Source = v.Path()
			// bind.Name is an already existing volume; we need to use that here
			bind.Driver = v.DriverName()
			if bind.Driver == volume.DefaultDriverName {
				setBindModeIfNull(bind)
			}
		}

		binds[bind.Destination] = true
		mountPoints[bind.Destination] = bind
	}

	for _, cfg := range hostConfig.Mounts {
		mp, err := volume.ParseMountSpec(cfg)
		if err != nil {
			return dockererrors.NewBadRequestError(err)
		}

		if binds[mp.Destination] {
			return fmt.Errorf("Duplicate mount point '%s'", cfg.Target)
		}

		if mp.Type == mounttypes.TypeVolume {
			var v volume.Volume
			if cfg.VolumeOptions != nil {
				var driverOpts map[string]string
				if cfg.VolumeOptions.DriverConfig != nil {
					driverOpts = cfg.VolumeOptions.DriverConfig.Options
				}
				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels)
			} else {
				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil)
			}
			if err != nil {
				return err
			}

			if err := label.Relabel(mp.Source, container.MountLabel, false); err != nil {
				return err
			}
			mp.Volume = v
			mp.Name = v.Name()
			mp.Driver = v.DriverName()

			// only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow
			if cv, ok := v.(interface {
				CachedPath() string
			}); ok {
				mp.Source = cv.CachedPath()
			}
		}

		binds[mp.Destination] = true
		mountPoints[mp.Destination] = mp
	}

	container.Lock()

	// 4. Clean up old volumes that are about to be reassigned.
	for _, m := range mountPoints {
		if m.BackwardsCompatible() {
			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
				daemon.volumes.Dereference(mp.Volume, container.ID)
			}
		}
	}
	container.MountPoints = mountPoints

	container.Unlock()

	return nil
}
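The four steps work because every stage writes into the same destination-keyed map, so a later stage silently replaces an earlier entry for the same destination. A stripped-down illustration of that override order:

package main

import "fmt"

func main() {
	// Keyed by destination path, like registerMountPoints' mountPoints map.
	mounts := map[string]string{}

	mounts["/data"] = "existing-volume" // 1. previously configured mount point
	mounts["/data"] = "volumes-from"    // 2. volume from another container overrides it
	mounts["/data"] = "bind-mount"      // 3. client bind mount overrides again

	fmt.Println(mounts["/data"]) // bind-mount: the last writer wins
}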
Example #11
func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := httputils.ParseForm(r); err != nil {
		return err
	}

	since, err := eventTime(r.Form.Get("since"))
	if err != nil {
		return err
	}
	until, err := eventTime(r.Form.Get("until"))
	if err != nil {
		return err
	}

	var (
		timeout        <-chan time.Time
		onlyPastEvents bool
	)
	if !until.IsZero() {
		if until.Before(since) {
			return errors.NewBadRequestError(fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until")))
		}

		now := time.Now()

		onlyPastEvents = until.Before(now)

		if !onlyPastEvents {
			dur := until.Sub(now)
			timeout = time.NewTimer(dur).C
		}
	}

	ef, err := filters.FromParam(r.Form.Get("filters"))
	if err != nil {
		return err
	}

	w.Header().Set("Content-Type", "application/json")
	output := ioutils.NewWriteFlusher(w)
	defer output.Close()
	output.Flush()

	enc := json.NewEncoder(output)

	buffered, l := s.backend.SubscribeToEvents(since, until, ef)
	defer s.backend.UnsubscribeFromEvents(l)

	for _, ev := range buffered {
		if err := enc.Encode(ev); err != nil {
			return err
		}
	}

	if onlyPastEvents {
		return nil
	}

	for {
		select {
		case ev := <-l:
			jev, ok := ev.(events.Message)
			if !ok {
				logrus.Warnf("unexpected event message: %q", ev)
				continue
			}
			if err := enc.Encode(jev); err != nil {
				return err
			}
		case <-timeout:
			return nil
		case <-ctx.Done():
			logrus.Debug("Client context cancelled, stop sending events")
			return nil
		}
	}
}
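The tail of getEvents is a standard three-way select: forward events, stop at the optional until deadline, or stop when the client goes away. A runnable reduction with a plain string channel standing in for the event bus:

package main

import (
	"context"
	"fmt"
	"time"
)

// stream forwards events until the timeout channel fires or the
// client context is cancelled, mirroring getEvents' final loop.
// A nil timeout channel blocks forever, just like the case above
// where `until` is zero and timeout is never assigned.
func stream(ctx context.Context, events <-chan string, timeout <-chan time.Time) {
	for {
		select {
		case ev := <-events:
			fmt.Println("event:", ev)
		case <-timeout:
			fmt.Println("`until` reached, stopping")
			return
		case <-ctx.Done():
			fmt.Println("client context cancelled, stop sending events")
			return
		}
	}
}

func main() {
	events := make(chan string, 1)
	events <- "container start"
	stream(context.Background(), events, time.After(100*time.Millisecond))
}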
Example #12
File: start.go Project: zczc0128/docker
// ContainerStart starts a container.
func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, validateHostname bool, checkpoint string) error {
	if checkpoint != "" && !daemon.HasExperimental() {
		return errors.NewBadRequestError(fmt.Errorf("checkpoint is only supported in experimental mode"))
	}

	container, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}

	if container.IsPaused() {
		return fmt.Errorf("Cannot start a paused container, try unpause instead.")
	}

	if container.IsRunning() {
		err := fmt.Errorf("Container already started")
		return errors.NewErrorWithStatusCode(err, http.StatusNotModified)
	}

	// Windows does not have the backwards compatibility issue here.
	if runtime.GOOS != "windows" {
		// This is kept for backward compatibility - hostconfig should be passed when
		// creating a container, not during start.
		if hostConfig != nil {
			logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12")
			oldNetworkMode := container.HostConfig.NetworkMode
			if err := daemon.setSecurityOptions(container, hostConfig); err != nil {
				return err
			}
			if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil {
				return err
			}
			if err := daemon.setHostConfig(container, hostConfig); err != nil {
				return err
			}
			newNetworkMode := container.HostConfig.NetworkMode
			if string(oldNetworkMode) != string(newNetworkMode) {
				// if the user changed the network mode on start, clean up the
				// old networks. This is a deprecated feature and was removed in Docker 1.12
				container.NetworkSettings.Networks = nil
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
			container.InitDNSHostConfig()
		}
	} else {
		if hostConfig != nil {
			return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create")
		}
	}

	// check if hostConfig is in line with the current system settings.
	// It may happen that cgroups are unmounted or the like.
	if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false, validateHostname); err != nil {
		return err
	}
	// Adapt for old containers in case we have updates in this function and
	// old containers never had a chance to call the new function at the create stage.
	if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil {
		return err
	}

	return daemon.containerStart(container, checkpoint, true)
}
Example #13
// UpdateService updates an existing service to match new properties.
func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, encodedAuth string, registryAuthFrom string) (*apitypes.ServiceUpdateResponse, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	err := c.populateNetworkID(ctx, state.controlClient, &spec)
	if err != nil {
		return nil, err
	}

	serviceSpec, err := convert.ServiceSpecToGRPC(spec)
	if err != nil {
		return nil, apierrors.NewBadRequestError(err)
	}

	currentService, err := getService(ctx, state.controlClient, serviceIDOrName)
	if err != nil {
		return nil, err
	}

	newCtnr := serviceSpec.Task.GetContainer()
	if newCtnr == nil {
		return nil, errors.New("service does not use container tasks")
	}

	if encodedAuth != "" {
		newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
	} else {
		// this is needed because if the encodedAuth isn't being updated then we
		// shouldn't lose it; we continue to use the one that was already present
		var ctnr *swarmapi.ContainerSpec
		switch registryAuthFrom {
		case apitypes.RegistryAuthFromSpec, "":
			ctnr = currentService.Spec.Task.GetContainer()
		case apitypes.RegistryAuthFromPreviousSpec:
			if currentService.PreviousSpec == nil {
				return nil, errors.New("service does not have a previous spec")
			}
			ctnr = currentService.PreviousSpec.Task.GetContainer()
		default:
			return nil, errors.New("unsupported registryAuthFrom value")
		}
		if ctnr == nil {
			return nil, errors.New("service does not use container tasks")
		}
		newCtnr.PullOptions = ctnr.PullOptions
		// update encodedAuth so it can be used to pin image by digest
		if ctnr.PullOptions != nil {
			encodedAuth = ctnr.PullOptions.RegistryAuth
		}
	}

	// retrieve auth config from encoded auth
	authConfig := &apitypes.AuthConfig{}
	if encodedAuth != "" {
		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
			logrus.Warnf("invalid authconfig: %v", err)
		}
	}

	resp := &apitypes.ServiceUpdateResponse{}

	// pin image by digest
	if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
		digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
		if err != nil {
			logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
			resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()))
		} else if newCtnr.Image != digestImage {
			logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
			newCtnr.Image = digestImage
		} else {
			logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
		}
	}

	_, err = state.controlClient.UpdateService(
		ctx,
		&swarmapi.UpdateServiceRequest{
			ServiceID: currentService.ID,
			Spec:      &serviceSpec,
			ServiceVersion: &swarmapi.Version{
				Index: version,
			},
		},
	)

	return resp, err
}
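Both UpdateService above and CreateService in the next example decode encodedAuth the same way: a URL-safe base64 string wrapping a JSON document, with decode failures only logged as warnings. A standalone sketch using a made-up credential payload:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

type authConfig struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

func main() {
	// Hypothetical X-Registry-Auth value: base64url-encoded JSON.
	encoded := base64.URLEncoding.EncodeToString([]byte(`{"username":"u","password":"p"}`))

	var cfg authConfig
	dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encoded)))
	if err := dec.Decode(&cfg); err != nil {
		fmt.Println("invalid authconfig:", err) // warn and carry on, as the cluster code does
		return
	}
	fmt.Printf("%+v\n", cfg)
}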
Example #14
// CreateService creates a new service in a managed swarm cluster.
func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (*apitypes.ServiceCreateResponse, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	err := c.populateNetworkID(ctx, state.controlClient, &s)
	if err != nil {
		return nil, err
	}

	serviceSpec, err := convert.ServiceSpecToGRPC(s)
	if err != nil {
		return nil, apierrors.NewBadRequestError(err)
	}

	ctnr := serviceSpec.Task.GetContainer()
	if ctnr == nil {
		return nil, errors.New("service does not use container tasks")
	}

	if encodedAuth != "" {
		ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
	}

	// retrieve auth config from encoded auth
	authConfig := &apitypes.AuthConfig{}
	if encodedAuth != "" {
		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
			logrus.Warnf("invalid authconfig: %v", err)
		}
	}

	resp := &apitypes.ServiceCreateResponse{}

	// pin image by digest
	if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
		digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
		if err != nil {
			logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
			resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()))
		} else if ctnr.Image != digestImage {
			logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
			ctnr.Image = digestImage
		} else {
			logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)
		}
	}

	r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
	if err != nil {
		return nil, err
	}

	resp.ID = r.Service.ID
	return resp, nil
}
Example #15
// Init initializes a new cluster from a user-provided request.
func (c *Cluster) Init(req types.InitRequest) (string, error) {
	c.controlMutex.Lock()
	defer c.controlMutex.Unlock()
	c.mu.Lock()
	if c.nr != nil {
		if req.ForceNewCluster {
			if err := c.nr.Stop(); err != nil {
				c.mu.Unlock()
				return "", err
			}
		} else {
			c.mu.Unlock()
			return "", errSwarmExists
		}
	}
	c.mu.Unlock()

	if err := validateAndSanitizeInitRequest(&req); err != nil {
		return "", apierrors.NewBadRequestError(err)
	}

	listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
	if err != nil {
		return "", err
	}

	advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
	if err != nil {
		return "", err
	}

	localAddr := listenHost

	// If the local address is undetermined, the advertise address
	// will be used as local address, if it belongs to this system.
	// If the advertise address is not local, then we try to find
	// a system address to use as local address. If this fails,
	// we give up and ask the user to pass the listen address.
	if net.ParseIP(localAddr).IsUnspecified() {
		advertiseIP := net.ParseIP(advertiseHost)

		found := false
		for _, systemIP := range listSystemIPs() {
			if systemIP.Equal(advertiseIP) {
				localAddr = advertiseIP.String()
				found = true
				break
			}
		}

		if !found {
			ip, err := c.resolveSystemAddr()
			if err != nil {
				logrus.Warnf("Could not find a local address: %v", err)
				return "", errMustSpecifyListenAddr
			}
			localAddr = ip.String()
		}
	}

	if !req.ForceNewCluster {
		clearPersistentState(c.root)
	}

	nr, err := c.newNodeRunner(nodeStartConfig{
		forceNewCluster: req.ForceNewCluster,
		autolock:        req.AutoLockManagers,
		LocalAddr:       localAddr,
		ListenAddr:      net.JoinHostPort(listenHost, listenPort),
		AdvertiseAddr:   net.JoinHostPort(advertiseHost, advertisePort),
	})
	if err != nil {
		return "", err
	}
	c.mu.Lock()
	c.nr = nr
	c.mu.Unlock()

	if err := <-nr.Ready(); err != nil {
		if !req.ForceNewCluster { // if the first attempt fails, don't keep state
			if err := clearPersistentState(c.root); err != nil {
				return "", err
			}
		}
		c.mu.Lock()
		c.nr = nil
		c.mu.Unlock()
		return "", err
	}
	state := nr.State()
	if state.swarmNode == nil { // should never happen but protect from panic
		return "", errors.New("invalid cluster state for spec initialization")
	}
	if err := initClusterSpec(state.swarmNode, req.Spec); err != nil {
		return "", err
	}
	return state.NodeID(), nil
}
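Init's local-address fallback turns on net.IP.IsUnspecified: a listen host of 0.0.0.0 or :: cannot be advertised, which is what triggers the system-interface search above. A minimal check:

package main

import (
	"fmt"
	"net"
)

func main() {
	for _, s := range []string{"0.0.0.0", "::", "192.168.1.10"} {
		ip := net.ParseIP(s)
		// IsUnspecified is true for 0.0.0.0 and ::, the cases where
		// Init must fall back to a detected system address.
		fmt.Println(s, "unspecified:", ip.IsUnspecified())
	}
}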