Example #1
// validateVolumesAndBindSettings validates the volume and bind settings passed
// by the caller, and rejects configurations that combine Mounts with the
// legacy Binds, Volumes, or VolumeDriver fields.
func validateVolumesAndBindSettings(c *container.Config, hc *container.HostConfig) error {
	if len(hc.Mounts) > 0 {
		if len(hc.Binds) > 0 {
			return conflictError(fmt.Errorf("must not specify both Binds and Mounts"))
		}

		if len(c.Volumes) > 0 {
			return conflictError(fmt.Errorf("must not specify both Volumes and Mounts"))
		}

		if len(hc.VolumeDriver) > 0 {
			return conflictError(fmt.Errorf("must not specify both VolumeDriver and Mounts"))
		}
	}

	// Ensure all volumes and binds are valid.
	for spec := range c.Volumes {
		if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil {
			return fmt.Errorf("invalid volume spec %q: %v", spec, err)
		}
	}
	for _, spec := range hc.Binds {
		if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil {
			return fmt.Errorf("invalid bind mount spec %q: %v", spec, err)
		}
	}

	return nil
}
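For reference, a minimal, self-contained sketch of the same mutual-exclusion rule is shown below. The config and hostConfig structs and the validateExclusive function are hypothetical stand-ins for container.Config, container.HostConfig, and the checks above, with plain errors instead of conflictError:

package main

import (
	"errors"
	"fmt"
)

type config struct {
	Volumes map[string]struct{}
}

type hostConfig struct {
	Mounts       []string
	Binds        []string
	VolumeDriver string
}

// validateExclusive mirrors the check above: the Mounts API must not be
// combined with the legacy Binds, Volumes, or VolumeDriver fields.
func validateExclusive(c *config, hc *hostConfig) error {
	if len(hc.Mounts) == 0 {
		return nil
	}
	switch {
	case len(hc.Binds) > 0:
		return errors.New("must not specify both Binds and Mounts")
	case len(c.Volumes) > 0:
		return errors.New("must not specify both Volumes and Mounts")
	case len(hc.VolumeDriver) > 0:
		return errors.New("must not specify both VolumeDriver and Mounts")
	}
	return nil
}

func main() {
	hc := &hostConfig{
		Mounts: []string{"type=volume,target=/data"},
		Binds:  []string{"/src:/dst"},
	}
	fmt.Println(validateExclusive(&config{}, hc)) // must not specify both Binds and Mounts
}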
Example #2
// validateMountSettings validates the volume and bind settings passed by the
// caller.
func validateMountSettings(c *container.Config, hc *container.HostConfig) error {
	// It is OK to have len(hc.Mounts) > 0 && (len(hc.Binds) > 0 || len(c.Volumes) > 0 || len(hc.Tmpfs) > 0).

	// Ensure all volumes and binds are valid.
	for spec := range c.Volumes {
		if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil {
			return fmt.Errorf("invalid volume spec %q: %v", spec, err)
		}
	}
	for _, spec := range hc.Binds {
		if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil {
			return fmt.Errorf("invalid bind mount spec %q: %v", spec, err)
		}
	}

	return nil
}
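A rough, runnable sketch of this per-spec validation loop follows. parseSpec is a deliberately simplified, hypothetical stand-in for volume.ParseMountRaw and only checks the host:container[:mode] shape; the real parser is platform-specific and far richer:

package main

import (
	"fmt"
	"strings"
)

// parseSpec is a stand-in for volume.ParseMountRaw: it only checks the
// host:container[:mode] shape and is nowhere near the real parser.
func parseSpec(spec string) error {
	parts := strings.Split(spec, ":")
	if len(parts) < 2 || len(parts) > 3 {
		return fmt.Errorf("expected host:container[:mode], got %q", spec)
	}
	for _, p := range parts {
		if p == "" {
			return fmt.Errorf("empty field in %q", spec)
		}
	}
	return nil
}

// validateSpecs walks the volume and bind specs, returning the first parse error.
func validateSpecs(volumes map[string]struct{}, binds []string) error {
	for spec := range volumes {
		if err := parseSpec(spec); err != nil {
			return fmt.Errorf("invalid volume spec %q: %v", spec, err)
		}
	}
	for _, spec := range binds {
		if err := parseSpec(spec); err != nil {
			return fmt.Errorf("invalid bind mount spec %q: %v", spec, err)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateSpecs(nil, []string{"/host/data:/data:ro", "broken"}))
}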
Example #3
// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
// It follows this sequence to decide what to mount at each final destination:
//
// 1. Select the previously configured mount points for the container, if any.
// 2. Select the volumes mounted from other containers. These override previously configured mount point destinations.
// 3. Select the bind mounts and mounts set by the client. These override previously configured mount point destinations.
// 4. Clean up old volumes that are about to be reassigned.
func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
	binds := map[string]bool{}
	mountPoints := map[string]*volume.MountPoint{}
	defer func() {
		// clean up the container mount points if we are returning with an error
		if retErr != nil {
			for _, m := range mountPoints {
				if m.Volume == nil {
					continue
				}
				daemon.volumes.Dereference(m.Volume, container.ID)
			}
		}
	}()

	// 1. Read already configured mount points.
	for destination, point := range container.MountPoints {
		mountPoints[destination] = point
	}

	// 2. Read volumes from other containers.
	for _, v := range hostConfig.VolumesFrom {
		containerID, mode, err := volume.ParseVolumesFrom(v)
		if err != nil {
			return err
		}

		c, err := daemon.GetContainer(containerID)
		if err != nil {
			return err
		}

		for _, m := range c.MountPoints {
			cp := &volume.MountPoint{
				Name:        m.Name,
				Source:      m.Source,
				RW:          m.RW && volume.ReadWrite(mode),
				Driver:      m.Driver,
				Destination: m.Destination,
				Propagation: m.Propagation,
				Spec:        m.Spec,
				CopyData:    false,
			}

			if len(cp.Source) == 0 {
				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
				if err != nil {
					return err
				}
				cp.Volume = v
			}

			mountPoints[cp.Destination] = cp
		}
	}

	// 3. Read bind mounts.
	for _, b := range hostConfig.Binds {
		bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver)
		if err != nil {
			return err
		}

		// #10618
		_, tmpfsExists := hostConfig.Tmpfs[bind.Destination]
		if binds[bind.Destination] || tmpfsExists {
			return fmt.Errorf("Duplicate mount point '%s'", bind.Destination)
		}

		if bind.Type == mounttypes.TypeVolume {
			// create the volume
			v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil)
			if err != nil {
				return err
			}
			bind.Volume = v
			bind.Source = v.Path()
			// bind.Name may refer to an already existing volume; use that volume's driver here
			bind.Driver = v.DriverName()
			if bind.Driver == volume.DefaultDriverName {
				setBindModeIfNull(bind)
			}
		}

		binds[bind.Destination] = true
		mountPoints[bind.Destination] = bind
	}

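	// Read the mounts set by the client through the Mounts API.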
	for _, cfg := range hostConfig.Mounts {
		mp, err := volume.ParseMountSpec(cfg)
		if err != nil {
			return dockererrors.NewBadRequestError(err)
		}

		if binds[mp.Destination] {
			return fmt.Errorf("Duplicate mount point '%s'", cfg.Target)
		}

		if mp.Type == mounttypes.TypeVolume {
			var v volume.Volume
			if cfg.VolumeOptions != nil {
				var driverOpts map[string]string
				if cfg.VolumeOptions.DriverConfig != nil {
					driverOpts = cfg.VolumeOptions.DriverConfig.Options
				}
				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels)
			} else {
				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil)
			}
			if err != nil {
				return err
			}

			if err := label.Relabel(mp.Source, container.MountLabel, false); err != nil {
				return err
			}
			mp.Volume = v
			mp.Name = v.Name()
			mp.Driver = v.DriverName()

			// only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow
			if cv, ok := v.(interface {
				CachedPath() string
			}); ok {
				mp.Source = cv.CachedPath()
			}
		}

		binds[mp.Destination] = true
		mountPoints[mp.Destination] = mp
	}

	container.Lock()

	// 4. Clean up old volumes that are about to be reassigned.
	for _, m := range mountPoints {
		if m.BackwardsCompatible() {
			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
				daemon.volumes.Dereference(mp.Volume, container.ID)
			}
		}
	}
	container.MountPoints = mountPoints

	container.Unlock()

	return nil
}
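The core bookkeeping above is a map keyed by destination path: each later stage (volumes from other containers, then client binds and mounts) overwrites earlier entries at the same destination, while duplicates within the client-supplied set are rejected. A stripped-down, self-contained sketch of that precedence logic, with a hypothetical mountPoint type and no daemon or volume store, might look like this:

package main

import "fmt"

type mountPoint struct {
	Destination string
	Source      string // where it came from, for illustration only
}

// mergeMountPoints applies the same precedence as registerMountPoints:
// existing container mount points first, then volumes-from, then client
// binds/mounts, each stage overriding earlier entries at the same destination.
// Duplicates within the client-supplied set are rejected.
func mergeMountPoints(existing, volumesFrom, client []mountPoint) (map[string]mountPoint, error) {
	mountPoints := map[string]mountPoint{}
	for _, m := range existing {
		mountPoints[m.Destination] = m
	}
	for _, m := range volumesFrom {
		mountPoints[m.Destination] = m
	}
	seen := map[string]bool{}
	for _, m := range client {
		if seen[m.Destination] {
			return nil, fmt.Errorf("duplicate mount point '%s'", m.Destination)
		}
		seen[m.Destination] = true
		mountPoints[m.Destination] = m
	}
	return mountPoints, nil
}

func main() {
	got, err := mergeMountPoints(
		[]mountPoint{{"/data", "previous config"}},
		[]mountPoint{{"/data", "volumes-from"}},
		[]mountPoint{{"/data", "client bind"}, {"/logs", "client bind"}},
	)
	fmt.Println(got, err) // /data resolves to the client bind, /logs is added
}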
Example #4
// createContainerPlatformSpecificSettings performs platform-specific container create functionality
func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
	// Make sure the host config has the default daemon isolation if not specified by caller.
	if containertypes.Isolation(hostConfig.Isolation).IsDefault() {
		hostConfig.Isolation = daemon.defaultIsolation
	}

	for spec := range config.Volumes {

		mp, err := volume.ParseMountRaw(spec, hostConfig.VolumeDriver)
		if err != nil {
			return fmt.Errorf("Unrecognised volume spec: %v", err)
		}

		// If the mountpoint doesn't have a name, generate one.
		if len(mp.Name) == 0 {
			mp.Name = stringid.GenerateNonCryptoID()
		}

		// Skip volumes for which we already have something mounted on that
		// destination because of a --volumes-from.
		if container.IsDestinationMounted(mp.Destination) {
			continue
		}

		volumeDriver := hostConfig.VolumeDriver

		// Create the volume in the volume driver. If one with this name doesn't
		// already exist, a new one will be created.
		v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil)
		if err != nil {
			return err
		}

		// FIXME Windows: This code block is present in the Linux version and
		// allows the contents to be copied to the container FS prior to it
		// being started. However, the function utilizes the FollowSymLinkInScope
		// path which does not cope with Windows volume-style file paths. There
		// is a separate effort to resolve this (@swernli), so this processing
		// is deferred for now. A case where this would be useful is when
		// a dockerfile includes a VOLUME statement, but something is created
		// in that directory during the dockerfile processing. What this means
		// on Windows for TP5 is that in that scenario, the contents will not be
		// copied, but that's (somewhat) OK as HCS will bomb out soon after, as
		// it doesn't support mapped directories which have contents in the
		// destination path anyway.
		//
		// Example for repro later:
		//   FROM windowsservercore
		//   RUN mkdir c:\myvol
		//   RUN copy c:\windows\system32\ntdll.dll c:\myvol
		//   VOLUME "c:\myvol"
		//
		// Then
		//   docker build -t vol .
		//   docker run -it --rm vol cmd  <-- This is where HCS will error out.
		//
		//	// never attempt to copy existing content in a container FS to a shared volume
		//	if v.DriverName() == volume.DefaultDriverName {
		//		if err := container.CopyImagePathContent(v, mp.Destination); err != nil {
		//			return err
		//		}
		//	}

		// Add it to container.MountPoints
		container.AddMountPointWithVolume(mp.Destination, v, mp.RW)
	}
	return nil
}
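The loop above gives every anonymous VOLUME a generated name and skips destinations already claimed through --volumes-from. A simplified, self-contained sketch of that selection step is below; generateName stands in for stringid.GenerateNonCryptoID, a plain map of destinations replaces container.IsDestinationMounted, and config.Volumes is flattened to a destination-to-name map for illustration:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// generateName is a stand-in for stringid.GenerateNonCryptoID.
func generateName() string {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return hex.EncodeToString(b)
}

// namedVolumesToCreate decides which volume entries still need a volume:
// anonymous ones get a generated name, and destinations that are already
// mounted (e.g. via --volumes-from) are skipped.
func namedVolumesToCreate(volumes map[string]string, alreadyMounted map[string]bool) map[string]string {
	create := map[string]string{} // destination -> volume name
	for destination, name := range volumes {
		if alreadyMounted[destination] {
			continue
		}
		if name == "" {
			name = generateName()
		}
		create[destination] = name
	}
	return create
}

func main() {
	vols := map[string]string{`c:\myvol`: "", `c:\shared`: "projdata"}
	fmt.Println(namedVolumesToCreate(vols, map[string]bool{`c:\shared`: true}))
}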