Example 1
0
func parseIP(ipStr string) net.IP {
	ip := net.ParseIP(ipStr)
	if ip == nil {
		log.Warnf("invalid remote IP address: %q", ipStr)
	}
	return ip
}
Example 2
0
// Write attempts to flush the events to the downstream sink until it succeeds
// or the sink is closed.
func (rs *retryingSink) Write(events ...Event) error {
	rs.mu.Lock()
	defer rs.mu.Unlock()

retry:

	if rs.closed {
		return ErrSinkClosed
	}

	if !rs.proceed() {
		logrus.Warnf("%v encountered too many errors, backing off", rs.sink)
		rs.wait(rs.failures.backoff)
		goto retry
	}

	if err := rs.write(events...); err != nil {
		if err == ErrSinkClosed {
			// terminal!
			return err
		}

		logrus.Errorf("retryingsink: error writing events: %v, retrying", err)
		goto retry
	}

	return nil
}
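The proceed, wait, and failures members used above are not shown in this snippet. As a purely illustrative sketch of the bookkeeping they imply (the type, field, and threshold names here are assumptions, not the actual implementation), the failure state could be a counter plus a capped exponential backoff:

// failureState is a hypothetical illustration of what rs.proceed() and
// rs.failures.backoff might be backed by: too many recent failures pause
// writes, and each new failure grows the backoff up to a cap.
type failureState struct {
	count   int
	backoff time.Duration
}

// proceed reports whether the sink is still under the failure threshold.
func (f *failureState) proceed(threshold int) bool {
	return f.count < threshold
}

// record notes another failure and doubles the backoff up to maxBackoff.
func (f *failureState) record(maxBackoff time.Duration) {
	f.count++
	if f.backoff == 0 {
		f.backoff = 100 * time.Millisecond
	} else if f.backoff < maxBackoff {
		f.backoff *= 2
	}
}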
Example 3
0
func (graph *Graph) restore() error {
	dir, err := ioutil.ReadDir(graph.root)
	if err != nil {
		return err
	}
	var ids = []string{}
	for _, v := range dir {
		id := v.Name()
		if graph.driver.Exists(id) {
			img, err := graph.loadImage(id)
			if err != nil {
				logrus.Warnf("ignoring image %s, it could not be restored: %v", id, err)
				continue
			}
			graph.imageMutex.Lock(img.Parent)
			graph.parentRefs[img.Parent]++
			graph.imageMutex.Unlock(img.Parent)
			ids = append(ids, id)
		}
	}

	graph.idIndex = truncindex.NewTruncIndex(ids)
	logrus.Debugf("Restored %d elements", len(ids))
	return nil
}
Example 4
0
func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) {
	buf := new(syscall.Statfs_t)
	err := syscall.Statfs(loopFile, buf)
	if err != nil {
		log.Warnf("Couldn't stat loopfile filesystem %v: %v", loopFile, err)
		return 0, err
	}
	return buf.Bfree * uint64(buf.Bsize), nil
}
Example 5
0
func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) {
	if loopFile != "" {
		fi, err := os.Stat(loopFile)
		if err != nil {
			log.Warnf("Couldn't stat loopfile %v: %v", loopFile, err)
			return false, err
		}
		return fi.Mode().IsRegular(), nil
	}
	return false, nil
}
Example 6
0
func checkPriorDriver(name, root string) {
	priorDrivers := []string{}
	for prior := range drivers {
		if prior != name && prior != "vfs" {
			if _, err := os.Stat(path.Join(root, prior)); err == nil {
				priorDrivers = append(priorDrivers, prior)
			}
		}
	}
	if len(priorDrivers) > 0 {
		log.Warnf("Graphdriver %s selected. Your graphdriver directory %s already contains data managed by other graphdrivers: %s", name, root, strings.Join(priorDrivers, ","))
	}
}
Example 7
0
// run is the main goroutine to flush events to the target sink.
func (eq *eventQueue) run() {
	for {
		block := eq.next()

		if block == nil {
			return // nil block means event queue is closed.
		}

		if err := eq.sink.Write(block...); err != nil {
			logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err)
		}

		for _, listener := range eq.listeners {
			listener.egress(block...)
		}
	}
}
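eq.next() is not shown; the comment above only tells us it returns nil once the queue is closed. One hypothetical way to get those semantics (the real queue may well use a different synchronization primitive) is a channel field that is closed on shutdown:

// next returns the next block of events, or nil once the queue has been
// closed and drained. Illustrative sketch only; eq.events is an assumed
// chan []Event field, not part of the snippet above.
func (eq *eventQueue) next() []Event {
	block, ok := <-eq.events
	if !ok {
		return nil
	}
	return block
}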
Example 8
0
func (p *v2Puller) Pull(tag string) (fallback bool, err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		logrus.Warnf("Error getting v2 registry: %v", err)
		return true, err
	}

	p.sessionID = stringid.GenerateRandomID()

	if err := p.pullV2Repository(tag); err != nil {
		if registry.ContinueOnError(err) {
			logrus.Debugf("Error trying v2 registry: %v", err)
			return true, err
		}
		return false, err
	}
	return false, nil
}
Example 9
0
func init() {
	const portRangeKernelParam = "/proc/sys/net/ipv4/ip_local_port_range"
	portRangeFallback := fmt.Sprintf("using fallback port range %d-%d", beginPortRange, endPortRange)

	file, err := os.Open(portRangeKernelParam)
	if err != nil {
		log.Warnf("port allocator - %s due to error: %v", portRangeFallback, err)
		return
	}
	defer file.Close()
	var start, end int
	n, err := fmt.Fscanf(bufio.NewReader(file), "%d\t%d", &start, &end)
	if n != 2 || err != nil {
		if err == nil {
			err = fmt.Errorf("unexpected count of parsed numbers (%d)", n)
		}
		log.Errorf("port allocator - failed to parse system ephemeral port range from %s - %s: %v", portRangeKernelParam, portRangeFallback, err)
		return
	}
	beginPortRange = start
	endPortRange = end
}
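For reference, the kernel publishes the ephemeral range as two whitespace-separated integers, so the file read above typically contains something like (exact values vary by system):

	32768	60999

which is why the Fscanf format string expects exactly two numbers.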
Example 10
0
func lookupGidByName(nameOrGid string) (int, error) {
	groupFile, err := user.GetGroupPath()
	if err != nil {
		return -1, err
	}
	groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool {
		return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid
	})
	if err != nil {
		return -1, err
	}
	if len(groups) > 0 {
		return groups[0].Gid, nil
	}
	gid, err := strconv.Atoi(nameOrGid)
	if err == nil {
		logrus.Warnf("Could not find GID %d", gid)
		return gid, nil
	}
	return -1, fmt.Errorf("Group %s not found", nameOrGid)
}
Example 11
0
func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) {
	req, err := http.NewRequest("POST", "/"+serviceMethod, data)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Accept", versionMimetype)
	req.URL.Scheme = c.scheme
	req.URL.Host = c.addr

	var retries int
	start := time.Now()

	for {
		resp, err := c.http.Do(req)
		if err != nil {
			if !retry {
				return nil, err
			}

			timeOff := backoff(retries)
			if abort(start, timeOff) {
				return nil, err
			}
			retries++
			logrus.Warnf("Unable to connect to plugin: %s, retrying in %v", c.addr, timeOff)
			time.Sleep(timeOff)
			continue
		}

		if resp.StatusCode != http.StatusOK {
			remoteErr, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return nil, &remoteError{err.Error(), serviceMethod}
			}
			return nil, &remoteError{string(remoteErr), serviceMethod}
		}
		return resp.Body, nil
	}
}
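The backoff and abort helpers are not part of this snippet. A minimal illustrative sketch of helpers with the behavior the retry loop assumes (exponential backoff capped by an overall deadline; the 30-second cap is an assumption, not the actual value):

// backoff returns an exponentially growing wait time, capped at a maximum.
// Illustrative sketch only; the real helper may differ.
func backoff(retries int) time.Duration {
	b, max := 1*time.Second, 30*time.Second
	for b < max && retries > 0 {
		b *= 2
		retries--
	}
	if b > max {
		b = max
	}
	return b
}

// abort reports whether sleeping for timeOff would push the total retry time
// past the overall deadline. Illustrative sketch only.
func abort(start time.Time, timeOff time.Duration) bool {
	return timeOff+time.Since(start) >= 30*time.Second
}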
Example 12
0
func loadWithRetry(name string, retry bool) (*Plugin, error) {
	registry := newLocalRegistry()
	start := time.Now()

	var retries int
	for {
		pl, err := registry.Plugin(name)
		if err != nil {
			if !retry {
				return nil, err
			}

			timeOff := backoff(retries)
			if abort(start, timeOff) {
				return nil, err
			}
			retries++
			logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff)
			time.Sleep(timeOff)
			continue
		}

		storage.Lock()
		storage.plugins[name] = pl
		storage.Unlock()

		err = pl.activate()

		if err != nil {
			storage.Lock()
			delete(storage.plugins, name)
			storage.Unlock()
		}

		return pl, err
	}
}
Example 13
0
// Images returns a filtered list of images. filterArgs is a JSON-encoded set
// of filter arguments which will be interpreted by pkg/parsers/filters.
// filter is a shell glob string applied to repository names. The argument
// named all controls whether all images in the graph are filtered, or just
// the heads.
func (s *TagStore) Images(filterArgs, filter string, all bool) ([]*types.Image, error) {
	var (
		allImages  map[string]*image.Image
		err        error
		filtTagged = true
		filtLabel  = false
	)

	imageFilters, err := filters.FromParam(filterArgs)
	if err != nil {
		return nil, err
	}
	for name := range imageFilters {
		if _, ok := acceptedImageFilterTags[name]; !ok {
			return nil, fmt.Errorf("Invalid filter '%s'", name)
		}
	}

	if i, ok := imageFilters["dangling"]; ok {
		for _, value := range i {
			if v := strings.ToLower(value); v == "true" {
				filtTagged = false
			} else if v != "false" {
				return nil, fmt.Errorf("Invalid filter 'dangling=%s'", v)
			}
		}
	}

	_, filtLabel = imageFilters["label"]

	if all && filtTagged {
		allImages = s.graph.Map()
	} else {
		allImages = s.graph.Heads()
	}

	lookup := make(map[string]*types.Image)
	s.Lock()
	for repoName, repository := range s.Repositories {
		filterTagName := ""
		if filter != "" {
			filterName := filter
			// Test if the tag was in there, if yes, get the name
			if strings.Contains(filterName, ":") {
				filterWithTag := strings.Split(filter, ":")
				filterName = filterWithTag[0]
				filterTagName = filterWithTag[1]
			}
			if match, _ := path.Match(filterName, repoName); !match {
				continue
			}
			if filterTagName != "" {
				if _, ok := repository[filterTagName]; !ok {
					continue
				}
			}
		}
		for ref, id := range repository {
			imgRef := utils.ImageReference(repoName, ref)
			if !strings.Contains(imgRef, filterTagName) {
				continue
			}
			image, err := s.graph.Get(id)
			if err != nil {
				logrus.Warnf("couldn't load %s from %s: %s", id, imgRef, err)
				continue
			}

			if lImage, exists := lookup[id]; exists {
				if filtTagged {
					if utils.DigestReference(ref) {
						lImage.RepoDigests = append(lImage.RepoDigests, imgRef)
					} else { // Tag Ref.
						lImage.RepoTags = append(lImage.RepoTags, imgRef)
					}
				}
			} else {
				// Remove this image from allImages so it is not listed again
				// below among the untagged ("<none>") images.
				delete(allImages, id)

				if len(imageFilters["label"]) > 0 {
					if image.Config == nil {
						// Very old images do not have image.Config (or even labels)
						continue
					}
					// We are now sure image.Config is not nil
					if !imageFilters.MatchKVList("label", image.Config.Labels) {
						continue
					}
				}
				if filtTagged {
					newImage := newImage(image, s.graph.GetParentsSize(image))

					if utils.DigestReference(ref) {
						newImage.RepoTags = []string{}
						newImage.RepoDigests = []string{imgRef}
					} else {
						newImage.RepoTags = []string{imgRef}
						newImage.RepoDigests = []string{}
					}

					lookup[id] = newImage
				}
			}

		}
	}
	s.Unlock()

	images := []*types.Image{}
	for _, value := range lookup {
		images = append(images, value)
	}

	// Display images which aren't part of a repository/tag
	if filter == "" || filtLabel {
		for _, image := range allImages {
			if len(imageFilters["label"]) > 0 {
				if image.Config == nil {
					// Very old images do not have image.Config (or even labels)
					continue
				}
				// We are now sure image.Config is not nil
				if !imageFilters.MatchKVList("label", image.Config.Labels) {
					continue
				}
			}
			newImage := newImage(image, s.graph.GetParentsSize(image))
			newImage.RepoTags = []string{"<none>:<none>"}
			newImage.RepoDigests = []string{"<none>@<none>"}

			images = append(images, newImage)
		}
	}

	sort.Sort(sort.Reverse(byCreated(images)))

	return images, nil
}
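For context, filterArgs is the JSON encoding of a map from filter name to a list of values (the format handled by pkg/parsers/filters); an illustrative value might look like:

	{"dangling":["true"],"label":["com.example.team=infra"]}

Only names present in acceptedImageFilterTags are accepted; anything else is rejected with the "Invalid filter" error above.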
Example 14
0
func (devices *DeviceSet) initDevmapper(doInit bool) error {
	if os.Getenv("DEBUG") != "" {
		devicemapper.LogInitVerbose(devicemapper.LogLevelDebug)
	} else {
		devicemapper.LogInitVerbose(devicemapper.LogLevelWarn)
	}
	// give ourselves to libdm as a log handler
	devicemapper.LogInit(devices)

	_, err := devicemapper.GetDriverVersion()
	if err != nil {
		// Can't even get driver version, assume not supported
		return graphdriver.ErrNotSupported
	}

	// https://github.com/docker/docker/issues/4036
	if supported := devicemapper.UdevSetSyncSupport(true); !supported {
		log.Warnf("Udev sync is not supported. This will lead to unexpected behavior, data loss and errors")
	}
	log.Debugf("devicemapper: udev sync support: %v", devicemapper.UdevSyncSupported())

	if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
		return err
	}

	// Set the device prefix from the device id and inode of the docker root dir

	st, err := os.Stat(devices.root)
	if err != nil {
		return fmt.Errorf("Error looking up dir %s: %s", devices.root, err)
	}
	sysSt := st.Sys().(*syscall.Stat_t)
	// "reg-" stands for "regular file".
	// In the future we might use "dev-" for "device file", etc.
	// docker-maj,min[-inode] stands for:
	//	- Managed by docker
	//	- The target of this device is at major <maj> and minor <min>
	//	- If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
	devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
	log.Debugf("Generated prefix: %s", devices.devicePrefix)

	// Check for the existence of the thin-pool device
	log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
	info, err := devicemapper.GetInfo(devices.getPoolName())
	if info == nil {
		log.Debugf("Error device devicemapper.GetInfo: %s", err)
		return err
	}

	// It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files
	// that are not Close-on-exec, and lxc-start will die if it inherits any unexpected files,
	// so we add this bad hack to make sure the file closes itself
	setCloseOnExec("/dev/mapper/control")

	// Make sure the sparse images exist in <root>/devicemapper/data and
	// <root>/devicemapper/metadata

	createdLoopback := false

	// If the pool doesn't exist, create it
	if info.Exists == 0 && devices.thinPoolDevice == "" {
		log.Debugf("Pool doesn't exist. Creating it.")

		var (
			dataFile     *os.File
			metadataFile *os.File
		)

		if devices.dataDevice == "" {
			// Make sure the sparse images exist in <root>/devicemapper/data

			hasData := devices.hasImage("data")

			if !doInit && !hasData {
				return errors.New("Loopback data file not found")
			}

			if !hasData {
				createdLoopback = true
			}

			data, err := devices.ensureImage("data", devices.dataLoopbackSize)
			if err != nil {
				log.Debugf("Error device ensureImage (data): %s", err)
				return err
			}

			dataFile, err = devicemapper.AttachLoopDevice(data)
			if err != nil {
				return err
			}
			devices.dataLoopFile = data
			devices.dataDevice = dataFile.Name()
		} else {
			dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600)
			if err != nil {
				return err
			}
		}
		defer dataFile.Close()

		if devices.metadataDevice == "" {
			// Make sure the sparse images exist in <root>/devicemapper/metadata

			hasMetadata := devices.hasImage("metadata")

			if !doInit && !hasMetadata {
				return errors.New("Loopback metadata file not found")
			}

			if !hasMetadata {
				createdLoopback = true
			}

			metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
			if err != nil {
				log.Debugf("Error device ensureImage (metadata): %s", err)
				return err
			}

			metadataFile, err = devicemapper.AttachLoopDevice(metadata)
			if err != nil {
				return err
			}
			devices.metadataLoopFile = metadata
			devices.metadataDevice = metadataFile.Name()
		} else {
			metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600)
			if err != nil {
				return err
			}
		}
		defer metadataFile.Close()

		if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
			return err
		}
	}

	// If we didn't just create the data or metadata image, we need to
	// load the transaction id and migrate old metadata
	if !createdLoopback {
		if err = devices.initMetaData(); err != nil {
			return err
		}
	}

	// Right now this loads only NextDeviceId. If there is more metadata
	// down the line, we might have to move it earlier.
	if err = devices.loadDeviceSetMetaData(); err != nil {
		return err
	}

	// Setup the base image
	if doInit {
		if err := devices.setupBaseImage(); err != nil {
			log.Debugf("Error device setupBaseImage: %s", err)
			return err
		}
	}

	return nil
}
Example 15
0
func InitDriver(job *engine.Job) error {
	var (
		networkv4      *net.IPNet
		networkv6      *net.IPNet
		addrv4         net.Addr
		addrsv6        []net.Addr
		enableIPTables = job.GetenvBool("EnableIptables")
		enableIPv6     = job.GetenvBool("EnableIPv6")
		icc            = job.GetenvBool("InterContainerCommunication")
		ipMasq         = job.GetenvBool("EnableIpMasq")
		ipForward      = job.GetenvBool("EnableIpForward")
		bridgeIP       = job.Getenv("BridgeIP")
		bridgeIPv6     = "fe80::1/64"
		fixedCIDR      = job.Getenv("FixedCIDR")
		fixedCIDRv6    = job.Getenv("FixedCIDRv6")
	)

	if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" {
		defaultBindingIP = net.ParseIP(defaultIP)
	}

	bridgeIface = job.Getenv("BridgeIface")
	usingDefaultBridge := false
	if bridgeIface == "" {
		usingDefaultBridge = true
		bridgeIface = DefaultNetworkBridge
	}

	addrv4, addrsv6, err := networkdriver.GetIfaceAddr(bridgeIface)

	if err != nil {
		// No bridge exists yet, so create one.
		// If we're not using the default bridge, fail without trying to create it
		if !usingDefaultBridge {
			return err
		}

		// If the iface is not found, try to create it
		if err := configureBridge(bridgeIP, bridgeIPv6, enableIPv6); err != nil {
			return err
		}

		addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
		if err != nil {
			return err
		}

		if fixedCIDRv6 != "" {
			// Setting route to global IPv6 subnet
			log.Infof("Adding route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface)
			if err := netlink.AddRoute(fixedCIDRv6, "", "", bridgeIface); err != nil {
				log.Fatalf("Could not add route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface)
			}
		}
	} else {
		// Bridge exists already, getting info...
		// Validate that the bridge ip matches the ip specified by BridgeIP
		if bridgeIP != "" {
			networkv4 = addrv4.(*net.IPNet)
			bip, _, err := net.ParseCIDR(bridgeIP)
			if err != nil {
				return err
			}
			if !networkv4.IP.Equal(bip) {
				return fmt.Errorf("Bridge ip (%s) does not match existing bridge configuration %s", networkv4.IP, bip)
			}
		}

		// A bridge might exist but not have any IPv6 address associated with it yet
		// (for example, an existing Docker installation that has only been used
		// with IPv4 and whose docker0 is already set up). In that case, we can
		// perform the IPv6 bridge init here; otherwise we will error out below
		// if --ipv6=true.
		if len(addrsv6) == 0 && enableIPv6 {
			if err := setupIPv6Bridge(bridgeIPv6); err != nil {
				return err
			}
			// Recheck addresses now that IPv6 is setup on the bridge
			addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
			if err != nil {
				return err
			}
		}

		// TODO: Check if route to fixedCIDRv6 is set
	}

	if enableIPv6 {
		bip6, _, err := net.ParseCIDR(bridgeIPv6)
		if err != nil {
			return err
		}
		found := false
		for _, addrv6 := range addrsv6 {
			networkv6 = addrv6.(*net.IPNet)
			if networkv6.IP.Equal(bip6) {
				found = true
				break
			}
		}
		if !found {
			return fmt.Errorf("Bridge IPv6 does not match existing bridge configuration %s", bip6)
		}
	}

	networkv4 = addrv4.(*net.IPNet)

	if enableIPv6 {
		if len(addrsv6) == 0 {
			return errors.New("IPv6 enabled but no IPv6 detected")
		}
		bridgeIPv6Addr = networkv6.IP
	}

	// Configure iptables for link support
	if enableIPTables {
		if err := setupIPTables(addrv4, icc, ipMasq); err != nil {
			return err
		}

	}

	if ipForward {
		// Enable IPv4 forwarding
		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
			log.Warnf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
		}

		if fixedCIDRv6 != "" {
			// Enable IPv6 forwarding
			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/default/forwarding", []byte{'1', '\n'}, 0644); err != nil {
				log.Warnf("WARNING: unable to enable IPv6 default forwarding: %s\n", err)
			}
			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/all/forwarding", []byte{'1', '\n'}, 0644); err != nil {
				log.Warnf("WARNING: unable to enable IPv6 all forwarding: %s\n", err)
			}
		}
	}

	// We can always try removing the iptables
	if err := iptables.RemoveExistingChain("DOCKER", iptables.Nat); err != nil {
		return err
	}

	if enableIPTables {
		_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Nat)
		if err != nil {
			return err
		}
		chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
		if err != nil {
			return err
		}
		portmapper.SetIptablesChain(chain)
	}

	bridgeIPv4Network = networkv4
	if fixedCIDR != "" {
		_, subnet, err := net.ParseCIDR(fixedCIDR)
		if err != nil {
			return err
		}
		log.Debugf("Subnet: %v", subnet)
		if err := ipAllocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil {
			return err
		}
	}

	if fixedCIDRv6 != "" {
		_, subnet, err := net.ParseCIDR(fixedCIDRv6)
		if err != nil {
			return err
		}
		log.Debugf("Subnet: %v", subnet)
		if err := ipAllocator.RegisterSubnet(subnet, subnet); err != nil {
			return err
		}
		globalIPv6Network = subnet
	}

	// Block BridgeIP in IP allocator
	ipAllocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP)

	// https://github.com/docker/docker/issues/2768
	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeIPv4Network.IP)

	for name, f := range map[string]engine.Handler{
		"allocate_interface": Allocate,
		"release_interface":  Release,
		"allocate_port":      AllocatePort,
		"link":               LinkContainers,
	} {
		if err := job.Eng.Register(name, f); err != nil {
			return err
		}
	}
	return nil
}
Example 16
0
func (s *TagStore) exportImage(name, tempdir string) error {
	for n := name; n != ""; {
		img, err := s.LookupImage(n)
		if err != nil || img == nil {
			return fmt.Errorf("No such image %s", n)
		}

		// temporary directory
		tmpImageDir := filepath.Join(tempdir, n)
		if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil {
			if os.IsExist(err) {
				return nil
			}
			return err
		}

		var version = "1.0"
		var versionBuf = []byte(version)

		if err := ioutil.WriteFile(filepath.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil {
			return err
		}

		imageInspectRaw, err := json.Marshal(img)
		if err != nil {
			return err
		}

		// serialize json
		jsonFile, err := os.Create(filepath.Join(tmpImageDir, "json"))
		if err != nil {
			return err
		}

		written, err := jsonFile.Write(imageInspectRaw)
		if err != nil {
			return err
		}
		if written != len(imageInspectRaw) {
			logrus.Warnf("%d byes should have been written instead %d have been written", written, len(imageInspectRaw))
		}

		// serialize filesystem
		fsTar, err := os.Create(filepath.Join(tmpImageDir, "layer.tar"))
		if err != nil {
			return err
		}
		if err := s.ImageTarLayer(n, fsTar); err != nil {
			return err
		}

		for _, fname := range []string{"", "VERSION", "json", "layer.tar"} {
			if err := os.Chtimes(filepath.Join(tmpImageDir, fname), img.Created, img.Created); err != nil {
				return err
			}
		}

		// try again with parent
		n = img.Parent
	}
	return nil
}
Example 17
0
// Allocate an external port and map it to the interface
func AllocatePort(job *engine.Job) error {
	var (
		err error

		ip            = defaultBindingIP
		id            = job.Args[0]
		hostIP        = job.Getenv("HostIP")
		hostPort      = job.GetenvInt("HostPort")
		containerPort = job.GetenvInt("ContainerPort")
		proto         = job.Getenv("Proto")
		network       = currentInterfaces.Get(id)
	)

	if hostIP != "" {
		ip = net.ParseIP(hostIP)
		if ip == nil {
			return fmt.Errorf("Bad parameter: invalid host ip %s", hostIP)
		}
	}

	// host ip, proto, and host port
	var container net.Addr
	switch proto {
	case "tcp":
		container = &net.TCPAddr{IP: network.IP, Port: containerPort}
	case "udp":
		container = &net.UDPAddr{IP: network.IP, Port: containerPort}
	default:
		return fmt.Errorf("unsupported address type %s", proto)
	}

	//
	// Try up to 10 times to get a port that's not already allocated.
	//
	// In the event of failure to bind, return the error that portmapper.Map
	// yields.
	//

	var host net.Addr
	for i := 0; i < MaxAllocatedPortAttempts; i++ {
		if host, err = portmapper.Map(container, ip, hostPort); err == nil {
			break
		}
		// There is no point in immediately retrying to map an explicitly
		// chosen port.
		if hostPort != 0 {
			log.Warnf("Failed to allocate and map port %d: %s", hostPort, err)
			break
		}
		log.Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1)
	}

	if err != nil {
		return err
	}

	network.PortMappings = append(network.PortMappings, host)

	out := engine.Env{}
	switch netAddr := host.(type) {
	case *net.TCPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	case *net.UDPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	}
	if _, err := out.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example 18
0
// UnpackLayer unpacks the stream `layer` into the directory `dest`. The
// stream can be compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
	tr := tar.NewReader(layer)
	trBuf := pools.BufioReader32KPool.Get(tr)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	unpackedPaths := make(map[string]struct{})

	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}
	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
	if err != nil {
		return 0, err
	}

	aufsTempdir := ""
	aufsHardlinks := make(map[string]*tar.Header)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return 0, err
		}

		size += hdr.Size

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		// Windows does not support filenames with colons in them. Ignore
		// these files. This is not a problem though (although it might
		// appear that it is). Let's suppose a client is running docker pull.
		// The daemon it points to is Windows. Would it make sense for the
		// client to be doing a docker pull Ubuntu for example (which has files
		// with colons in the name under /usr/share/man/man3)? No, absolutely
		// not as it would really only make sense that they were pulling a
		// Windows image. However, for development, it is necessary to be able
		// to pull Linux images which are in the repository.
		//
		// TODO Windows. Once the registry is aware of what images are Windows-
		// specific or Linux-specific, this warning should be changed to an error
		// to cater for the situation where someone does manage to upload a Linux
		// image but have it tagged as Windows inadvertently.
		if runtime.GOOS == "windows" {
			if strings.Contains(hdr.Name, ":") {
				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
				continue
			}
		}

		// Note: these operations are platform specific, so the path separator must be too.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists.
			// This happened in some tests where an image had a tarfile without any
			// parent directories.
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)

			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = system.MkdirAll(parentPath, 0600)
				if err != nil {
					return 0, err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
			// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
			// We don't want this directory itself, but we do need the files in it
			// so that such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
						return 0, err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
					return 0, err
				}
			}

			if hdr.Name != WhiteoutOpaqueDir {
				continue
			}
		}
		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return 0, err
		}

		// Note: these operations are platform specific, so the path separator must be too.
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}
		base := filepath.Base(path)

		if strings.HasPrefix(base, WhiteoutPrefix) {
			dir := filepath.Dir(path)
			if base == WhiteoutOpaqueDir {
				_, err := os.Lstat(dir)
				if err != nil {
					return 0, err
				}
				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
					if err != nil {
						if os.IsNotExist(err) {
							err = nil // parent was deleted
						}
						return err
					}
					if path == dir {
						return nil
					}
					if _, exists := unpackedPaths[path]; !exists {
						err := os.RemoveAll(path)
						return err
					}
					return nil
				})
				if err != nil {
					return 0, err
				}
			} else {
				originalBase := base[len(WhiteoutPrefix):]
				originalPath := filepath.Join(dir, originalBase)
				if err := os.RemoveAll(originalPath); err != nil {
					return 0, err
				}
			}
		} else {
			// If the path exists we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			if fi, err := os.Lstat(path); err == nil {
				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
					if err := os.RemoveAll(path); err != nil {
						return 0, err
					}
				}
			}

			trBuf.Reset(tr)
			srcData := io.Reader(trBuf)
			srcHdr := hdr

			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
			// we manually retarget these into the temporary files we extracted them into
			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
				linkBasename := filepath.Base(hdr.Linkname)
				srcHdr = aufsHardlinks[linkBasename]
				if srcHdr == nil {
					return 0, fmt.Errorf("Invalid aufs hardlink")
				}
				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
				if err != nil {
					return 0, err
				}
				defer tmpFile.Close()
				srcData = tmpFile
			}

			// if the options contain a uid & gid maps, convert header uid/gid
			// entries using the maps such that lchown sets the proper mapped
			// uid/gid after writing the file. We only perform this mapping if
			// the file isn't already owned by the remapped root UID or GID, as
			// that specific uid/gid has no mapping from container -> host, and
			// those files already have the proper ownership for inside the
			// container.
			if srcHdr.Uid != remappedRootUID {
				xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
				if err != nil {
					return 0, err
				}
				srcHdr.Uid = xUID
			}
			if srcHdr.Gid != remappedRootGID {
				xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
				if err != nil {
					return 0, err
				}
				srcHdr.Gid = xGID
			}
			if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
				return 0, err
			}

			// Directory mtimes must be handled at the end, so that creating
			// further files inside them does not modify the directory mtime.
			if hdr.Typeflag == tar.TypeDir {
				dirs = append(dirs, hdr)
			}
			unpackedPaths[path] = struct{}{}
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return 0, err
		}
	}

	return size, nil
}
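The whiteout constants referenced above are not defined in this snippet. For reference, they follow the AUFS whiteout naming convention and are commonly defined along these lines (values assumed from that convention, not taken from the snippet itself):

const (
	// WhiteoutPrefix marks a deleted entry: ".wh.foo" whiteouts "foo".
	WhiteoutPrefix = ".wh."
	// WhiteoutMetaPrefix marks AUFS bookkeeping entries rather than deletions.
	WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
	// WhiteoutLinkDir is the directory holding hardlink targets.
	WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
	// WhiteoutOpaqueDir marks a directory whose lower-layer contents are hidden.
	WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
)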