Example #1
// Trap sets up a simplified signal "trap", appropriate for common
// behavior expected from a vanilla unix command-line tool in general
// (and the Docker engine in particular).
//
// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is
//   skipped and the process is terminated immediately (allows force quit of stuck daemon)
// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit.
//
func Trap(cleanup func()) {
	c := make(chan os.Signal, 1)
	// we will handle INT, TERM, QUIT here
	signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}
	gosignal.Notify(c, signals...)
	go func() {
		interruptCount := uint32(0)
		for sig := range c {
			go func(sig os.Signal) {
				logrus.Infof("Processing signal '%v'", sig)
				switch sig {
				case os.Interrupt, syscall.SIGTERM:
					if atomic.LoadUint32(&interruptCount) < 3 {
						// Initiate the cleanup only once
						if atomic.AddUint32(&interruptCount, 1) == 1 {
							// Call the provided cleanup handler
							cleanup()
							os.Exit(0)
						} else {
							return
						}
					} else {
						// 3 SIGTERM/INT signals received; force exit without cleanup
						logrus.Infof("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
					}
				case syscall.SIGQUIT:
					DumpStacks()
					logrus.Infof("Forcing docker daemon shutdown without cleanup on SIGQUIT")
				}
				// For the SIGINT/TERM and SIGQUIT non-clean shutdown cases, exit with 128 + signal number
				os.Exit(128 + int(sig.(syscall.Signal)))
			}(sig)
		}
	}()
}
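A minimal sketch of how a daemon's main function might wire this up. Trap here refers to the function above; the cleanup body and the blocking select are illustrative assumptions, not part of the original example.

package main

import "fmt"

// Trap is assumed to be the function shown above, available in this package.
func main() {
	Trap(func() {
		// Hypothetical cleanup: stop listeners, flush state to disk, etc.
		fmt.Println("shutting down cleanly")
	})

	// The daemon's real work would run here; block so the process stays alive
	// until a signal arrives and Trap's goroutine exits the process.
	select {}
}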
Example #2
// Trap sets up a simplified signal "trap", appropriate for common
// behavior expected from a vanilla unix command-line tool in general
// (and the Docker engine in particular).
//
// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
// * If SIGINT or SIGTERM are repeated 3 times before cleanup is complete, then cleanup is
//   skipped and the process is terminated immediately.
// * Unless "DEBUG" is set in the environment, SIGQUIT also causes an exit without cleanup
//   (with "DEBUG" set, SIGQUIT is left to the Go runtime, which dumps goroutines and exits).
//
func Trap(cleanup func()) {
	c := make(chan os.Signal, 1)
	signals := []os.Signal{os.Interrupt, syscall.SIGTERM}
	if os.Getenv("DEBUG") == "" {
		signals = append(signals, syscall.SIGQUIT)
	}
	gosignal.Notify(c, signals...)
	go func() {
		interruptCount := uint32(0)
		for sig := range c {
			go func(sig os.Signal) {
				log.Infof("Received signal '%v', starting shutdown of docker...", sig)
				switch sig {
				case os.Interrupt, syscall.SIGTERM:
					// If the user really wants to interrupt, let them do so.
					if atomic.LoadUint32(&interruptCount) < 3 {
						// Initiate the cleanup only once
						if atomic.AddUint32(&interruptCount, 1) == 1 {
							// Call cleanup handler
							cleanup()
							os.Exit(0)
						} else {
							return
						}
					} else {
						log.Infof("Force shutdown of docker, interrupting cleanup")
					}
				case syscall.SIGQUIT:
				}
				os.Exit(128 + int(sig.(syscall.Signal)))
			}(sig)
		}
	}()
}
Example #3
// Release frees the network interface allocated for the given container ID,
// unmapping its ports and returning its IP addresses to the allocator.
func Release(job *engine.Job) error {
	var (
		id                 = job.Args[0]
		containerInterface = currentInterfaces.Get(id)
	)

	if containerInterface == nil {
		return fmt.Errorf("No network information to release for %s", id)
	}

	for _, nat := range containerInterface.PortMappings {
		if err := portmapper.Unmap(nat); err != nil {
			log.Infof("Unable to unmap port %s: %s", nat, err)
		}
	}

	if err := ipAllocator.ReleaseIP(bridgeIPv4Network, containerInterface.IP); err != nil {
		log.Infof("Unable to release IPv4 %s", err)
	}
	if globalIPv6Network != nil {
		if err := ipAllocator.ReleaseIP(globalIPv6Network, containerInterface.IPv6); err != nil {
			log.Infof("Unable to release IPv6 %s", err)
		}
	}
	return nil
}
Example #4
// Run executes the job and blocks until the job completes.
// If the job fails, it returns an error.
func (job *Job) Run() error {
	if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") {
		return fmt.Errorf("engine is shutdown")
	}
	// FIXME: this is a temporary workaround to avoid Engine.Shutdown
	// waiting 5 seconds for server/api.ServeApi to complete (which it never will)
	// every time the daemon is cleanly restarted.
	// The permanent fix is to implement Job.Stop and Job.OnStop so that
	// ServeApi can cooperate and terminate cleanly.
	if job.Name != "serveapi" {
		job.Eng.l.Lock()
		job.Eng.tasks.Add(1)
		job.Eng.l.Unlock()
		defer job.Eng.tasks.Done()
	}
	// FIXME: make this thread-safe
	// FIXME: implement wait
	if !job.end.IsZero() {
		return fmt.Errorf("%s: job has already completed", job.Name)
	}
	// Log beginning and end of the job
	if job.Eng.Logging {
		log.Infof("+job %s", job.CallString())
		defer func() {
			// what if err is nil?
			log.Infof("-job %s%s", job.CallString(), job.err)
		}()
	}
	var errorMessage = bytes.NewBuffer(nil)
	job.Stderr.Add(errorMessage)
	if job.handler == nil {
		job.err = fmt.Errorf("%s: command not found", job.Name)
	} else {
		job.err = job.handler(job)
		job.end = time.Now()
	}
	if job.closeIO {
		// Wait for all background tasks to complete
		if err := job.Stdout.Close(); err != nil {
			return err
		}
		if err := job.Stderr.Close(); err != nil {
			return err
		}
		if err := job.Stdin.Close(); err != nil {
			return err
		}
	}

	return job.err
}
Example #5
func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) {
	if !job.GetenvBool("TlsVerify") {
		log.Infof("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
	}

	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version"))

	l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests"))
	if err != nil {
		return nil, err
	}

	if err := allocateDaemonPort(addr); err != nil {
		return nil, err
	}

	if job.GetenvBool("Tls") || job.GetenvBool("TlsVerify") {
		var tlsCa string
		if job.GetenvBool("TlsVerify") {
			tlsCa = job.Getenv("TlsCa")
		}
		l, err = setupTls(job.Getenv("TlsCert"), job.Getenv("TlsKey"), tlsCa, l)
		if err != nil {
			return nil, err
		}
	}
	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
}
Example #6
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		log.Debugf("Calling %s %s", localMethod, localRoute)

		if logging {
			log.Infof("%s %s", r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
				log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		version := version.Version(mux.Vars(r)["version"])
		if version == "" {
			version = api.APIVERSION
		}
		if corsHeaders != "" {
			writeCorsHeaders(w, r, corsHeaders)
		}

		if version.GreaterThan(api.APIVERSION) {
			http.Error(w, fmt.Errorf("client and server don't have the same version (client: %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound)
			return
		}

		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
			log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
			httpError(w, err)
		}
	}
}
Example #7
func (auth *RequestAuthorization) getToken() (string, error) {
	auth.tokenLock.Lock()
	defer auth.tokenLock.Unlock()
	now := time.Now()
	if now.Before(auth.tokenExpiration) {
		log.Debugf("Using cached token for %s", auth.authConfig.Username)
		return auth.tokenCache, nil
	}

	tlsConfig := tls.Config{
		MinVersion: tls.VersionTLS10,
	}
	if !auth.registryEndpoint.IsSecure {
		tlsConfig.InsecureSkipVerify = true
	}

	client := &http.Client{
		Transport: &http.Transport{
			DisableKeepAlives: true,
			Proxy:             http.ProxyFromEnvironment,
			TLSClientConfig:   &tlsConfig,
		},
		CheckRedirect: AddRequiredHeadersToRedirectedRequests,
	}
	factory := HTTPRequestFactory(nil)

	for _, challenge := range auth.registryEndpoint.AuthChallenges {
		switch strings.ToLower(challenge.Scheme) {
		case "basic":
			// no token necessary
		case "bearer":
			log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, auth.authConfig.Username)
			params := map[string]string{}
			for k, v := range challenge.Parameters {
				params[k] = v
			}
			params["scope"] = fmt.Sprintf("%s:%s:%s", auth.resource, auth.scope, strings.Join(auth.actions, ","))
			token, err := getToken(auth.authConfig.Username, auth.authConfig.Password, params, auth.registryEndpoint, client, factory)
			if err != nil {
				return "", err
			}
			auth.tokenCache = token
			auth.tokenExpiration = now.Add(time.Minute)

			return token, nil
		default:
			log.Infof("Unsupported auth scheme: %q", challenge.Scheme)
		}
	}

	// Do not expire cache since there are no challenges which use a token
	auth.tokenExpiration = time.Now().Add(time.Hour * 24)

	return "", nil
}
Example #8
// Release frees the network interface allocated for the given container ID,
// unmapping its ports and returning its IP address to the allocator.
func Release(job *engine.Job) engine.Status {
	var (
		id                 = job.Args[0]
		containerInterface = currentInterfaces.Get(id)
	)

	if containerInterface == nil {
		return job.Errorf("No network information to release for %s", id)
	}

	for _, nat := range containerInterface.PortMappings {
		if err := portmapper.Unmap(nat); err != nil {
			log.Infof("Unable to unmap port %s: %s", nat, err)
		}
	}

	if err := ipallocator.ReleaseIP(bridgeNetwork, containerInterface.IP); err != nil {
		log.Infof("Unable to release ip %s", err)
	}
	return engine.StatusOK
}
Example #9
func digestFromManifest(m *manifest.SignedManifest, localName string) (digest.Digest, int, error) {
	payload, err := m.Payload()
	if err != nil {
		// If this failed, the signatures section was corrupted
		// or missing. Treat the entire manifest as the payload.
		payload = m.Raw
	}
	manifestDigest, err := digest.FromBytes(payload)
	if err != nil {
		logrus.Infof("Could not compute manifest digest for %s:%s : %v", localName, m.Tag, err)
	}
	return manifestDigest, len(payload), nil
}
Example #10
// PurgeUploads deletes files from the upload directory
// created before olderThan.  The list of files deleted and errors
// encountered are returned.
func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) {
	log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete)
	uploadData, errors := getOutstandingUploads(ctx, driver)
	var deleted []string
	for _, uploadData := range uploadData {
		if uploadData.startedAt.Before(olderThan) {
			var err error
			log.Infof("Upload files in %s have older date (%s) than purge date (%s).  Removing upload directory.",
				uploadData.containingDir, uploadData.startedAt, olderThan)
			if actuallyDelete {
				err = driver.Delete(ctx, uploadData.containingDir)
			}
			if err == nil {
				deleted = append(deleted, uploadData.containingDir)
			} else {
				errors = append(errors, err)
			}
		}
	}

	log.Infof("Purge uploads finished.  Num deleted=%d, num errors=%d", len(deleted), len(errors))
	return deleted, errors
}
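A hedged usage sketch of a dry run. The helper name purgeDryRun is hypothetical, the driver argument is assumed to be an already-constructed storage driver, and the one-week cutoff is an arbitrary illustrative choice; the sketch assumes it lives in the same package as PurgeUploads.

// purgeDryRun is a hypothetical helper showing a dry-run invocation.
func purgeDryRun(driver storageDriver.StorageDriver) {
	cutoff := time.Now().Add(-7 * 24 * time.Hour) // arbitrary one-week cutoff

	// With actuallyDelete=false the call only reports which upload directories
	// would be removed, without deleting anything.
	wouldDelete, errs := PurgeUploads(context.Background(), driver, cutoff, false)
	log.Infof("dry run: %d upload directories older than %s, %d errors",
		len(wouldDelete), cutoff, len(errs))
}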
Example #11
// Close the broadcaster, ensuring that all messages are flushed to the
// underlying sink before returning.
func (b *Broadcaster) Close() error {
	logrus.Infof("broadcaster: closing")
	select {
	case <-b.closed:
		// already closed
		return fmt.Errorf("broadcaster: already closed")
	default:
		// do a little chan handoff dance to synchronize closing
		closed := make(chan struct{})
		b.closed <- closed
		close(b.closed)
		<-closed
		return nil
	}
}
Example #12
// DumpStacks dumps the runtime stack.
func DumpStacks() {
	var (
		buf       []byte
		stackSize int
	)
	bufferLen := 16384
	for stackSize == len(buf) {
		buf = make([]byte, bufferLen)
		stackSize = runtime.Stack(buf, true)
		bufferLen *= 2
	}
	buf = buf[:stackSize]
	// Note that if the daemon is started with a less-verbose log-level than "info" (the default), the goroutine
	// traces won't show up in the log.
	logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
}
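As a hedged sketch, DumpStacks can be bound to a signal so a running process dumps its goroutines on demand. The SIGUSR1 binding below is an illustrative assumption; the daemon itself reaches DumpStacks through SIGQUIT in the Trap handler of Example #1.

package main

import (
	"os"
	"os/signal"
	"syscall"
)

// DumpStacks is assumed to be the function shown above, available in this package.
func main() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGUSR1) // illustrative choice of signal
	go func() {
		for range c {
			DumpStacks() // log all goroutine stacks on demand
		}
	}()

	// ... the rest of the program ...
	select {}
}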
Example #13
func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
	if r.client == nil || r.request == nil {
		return 0, fmt.Errorf("client and request can't be nil\n")
	}
	isFreshRequest := false
	if r.lastRange != 0 && r.currentResponse == nil {
		readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
		r.request.Header.Set("Range", readRange)
		time.Sleep(5 * time.Second)
	}
	if r.currentResponse == nil {
		r.currentResponse, err = r.client.Do(r.request)
		isFreshRequest = true
	}
	if err != nil && r.failures+1 != r.maxFailures {
		r.cleanUpResponse()
		r.failures++
		time.Sleep(5 * time.Duration(r.failures) * time.Second)
		return 0, nil
	} else if err != nil {
		r.cleanUpResponse()
		return 0, err
	}
	if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 {
		r.cleanUpResponse()
		return 0, io.EOF
	} else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest {
		r.cleanUpResponse()
		return 0, fmt.Errorf("the server doesn't support byte ranges")
	}
	if r.totalSize == 0 {
		r.totalSize = r.currentResponse.ContentLength
	} else if r.totalSize <= 0 {
		r.cleanUpResponse()
		return 0, fmt.Errorf("failed to auto detect content length")
	}
	n, err = r.currentResponse.Body.Read(p)
	r.lastRange += int64(n)
	if err != nil {
		r.cleanUpResponse()
	}
	if err != nil && err != io.EOF {
		logrus.Infof("encountered error during pull and clearing it before resume: %s", err)
		err = nil
	}
	return n, err
}
Example #14
// ServeApi loops through all of the protocols sent in to docker and spawns
// off a goroutine to set up a serving http.Server for each.
func ServeApi(job *engine.Job) error {
	if len(job.Args) == 0 {
		return fmt.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
	}
	var (
		protoAddrs = job.Args
		chErrors   = make(chan error, len(protoAddrs))
	)
	activationLock = make(chan struct{})

	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		if len(protoAddrParts) != 2 {
			return fmt.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
		}
		go func() {
			log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
			srv, err := NewServer(protoAddrParts[0], protoAddrParts[1], job)
			if err != nil {
				chErrors <- err
				return
			}
			job.Eng.OnShutdown(func() {
				if err := srv.Close(); err != nil {
					log.Error(err)
				}
			})
			if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
				err = nil
			}
			chErrors <- err
		}()
	}

	for i := 0; i < len(protoAddrs); i++ {
		err := <-chErrors
		if err != nil {
			return err
		}
	}

	return nil
}
Example #15
// Auth contacts the public registry with the provided credentials,
// and returns OK if authentication was successful.
// It can be used to verify the validity of a client's credentials.
func (s *Service) Auth(job *engine.Job) error {
	var (
		authConfig = new(AuthConfig)
		endpoint   *Endpoint
		index      *IndexInfo
		status     string
		err        error
	)

	job.GetenvJson("authConfig", authConfig)

	addr := authConfig.ServerAddress
	if addr == "" {
		// Use the official registry address if not specified.
		addr = IndexServerAddress()
	}

	if index, err = ResolveIndexInfo(job, addr); err != nil {
		return err
	}

	if endpoint, err = NewEndpoint(index); err != nil {
		log.Errorf("unable to get new registry endpoint: %s", err)
		return err
	}

	authConfig.ServerAddress = endpoint.String()

	if status, err = Login(authConfig, endpoint, HTTPRequestFactory(nil)); err != nil {
		log.Errorf("unable to login against registry endpoint %s: %s", endpoint, err)
		return err
	}

	log.Infof("successful registry login for endpoint %s: %s", endpoint, status)
	job.Printf("%s\n", status)

	return nil
}
Example #16
func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if version.LessThan("1.3") {
		return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
	}
	var (
		authEncoded       = r.Header.Get("X-Registry-Auth")
		authConfig        = &registry.AuthConfig{}
		configFileEncoded = r.Header.Get("X-Registry-Config")
		configFile        = &registry.ConfigFile{}
		job               = eng.Job("build")
	)

	// This block can be removed when API versions prior to 1.9 are deprecated.
	// Both headers will be parsed and sent along to the daemon, but if a non-empty
	// ConfigFile is present, any value provided as an AuthConfig directly will
	// be overridden. See BuildFile::CmdFrom for details.
	if version.LessThan("1.9") && authEncoded != "" {
		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
			// for a pull it is not an error if no auth was given
			// to increase compatibility with the existing api it is defaulting to be empty
			authConfig = &registry.AuthConfig{}
		}
	}

	if configFileEncoded != "" {
		configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded))
		if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil {
			// for a pull it is not an error if no auth was given
			// to increase compatibility with the existing api it is defaulting to be empty
			configFile = &registry.ConfigFile{}
		}
	}

	if version.GreaterThanOrEqualTo("1.8") {
		job.SetenvBool("json", true)
		streamJSON(job, w, true)
	} else {
		job.Stdout.Add(utils.NewWriteFlusher(w))
	}

	if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") {
		job.Setenv("rm", "1")
	} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
		job.Setenv("rm", "1")
	} else {
		job.Setenv("rm", r.FormValue("rm"))
	}
	if r.FormValue("pull") == "1" && version.GreaterThanOrEqualTo("1.16") {
		job.Setenv("pull", "1")
	}
	job.Stdin.Add(r.Body)
	job.Setenv("remote", r.FormValue("remote"))
	job.Setenv("dockerfile", r.FormValue("dockerfile"))
	job.Setenv("t", r.FormValue("t"))
	job.Setenv("q", r.FormValue("q"))
	job.Setenv("nocache", r.FormValue("nocache"))
	job.Setenv("forcerm", r.FormValue("forcerm"))
	job.SetenvJson("authConfig", authConfig)
	job.SetenvJson("configFile", configFile)
	job.Setenv("memswap", r.FormValue("memswap"))
	job.Setenv("memory", r.FormValue("memory"))
	job.Setenv("cpusetcpus", r.FormValue("cpusetcpus"))
	job.Setenv("cpushares", r.FormValue("cpushares"))

	// Job cancellation. Note: not all job types support this.
	if closeNotifier, ok := w.(http.CloseNotifier); ok {
		finished := make(chan struct{})
		defer close(finished)
		go func() {
			select {
			case <-finished:
			case <-closeNotifier.CloseNotify():
				log.Infof("Client disconnected, cancelling job: %s", job.Name)
				job.Cancel()
			}
		}()
	}

	if err := job.Run(); err != nil {
		if !job.Stdout.Used() {
			return err
		}
		sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8"))
		w.Write(sf.FormatError(err))
	}
	return nil
}
Example #17
func InitDriver(job *engine.Job) error {
	var (
		networkv4      *net.IPNet
		networkv6      *net.IPNet
		addrv4         net.Addr
		addrsv6        []net.Addr
		enableIPTables = job.GetenvBool("EnableIptables")
		enableIPv6     = job.GetenvBool("EnableIPv6")
		icc            = job.GetenvBool("InterContainerCommunication")
		ipMasq         = job.GetenvBool("EnableIpMasq")
		ipForward      = job.GetenvBool("EnableIpForward")
		bridgeIP       = job.Getenv("BridgeIP")
		bridgeIPv6     = "fe80::1/64"
		fixedCIDR      = job.Getenv("FixedCIDR")
		fixedCIDRv6    = job.Getenv("FixedCIDRv6")
	)

	if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" {
		defaultBindingIP = net.ParseIP(defaultIP)
	}

	bridgeIface = job.Getenv("BridgeIface")
	usingDefaultBridge := false
	if bridgeIface == "" {
		usingDefaultBridge = true
		bridgeIface = DefaultNetworkBridge
	}

	addrv4, addrsv6, err := networkdriver.GetIfaceAddr(bridgeIface)

	if err != nil {
		// No bridge exists; create one.
		// If we're not using the default bridge, fail without trying to create it
		if !usingDefaultBridge {
			return err
		}

		// If the iface is not found, try to create it
		if err := configureBridge(bridgeIP, bridgeIPv6, enableIPv6); err != nil {
			return err
		}

		addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
		if err != nil {
			return err
		}

		if fixedCIDRv6 != "" {
			// Setting route to global IPv6 subnet
			log.Infof("Adding route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface)
			if err := netlink.AddRoute(fixedCIDRv6, "", "", bridgeIface); err != nil {
				log.Fatalf("Could not add route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface)
			}
		}
	} else {
		// Bridge exists already, getting info...
		// Validate that the bridge ip matches the ip specified by BridgeIP
		if bridgeIP != "" {
			networkv4 = addrv4.(*net.IPNet)
			bip, _, err := net.ParseCIDR(bridgeIP)
			if err != nil {
				return err
			}
			if !networkv4.IP.Equal(bip) {
				return fmt.Errorf("Bridge ip (%s) does not match existing bridge configuration %s", networkv4.IP, bip)
			}
		}

		// A bridge might exist but not have any IPv6 address associated with it yet
		// (for example, an existing Docker installation that has only been used
		// with IPv4 and whose docker0 is already set up). In that case, we can perform
		// the bridge init for IPv6 here; otherwise we will error out below if --ipv6=true.
		if len(addrsv6) == 0 && enableIPv6 {
			if err := setupIPv6Bridge(bridgeIPv6); err != nil {
				return err
			}
			// Recheck addresses now that IPv6 is setup on the bridge
			addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
			if err != nil {
				return err
			}
		}

		// TODO: Check if route to fixedCIDRv6 is set
	}

	if enableIPv6 {
		bip6, _, err := net.ParseCIDR(bridgeIPv6)
		if err != nil {
			return err
		}
		found := false
		for _, addrv6 := range addrsv6 {
			networkv6 = addrv6.(*net.IPNet)
			if networkv6.IP.Equal(bip6) {
				found = true
				break
			}
		}
		if !found {
			return fmt.Errorf("Bridge IPv6 does not match existing bridge configuration %s", bip6)
		}
	}

	networkv4 = addrv4.(*net.IPNet)

	if enableIPv6 {
		if len(addrsv6) == 0 {
			return errors.New("IPv6 enabled but no IPv6 detected")
		}
		bridgeIPv6Addr = networkv6.IP
	}

	// Configure iptables for link support
	if enableIPTables {
		if err := setupIPTables(addrv4, icc, ipMasq); err != nil {
			return err
		}

	}

	if ipForward {
		// Enable IPv4 forwarding
		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
			log.Warnf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
		}

		if fixedCIDRv6 != "" {
			// Enable IPv6 forwarding
			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/default/forwarding", []byte{'1', '\n'}, 0644); err != nil {
				log.Warnf("WARNING: unable to enable IPv6 default forwarding: %s\n", err)
			}
			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/all/forwarding", []byte{'1', '\n'}, 0644); err != nil {
				log.Warnf("WARNING: unable to enable IPv6 all forwarding: %s\n", err)
			}
		}
	}

	// We can always try removing the iptables
	if err := iptables.RemoveExistingChain("DOCKER", iptables.Nat); err != nil {
		return err
	}

	if enableIPTables {
		_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Nat)
		if err != nil {
			return err
		}
		chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
		if err != nil {
			return err
		}
		portmapper.SetIptablesChain(chain)
	}

	bridgeIPv4Network = networkv4
	if fixedCIDR != "" {
		_, subnet, err := net.ParseCIDR(fixedCIDR)
		if err != nil {
			return err
		}
		log.Debugf("Subnet: %v", subnet)
		if err := ipAllocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil {
			return err
		}
	}

	if fixedCIDRv6 != "" {
		_, subnet, err := net.ParseCIDR(fixedCIDRv6)
		if err != nil {
			return err
		}
		log.Debugf("Subnet: %v", subnet)
		if err := ipAllocator.RegisterSubnet(subnet, subnet); err != nil {
			return err
		}
		globalIPv6Network = subnet
	}

	// Block BridgeIP in IP allocator
	ipAllocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP)

	// https://github.com/docker/docker/issues/2768
	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeIPv4Network.IP)

	for name, f := range map[string]engine.Handler{
		"allocate_interface": Allocate,
		"release_interface":  Release,
		"allocate_port":      AllocatePort,
		"link":               LinkContainers,
	} {
		if err := job.Eng.Register(name, f); err != nil {
			return err
		}
	}
	return nil
}
Example #18
// New creates the driver and initializes it at the specified root.
func New(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (driver Driver, err error) {
	for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} {
		if name != "" {
			logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
			return GetDriver(name, root, options, uidMaps, gidMaps)
		}
	}

	// Guess for prior driver
	priorDrivers := scanPriorDrivers(root)
	for _, name := range priority {
		if name == "vfs" {
			// don't use vfs even if there is state present.
			continue
		}
		for _, prior := range priorDrivers {
			// of the state found from prior drivers, check in order of our priority
			// which we would prefer
			if prior == name {
				driver, err = getBuiltinDriver(name, root, options, uidMaps, gidMaps)
				if err != nil {
					// unlike below, we will return error here, because there is prior
					// state, and now it is no longer supported/prereq/compatible, so
					// something changed and needs attention. Otherwise the daemon's
					// images would just "disappear".
					logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err)
					return nil, err
				}
				if err := checkPriorDriver(name, root); err != nil {
					return nil, err
				}
				logrus.Infof("[graphdriver] using prior storage driver %q", name)
				return driver, nil
			}
		}
	}

	// Check for priority drivers first
	for _, name := range priority {
		driver, err = getBuiltinDriver(name, root, options, uidMaps, gidMaps)
		if err != nil {
			if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS {
				continue
			}
			return nil, err
		}
		return driver, nil
	}

	// Check all registered drivers if no priority driver is found
	for _, initFunc := range drivers {
		if driver, err = initFunc(root, options, uidMaps, gidMaps); err != nil {
			if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS {
				continue
			}
			return nil, err
		}
		return driver, nil
	}
	return nil, fmt.Errorf("No supported storage backend found")
}
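A short sketch of calling New at daemon startup. The helper name initGraphDriver is hypothetical, and the root path plus the nil option and ID-mapping arguments are illustrative assumptions for the simplest case.

// initGraphDriver is a hypothetical wrapper around New for the common case.
func initGraphDriver() (Driver, error) {
	// Root path is illustrative; options, uidMaps and gidMaps are left nil.
	driver, err := New("/var/lib/docker", nil, nil, nil)
	if err != nil {
		return nil, err
	}
	logrus.Infof("graph driver initialized: %v", driver)
	return driver, nil
}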
Example #19
func (p *v2Pusher) pushV2Tag(tag string) error {
	logrus.Debugf("Pushing repository: %s:%s", p.repo.Name(), tag)

	layerID, exists := p.localRepo[tag]
	if !exists {
		return fmt.Errorf("tag does not exist: %s", tag)
	}

	layersSeen := make(map[string]bool)

	layer, err := p.graph.Get(layerID)
	if err != nil {
		return err
	}

	m := &manifest.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name:         p.repo.Name(),
		Tag:          tag,
		Architecture: layer.Architecture,
		FSLayers:     []manifest.FSLayer{},
		History:      []manifest.History{},
	}

	var metadata runconfig.Config
	if layer != nil && layer.Config != nil {
		metadata = *layer.Config
	}

	out := p.config.OutStream

	for ; layer != nil; layer, err = p.graph.GetParent(layer) {
		if err != nil {
			return err
		}

		// Break early if this layer has already been seen in this image;
		// this prevents infinite loops on layers which loop back. Such loops
		// cannot be prevented up front since layer IDs are not merkle hashes.
		// TODO(dmcgowan): throw error if no valid use case is found
		if layersSeen[layer.ID] {
			break
		}

		logrus.Debugf("Pushing layer: %s", layer.ID)

		if layer.Config != nil && metadata.Image != layer.ID {
			if err := runconfig.Merge(&metadata, layer.Config); err != nil {
				return err
			}
		}

		var exists bool
		dgst, err := p.graph.GetLayerDigest(layer.ID)
		switch err {
		case nil:
			if p.layersPushed[dgst] {
				exists = true
				// break out of switch, it is already known that
				// the push is not needed and therefore doing a
				// stat is unnecessary
				break
			}
			_, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst)
			switch err {
			case nil:
				exists = true
				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil))
			case distribution.ErrBlobUnknown:
				// nop
			default:
				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil))
				return err
			}
		case ErrDigestNotSet:
			// nop
		case digest.ErrDigestInvalidFormat, digest.ErrDigestUnsupported:
			return fmt.Errorf("error getting image checksum: %v", err)
		}

		// If the digest was empty or not saved, or if the blob does not exist
		// on the remote repository, then push it.
		if !exists {
			var pushDigest digest.Digest
			if pushDigest, err = p.pushV2Image(p.repo.Blobs(context.Background()), layer); err != nil {
				return err
			}
			if dgst == "" {
				// Cache new checksum
				if err := p.graph.SetLayerDigest(layer.ID, pushDigest); err != nil {
					return err
				}
			}
			dgst = pushDigest
		}

		// read v1Compatibility config, generate new if needed
		jsonData, err := p.graph.GenerateV1CompatibilityChain(layer.ID)
		if err != nil {
			return err
		}

		m.FSLayers = append(m.FSLayers, manifest.FSLayer{BlobSum: dgst})
		m.History = append(m.History, manifest.History{V1Compatibility: string(jsonData)})

		layersSeen[layer.ID] = true
		p.layersPushed[dgst] = true
	}

	// Fix parent chain if necessary
	if err = fixHistory(m); err != nil {
		return err
	}

	logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", p.repo.Name(), tag, p.trustKey.KeyID())
	signed, err := manifest.Sign(m, p.trustKey)
	if err != nil {
		return err
	}

	manifestDigest, manifestSize, err := digestFromManifest(signed, p.repo.Name())
	if err != nil {
		return err
	}
	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "%s: digest: %s size: %d", tag, manifestDigest, manifestSize))
	}

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return err
	}
	return manSvc.Put(signed)
}
Example #20
func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) {
	log.Debugf("Pulling tag from V2 registry: %q", tag)

	manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
	if err != nil {
		return false, err
	}

	// loadManifest ensures that the manifest payload has the expected digest
	// if the tag is a digest reference.
	manifest, verified, err := s.loadManifest(eng, manifestBytes, manifestDigest, tag)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest: %s", err)
	}

	if err := checkValidManifest(manifest); err != nil {
		return false, err
	}

	if verified {
		log.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
	}
	out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		var (
			sumStr  = manifest.FSLayers[i].BlobSum
			imgJSON = []byte(manifest.History[i].V1Compatibility)
		)

		img, err := image.NewImgJSON(imgJSON)
		if err != nil {
			return false, fmt.Errorf("failed to parse json: %s", err)
		}
		downloads[i].img = img

		// Check if exists
		if s.graph.Exists(img.ID) {
			log.Debugf("Image already exists: %s", img.ID)
			continue
		}

		dgst, err := digest.ParseDigest(sumStr)
		if err != nil {
			return false, err
		}
		downloads[i].digest = dgst

		out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

		downloadFunc := func(di *downloadInfo) error {
			log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
				} else {
					log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
			} else {
				defer s.poolRemove("pull", "img:"+img.ID)
				tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
				if err != nil {
					return err
				}

				r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest.Algorithm(), di.digest.Hex(), auth)
				if err != nil {
					return err
				}
				defer r.Close()

				verifier, err := digest.NewDigestVerifier(di.digest)
				if err != nil {
					return err
				}

				if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
					In:        ioutil.NopCloser(io.TeeReader(r, verifier)),
					Out:       out,
					Formatter: sf,
					Size:      int(l),
					NewLines:  false,
					ID:        stringid.TruncateID(img.ID),
					Action:    "Downloading",
				})); err != nil {
					return fmt.Errorf("unable to copy v2 image blob data: %s", err)
				}

				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Verifying Checksum", nil))

				if !verifier.Verified() {
					log.Infof("Image verification failed: checksum mismatch for %q", di.digest.String())
					verified = false
				}

				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))

				log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
				di.tmpFile = tmpFile
				di.length = l
				di.downloaded = true
			}
			di.imgJSON = imgJSON

			return nil
		}

		if parallel {
			downloads[i].err = make(chan error)
			go func(di *downloadInfo) {
				di.err <- downloadFunc(di)
			}(&downloads[i])
		} else {
			err := downloadFunc(&downloads[i])
			if err != nil {
				return false, err
			}
		}
	}

	var tagUpdated bool
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			err := <-d.err
			if err != nil {
				return false, err
			}
		}
		if d.downloaded {
			// If tmpFile is nil, assume the layer was downloaded and extracted elsewhere.
			if d.tmpFile != nil {
				defer os.Remove(d.tmpFile.Name())
				defer d.tmpFile.Close()
				d.tmpFile.Seek(0, 0)
				err = s.graph.Register(d.img,
					progressreader.New(progressreader.Config{
						In:        d.tmpFile,
						Out:       out,
						Formatter: sf,
						Size:      int(d.length),
						ID:        stringid.TruncateID(d.img.ID),
						Action:    "Extracting",
					}))
				if err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
			tagUpdated = true
		} else {
			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
		}

	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := s.Get(repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		}
	}

	if verified && tagUpdated {
		out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	if manifestDigest != "" {
		out.Write(sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	if utils.DigestReference(tag) {
		if err = s.SetDigest(repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = s.Set(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
			return false, err
		}
	}

	return tagUpdated, nil
}
Example #21
func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter) error {
	endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
	if err != nil {
		if repoInfo.Index.Official {
			log.Debugf("Unable to push to V2 registry, falling back to v1: %s", err)
			return ErrV2RegistryUnavailable
		}
		return fmt.Errorf("error getting registry endpoint: %s", err)
	}

	tags, err := s.getImageTags(localRepo, tag)
	if err != nil {
		return err
	}
	if len(tags) == 0 {
		return fmt.Errorf("No tags to push for %s", repoInfo.LocalName)
	}

	auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, false)
	if err != nil {
		return fmt.Errorf("error getting authorization: %s", err)
	}

	for _, tag := range tags {
		log.Debugf("Pushing repository: %s:%s", repoInfo.CanonicalName, tag)

		layerId, exists := localRepo[tag]
		if !exists {
			return fmt.Errorf("tag does not exist: %s", tag)
		}

		layer, err := s.graph.Get(layerId)
		if err != nil {
			return err
		}

		m := &registry.ManifestData{
			SchemaVersion: 1,
			Name:          repoInfo.RemoteName,
			Tag:           tag,
			Architecture:  layer.Architecture,
		}
		var metadata runconfig.Config
		if layer.Config != nil {
			metadata = *layer.Config
		}

		layersSeen := make(map[string]bool)
		layers := []*image.Image{layer}
		for ; layer != nil; layer, err = layer.GetParent() {
			if err != nil {
				return err
			}

			if layersSeen[layer.ID] {
				break
			}
			layers = append(layers, layer)
			layersSeen[layer.ID] = true
		}
		m.FSLayers = make([]*registry.FSLayer, len(layers))
		m.History = make([]*registry.ManifestHistory, len(layers))

		// Schema version 1 requires layer ordering from top to root
		for i, layer := range layers {
			log.Debugf("Pushing layer: %s", layer.ID)

			if layer.Config != nil && metadata.Image != layer.ID {
				err = runconfig.Merge(&metadata, layer.Config)
				if err != nil {
					return err
				}
			}
			jsonData, err := layer.RawJson()
			if err != nil {
				return fmt.Errorf("cannot retrieve the path for %s: %s", layer.ID, err)
			}

			checksum, err := layer.GetCheckSum(s.graph.ImageRoot(layer.ID))
			if err != nil {
				return fmt.Errorf("error getting image checksum: %s", err)
			}

			var exists bool
			if len(checksum) > 0 {
				sumParts := strings.SplitN(checksum, ":", 2)
				if len(sumParts) < 2 {
					return fmt.Errorf("Invalid checksum: %s", checksum)
				}

				// Call mount blob
				exists, err = r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], sumParts[1], auth)
				if err != nil {
					out.Write(sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil))
					return err
				}
			}
			if !exists {
				if cs, err := s.pushV2Image(r, layer, endpoint, repoInfo.RemoteName, sf, out, auth); err != nil {
					return err
				} else if cs != checksum {
					// Cache new checksum
					if err := layer.SaveCheckSum(s.graph.ImageRoot(layer.ID), cs); err != nil {
						return err
					}
					checksum = cs
				}
			} else {
				out.Write(sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil))
			}
			m.FSLayers[i] = &registry.FSLayer{BlobSum: checksum}
			m.History[i] = &registry.ManifestHistory{V1Compatibility: string(jsonData)}
		}

		if err := checkValidManifest(m); err != nil {
			return fmt.Errorf("invalid manifest: %s", err)
		}

		log.Debugf("Pushing %s:%s to v2 repository", repoInfo.LocalName, tag)
		mBytes, err := json.MarshalIndent(m, "", "   ")
		if err != nil {
			return err
		}
		js, err := libtrust.NewJSONSignature(mBytes)
		if err != nil {
			return err
		}

		if err = js.Sign(s.trustKey); err != nil {
			return err
		}

		signedBody, err := js.PrettySignature("signatures")
		if err != nil {
			return err
		}
		log.Infof("Signed manifest for %s:%s using daemon's key: %s", repoInfo.LocalName, tag, s.trustKey.KeyID())

		// push the manifest
		digest, err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, signedBody, mBytes, auth)
		if err != nil {
			return err
		}

		out.Write(sf.FormatStatus("", "Digest: %s", digest))
	}
	return nil
}
Example #22
// Allocate a network interface
func Allocate(job *engine.Job) error {
	var (
		ip            net.IP
		mac           net.HardwareAddr
		err           error
		id            = job.Args[0]
		requestedIP   = net.ParseIP(job.Getenv("RequestedIP"))
		requestedIPv6 = net.ParseIP(job.Getenv("RequestedIPv6"))
		globalIPv6    net.IP
	)

	ip, err = ipAllocator.RequestIP(bridgeIPv4Network, requestedIP)
	if err != nil {
		return err
	}

	// If no explicit MAC address was given, derive one from the allocated IP address.
	if mac, err = net.ParseMAC(job.Getenv("RequestedMac")); err != nil {
		mac = generateMacAddr(ip)
	}

	if globalIPv6Network != nil {
		// If globalIPv6Network is at least a /80 subnet, generate the IPv6 address from the MAC address.
		netmask_ones, _ := globalIPv6Network.Mask.Size()
		if requestedIPv6 == nil && netmask_ones <= 80 {
			requestedIPv6 = make(net.IP, len(globalIPv6Network.IP))
			copy(requestedIPv6, globalIPv6Network.IP)
			for i, h := range mac {
				requestedIPv6[i+10] = h
			}
		}

		globalIPv6, err = ipAllocator.RequestIP(globalIPv6Network, requestedIPv6)
		if err != nil {
			log.Errorf("Allocator: RequestIP v6: %v", err)
			return err
		}
		log.Infof("Allocated IPv6 %s", globalIPv6)
	}

	out := engine.Env{}
	out.Set("IP", ip.String())
	out.Set("Mask", bridgeIPv4Network.Mask.String())
	out.Set("Gateway", bridgeIPv4Network.IP.String())
	out.Set("MacAddress", mac.String())
	out.Set("Bridge", bridgeIface)

	size, _ := bridgeIPv4Network.Mask.Size()
	out.SetInt("IPPrefixLen", size)

	// Compute the link-local IPv6 address from the MAC address.
	localIPv6Net, err := linkLocalIPv6FromMac(mac.String())
	if err != nil {
		return err
	}
	localIPv6, _, _ := net.ParseCIDR(localIPv6Net)
	out.Set("LinkLocalIPv6", localIPv6.String())
	out.Set("MacAddress", mac.String())

	if globalIPv6Network != nil {
		out.Set("GlobalIPv6", globalIPv6.String())
		sizev6, _ := globalIPv6Network.Mask.Size()
		out.SetInt("GlobalIPv6PrefixLen", sizev6)
		out.Set("IPv6Gateway", bridgeIPv6Addr.String())
	}

	currentInterfaces.Set(id, &networkInterface{
		IP:   ip,
		IPv6: globalIPv6,
	})

	out.WriteTo(job.Stdout)

	return nil
}