func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		log.Debugf("Calling %s %s", localMethod, localRoute)

		if logging {
			log.Infof("%s %s", r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
				log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		version := version.Version(mux.Vars(r)["version"])
		if version == "" {
			version = api.APIVERSION
		}
		if corsHeaders != "" {
			writeCorsHeaders(w, r, corsHeaders)
		}

		if version.GreaterThan(api.APIVERSION) {
			http.Error(w, fmt.Errorf("client and server don't have same version (client: %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound)
			return
		}

		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
			log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
			httpError(w, err)
		}
	}
}
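// The following is a minimal, hypothetical sketch (not part of the original
// code) showing how a handler produced by makeHttpHandler might be registered
// with a gorilla/mux router; the route path and the ping handler body are
// illustrative only and assume the HttpApiFunc signature used above.
func sketchRouter(eng *engine.Engine, logging bool, corsHeaders string, dockerVersion version.Version) *mux.Router {
	r := mux.NewRouter()
	ping := func(eng *engine.Engine, v version.Version, w http.ResponseWriter, req *http.Request, vars map[string]string) error {
		_, err := w.Write([]byte("OK"))
		return err
	}
	r.Path("/v{version:[0-9.]+}/_ping").Methods("GET").HandlerFunc(makeHttpHandler(eng, logging, "GET", "/_ping", ping, corsHeaders, dockerVersion))
	return r
}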
func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
	log.Debugf("[deviceset] AddDevice(hash=%s basehash=%s)", hash, baseHash)
	defer log.Debugf("[deviceset] AddDevice(hash=%s basehash=%s) END", hash, baseHash)

	baseInfo, err := devices.lookupDevice(baseHash)
	if err != nil {
		return err
	}

	baseInfo.lock.Lock()
	defer baseInfo.lock.Unlock()

	devices.Lock()
	defer devices.Unlock()

	if info, _ := devices.lookupDevice(hash); info != nil {
		return fmt.Errorf("device %s already exists", hash)
	}

	if err := devices.createRegisterSnapDevice(hash, baseInfo); err != nil {
		return err
	}

	return nil
}
func (d *Driver) Put(id string) error {
	// Protect the d.active from concurrent access
	d.Lock()
	defer d.Unlock()

	mount := d.active[id]
	if mount == nil {
		log.Debugf("Put on a non-mounted device %s", id)
		return nil
	}

	mount.count--
	if mount.count > 0 {
		return nil
	}

	defer delete(d.active, id)
	if mount.mounted {
		err := syscall.Unmount(mount.path, 0)
		if err != nil {
			log.Debugf("Failed to unmount %s overlay: %v", id, err)
		}
		return err
	}
	return nil
}
func validateEndpoint(endpoint *Endpoint) error {
	log.Debugf("pinging registry endpoint %s", endpoint)

	// Try HTTPS ping to registry
	endpoint.URL.Scheme = "https"
	if _, err := endpoint.Ping(); err != nil {
		if endpoint.IsSecure {
			// If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry`
			// in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP.
			return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host)
		}

		// If registry is insecure and HTTPS failed, fallback to HTTP.
		log.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err)
		endpoint.URL.Scheme = "http"

		var err2 error
		if _, err2 = endpoint.Ping(); err2 == nil {
			return nil
		}

		return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2)
	}

	return nil
}
// waitRemove blocks until either:
// a) the device registered at <device_set_prefix>-<hash> is removed,
// or b) the 10 second timeout expires.
func (devices *DeviceSet) waitRemove(devname string) error {
	log.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
	defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
	i := 0
	for ; i < 1000; i++ {
		devinfo, err := devicemapper.GetInfo(devname)
		if err != nil {
			// If there is an error we assume the device doesn't exist.
			// The error might actually be something else, but we can't differentiate.
			return nil
		}
		if i%100 == 0 {
			log.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
		}
		if devinfo.Exists == 0 {
			break
		}

		devices.Unlock()
		time.Sleep(10 * time.Millisecond)
		devices.Lock()
	}
	if i == 1000 {
		return fmt.Errorf("Timeout while waiting for device %s to be removed", devname)
	}
	return nil
}
// Push initiates a push operation on the repository named localName.
func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error {
	// FIXME: Allow to interrupt current push when new push of same image is done.

	var sf = streamformatter.NewJSONStreamFormatter()

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := s.registryService.ResolveRepository(localName)
	if err != nil {
		return err
	}

	endpoints, err := s.registryService.LookupPushEndpoints(repoInfo.CanonicalName)
	if err != nil {
		return err
	}

	reposLen := 1
	if imagePushConfig.Tag == "" {
		reposLen = len(s.Repositories[repoInfo.LocalName])
	}

	imagePushConfig.OutStream.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen))

	// Look up the local repository; pushing a repository that was never
	// tagged locally is an error.
	localRepo, exists := s.Repositories[repoInfo.LocalName]
	if !exists {
		return fmt.Errorf("Repository does not exist: %s", repoInfo.LocalName)
	}

	var lastErr error
	for _, endpoint := range endpoints {
		logrus.Debugf("Trying to push %s to %s %s", repoInfo.CanonicalName, endpoint.URL, endpoint.Version)

		pusher, err := s.NewPusher(endpoint, localRepo, repoInfo, imagePushConfig, sf)
		if err != nil {
			lastErr = err
			continue
		}
		if fallback, err := pusher.Push(); err != nil {
			if fallback {
				lastErr = err
				continue
			}
			logrus.Debugf("Not continuing with error: %v", err)
			return err
		}

		s.eventsService.Log("push", repoInfo.LocalName, "")
		return nil
	}

	if lastErr == nil {
		lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.CanonicalName)
	}
	return lastErr
}
func (auth *RequestAuthorization) getToken() (string, error) {
	auth.tokenLock.Lock()
	defer auth.tokenLock.Unlock()
	now := time.Now()
	if now.Before(auth.tokenExpiration) {
		log.Debugf("Using cached token for %s", auth.authConfig.Username)
		return auth.tokenCache, nil
	}

	tlsConfig := tls.Config{
		MinVersion: tls.VersionTLS10,
	}
	if !auth.registryEndpoint.IsSecure {
		tlsConfig.InsecureSkipVerify = true
	}

	client := &http.Client{
		Transport: &http.Transport{
			DisableKeepAlives: true,
			Proxy:             http.ProxyFromEnvironment,
			TLSClientConfig:   &tlsConfig,
		},
		CheckRedirect: AddRequiredHeadersToRedirectedRequests,
	}
	factory := HTTPRequestFactory(nil)

	for _, challenge := range auth.registryEndpoint.AuthChallenges {
		switch strings.ToLower(challenge.Scheme) {
		case "basic":
			// no token necessary
		case "bearer":
			log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, auth.authConfig.Username)
			params := map[string]string{}
			for k, v := range challenge.Parameters {
				params[k] = v
			}
			params["scope"] = fmt.Sprintf("%s:%s:%s", auth.resource, auth.scope, strings.Join(auth.actions, ","))
			token, err := getToken(auth.authConfig.Username, auth.authConfig.Password, params, auth.registryEndpoint, client, factory)
			if err != nil {
				return "", err
			}
			auth.tokenCache = token
			auth.tokenExpiration = now.Add(time.Minute)

			return token, nil
		default:
			log.Infof("Unsupported auth scheme: %q", challenge.Scheme)
		}
	}

	// Do not expire cache since there are no challenges which use a token
	auth.tokenExpiration = time.Now().Add(time.Hour * 24)

	return "", nil
}
func (p *v2Puller) download(di *downloadInfo) {
	logrus.Debugf("pulling blob %q to %s", di.digest, di.img.id)

	blobs := p.repo.Blobs(context.Background())

	desc, err := blobs.Stat(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error statting layer: %v", err)
		di.err <- err
		return
	}
	di.size = desc.Size

	layerDownload, err := blobs.Open(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error fetching layer: %v", err)
		di.err <- err
		return
	}
	defer layerDownload.Close()

	verifier, err := digest.NewDigestVerifier(di.digest)
	if err != nil {
		di.err <- err
		return
	}

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
		Out:       di.broadcaster,
		Formatter: p.sf,
		Size:      di.size,
		NewLines:  false,
		ID:        stringid.TruncateID(di.img.id),
		Action:    "Downloading",
	})
	// Propagate copy errors instead of silently discarding them.
	if _, err := io.Copy(di.tmpFile, reader); err != nil {
		logrus.Debugf("Error copying layer to temp file: %v", err)
		di.err <- err
		return
	}

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Verifying Checksum", nil))

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
		logrus.Error(err)
		di.err <- err
		return
	}

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Download complete", nil))

	logrus.Debugf("Downloaded %s to tempfile %s", di.img.id, di.tmpFile.Name())
	di.layer = layerDownload

	di.err <- nil
}
func (devices *DeviceSet) setupBaseImage() error {
	oldInfo, _ := devices.lookupDevice("")
	if oldInfo != nil && oldInfo.Initialized {
		return nil
	}

	if oldInfo != nil && !oldInfo.Initialized {
		log.Debugf("Removing uninitialized base image")
		if err := devices.DeleteDevice(""); err != nil {
			return err
		}
	}

	if devices.thinPoolDevice != "" && oldInfo == nil {
		_, transactionId, dataUsed, _, _, _, err := devices.poolStatus()
		if err != nil {
			return err
		}
		if dataUsed != 0 {
			return fmt.Errorf("Unable to take ownership of thin-pool (%s) that already has used data blocks", devices.thinPoolDevice)
		}
		if transactionId != 0 {
			return fmt.Errorf("Unable to take ownership of thin-pool (%s) with non-zero transaction Id", devices.thinPoolDevice)
		}
	}

	log.Debugf("Initializing base device-mapper thin volume")

	// Create initial device
	info, err := devices.createRegisterDevice("")
	if err != nil {
		return err
	}

	log.Debugf("Creating filesystem on base device-mapper thin volume")

	if err = devices.activateDeviceIfNeeded(info); err != nil {
		return err
	}

	if err := devices.createFilesystem(info); err != nil {
		return err
	}

	info.Initialized = true
	if err = devices.saveMetadata(info); err != nil {
		info.Initialized = false
		return err
	}

	return nil
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
	reader, writer := io.Pipe()
	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(writer),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
			UIDMaps:   uidMaps,
			GIDMaps:   gidMaps,
		}
		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		sort.Sort(changesByPath(changes))

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this
		for _, change := range changes {
			if change.Kind == ChangeDelete {
				whiteOutDir := filepath.Dir(change.Path)
				whiteOutBase := filepath.Base(change.Path)
				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
				timestamp := time.Now()
				hdr := &tar.Header{
					Name:       whiteOut[1:],
					Size:       0,
					ModTime:    timestamp,
					AccessTime: timestamp,
					ChangeTime: timestamp,
				}
				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
					logrus.Debugf("Can't write whiteout header: %s", err)
				}
			} else {
				path := filepath.Join(dir, change.Path)
				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", path, err)
				}
			}
		}

		// Make sure to check the error on Close.
		if err := ta.TarWriter.Close(); err != nil {
			logrus.Debugf("Can't close layer: %s", err)
		}
		if err := writer.Close(); err != nil {
			logrus.Debugf("failed close Changes writer: %s", err)
		}
	}()
	return reader, nil
}
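// A minimal usage sketch (not from the original source): stream the archive
// returned by ExportChanges to a tar file on disk. It assumes Archive behaves
// as an io.ReadCloser, as its use above as a pipe reader suggests; the output
// path is hypothetical.
func writeLayerSketch(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) error {
	layer, err := ExportChanges(dir, changes, uidMaps, gidMaps)
	if err != nil {
		return err
	}
	defer layer.Close()

	out, err := os.Create("/tmp/layer.tar")
	if err != nil {
		return err
	}
	defer out.Close()

	// The producing goroutine writes into the pipe as we read from it here.
	_, err = io.Copy(out, layer)
	return err
}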
// Finally Push the (signed) manifest of the blobs we've just pushed
func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, signedManifest, rawManifest []byte, auth *RequestAuthorization) (digest.Digest, error) {
	routeURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName)
	if err != nil {
		return "", err
	}

	method := "PUT"
	log.Debugf("[registry] Calling %q %s", method, routeURL)
	req, err := r.reqFactory.NewRequest(method, routeURL, bytes.NewReader(signedManifest))
	if err != nil {
		return "", err
	}
	if err := auth.Authorize(req); err != nil {
		return "", err
	}
	res, _, err := r.doRequest(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()

	// All 2xx and 3xx responses can be accepted for a put.
	if res.StatusCode >= 400 {
		if res.StatusCode == 401 {
			return "", errLoginRequired
		}
		errBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return "", err
		}
		log.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
		return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res)
	}

	hdrDigest, err := digest.ParseDigest(res.Header.Get(DockerDigestHeader))
	if err != nil {
		return "", fmt.Errorf("invalid manifest digest from registry: %s", err)
	}

	dgstVerifier, err := digest.NewDigestVerifier(hdrDigest)
	if err != nil {
		return "", fmt.Errorf("invalid manifest digest from registry: %s", err)
	}

	dgstVerifier.Write(rawManifest)

	if !dgstVerifier.Verified() {
		computedDigest, _ := digest.FromBytes(rawManifest)
		return "", fmt.Errorf("unable to verify manifest digest: registry has %q, computed %q", hdrDigest, computedDigest)
	}

	return hdrDigest, nil
}
// Retrieve all the images to be uploaded in the correct order
func (p *v1Pusher) getImageList(requestedTag string) ([]string, map[string][]string, error) {
	var (
		imageList   []string
		imagesSeen  = make(map[string]bool)
		tagsByImage = make(map[string][]string)
	)

	for tag, id := range p.localRepo {
		if requestedTag != "" && requestedTag != tag {
			// Include only the requested tag.
			continue
		}

		if utils.DigestReference(tag) {
			// Ignore digest references.
			continue
		}

		var imageListForThisTag []string

		tagsByImage[id] = append(tagsByImage[id], tag)

		for img, err := p.graph.Get(id); img != nil; img, err = p.graph.GetParent(img) {
			if err != nil {
				return nil, nil, err
			}

			if imagesSeen[img.ID] {
				// This image is already on the list, we can ignore it and all its parents
				break
			}

			imagesSeen[img.ID] = true
			imageListForThisTag = append(imageListForThisTag, img.ID)
		}

		// reverse the image list for this tag (so the "most"-parent image is first)
		for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
			imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i]
		}

		// append to main image list
		imageList = append(imageList, imageListForThisTag...)
	}
	if len(imageList) == 0 {
		return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
	}
	logrus.Debugf("Image list: %v", imageList)
	logrus.Debugf("Tags by image: %v", tagsByImage)

	return imageList, tagsByImage, nil
}
func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) {
	deviceId, err := devices.getNextFreeDeviceId()
	if err != nil {
		return nil, err
	}

	if err := devices.openTransaction(hash, deviceId); err != nil {
		log.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId)
		devices.markDeviceIdFree(deviceId)
		return nil, err
	}

	for {
		if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceId); err != nil {
			if devicemapper.DeviceIdExists(err) {
				// Device Id already exists. This should not
				// happen. Now we have a mechanism to find
				// a free device Id. So something is not right.
				// Give a warning and continue.
				log.Errorf("Device Id %d exists in pool but it is supposed to be unused", deviceId)
				deviceId, err = devices.getNextFreeDeviceId()
				if err != nil {
					return nil, err
				}
				// Save new device id into transaction
				devices.refreshTransaction(deviceId)
				continue
			}
			log.Debugf("Error creating device: %s", err)
			devices.markDeviceIdFree(deviceId)
			return nil, err
		}
		break
	}

	log.Debugf("Registering device (id %v) with FS size %v", deviceId, devices.baseFsSize)
	info, err := devices.registerDevice(deviceId, hash, devices.baseFsSize, devices.OpenTransactionId)
	if err != nil {
		_ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId)
		devices.markDeviceIdFree(deviceId)
		return nil, err
	}

	if err := devices.closeTransaction(); err != nil {
		devices.unregisterDevice(deviceId, hash)
		devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId)
		devices.markDeviceIdFree(deviceId)
		return nil, err
	}
	return info, nil
}
// loginV2 tries to login to the v2 registry server. The given registry endpoint has been
// pinged or set up with a list of authorization challenges. Each of these challenges is
// tried until one of them succeeds. Currently supported challenge schemes are:
//   HTTP Basic Authorization
//   Token Authorization with a separate token issuing server
// NOTE: the v2 logic does not attempt to create a user account if one doesn't exist. For
// now, users should create their account through other means like directly from a web page
// served by the v2 registry service provider. Whether this will be supported in the future
// is to be determined.
func loginV2(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *utils.HTTPRequestFactory) (string, error) {
	log.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint)

	tlsConfig := tls.Config{
		MinVersion: tls.VersionTLS10,
	}
	if !registryEndpoint.IsSecure {
		tlsConfig.InsecureSkipVerify = true
	}

	client := &http.Client{
		Transport: &http.Transport{
			DisableKeepAlives: true,
			Proxy:             http.ProxyFromEnvironment,
			TLSClientConfig:   &tlsConfig,
		},
		CheckRedirect: AddRequiredHeadersToRedirectedRequests,
	}

	var (
		err       error
		allErrors []error
	)

	for _, challenge := range registryEndpoint.AuthChallenges {
		log.Debugf("trying %q auth challenge with params %s", challenge.Scheme, challenge.Parameters)

		switch strings.ToLower(challenge.Scheme) {
		case "basic":
			err = tryV2BasicAuthLogin(authConfig, challenge.Parameters, registryEndpoint, client, factory)
		case "bearer":
			err = tryV2TokenAuthLogin(authConfig, challenge.Parameters, registryEndpoint, client, factory)
		default:
			// Unsupported challenge types are explicitly skipped.
			err = fmt.Errorf("unsupported auth scheme: %q", challenge.Scheme)
		}

		if err == nil {
			return "Login Succeeded", nil
		}

		log.Debugf("error trying auth challenge %q: %s", challenge.Scheme, err)

		allErrors = append(allErrors, err)
	}

	return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors)
}
func (r *Session) GetRemoteImageLayer(imgID, registry string, token []string, imgSize int64) (io.ReadCloser, error) {
	var (
		retries    = 5
		statusCode = 0
		client     *http.Client
		res        *http.Response
		imageURL   = fmt.Sprintf("%simages/%s/layer", registry, imgID)
	)

	req, err := r.reqFactory.NewRequest("GET", imageURL, nil)
	if err != nil {
		return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
	}
	setTokenAuth(req, token)
	for i := 1; i <= retries; i++ {
		statusCode = 0
		res, client, err = r.doRequest(req)
		if err != nil {
			log.Debugf("Error contacting registry: %s", err)
			if res != nil {
				if res.Body != nil {
					res.Body.Close()
				}
				statusCode = res.StatusCode
			}
			if i == retries {
				return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", statusCode, imgID)
			}
			time.Sleep(time.Duration(i) * 5 * time.Second)
			continue
		}
		break
	}

	if res.StatusCode != 200 {
		res.Body.Close()
		return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", res.StatusCode, imgID)
	}

	if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
		log.Debugf("server supports resume")
		return httputils.ResumableRequestReaderWithInitialResponse(client, req, 5, imgSize, res), nil
	}
	log.Debugf("server doesn't support resume")
	return res.Body, nil
}
// GetRemoteImageLayer retrieves an image layer from the registry
func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) {
	var (
		retries    = 5
		statusCode = 0
		res        *http.Response
		err        error
		imageURL   = fmt.Sprintf("%simages/%s/layer", registry, imgID)
	)

	req, err := http.NewRequest("GET", imageURL, nil)
	if err != nil {
		return nil, fmt.Errorf("Error while getting from the server: %v", err)
	}

	// TODO(tiborvass): why are we doing retries at this level?
	// These retries should be generic to both v1 and v2
	for i := 1; i <= retries; i++ {
		statusCode = 0
		res, err = r.client.Do(req)
		if err == nil {
			break
		}
		logrus.Debugf("Error contacting registry %s: %v", registry, err)
		if res != nil {
			if res.Body != nil {
				res.Body.Close()
			}
			statusCode = res.StatusCode
		}
		if i == retries {
			return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", statusCode, imgID)
		}
		time.Sleep(time.Duration(i) * 5 * time.Second)
	}

	if res.StatusCode != 200 {
		res.Body.Close()
		return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", res.StatusCode, imgID)
	}

	if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
		logrus.Debugf("server supports resume")
		return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil
	}
	logrus.Debugf("server doesn't support resume")
	return res.Body, nil
}
// GetRemoteTags retrieves all tags from the given repository. It queries each
// of the registries supplied in the registries argument, and returns data from
// the first one that answers the query successfully. It returns a map with
// tag names as the keys and image IDs as the values.
func (r *Session) GetRemoteTags(registries []string, repository string) (map[string]string, error) {
	if strings.Count(repository, "/") == 0 {
		// This will be removed once the registry supports auto-resolution on
		// the "library" namespace
		repository = "library/" + repository
	}
	for _, host := range registries {
		endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
		res, err := r.client.Get(endpoint)
		if err != nil {
			return nil, err
		}

		logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
		defer res.Body.Close()

		if res.StatusCode == 404 {
			return nil, ErrRepoNotFound
		}
		if res.StatusCode != 200 {
			continue
		}

		result := make(map[string]string)
		if err := json.NewDecoder(res.Body).Decode(&result); err != nil {
			return nil, err
		}
		return result, nil
	}
	return nil, fmt.Errorf("Could not reach any registry endpoint")
}
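// A small illustrative sketch (not in the original code) of how GetRemoteTags
// might be called and its result consumed; the registry URL and repository
// name are hypothetical and only mirror the host/repository format built in
// the endpoint string above.
func printTagsSketch(s *Session) error {
	tags, err := s.GetRemoteTags([]string{"https://registry-1.docker.io/v1/"}, "busybox")
	if err != nil {
		return err
	}
	for tag, imageID := range tags {
		fmt.Printf("%s -> %s\n", tag, imageID)
	}
	return nil
}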
func (p *v1Pusher) Push() (fallback bool, err error) {
	tlsConfig, err := p.registryService.TLSConfig(p.repoInfo.Index.Name)
	if err != nil {
		return false, err
	}
	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
	tr := transport.NewTransport(
		// TODO(tiborvass): was NoTimeout
		registry.NewTransport(tlsConfig),
		registry.DockerHeaders(p.config.MetaHeaders)...,
	)
	client := registry.HTTPClient(tr)
	v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders)
	if err != nil {
		logrus.Debugf("Could not get v1 endpoint: %v", err)
		return true, err
	}
	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
	if err != nil {
		// TODO(dmcgowan): Check if should fallback
		return true, err
	}
	if err := p.pushRepository(p.config.Tag); err != nil {
		// TODO(dmcgowan): Check if should fallback
		return false, err
	}
	return false, nil
}
// PushImageJSONRegistry pushes JSON metadata for a local image to the registry
func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error {
	u := registry + "images/" + imgData.ID + "/json"

	logrus.Debugf("[registry] Calling PUT %s", u)

	req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw))
	if err != nil {
		return err
	}
	req.Header.Add("Content-type", "application/json")

	res, err := r.client.Do(req)
	if err != nil {
		return fmt.Errorf("Failed to upload metadata: %s", err)
	}
	defer res.Body.Close()
	if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") {
		return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res)
	}
	if res.StatusCode != 200 {
		errBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
		}
		var jsonBody map[string]string
		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
			errBody = []byte(err.Error())
		} else if jsonBody["error"] == "Image already exists" {
			return ErrAlreadyExists
		}
		return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res)
	}
	return nil
}
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
	sourcePath = normalizePath(sourcePath)
	if _, err = os.Lstat(sourcePath); err != nil {
		// Catches the case where the source does not exist or is not a
		// directory if asserted to be a directory, as this also causes an
		// error.
		return
	}

	// Separate the source path between its directory and
	// the entry in that directory which we are archiving.
	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)

	filter := []string{sourceBase}

	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)

	return TarWithOptions(sourceDir, &TarOptions{
		Compression:      Uncompressed,
		IncludeFiles:     filter,
		IncludeSourceDir: true,
		RebaseNames: map[string]string{
			sourceBase: rebaseName,
		},
	})
}
func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName, sumType, sum string, auth *RequestAuthorization) (io.ReadCloser, int64, error) {
	routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+":"+sum)
	if err != nil {
		return nil, 0, err
	}

	method := "GET"
	log.Debugf("[registry] Calling %q %s", method, routeURL)
	req, err := r.reqFactory.NewRequest(method, routeURL, nil)
	if err != nil {
		return nil, 0, err
	}
	if err := auth.Authorize(req); err != nil {
		return nil, 0, err
	}
	res, _, err := r.doRequest(req)
	if err != nil {
		return nil, 0, err
	}
	if res.StatusCode != 200 {
		if res.StatusCode == 401 {
			return nil, 0, errLoginRequired
		}
		return nil, 0, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob - %s:%s", res.StatusCode, imageName, sumType, sum), res)
	}
	lenStr := res.Header.Get("Content-Length")
	l, err := strconv.ParseInt(lenStr, 10, 64)
	if err != nil {
		return nil, 0, err
	}

	return res.Body, l, err
}
func (r *Session) GetV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, blobWrtr io.Writer, auth *RequestAuthorization) error {
	routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+":"+sum)
	if err != nil {
		return err
	}

	method := "GET"
	log.Debugf("[registry] Calling %q %s", method, routeURL)
	req, err := r.reqFactory.NewRequest(method, routeURL, nil)
	if err != nil {
		return err
	}
	if err := auth.Authorize(req); err != nil {
		return err
	}
	res, _, err := r.doRequest(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		if res.StatusCode == 401 {
			return errLoginRequired
		}
		return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res)
	}

	_, err = io.Copy(blobWrtr, res.Body)
	return err
}
// HeadV2ImageBlob checks whether an image blob already exists in the registry.
// Possible outcomes:
// - Succeeded to head image blob (already exists)
// - Failed with no error (continue to Push the Blob)
// - Failed with error
func (r *Session) HeadV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, auth *RequestAuthorization) (bool, error) {
	routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+":"+sum)
	if err != nil {
		return false, err
	}

	method := "HEAD"
	log.Debugf("[registry] Calling %q %s", method, routeURL)
	req, err := r.reqFactory.NewRequest(method, routeURL, nil)
	if err != nil {
		return false, err
	}
	if err := auth.Authorize(req); err != nil {
		return false, err
	}
	res, _, err := r.doRequest(req)
	if err != nil {
		return false, err
	}
	res.Body.Close() // close early, since we're not needing a body on this call .. yet?
	switch {
	case res.StatusCode >= 200 && res.StatusCode < 400:
		// return something indicating no push needed
		return true, nil
	case res.StatusCode == 401:
		return false, errLoginRequired
	case res.StatusCode == 404:
		// return something indicating blob push needed
		return false, nil
	}

	return false, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying head request for %s - %s:%s", res.StatusCode, imageName, sumType, sum), res)
}
func (devices *DeviceSet) closeTransaction() error {
	if err := devices.updatePoolTransactionId(); err != nil {
		log.Debugf("Failed to close Transaction")
		return err
	}
	return nil
}
// PushImageChecksumRegistry uploads checksums for an image
func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error {
	u := registry + "images/" + imgData.ID + "/checksum"

	logrus.Debugf("[registry] Calling PUT %s", u)

	req, err := http.NewRequest("PUT", u, nil)
	if err != nil {
		return err
	}
	req.Header.Set("X-Docker-Checksum", imgData.Checksum)
	req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload)

	res, err := r.client.Do(req)
	if err != nil {
		return fmt.Errorf("Failed to upload metadata: %v", err)
	}
	defer res.Body.Close()
	if len(res.Cookies()) > 0 {
		r.client.Jar.SetCookies(req.URL, res.Cookies())
	}
	if res.StatusCode != 200 {
		errBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err)
		}
		var jsonBody map[string]string
		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
			errBody = []byte(err.Error())
		} else if jsonBody["error"] == "Image already exists" {
			return ErrAlreadyExists
		}
		return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody)
	}
	return nil
}
func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionId uint64) (*DevInfo, error) {
	log.Debugf("registerDevice(%v, %v)", id, hash)
	info := &DevInfo{
		Hash:          hash,
		DeviceId:      id,
		Size:          size,
		TransactionId: transactionId,
		Initialized:   false,
		devices:       devices,
	}

	devices.devicesLock.Lock()
	devices.Devices[hash] = info
	devices.devicesLock.Unlock()

	if err := devices.saveMetadata(info); err != nil {
		// Try to remove unused device
		devices.devicesLock.Lock()
		delete(devices.Devices, hash)
		devices.devicesLock.Unlock()
		return nil, err
	}

	return info, nil
}
// NewRequest() creates a new *http.Request,
// applies all decorators in the HTTPRequestFactory on the request,
// then applies decorators provided by d on the request.
func (h *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...HTTPRequestDecorator) (*http.Request, error) {
	req, err := http.NewRequest(method, urlStr, body)
	if err != nil {
		return nil, err
	}

	// By default, a nil factory should work.
	if h == nil {
		return req, nil
	}
	for _, dec := range h.decorators {
		req, err = dec.ChangeRequest(req)
		if err != nil {
			return nil, err
		}
	}
	for _, dec := range d {
		req, err = dec.ChangeRequest(req)
		if err != nil {
			return nil, err
		}
	}

	log.Debugf("%v -- HEADERS: %v", req.URL, req.Header)
	return req, err
}
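// An illustrative decorator sketch (not part of the original code), assuming
// HTTPRequestDecorator is an interface whose single ChangeRequest method is
// the one invoked above; the header name, value, and type name are
// hypothetical.
type userAgentDecorator struct {
	agent string
}

func (d *userAgentDecorator) ChangeRequest(req *http.Request) (*http.Request, error) {
	req.Header.Set("User-Agent", d.agent)
	return req, nil
}

// It could then be passed as a variadic decorator, e.g.:
//   req, err := factory.NewRequest("GET", "https://example.com/v2/", nil, &userAgentDecorator{agent: "docs-sketch/0.1"})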
// NewSession creates a new session
// TODO(tiborvass): remove authConfig param once registry client v2 is vendored
func NewSession(client *http.Client, authConfig *cliconfig.AuthConfig, endpoint *Endpoint) (r *Session, err error) {
	r = &Session{
		authConfig:    authConfig,
		client:        client,
		indexEndpoint: endpoint,
		id:            stringid.GenerateRandomID(),
	}

	var alwaysSetBasicAuth bool

	// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
	// alongside all our requests.
	if endpoint.VersionString(1) != IndexServer && endpoint.URL.Scheme == "https" {
		info, err := endpoint.Ping()
		if err != nil {
			return nil, err
		}

		if info.Standalone && authConfig != nil {
			logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String())
			alwaysSetBasicAuth = true
		}
	}

	// Annotate the transport unconditionally so that v2 can
	// properly fallback on v1 when an image is not found.
	client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth)

	jar, err := cookiejar.New(nil)
	if err != nil {
		return nil, errors.New("cookiejar.New is not supposed to return an error")
	}
	client.Jar = jar

	return r, nil
}
func (graph *Graph) restore() error {
	dir, err := ioutil.ReadDir(graph.root)
	if err != nil {
		return err
	}
	var ids = []string{}
	for _, v := range dir {
		id := v.Name()
		if graph.driver.Exists(id) {
			img, err := graph.loadImage(id)
			if err != nil {
				logrus.Warnf("ignoring image %s, it could not be restored: %v", id, err)
				continue
			}
			graph.imageMutex.Lock(img.Parent)
			graph.parentRefs[img.Parent]++
			graph.imageMutex.Unlock(img.Parent)
			ids = append(ids, id)
		}
	}

	graph.idIndex = truncindex.NewTruncIndex(ids)
	logrus.Debugf("Restored %d elements", len(ids))
	return nil
}
// ensureImage creates a sparse file of <size> bytes at the path
// <root>/devicemapper/<name>.
// If the file already exists, it does nothing.
// Either way it returns the full path.
func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
	dirname := devices.loopbackDir()
	filename := path.Join(dirname, name)

	if err := os.MkdirAll(dirname, 0700); err != nil && !os.IsExist(err) {
		return "", err
	}

	if _, err := os.Stat(filename); err != nil {
		if !os.IsNotExist(err) {
			return "", err
		}
		log.Debugf("Creating loopback file %s for device-manage use", filename)
		file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
		if err != nil {
			return "", err
		}
		defer file.Close()

		if err = file.Truncate(size); err != nil {
			return "", err
		}
	}
	return filename, nil
}