// RemoveImages removes least recently pulled docker images from the local docker host.
func RemoveImages(PulledImages []ImageMetadataInfo, imageToMDMap map[string][]ImageMetadataInfo) {
	numRemoved := 0
	for _, imageMD := range PulledImages {
		// Get all metadata (repo/tags) associated with that image
		for _, metadata := range imageToMDMap[imageMD.Image] {
			// basespec := RegistrySpec + "/" + string(t.Repo) + ":"
			if ExcludeRepo[RepoType(metadata.Repo)] {
				continue
			}
			blog.Debug("Removing the following registry/repo:tag: " + RegistrySpec + "/" + metadata.Repo + ":" + metadata.Tag)
			apipath := "/images/" + RegistrySpec + "/" + metadata.Repo + ":" + metadata.Tag
			blog.Info("RemoveImages %s", apipath)
			config.BanyanUpdate("Remove", apipath)
			_, err := DockerAPI(DockerTransport, "DELETE", apipath, []byte{}, "")
			if err != nil {
				blog.Error(err, "RemoveImages Repo:Tag", metadata.Repo, metadata.Tag, "image", metadata.Image)
			}
			numRemoved++
		}
	}
	blog.Info("Number of repo/tags removed this time around: %d", numRemoved)
	return
}
// GetNewImageMetadata takes the set of existing images, queries the registry to find any changes,
// and then brings the Output Writer up to date by telling it the obsolete metadata to delete
// and the new metadata to add.
func GetNewImageMetadata(oldMetadataSet MetadataSet) (tagSlice []TagInfo, metadataSlice []ImageMetadataInfo, currentMetadataSet MetadataSet) {
	var currentMetadataSlice []ImageMetadataInfo
	//config.BanyanUpdate("Loading Registry Metadata")
	if LocalHost == true {
		blog.Info("Collect images from local Docker host")
		currentMetadataSlice = GetLocalImageMetadata(oldMetadataSet)
		// there is no tag API under Docker Remote API,
		// and the caller of GetNewImageMetadata ignores tagSlice
		tagSlice = nil
	} else {
		switch {
		case HubAPI == false:
			tagSlice, currentMetadataSlice = GetImageMetadata(oldMetadataSet)
		case HubAPI == true:
			tagSlice, currentMetadataSlice = GetImageMetadataTokenAuthV1(oldMetadataSet)
		}
	}

	// get only the new metadata from currentMetadataSlice
	currentMetadataSet = NewMetadataSet()
	for _, metadata := range currentMetadataSlice {
		currentMetadataSet[metadata] = true
		if _, ok := oldMetadataSet[metadata]; !ok {
			// metadata is not in old map
			metadataSlice = append(metadataSlice, metadata)
		}
	}

	// find entries in the old map that are not in the current map,
	// and remove those entries from the database
	obsolete := []ImageMetadataInfo{}
	for metadata := range oldMetadataSet {
		if _, ok := currentMetadataSet[metadata]; !ok {
			if len(ReposToProcess) > 0 {
				if _, present := ReposToProcess[RepoType(metadata.Repo)]; present {
					obsolete = append(obsolete, metadata)
					blog.Info("Need to remove ImageMetadata: %v", metadata)
				}
			} else {
				obsolete = append(obsolete, metadata)
				blog.Info("Need to remove ImageMetadata: %v", metadata)
			}
		}
	}
	if len(obsolete) > 0 {
		RemoveObsoleteMetadata(obsolete)
	}

	if len(metadataSlice) > 0 || len(obsolete) > 0 {
		config.BanyanUpdate("Detected changes in registry metadata")
	}

	// Sort image metadata from newest image to oldest image
	sort.Sort(ByDateTime(metadataSlice))
	return
}
// NewDockerTransport creates an HTTP transport to the Docker unix/tcp socket.
func NewDockerTransport(proto, addr string) (tr *http.Transport, e error) {
	// check Docker environment variables
	dockerHost := os.Getenv("DOCKER_HOST")
	if os.Getenv("DOCKER_TLS_VERIFY") == "0" {
		DockerTLSVerify = false
	}
	dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
	if dockerHost == "" {
		DockerProto = proto
		DockerAddr = addr
	} else {
		blog.Info("$DOCKER_HOST env var = %s", dockerHost)
		switch {
		case strings.HasPrefix(dockerHost, "tcp://"):
			blog.Info("Using protocol tcp")
			DockerProto = "tcp"
			DockerAddr = strings.TrimPrefix(dockerHost, "tcp://")
		case strings.HasPrefix(dockerHost, "unix://"):
			blog.Info("Using protocol unix")
			DockerProto = "unix"
			DockerAddr = strings.TrimPrefix(dockerHost, "unix://")
		default:
			except.Fail("Unexpected value in $DOCKER_HOST:", dockerHost)
		}
	}
	if DockerProto != "unix" && DockerProto != "tcp" {
		e = errors.New("Protocol " + DockerProto + " is not yet supported")
		return
	}

	// create transport for unix socket
	if DockerProto == "unix" {
		tr = &http.Transport{}
		tr.DisableCompression = true
		tr.Dial = func(_, _ string) (net.Conn, error) {
			return net.DialTimeout(DockerProto, DockerAddr, HTTPTIMEOUT)
		}
		return
	}

	// create transport for tcp socket, optionally with TLS
	if DockerTLSVerify {
		certfile := dockerCertPath + "/cert.pem"
		cafile := dockerCertPath + "/ca.pem"
		keyfile := dockerCertPath + "/key.pem"
		tr, e = NewTLSTransport(DockerAddr, certfile, cafile, keyfile)
		if e != nil {
			except.Fail(e, "NewTLSTransport")
		}
		return
	}
	tr = &http.Transport{}
	return
}
// checkRepoList gets the list of repositories to process from the command line
// and from the repoList file.
func checkRepoList(initial bool) (updates bool) {
	newList := make(map[collector.RepoType]bool)
	// check repositories specified on the command line
	if len(flag.Args()) > 1 {
		for _, repo := range flag.Args()[1:] {
			newList[collector.RepoType(repo)] = true
			if initial {
				updates = true
			}
		}
	}
	// check repositories specified in the repoList file. Ignore file read errors.
	data, err := ioutil.ReadFile(*repoList)
	if err != nil {
		if initial {
			blog.Info("Repolist: " + *repoList + " not specified")
		}
	} else {
		arr := strings.Split(string(data), "\n")
		for _, line := range arr {
			// skip over comments and whitespace
			arr := strings.Split(line, "#")
			repo := arr[0]
			repotrim := strings.TrimSpace(repo)
			if repotrim != "" {
				r := collector.RepoType(repotrim)
				newList[r] = true
				if _, ok := collector.ReposToProcess[r]; !ok {
					updates = true
				}
			}
		}
	}
	if len(newList) == 0 {
		collector.ReposToProcess = newList
		return
	}
	collector.ReposToProcess = newList
	if searchTerm := collector.NeedRegistrySearch(); searchTerm != "" {
		config.FilterRepos = false
	} else {
		config.FilterRepos = true
	}
	if updates {
		blog.Info("Limiting collection to the following repos:")
		for repo := range newList {
			blog.Info(repo)
		}
	}
	return
}
// RemoveImages removes least recently pulled docker images from the local docker host.
func RemoveImages(PulledImages []ImageMetadataInfo) {
	numRemoved := 0
	imageMap, err := GetLocalImages(false, false)
	if err != nil {
		except.Error(err, ": RemoveImages unable to list local images")
	}
	for _, metadata := range PulledImages {
		if strings.HasPrefix(metadata.Repo, "library/") {
			metadata.Repo = strings.Replace(metadata.Repo, "library/", "", 1)
		}
		imageID := ImageIDType(metadata.Image)
		if metadata.Image == "" {
			// unknown image ID. Search the repotags for a match
			var err error
			imageID, err = imageMap.Image(RepoType(metadata.Repo), TagType(metadata.Tag))
			if err != nil {
				except.Error(err, ": RemoveImages unable to find image ID")
				break
			}
		}
		// Get all repo:tags associated with the image
		repoTagSlice := imageMap.RepoTags(imageID)
		if len(repoTagSlice) == 0 {
			except.Error("RemoveImages unable to find expected repo:tag " + metadata.Repo + ":" + metadata.Tag +
				" for image ID=" + string(imageID))
			except.Error("imageMap is %v", imageMap)
			continue
		}
		for _, repotag := range repoTagSlice {
			// basespec := RegistrySpec + "/" + string(t.Repo) + ":"
			if ExcludeRepo[RepoType(repotag.Repo)] {
				continue
			}
			apipath := "/images/" + string(repotag.Repo) + ":" + string(repotag.Tag)
			blog.Info("RemoveImages %s", apipath)
			config.BanyanUpdate("Remove", apipath)
			_, err := DockerAPI(DockerTransport, "DELETE", apipath, []byte{}, "")
			if err != nil {
				except.Error(err, "RemoveImages Repo:Tag", repotag.Repo, repotag.Tag, "image", metadata.Image)
			}
			numRemoved++
		}
	}
	blog.Info("Number of repo/tags removed this time around: %d", numRemoved)
	RemoveDanglingImages()
	return
}
func main() {
	doFlags()

	setupLogging()

	//verifyVolumes()
	copyBanyanData()

	// setup connection to docker daemon's unix/tcp socket
	var e error
	collector.DockerTransport, e = collector.NewDockerTransport(*dockerProto, *dockerAddr)
	if e != nil {
		except.Fail(e, ": Error in connecting to docker remote API socket")
	}

	authToken := RegisterCollector()

	// Set output writers
	SetOutputWriters(authToken)
	SetupBanyanStatus(authToken)

	checkConfigUpdate(true)
	if collector.LocalHost == false && collector.RegistryAPIURL == "" {
		collector.RegistryAPIURL, collector.HubAPI, collector.BasicAuth, collector.XRegistryAuth = collector.GetRegistryURL()
		blog.Info("registry API URL: %s", collector.RegistryAPIURL)
	}

	// Log the docker version
	major, minor, revision, e := collector.DockerVersion()
	if e != nil {
		except.Error(e, ": Could not identify Docker version")
	} else {
		blog.Info("Docker version %d.%d.%d", major, minor, revision)
		config.BanyanUpdate("Docker version", strconv.Itoa(major)+"."+strconv.Itoa(minor)+"."+strconv.Itoa(revision))
	}

	// Images we have processed already
	processedImages := collector.NewImageSet()
	e = getImageList(processedImages)
	if e != nil {
		blog.Info("Fresh start: No previously collected images were found in %s", *imageList)
	}
	_ = getImageManifestHashList(processedImages)
	blog.Debug(processedImages)

	// Main infinite loop.
	InfLoop(authToken, processedImages)
}
// GetNewImageMetadata takes the set of existing images, queries the registry to find any changes,
// and then brings the Output Writer up to date by telling it the obsolete metadata to delete
// and the new metadata to add.
func GetNewImageMetadata(oldMetadataSet MetadataSet) (metadataSlice []ImageMetadataInfo, currentMetadataSet MetadataSet) {
	var currentMetadataSlice []ImageMetadataInfo
	//config.BanyanUpdate("Loading Registry Metadata")
	if LocalHost == true {
		blog.Info("Collect images from local Docker host")
		currentMetadataSlice = GetLocalImageMetadata(oldMetadataSet)
	} else {
		currentMetadataSlice = GetImageMetadata(oldMetadataSet)
	}

	// get only the new metadata from currentMetadataSlice
	currentMetadataSet = NewMetadataSet()
	for _, metadata := range currentMetadataSlice {
		currentMetadataSet.Insert(metadata)
		if oldMetadataSet.Exists(metadata) == false {
			// metadata is not in old map
			metadataSlice = append(metadataSlice, metadata)
		}
	}

	// find entries in the old map that are not in the current map,
	// and remove those entries from the database
	obsolete := []ImageMetadataInfo{}
	for metadata := range oldMetadataSet {
		if !currentMetadataSet.Exists(metadata) {
			if len(ReposToProcess) > 0 {
				if _, present := ReposToProcess[RepoType(metadata.Repo)]; present {
					obsolete = append(obsolete, metadata)
					blog.Info("Need to remove ImageMetadata: %v", metadata)
				}
			} else {
				obsolete = append(obsolete, metadata)
				blog.Info("Need to remove ImageMetadata: %v", metadata)
			}
		}
	}
	if len(obsolete) > 0 {
		RemoveObsoleteMetadata(obsolete)
	}

	if len(metadataSlice) > 0 || len(obsolete) > 0 {
		config.BanyanUpdate("Detected changes in registry metadata")
	}

	// Sort image metadata from newest image to oldest image
	sort.Sort(ByDateTime(metadataSlice))
	return
}
// WriteImageAllData writes image (pkg and other) data into file
func (f *FileWriter) WriteImageAllData(outMapMap map[string]map[string]interface{}) {
	blog.Info("Writing image (pkg and other) data into file...")

	for imageID, scriptMap := range outMapMap {
		for scriptName, out := range scriptMap {
			scriptDir := f.dir + "/" + trimExtension(scriptName)
			err := fsutil.CreateDirIfNotExist(scriptDir)
			if err != nil {
				blog.Error(err, ": Error creating script dir: ", scriptDir)
				continue
			}
			image := string(imageID)
			if len(image) < 12 {
				blog.Warn("Weird...Haven't seen imageIDs so small -- possibly a test?")
			} else {
				image = string(imageID)[0:12]
			}
			filenamePath := scriptDir + "/" + image
			if _, ok := out.([]byte); ok {
				f.format = "txt"
				filenamePath += "-miscdata"
			} else {
				// by default it is json. But f.format could get overwritten at any point
				// in the for loop if the output type is []byte, hence the (re)assignment
				f.format = "json"
				// NOTE: If we start using json for output other than imageData, change this
				filenamePath += "-pkgdata"
			}
			f.writeFileInFormat(filenamePath, &out)
		}
	}
	return
}
// lookupMetadataTokenAuthV1 takes as input the imageID, and Docker Hub auth/index info,
// and it returns ImageMetadataInfo for that image by querying the indexed registry.
func lookupMetadataTokenAuthV1(imageID ImageIDType, client *http.Client, indexInfo IndexInfo) (
	metadata ImageMetadataInfo, e error) {

	blog.Info("Get Metadata for Image: %s", string(imageID))
	URL := "https://" + indexInfo.RegistryURL + "/v1/images/" + string(imageID) + "/json"
	response, e := RegistryRequestWithToken(client, URL, indexInfo.DockerToken)
	if e != nil {
		blog.Error(e, "Unable to query metadata for image: "+string(imageID))
		return
	}
	// log.Print("metadata query response: " + string(response))
	var m ImageStruct
	if e = json.Unmarshal(response, &m); e != nil {
		return
	}
	var creationTime time.Time
	metadata.Image = string(imageID)
	if creationTime, e = time.Parse(time.RFC3339Nano, m.Created); e != nil {
		return
	}
	metadata.Datetime = creationTime
	metadata.Size = m.Size
	metadata.Author = m.Author
	metadata.Checksum = m.Checksum
	metadata.Comment = m.Comment
	metadata.Parent = m.Parent
	return
}
// GetImageMetadataTokenAuthV1 returns repositories/tags/image metadata from the Docker Hub
// or other registry using v1 token authorization.
// The user must have specified a set of repositories of interest.
// The function queries the index server, e.g., Docker Hub, to get the token and registry, and then uses
// the token to query the registry.
func GetImageMetadataTokenAuthV1(oldMetadataSet MetadataSet) (tagSlice []TagInfo, metadataSlice []ImageMetadataInfo) {
	if len(ReposToProcess) == 0 {
		return
	}
	client := &http.Client{}

	metadataMap := NewImageToMetadataMap(oldMetadataSet)

	for repo := range ReposToProcess {
		blog.Info("Get index and tag info for %s", string(repo))
		config.BanyanUpdate("Get index and tag info for", string(repo))

		var (
			indexInfo         IndexInfo
			e                 error
			repoTagSlice      []TagInfo
			repoMetadataSlice []ImageMetadataInfo
		)

		// loop until success
		for {
			indexInfo, e = getReposTokenAuthV1(repo, client)
			if e != nil {
				blog.Warn(e, ":index lookup failed for repo", string(repo), "- retrying.")
				config.BanyanUpdate(e.Error(), ":index lookup failed, repo", string(repo), "- retrying")
				time.Sleep(config.RETRYDURATION)
				continue
			}

			repoTagSlice, e = getTagsTokenAuthV1(repo, client, indexInfo)
			if e != nil {
				blog.Warn(e, ":tag lookup failed for repo", string(repo), "- retrying.")
				config.BanyanUpdate(e.Error(), ":tag lookup failed for repo", string(repo), "- retrying")
				time.Sleep(config.RETRYDURATION)
				continue
			}
			if len(repoTagSlice) != 1 {
				blog.Error("Incorrect length of repoTagSlice: expected length=1, got length=%d", len(repoTagSlice))
				config.BanyanUpdate("Incorrect length of repoTagSlice:", strconv.Itoa(len(repoTagSlice)), string(repo))
				time.Sleep(config.RETRYDURATION)
				continue
			}

			repoMetadataSlice, e = getMetadataTokenAuthV1(repoTagSlice[0], metadataMap, client, indexInfo)
			if e != nil {
				blog.Warn(e, ":metadata lookup failed for", string(repoTagSlice[0].Repo), "- retrying.")
				config.BanyanUpdate(e.Error(), ":metadata lookup failed for", string(repoTagSlice[0].Repo), "- retrying")
				time.Sleep(config.RETRYDURATION)
				continue
			}

			//success!
			break
		}
		tagSlice = append(tagSlice, repoTagSlice...)
		metadataSlice = append(metadataSlice, repoMetadataSlice...)
	}
	return
}
func main() {
	doFlags()

	setupLogging()

	//verifyVolumes()
	copyBanyanData()

	// setup connection to docker daemon's unix/tcp socket
	var e error
	collector.DockerTransport, e = collector.NewDockerTransport(*dockerProto, *dockerAddr)
	if e != nil {
		blog.Exit(e, ": Error in connecting to docker remote API socket")
	}

	authToken := RegisterCollector()

	// Set output writers
	SetOutputWriters(authToken)
	SetupBanyanStatus(authToken)

	checkConfigUpdate(true)
	if collector.LocalHost == false && collector.RegistryAPIURL == "" {
		collector.RegistryAPIURL, collector.HubAPI, collector.BasicAuth, collector.XRegistryAuth = collector.GetRegistryURL()
		blog.Info("registry API URL: %s", collector.RegistryAPIURL)
	}

	// Images we have processed already
	processedImages := collector.NewImageSet()

	e = getImageList(processedImages)
	if e != nil {
		blog.Info("Fresh start: No previously collected images were found in %s", *imageList)
	}
	blog.Debug(processedImages)

	// Image Metadata we have already seen
	MetadataSet := collector.NewMetadataSet()
	PulledList := []collector.ImageMetadataInfo{}

	// Main infinite loop.
	InfLoop(authToken, processedImages, MetadataSet, PulledList)
}
// DockerAPI performs an HTTP GET, POST, or DELETE operation to the Docker daemon.
func DockerAPI(tr *http.Transport, operation, apipath string, jsonString []byte, XRegistryAuth string) (resp []byte, e error) {
	switch operation {
	case "GET", "POST", "DELETE":
		break
	default:
		e = errors.New("Operation " + operation + " not supported")
		return
	}

	// for unix socket, URL (host.domain) is needed but can be anything
	var host string
	HTTP := "http://"
	if DockerProto == "unix" {
		host = dummydomain
	} else {
		host = DockerAddr
		if DockerTLSVerify {
			HTTP = "https://"
		}
	}
	URL := HTTP + host + apipath
	blog.Info("DockerAPI %s", URL)
	req, e := http.NewRequest(operation, URL, bytes.NewBuffer(jsonString))
	if e != nil {
		except.Error(e, ":DockerAPI failed to create http request")
		return
	}
	req.Header.Add("Content-Type", "application/json")
	if XRegistryAuth != "" {
		req.Header.Add("X-Registry-Auth", XRegistryAuth)
	}
	//req.Header.Set("Authorization", "Bearer "+authToken)
	client := &http.Client{Transport: tr, Timeout: DockerTimeout}
	r, e := client.Do(req)
	if e != nil {
		except.Error(e, ":DockerAPI URL", URL, "client request failed")
		return
	}
	defer r.Body.Close()
	resp, e = ioutil.ReadAll(r.Body)
	if e != nil {
		except.Error(e, ":DockerAPI URL", URL, "invalid response body")
		return
	}
	if r.StatusCode < 200 || r.StatusCode > 299 {
		e = errors.New("DockerAPI URL: " + URL + " status code: " + strconv.Itoa(r.StatusCode) +
			" error: " + string(resp))
		return
	}
	return
}
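For illustration, a minimal usage sketch of the transport/API pair shown above. It is hypothetical: it assumes the conventional default socket path /var/run/docker.sock, and it reuses only NewDockerTransport, DockerAPI, and the package-level DockerTransport variable already seen in these snippets ("/images/json" is the standard Docker Remote API endpoint for listing images).

// listLocalImagesSketch is a hypothetical helper showing typical wiring:
// create the transport once, then issue calls through DockerAPI.
func listLocalImagesSketch() {
	tr, err := NewDockerTransport("unix", "/var/run/docker.sock") // assumed default socket path
	if err != nil {
		except.Fail(err, ": unable to create Docker transport")
	}
	DockerTransport = tr

	// "/images/json" lists the images known to the local Docker daemon.
	resp, err := DockerAPI(DockerTransport, "GET", "/images/json", []byte{}, "")
	if err != nil {
		except.Error(err, ": GET /images/json failed")
		return
	}
	blog.Debug(string(resp))
}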
// createCmd returns a json byte slice describing the container we want to create
func createCmd(imageID ImageIDType, scriptName, staticBinary, dirPath string) (jsonString []byte, err error) {
	var container Container
	container.User = "******"
	container.AttachStdout = true
	container.AttachStderr = true
	container.HostConfig.Binds = []string{config.BANYANHOSTDIR() + "/hosttarget" + ":" + TARGETCONTAINERDIR + ":ro"}
	container.Image = string(imageID)
	container.Entrypoint = []string{TARGETCONTAINERDIR + "/bin/bash-static", "-c"}
	container.Cmd = []string{"PATH=" + TARGETCONTAINERDIR + "/bin" + ":$PATH " + staticBinary + " " + dirPath + "/" + scriptName}
	blog.Info("Executing command: docker %v", container.Cmd)
	return json.Marshal(container)
}
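A hypothetical sketch of how createCmd fits together with CreateContainer and WaitContainer (both shown later in this section) to run a collection script inside a target image. The snippets here do not show a dedicated start helper, so this sketch assumes the start step is issued directly through DockerAPI against the standard "/containers/<id>/start" Remote API endpoint.

// runScriptSketch is a hypothetical helper chaining createCmd -> CreateContainer ->
// start -> WaitContainer; it is a sketch, not the collector's actual control flow.
func runScriptSketch(imageID ImageIDType, scriptName, staticBinary, dirPath string) error {
	spec, err := createCmd(imageID, scriptName, staticBinary, dirPath)
	if err != nil {
		return err
	}
	containerID, err := CreateContainer(spec)
	if err != nil {
		return err
	}
	// Start the container, then block until it exits.
	if _, err = DockerAPI(DockerTransport, "POST", "/containers/"+containerID+"/start", []byte{}, ""); err != nil {
		return err
	}
	statusCode, err := WaitContainer(containerID)
	if err != nil {
		return err
	}
	blog.Info("Script %s exited with status %d", scriptName, statusCode)
	return nil
}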
// TestWriteImageMetadata tests writing (appending/removing) imageMD to file
func TestWriteImageMetadata(t *testing.T) {
	const (
		format  = "json"
		destDir = "/tmp"
	)

	// Testing imagedata...
	var imdata = []ImageMetadataInfo{
		{"111", time.Now(), "r1", "t1", 100, "a1", "c1", "c1", "p1"},
		{"121", time.Now(), "r2", "t2", 100, "a2", "c2", "c2", "p2"},
		{"131", time.Now(), "r3", "t3", 100, "a3", "c3", "c3", "p3"},
	}

	// Remove output file if it already exists -- since we append
	file := "metadata." + format
	filenamePath := destDir + "/" + file
	if _, err := os.Stat(filenamePath); err == nil {
		// file exists
		e := os.Remove(filenamePath)
		if e != nil {
			t.Fatal(": Error in removing metadata file: ", filenamePath)
		}
	} //ignore else

	// Append to MD file
	b1 := testWriteImageMDToFile(t, imdata, "/tmp", "json", "ADD")
	data1 := ImageMetadataAndAction{"ADD", imdata}
	b2, err := json.MarshalIndent(data1, "", "\t")
	if err != nil {
		t.Fatal(err, ": Error in marshaling json")
	}
	if !bytes.Equal(b1, b2) {
		t.Fatal("Input/Output image metadata don't match")
	}

	// "Remove" from MD file (note that action is set to remove, rather than really removing anything)
	b3 := testWriteImageMDToFile(t, []ImageMetadataInfo{imdata[0]}, "/tmp", "json", "REMOVE")
	data2 := ImageMetadataAndAction{"REMOVE", []ImageMetadataInfo{imdata[0]}}
	b4, err := json.MarshalIndent(data2, "", "\t")
	b5 := append(b2, b4...)
	if err != nil {
		t.Fatal(err, ": Error in marshaling json")
	}
	if !bytes.Equal(b3, b5) {
		blog.Info(string(b5))
		t.Fatal("Input/Output image metadata don't match: ", len(b3), len(b5))
	}

	//Pass...
	return
}
// GetLocalImageMetadata returns image metadata queried from a local Docker host.
// Query the local docker daemon to detect new image builds on the host and new images pulled from registry by users.
func GetLocalImageMetadata(oldMetadataSet MetadataSet) (metadataSlice []ImageMetadataInfo) {
	for {
		blog.Info("Get a list of images from local Docker daemon")
		imageMap, e := GetLocalImages()
		if e != nil {
			blog.Warn(e, " GetLocalImages")
			blog.Warn("Retrying")
			time.Sleep(config.RETRYDURATION)
			continue
		}

		blog.Info("Get Image Metadata from local Docker daemon")
		// Get image metadata
		metadataSlice, e = getImageMetadata(imageMap, oldMetadataSet)
		if e != nil {
			blog.Warn(e, " GetImageMetadata")
			blog.Warn("Retrying")
			time.Sleep(config.RETRYDURATION)
			continue
		}
		break
	}
	return
}
// PullImage performs a docker pull on an image specified by repo/tag.
// TODO: Detect if the pulled image has a different imageID than the value retrieved from
// metadata, and if so correct the metadata, or at least skip processing the image.
func PullImage(metadata ImageMetadataInfo) {
	tagspec := RegistrySpec + "/" + metadata.Repo + ":" + metadata.Tag
	apipath := "/images/create?fromImage=" + tagspec
	blog.Info("PullImage downloading %s, Image ID: %s", apipath, metadata.Image)
	config.BanyanUpdate("Pull", apipath, metadata.Image)
	resp, err := DockerAPI(DockerTransport, "POST", apipath, []byte{}, XRegistryAuth)
	if err != nil {
		blog.Error(err, "PullImage failed for", RegistrySpec, metadata.Repo, metadata.Tag, metadata.Image)
	}
	if strings.Contains(string(resp), `"error":`) {
		blog.Error("PullImage error for %s/%s/%s", RegistrySpec, metadata.Repo, metadata.Tag)
	}
	blog.Trace(string(resp))
	return
}
func InfLoop(authToken string, processedImages collector.ImageSet, MetadataSet collector.MetadataSet,
	PulledList []collector.ImageMetadataInfo) {
	duration := time.Duration(*poll) * time.Second
	reposToLimit := NewRepoSet()
	for {
		config.BanyanUpdate("New iteration")
		MetadataSet, PulledList = DoIteration(reposToLimit, authToken, processedImages, MetadataSet, PulledList)

		blog.Info("Looping in %d seconds", *poll)
		config.BanyanUpdate("Sleeping for", strconv.FormatInt(*poll, 10), "seconds")
		time.Sleep(duration)

		checkConfigUpdate(false)
		authToken = refreshToken(authToken)
	}
}
// TestWriteImageAllData tests writing different types of image data to files
func TestWriteImageAllData(t *testing.T) {
	cases := []struct {
		script, image, destDir, format string
	}{
		{"myscript", "image", "/tmp", "json"},
		{"myscript.sh", "image1234", "/tmp", "json"},
		{"myscript.abc.sh", "aaaabbbb", "/tmp", "json"},
	}

	outMap := make(map[string]interface{})
	outMapMap := make(map[string]map[string]interface{})

	// Testing imagedata...
	var idata = []ImageDataInfo{
		{"111", "a", "b", "c", "dn1", "did1"},
		{"111", "d", "e", "f", "dn2", "did2"},
		{"121", "g", "h", "i", "dn3", "did3"},
	}
	for _, c := range cases {
		outMap[c.script] = idata
		outMapMap[c.image] = outMap
		b1 := testWriteToFile(t, outMapMap, c.script, c.image, c.destDir, c.format, "-pkgdata")
		b2, err := json.MarshalIndent(idata, "", "\t")
		if err != nil {
			t.Fatal(err, ": Error in marshaling json for imagedata")
		}
		if !bytes.Equal(b1, b2) {
			blog.Debug(b1)
			blog.Debug(b2)
			t.Fatal("Input/Output image data don't match: ", len(b1), len(b2))
		}
	}
	blog.Info("Reaching here => writing imagedata to file works fine")

	// Testing random output ([]byte)...
	randOut := []byte("Testing random output from scripts")
	for _, c := range cases {
		script := "X" + c.script
		outMap[script] = randOut
		outMapMap[c.image] = outMap
		b := testWriteToFile(t, outMapMap, script, c.image, c.destDir, "txt", "-miscdata")
		if !bytes.Equal(b, randOut) {
			blog.Debug(b)
			blog.Debug(randOut)
			t.Fatal("Input/Output image rand txt don't match", len(b), len(randOut))
		}
	}

	//Pass...
	return
}
// WaitContainer makes a docker remote API call to wait for a container to finish running.
func WaitContainer(containerID string) (statusCode int, err error) {
	apipath := "/containers/" + containerID + "/wait"
	resp, err := DockerAPI(DockerTransport, "POST", apipath, []byte{}, "")
	if err != nil {
		except.Error(err, ": Error in Remote Docker API call: ", apipath)
		return
	}
	blog.Debug("Response from docker remote API call for wait: " + string(resp))
	var msg struct {
		StatusCode int
	}
	err = json.Unmarshal(resp, &msg)
	if err != nil {
		except.Error(err, "waitContainer resp", string(resp))
		return
	}
	blog.Info("Got StatusCode %d\n", msg.StatusCode)
	statusCode = msg.StatusCode
	return
}
func InfLoop(authToken string, processedImages collector.ImageSet) {
	duration := time.Duration(*poll) * time.Second
	reposToLimit := NewRepoSet()

	// Image Metadata we have already seen
	metadataSet := collector.NewMetadataSet()
	initMetadataSet(authToken, metadataSet)
	pulledList := []collector.ImageMetadataInfo{}

	for {
		config.BanyanUpdate("New iteration")
		metadataSet, pulledList = DoIteration(reposToLimit, authToken, processedImages, metadataSet, pulledList)

		blog.Info("Looping in %d seconds", *poll)
		config.BanyanUpdate("Sleeping for", strconv.FormatInt(*poll, 10), "seconds")
		time.Sleep(duration)

		checkConfigUpdate(false)
		authToken = refreshToken(authToken)
	}
}
// RemoveDanglingImages deletes any dangling images (untagged and unreferenced intermediate layers).
func RemoveDanglingImages() (e error) {
	dangling, err := ListDanglingImages()
	if err != nil {
		except.Error(err, "RemoveDanglingImages")
		return err
	}
	if len(dangling) == 0 {
		return
	}
	for _, image := range dangling {
		_, err = RemoveImageByID(image)
		if err != nil {
			except.Error(err, "RemoveDanglingImages")
			e = err
			continue
		}
		blog.Info("Removed dangling image %s", string(image))
	}
	return
}
func (f *FileWriter) writeFileInFormat(filenamePath string, data interface{}) {
	blog.Info("Writing " + filenamePath + "...")
	switch f.format {
	case "json":
		err := jsonifyAndWriteToFile(filenamePath+".json", data)
		if err != nil {
			blog.Error(err, ": Error in writing json output into file: ", filenamePath+".json")
			return
		}
	case "txt":
		// what's passed in is ptr to interface{}. First get interface{} out of it and then
		// typecast that to []byte
		err := ioutil.WriteFile(filenamePath+".txt", (*(data.(*interface{}))).([]byte), 0644)
		if err != nil {
			blog.Error(err, ": Error in writing to file: ", filenamePath)
			return
		}
	default:
		blog.Warn("Currently only supporting json output to write to files")
	}
}
// CreateContainer makes a docker remote API call to create a container.
func CreateContainer(containerSpec []byte) (containerID string, err error) {
	apipath := "/containers/create"
	resp, err := DockerAPI(DockerTransport, "POST", apipath, containerSpec, "")
	if err != nil {
		except.Error(err, ": Error in Remote Docker API call: ", apipath, string(containerSpec))
		return
	}
	blog.Debug("Response from docker remote API call for create: " + string(resp))
	var msg struct {
		Id       string
		Warnings string
	}
	err = json.Unmarshal(resp, &msg)
	if err != nil {
		except.Error(err, "createContainer resp", string(resp))
		return
	}
	blog.Info("Got ID %s Warnings %s\n", msg.Id, msg.Warnings)
	containerID = msg.Id
	return
}
// PullImage performs a docker pull on an image specified by repo/tag.
func PullImage(metadata *ImageMetadataInfo) (err error) {
	tagspec := metadata.Repo + ":" + metadata.Tag
	if RegistrySpec != config.DockerHub {
		tagspec = RegistrySpec + "/" + tagspec
	}
	apipath := "/images/create?fromImage=" + tagspec
	blog.Info("PullImage downloading %s, Image ID: %s", apipath, metadata.Image)
	config.BanyanUpdate("Pull", apipath, metadata.Image)
	resp, err := DockerAPI(DockerTransport, "POST", apipath, []byte{}, XRegistryAuth)
	if err != nil {
		except.Error(err, "PullImage failed for", RegistrySpec, metadata.Repo, metadata.Tag, metadata.Image)
		return
	}
	if strings.Contains(string(resp), `"error":`) {
		err = errors.New("PullImage error for " + RegistrySpec + "/" + metadata.Repo + "/" + metadata.Tag)
		except.Error(err)
		return
	}
	blog.Trace(string(resp))

	// get the Docker-calculated image ID
	calculatedID, err := dockerImageID(RegistrySpec, metadata)
	if err != nil {
		except.Error(err, "dockerImageID")
		return
	}
	if metadata.Image > "" && metadata.Image != calculatedID {
		newMetadata := *metadata
		newMetadata.Image = calculatedID
		RemoveImages([]ImageMetadataInfo{newMetadata})
		err = errors.New("PullImage " + metadata.Repo + ":" + metadata.Tag + " image ID " + calculatedID +
			" doesn't match metadata-derived ID " + metadata.Image)
		except.Error(err)
		return err
	}
	metadata.Image = calculatedID
	return
}
// GetLocalImages queries the local Docker daemon for list of images.
func GetLocalImages() (imageMap ImageToRepoTagMap, e error) {
	// query a list of images from Docker daemon
	response, e := listImages()
	if e != nil {
		return nil, e
	}
	// parse JSON
	var localImageList []LocalImageStruct
	if e = json.Unmarshal(response, &localImageList); e != nil {
		return nil, e
	}

	// make map from each imageID to all of its aliases (repo+tag)
	imageMap = make(ImageToRepoTagMap)
	for _, localImage := range localImageList {
		imageID := ImageIDType(localImage.ID)
		for _, regRepoTag := range localImage.RepoTags {
			// skip images with no repo:tag
			if regRepoTag == "" || regRepoTag == "\u003cnone\u003e:\u003cnone\u003e" || regRepoTag == "<none>:<none>" {
				blog.Info("Image ", imageID, " has a <none>:<none> repository:tag.")
				continue
			}

			repoTag, e := ExtractRepoTag(regRepoTag)
			if e != nil {
				return nil, e
			}
			blog.Debug(imageID, regRepoTag, repoTag)
			if CheckRepoToProcess(repoTag.Repo) {
				imageMap.Insert(imageID, repoTag)
			}
		}
	}
	return
}
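A short hypothetical usage sketch for the function above. It assumes ImageToRepoTagMap is a map keyed by ImageIDType (as the make() call above implies) and reuses the RepoTags accessor already used by RemoveImages.

// logLocalImagesSketch is a hypothetical helper that lists local images and logs
// each image ID together with its repo:tag aliases.
func logLocalImagesSketch() {
	imageMap, err := GetLocalImages()
	if err != nil {
		except.Error(err, ": GetLocalImages failed")
		return
	}
	for imageID := range imageMap {
		for _, repotag := range imageMap.RepoTags(imageID) {
			blog.Info("Image %s is tagged %s:%s", string(imageID), string(repotag.Repo), string(repotag.Tag))
		}
	}
}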
// DoIteration runs one iteration of the main loop to get new images, extract data from them,
// and save results.
func DoIteration(ReposToLimit RepoSet, authToken string,
	processedImages collector.ImageSet, oldMetadataSet collector.MetadataSet,
	PulledList []collector.ImageMetadataInfo) (currentMetadataSet collector.MetadataSet,
	PulledNew []collector.ImageMetadataInfo) {

	blog.Debug("DoIteration: processedImages is %v", processedImages)
	PulledNew = PulledList
	_ /*tagSlice*/, metadataSlice, currentMetadataSet := collector.GetNewImageMetadata(oldMetadataSet)
	if len(metadataSlice) == 0 {
		blog.Info("No new metadata in this iteration")
		return
	}
	blog.Info("Obtained %d new metadata items in this iteration", len(metadataSlice))
	collector.SaveImageMetadata(metadataSlice)

	// number of images processed for each repository in this iteration
	imageCount := make(map[collector.RepoType]int)
	// Set of repos to stop limiting according to maxImages after this iteration completes.
	StopLimiting := NewRepoSet()

	// processed metadata
	processedMetadata := collector.NewMetadataSet()

	for {
		pulledImages := collector.NewImageSet()
		pullErrorMetadata := collector.NewMetadataSet()
		for _, metadata := range metadataSlice {
			processedMetadata.Insert(metadata)
			if config.FilterRepos && !collector.ReposToProcess[collector.RepoType(metadata.Repo)] {
				continue
			}
			// TODO: need to filter out images from ExcludedRepo also when collecting from local Docker host?
			if collector.ExcludeRepo[collector.RepoType(metadata.Repo)] {
				continue
			}
			if pulledImages[collector.ImageIDType(metadata.Image)] {
				continue
			}

			// TODO: need to consider maxImages limit also when collecting from local Docker host?
			repo := collector.RepoType(metadata.Repo)
			if _, ok := ReposToLimit[repo]; !ok {
				// new repo we haven't seen before; apply maxImages limit to repo
				blog.Info("Starting to apply maxImages limit to repo %s", string(repo))
				ReposToLimit[repo] = true
			}
			if ReposToLimit[repo] && *maxImages > 0 && imageCount[repo] >= *maxImages {
				blog.Info("Max image count %d reached for %s, skipping :%s", *maxImages, metadata.Repo, metadata.Tag)
				// stop applying the maxImages limit to repo
				StopLimiting[repo] = true
				continue
			}
			if processedImages[collector.ImageIDType(metadata.Image)] {
				continue
			}
			imageCount[collector.RepoType(metadata.Repo)]++

			// docker pull image
			if !collector.LocalHost {
				err := collector.PullImage(metadata)
				if err != nil {
					// docker pull failed for some reason, possibly a transient failure.
					// So we remove this metadata element from the current and processed sets,
					// and move on to process any remaining metadata elements.
					// In the next iteration, metadata lookup may rediscover this deleted
					// metadata element and treat it as new, thus ensuring that the image
					// pull will be retried.
					// TODO: If the registry is corrupted, this can lead to an infinite
					// loop in which the same image pull keeps getting tried and consistently fails.
					currentMetadataSet.Delete(metadata)
					processedMetadata.Delete(metadata)
					// remember this pull error in order to demote this metadata to the end of the slice.
					pullErrorMetadata.Insert(metadata)
					err = collector.RemoveDanglingImages()
					if err != nil {
						except.Error(err, ": RemoveDanglingImages")
					}
					continue
				}
			}
			PulledNew = append(PulledNew, metadata)
			excess := len(PulledNew) - *removeThresh
			if !collector.LocalHost && *removeThresh > 0 && excess > 0 {
				config.BanyanUpdate("Removing " + strconv.Itoa(excess) + " pulled images")
				collector.RemoveImages(PulledNew[0:excess])
				PulledNew = PulledNew[excess:]
			}
			pulledImages[collector.ImageIDType(metadata.Image)] = true
			if len(pulledImages) == IMAGEBATCH {
				break
			}
		}

		if len(pulledImages) == 0 {
			blog.Info("No pulled images left to process in this iteration")
			config.BanyanUpdate("No pulled images left to process in this iteration")
			break
		}

		// reorder metadataSlice by moving images that couldn't be pulled to the end of the list
		newMDSlice := []collector.ImageMetadataInfo{}
		for _, metadata := range metadataSlice {
			if !pullErrorMetadata.Exists(metadata) {
				newMDSlice = append(newMDSlice, metadata)
			}
		}
		for metadata := range pullErrorMetadata {
			newMDSlice = append(newMDSlice, metadata)
		}
		metadataSlice = newMDSlice

		// get and save image data for all the images in pulledimages
		outMapMap := collector.GetImageAllData(pulledImages)
		collector.SaveImageAllData(outMapMap)
		for imageID := range pulledImages {
			processedImages[imageID] = true
		}
		if e := persistImageList(pulledImages); e != nil {
			except.Error(e, "Failed to persist list of collected images")
		}
		if checkConfigUpdate(false) == true {
			// Config changed, and possibly did so before all current metadata was processed.
			// Thus, remember only the metadata that has already been processed, and forget
			// metadata that has not been processed yet.
			// That way, the next time DoIteration() is entered, the metadata lookup
			// will correctly schedule the forgotten metadata for processing, along with
			// any new metadata.
			currentMetadataSet = processedMetadata
			break
		}
	}
	for repo := range StopLimiting {
		blog.Info("No longer enforcing maxImages limit on repo %s", repo)
		ReposToLimit[repo] = false
	}
	return
}
// AppendImageMetadata appends image metadata to file
func (f *FileWriter) AppendImageMetadata(imageMetadata []ImageMetadataInfo) {
	blog.Info("Appending image metadata to file...")
	f.format = "json"
	f.handleImageMetadata(imageMetadata, "ADD")
}
// RemoveImageMetadata removes image metadata from file
func (f *FileWriter) RemoveImageMetadata(imageMetadata []ImageMetadataInfo) {
	blog.Info("Removing image metadata from file...")
	f.format = "json"
	f.handleImageMetadata(imageMetadata, "REMOVE")
}
// getImageMetadata queries the Docker registry for info about each image.
func getImageMetadata(imageMap map[ImageIDType][]RepoTagType, oldMetadataSet MetadataSet) (metadataSlice []ImageMetadataInfo, e error) {
	metadataMap := NewImageToMetadataMap(oldMetadataSet)
	previousImages := NewImageSet()
	for metadata := range oldMetadataSet {
		previousImages[ImageIDType(metadata.Image)] = true
	}

	// for each alias, create an entry in metadataSlice
	ch := make(chan ImageMetadataInfo)
	errch := make(chan error)
	goCount := 0
	var client *http.Client
	if *RegistryTLSNoVerify {
		tr := &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
		client = &http.Client{Transport: tr}
	} else {
		client = &http.Client{}
	}
	for imageID := range imageMap {
		var curr ImageMetadataInfo
		if previousImages[imageID] {
			// We already know this image's metadata, but we need to record
			// its current repo:tag aliases.
			var e error
			curr, e = metadataMap.Metadata(imageID)
			if e != nil {
				blog.Error(e, "imageID", string(imageID), "not in metadataMap")
				continue
			}
			metadataSlice = append(metadataSlice, curr)
			continue
		}

		goCount++
		go func(imageID ImageIDType, ch chan ImageMetadataInfo, errch chan error) {
			var metadata ImageMetadataInfo
			blog.Info("Get Metadata for Image: %s", string(imageID))
			var response []byte
			var e error
			if LocalHost {
				response, e = inspectImage(string(imageID))
			} else {
				if *RegistryProto == "quay" {
					// TODO: Properly support quay.io image metadata instead of faking it.
					t := time.Date(2011, time.January, 1, 1, 0, 0, 0, time.UTC)
					metadata.Image = string(imageID)
					metadata.Datetime = t
					ch <- metadata
					return
				}
				response, e = RegistryQuery(client, RegistryAPIURL+"/v1/images/"+string(imageID)+"/json")
			}
			if e != nil {
				errch <- e
				return
			}

			var m ImageStruct
			if e = json.Unmarshal(response, &m); e != nil {
				errch <- e
				return
			}
			metadata.Image = string(imageID)
			if c, e := time.Parse(time.RFC3339Nano, m.Created); e != nil {
				errch <- e
				return
			} else {
				metadata.Datetime = c
				metadata.Size = m.Size
				metadata.Author = m.Author
				metadata.Checksum = m.Checksum
				metadata.Comment = m.Comment
				metadata.Parent = m.Parent
			}
			ch <- metadata
		}(imageID, ch, errch)

		if goCount > maxGoCount {
			for ; goCount > minGoCount; goCount-- {
				select {
				case metadata := <-ch:
					metadataSlice = append(metadataSlice, metadata)
				case err := <-errch:
					blog.Error(err, ":getImageMetadata")
				}
			}
		}
	}
	for ; goCount > 0; goCount-- {
		select {
		case metadata := <-ch:
			metadataSlice = append(metadataSlice, metadata)
		case err := <-errch:
			blog.Error(err, ":getImageMetadata")
		}
	}

	// fill in the repo and tag fields of metadataSlice, replicating entries for multiple aliases to an image
	finalMetadataSlice := []ImageMetadataInfo{}
	for _, metadata := range metadataSlice {
		for _, repotag := range imageMap[ImageIDType(metadata.Image)] {
			newmd := metadata
			// fill in the repo and tag
			// _ = repotag
			newmd.Repo = string(repotag.Repo)
			newmd.Tag = string(repotag.Tag)
			finalMetadataSlice = append(finalMetadataSlice, newmd)
		}
	}
	metadataSlice = finalMetadataSlice
	return
}
// GetImageMetadata returns repository/tag/image metadata queried from a Docker registry.
// If the user has specified the repositories to examine, then no other repositories are examined.
// If the user has not specified repositories, then the registry search API is used to
// get the list of all repositories in the registry.
func GetImageMetadata(oldMetadataSet MetadataSet) (tagSlice []TagInfo, metadataSlice []ImageMetadataInfo) {
	for {
		blog.Info("Get Repos")
		repoSlice, e := getRepos()
		if e != nil {
			blog.Warn(e, " getRepos")
			blog.Warn("Retrying")
			time.Sleep(config.RETRYDURATION)
			continue
		}
		if len(repoSlice) == 0 {
			// For some reason (like, registry search doesn't work), we are not
			// seeing any repos in the registry.
			// So, just reconstruct the list of repos that we saw earlier.
			blog.Warn("Empty repoSlice, reusing previous metadata")
			repomap := make(map[string]bool)
			for metadata := range oldMetadataSet {
				if repomap[metadata.Repo] == false {
					repoSlice = append(repoSlice, RepoType(metadata.Repo))
					repomap[metadata.Repo] = true
				}
			}
		}

		// Now get a list of all the tags, and the image metadata/manifest
		if *RegistryProto == "v1" {
			blog.Info("Get Tags")
			tagSlice, e = getTags(repoSlice)
			if e != nil {
				blog.Warn(e, " getTags")
				blog.Warn("Retrying")
				time.Sleep(config.RETRYDURATION)
				continue
			}

			// get map from each imageID to all of its aliases (repo+tag)
			imageMap := make(ImageToRepoTagMap)
			for _, ti := range tagSlice {
				for tag, imageID := range ti.TagMap {
					repotag := RepoTagType{Repo: ti.Repo, Tag: tag}
					imageMap.Insert(imageID, repotag)
				}
			}

			blog.Info("Get Image Metadata")
			// Get image metadata
			metadataSlice, e = getImageMetadata(imageMap, oldMetadataSet)
			if e != nil {
				blog.Warn(e, " getImageMetadata")
				blog.Warn("Retrying")
				time.Sleep(config.RETRYDURATION)
				continue
			}
			break
		}
		if *RegistryProto == "v2" {
			blog.Info("Get Tags and Metadata")
			tagSlice, metadataSlice, e = v2GetTagsMetadata(repoSlice)
			if e != nil {
				blog.Warn(e)
				blog.Warn("Retrying")
				time.Sleep(config.RETRYDURATION)
				continue
			}
			break
		}
	}
	return
}