Example #1
/* queryAuthServerV2 retrieves an authorization token from a V2 auth server */
func queryAuthServerV2(client *http.Client, fieldMap map[string]string, BasicAuth string) (token string, e error) {
	authServer := fieldMap["realm"]
	if authServer == "" {
		e = errors.New("No registry token auth server specified")
		return
	}
	blog.Debug("authServer=%s\n", authServer)
	URL := authServer
	first := true
	for key, value := range fieldMap {
		if key != "realm" {
			if first {
				URL = URL + "?"
				first = false
			} else {
				URL = URL + "&"
			}
			URL = URL + key + "=" + value
		}
	}
	blog.Debug("Auth server URL is %s", URL)

	req, e := http.NewRequest("GET", URL, nil)
	if e != nil {
		return
	}
	req.Header.Set("Authorization", "Basic "+BasicAuth)
	r, e := client.Do(req)
	if e != nil {
		return
	}
	defer r.Body.Close()
	if r.StatusCode < 200 || r.StatusCode > 299 {
		e = &HTTPStatusCodeError{StatusCode: r.StatusCode}
		return
	}
	response, e := ioutil.ReadAll(r.Body)
	if e != nil {
		return
	}
	var parsedReply authServerResult
	e = json.Unmarshal(response, &parsedReply)
	if e != nil {
		return
	}
	token = parsedReply.Token
	return token, e
}
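The query string above is built by plain concatenation, so parameter values are never escaped. A minimal alternative sketch using the standard net/url package (buildAuthURL is a hypothetical helper, not part of the original code; it assumes "net/url" is imported):

// Hypothetical helper (not in the original code): build the token URL with
// net/url so that service/scope values are properly escaped.
func buildAuthURL(realm string, fieldMap map[string]string) (string, error) {
	u, err := url.Parse(realm)
	if err != nil {
		return "", err
	}
	q := u.Query()
	for key, value := range fieldMap {
		if key != "realm" {
			q.Set(key, value)
		}
	}
	u.RawQuery = q.Encode()
	return u.String(), nil
}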
Example #2
func testWriteToFile(t *testing.T, outMapMap map[string]map[string]interface{}, script, image, destDir, format string, suffix string) (b []byte) {
	fw := NewFileWriter(format, destDir)
	fw.WriteImageAllData(outMapMap)
	// Test if correct output file exists
	finalDir := destDir + "/" + trimExtension(script) + "/"
	blog.Debug("final dir: " + finalDir)
	var filenamePath string
	if ok, e := fsutil.DirExists(finalDir); ok {
		if len(image) > 12 {
			image = image[0:12]
		}
		file := image + suffix + "." + format
		filenamePath = finalDir + "/" + file
		_, err := os.Stat(filenamePath)
		if err != nil {
			if os.IsNotExist(err) {
				t.Fatal(err, ": File ", filenamePath, " doesn't exist")
			}
			t.Fatal(err, ": Unknown error while locating file: ", filenamePath)
		}
	} else {
		t.Fatal(e, ": Directory: ", finalDir, " doesn't exist")
	}

	b, err := ioutil.ReadFile(filenamePath)
	if err != nil {
		t.Fatal(err, ": Error in reading file: ", filenamePath)
	}
	return b
}
Example #3
// RemoveImages removes least recently pulled docker images from the local docker host.
func RemoveImages(PulledImages []ImageMetadataInfo, imageToMDMap map[string][]ImageMetadataInfo) {
	numRemoved := 0
	for _, imageMD := range PulledImages {
		// Get all metadata (repo/tags) associated with that image
		for _, metadata := range imageToMDMap[imageMD.Image] {
			// basespec := RegistrySpec + "/" + string(t.Repo) + ":"
			if ExcludeRepo[RepoType(metadata.Repo)] {
				continue
			}
			blog.Debug("Removing the following registry/repo:tag: " + RegistrySpec + "/" +
				metadata.Repo + ":" + metadata.Tag)
			apipath := "/images/" + RegistrySpec + "/" + metadata.Repo + ":" + metadata.Tag
			blog.Info("RemoveImages %s", apipath)
			config.BanyanUpdate("Remove", apipath)
			_, err := DockerAPI(DockerTransport, "DELETE", apipath, []byte{}, "")
			if err != nil {
				blog.Error(err, "RemoveImages Repo:Tag", metadata.Repo, metadata.Tag,
					"image", metadata.Image)
			}
			numRemoved++
		}
	}

	blog.Info("Number of repo/tags removed this time around: %d", numRemoved)
	return
}
Example #4
func getScripts(dirPath string) (scripts []Script, err error) {
	files, err := ioutil.ReadDir(dirPath)
	if err != nil {
		blog.Warn(err, ": Error in reading contents of ", dirPath)
		return
	}

	for _, file := range files {
		// figure out the type of script from the file extension
		var script Script
		switch {
		case strings.HasSuffix(file.Name(), ".sh"):
			blog.Debug("dirpath: " + dirPath + " after removing prefix: " + config.BANYANDIR() + " looks like: " + strings.TrimPrefix(dirPath, config.BANYANDIR()+"/hosttarget"))
			script = newBashScript(file.Name(), TARGETCONTAINERDIR+strings.TrimPrefix(dirPath, config.BANYANDIR()+"/hosttarget"), []string{""})
		case strings.HasSuffix(file.Name(), ".py"):
			script = newPythonScript(file.Name(), TARGETCONTAINERDIR+strings.TrimPrefix(dirPath, config.BANYANDIR()+"/hosttarget"), []string{""})
		default:
			blog.Warn("Unknown script file type for: " + file.Name())
			//Ignore this file...
			continue
		}
		scripts = append(scripts, script)
	}

	return
}
Example #5
// LogsContainer makes a docker remote API call to get logs from a container.
func LogsContainer(containerID string) (output []byte, err error) {
	apipath := "/containers/" + containerID + "/logs?stdout=1"
	resp, err := DockerAPI(DockerTransport, "GET", apipath, []byte{}, "")
	if err != nil {
		except.Error(err, ": Error in Remote Docker API call: ", apipath)
		return
	}
	blog.Debug("Response from docker remote API call for logs: " + string(resp))
	for {
		if len(resp) < 8 {
			break
		}
		header := resp[0:8]
		var size int32
		buf := bytes.NewBuffer(header[4:8])
		if err = binary.Read(buf, binary.BigEndian, &size); err != nil {
			except.Error(err, ": Error in decoding log stream frame header")
			return
		}
		if int(size) > len(resp)-8 {
			// guard against a truncated frame to avoid slicing past the end of resp
			size = int32(len(resp) - 8)
		}
		payload := resp[8:(8 + size)]
		resp = resp[(8 + size):]
		if header[0] == uint8(1) {
			// 1=stdout: return only the stdout log
			output = append(output, payload...)
		}
	}
	return
}
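The loop above decodes the multiplexed stream format returned by the Docker logs endpoint: every frame carries an 8-byte header whose first byte is the stream type (0 = stdin, 1 = stdout, 2 = stderr) and whose last four bytes hold the big-endian payload length. A minimal sketch of that frame layout (encodeLogFrame is a hypothetical helper for illustration only; it assumes "encoding/binary" is imported):

// Hypothetical helper (illustration only): build one frame in the multiplexed
// log stream format that LogsContainer decodes above.
func encodeLogFrame(streamType byte, payload []byte) []byte {
	header := make([]byte, 8)
	header[0] = streamType // 0=stdin, 1=stdout, 2=stderr
	binary.BigEndian.PutUint32(header[4:8], uint32(len(payload)))
	return append(header, payload...)
}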
Example #6
// TestWriteImageAllData tests writing different types of image data to files
func TestWriteImageAllData(t *testing.T) {
	cases := []struct {
		script, image, destDir, format string
	}{
		{"myscript", "image", "/tmp", "json"},
		{"myscript.sh", "image1234", "/tmp", "json"},
		{"myscript.abc.sh", "aaaabbbb", "/tmp", "json"},
	}
	outMap := make(map[string]interface{})
	outMapMap := make(map[string]map[string]interface{})

	// Testing imagedata...
	var idata = []ImageDataInfo{{"111", "a", "b", "c", "dn1", "did1"}, {"111", "d", "e", "f", "dn2", "did2"}, {"121", "g", "h", "i", "dn3", "did3"}}
	for _, c := range cases {
		outMap[c.script] = idata
		outMapMap[c.image] = outMap
		b1 := testWriteToFile(t, outMapMap, c.script, c.image, c.destDir, c.format, "-pkgdata")
		b2, err := json.MarshalIndent(idata, "", "\t")
		if err != nil {
			t.Fatal(err, ": Error in marshaling json for imagedata")
		}
		if !bytes.Equal(b1, b2) {
			blog.Debug(b1)
			blog.Debug(b2)
			t.Fatal("Input/Output image data don't match: ", len(b1), len(b2))
		}
	}

	blog.Info("Reaching here => writing imagedata to file works fine")

	// Testing random output ([]byte)...
	randOut := []byte("Testing random output from scripts")
	for _, c := range cases {
		script := "X" + c.script
		outMap[script] = randOut
		outMapMap[c.image] = outMap
		b := testWriteToFile(t, outMapMap, script, c.image, c.destDir, "txt", "-miscdata")
		if !bytes.Equal(b, randOut) {
			blog.Debug(b)
			blog.Debug(randOut)
			t.Fatal("Input/Output image rand txt don't match", len(b), len(randOut))
		}
	}

	//Pass...
	return
}
Example #7
// Run handles running of a script inside an image
func (sh ScriptInfo) Run(imageID ImageIDType) (b []byte, err error) {
	jsonString, err := createCmd(imageID, sh.name, sh.staticBinary, sh.dirPath)
	if err != nil {
		except.Error(err, ": Error in creating command")
		return
	}
	blog.Debug("Container spec: %s", string(jsonString))
	containerID, err := CreateContainer(jsonString)
	if err != nil {
		except.Error(err, ": Error in creating container")
		return
	}
	blog.Debug("New container ID: %s", containerID)

	defer RemoveContainer(containerID)

	jsonString, err = StartContainer(containerID)
	if err != nil {
		except.Error(err, ": Error in starting container")
		return
	}
	blog.Debug("Response from StartContainer: %s", string(jsonString))
	statusCode, err := WaitContainer(containerID)
	if err != nil {
		except.Error(err, ": Error in waiting for container to stop")
		return
	}
	if statusCode != 0 {
		err = errors.New("Bash script exit status: " + strconv.Itoa(statusCode))
		return
	}
	b, err = LogsContainer(containerID)
	if err != nil {
		except.Error(err, ":Error in extracting output from container")
		return
	}
	/*
		_, err = removeContainer(containerID)
		if err != nil {
			except.Error(err, ":Error in removing container for image", containerID)
			return
		}
	*/
	return
}
Example #8
// RemoveContainer makes a docker remote API call to remove a container.
func RemoveContainer(containerID string) (resp []byte, err error) {
	apipath := "/containers/" + containerID
	resp, err = DockerAPI(DockerTransport, "DELETE", apipath, []byte{}, "")
	if err != nil {
		except.Error(err)
		return
	}
	blog.Debug("Response from docker remote API call for remove: " + string(resp))
	return
}
Example #9
// listImages makes a docker remote API call to get a list of images
func listImages() (resp []byte, err error) {
	apipath := "/images/json"
	resp, err = DockerAPI(DockerTransport, "GET", apipath, []byte{}, "")
	if err != nil {
		except.Error(err)
		return
	}
	blog.Debug("Response from docker remote API call for list images: " + string(resp))
	return
}
Example #10
func InspectImage(imageID string) (resp []byte, err error) {
	apipath := "/images/" + imageID + "/json"
	resp, err = DockerAPI(DockerTransport, "GET", apipath, []byte{}, "")
	if err != nil {
		except.Error(err)
		return
	}
	blog.Debug("Response from docker remote API call for inspect image " + imageID + " : \n" + string(resp))
	return
}
Example #11
// StartContainer makes a docker remote API call to start a container.
func StartContainer(containerID string) (jsonOut []byte, err error) {
	apipath := "/containers/" + containerID + "/start"
	jsonOut, err = DockerAPI(DockerTransport, "POST", apipath, []byte{}, "")
	if err != nil {
		except.Error(err, ": Error in Remote Docker API call: ", apipath)
		return
	}
	blog.Debug("Response from docker remote API call for start: " + string(resp))
	return
}
Example #12
// DockerAPI performs an HTTP GET, POST, or DELETE operation against the Docker daemon.
func DockerAPI(tr *http.Transport, operation, apipath string, jsonString []byte,
	XRegistryAuth string) (resp []byte, e error) {
	switch operation {
	case "GET", "POST", "DELETE":
		break
	default:
		e = errors.New("Operation " + operation + " not supported")
		return
	}
	// for unix socket, URL (host.domain) is needed but can be anything
	var host string
	HTTP := "http://"
	if DockerProto == "unix" {
		host = dummydomain
	} else {
		host = DockerAddr
		if DockerTLSVerify {
			HTTP = "https://"
		}
	}
	URL := HTTP + host + apipath
	blog.Debug("DockerAPI %s", URL)
	req, e := http.NewRequest(operation, URL, bytes.NewBuffer(jsonString))
	if e != nil {
		except.Error(e, ":DockerAPI failed to create http request")
		return
	}
	req.Header.Add("Content-Type", "application/json")
	if XRegistryAuth != "" {
		req.Header.Add("X-Registry-Auth", XRegistryAuth)
	}

	//req.Header.Set("Authorization", "Bearer "+authToken)
	client := &http.Client{Transport: tr, Timeout: DockerTimeout}
	r, e := client.Do(req)
	if e != nil {
		except.Error(e, ":DockerAPI URL", URL, "client request failed")
		return
	}
	defer r.Body.Close()
	resp, e = ioutil.ReadAll(r.Body)
	if e != nil {
		except.Error(e, ":DockerAPI URL", URL, "invalid response body")
		return
	}
	if r.StatusCode < 200 || r.StatusCode > 299 {
		e = errors.New("DockerAPI URL: " + URL + " status code: " + strconv.Itoa(r.StatusCode) +
			"error: " + string(resp))
		return
	}
	return
}
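DockerAPI expects DockerTransport to be set up for either a unix socket or a tcp address; as the comment above notes, with a unix socket the host part of the URL is only a placeholder. A minimal sketch of how such a transport can be built with the standard library (this is not the repo's NewDockerTransport, only an illustration; it assumes "net" and "net/http" are imported):

// Hypothetical sketch (not the repo's NewDockerTransport): an http.Transport
// that dials the Docker daemon's unix socket regardless of the URL's host.
func unixSocketTransport(socketPath string) *http.Transport {
	return &http.Transport{
		Dial: func(network, addr string) (net.Conn, error) {
			return net.Dial("unix", socketPath)
		},
	}
}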
Example #13
func main() {
	doFlags()

	setupLogging()

	//verifyVolumes()

	copyBanyanData()

	// setup connection to docker daemon's unix/tcp socket
	var e error
	collector.DockerTransport, e = collector.NewDockerTransport(*dockerProto, *dockerAddr)
	if e != nil {
		except.Fail(e, ": Error in connecting to docker remote API socket")
	}

	authToken := RegisterCollector()

	// Set output writers
	SetOutputWriters(authToken)
	SetupBanyanStatus(authToken)

	checkConfigUpdate(true)
	if !collector.LocalHost && collector.RegistryAPIURL == "" {
		collector.RegistryAPIURL, collector.HubAPI, collector.BasicAuth, collector.XRegistryAuth = collector.GetRegistryURL()
		blog.Info("registry API URL: %s", collector.RegistryAPIURL)
	}

	// Log the docker version
	major, minor, revision, e := collector.DockerVersion()
	if e != nil {
		except.Error(e, ": Could not identify Docker version")
	} else {
		blog.Info("Docker version %d.%d.%d", major, minor, revision)
		config.BanyanUpdate("Docker version", strconv.Itoa(major)+"."+strconv.Itoa(minor)+"."+strconv.Itoa(revision))
	}

	// Images we have processed already
	processedImages := collector.NewImageSet()
	e = getImageList(processedImages)
	if e != nil {
		blog.Info("Fresh start: No previously collected images were found in %s", *imageList)
	}
	_ = getImageManifestHashList(processedImages)
	blog.Debug(processedImages)

	// Main infinite loop.
	InfLoop(authToken, processedImages)
}
Example #14
// doFlags defines the cmdline Usage string and parses flag options.
func doFlags() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "  Usage: %s [OPTIONS] REGISTRY REPO [REPO...]\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "\n  REGISTRY:\n")
		fmt.Fprintf(os.Stderr, "\tURL of your Docker registry; use index.docker.io for Docker Hub, use local.host to collect images from local Docker host\n")
		fmt.Fprintf(os.Stderr, "\n  REPO:\n")
		fmt.Fprintf(os.Stderr, "\tOne or more repos to gather info about; if no repo is specified Collector will gather info on *all* repos in the Registry\n")
		fmt.Fprintf(os.Stderr, "\n  Environment variables:\n")
		fmt.Fprintf(os.Stderr, "\tCOLLECTOR_DIR:   (Required) Directory that contains the \"data\" folder with Collector default scripts, e.g., $GOPATH/src/github.com/banyanops/collector\n")
		fmt.Fprintf(os.Stderr, "\tCOLLECTOR_ID:    ID provided by Banyan web interface to register Collector with the Banyan service\n")
		fmt.Fprintf(os.Stderr, "\tBANYAN_HOST_DIR: Host directory mounted into Collector/Target containers where results are stored (default: $HOME/.banyan)\n")
		fmt.Fprintf(os.Stderr, "\tBANYAN_DIR:      (Specify only in Dockerfile) Directory in the Collector container where host directory BANYAN_HOST_DIR is mounted\n")
		fmt.Fprintf(os.Stderr, "\tDOCKER_{HOST,CERT_PATH,TLS_VERIFY}: If set, e.g., by docker-machine, then they take precedence over --dockerProto and --dockerAddr\n")
		printExampleUsage()
		fmt.Fprintf(os.Stderr, "  Options:\n")
		flag.PrintDefaults()
	}
	flag.Parse()
	if config.COLLECTORDIR() == "" {
		flag.Usage()
		os.Exit(except.ErrorExitStatus)
	}
	if len(flag.Args()) < 1 {
		flag.Usage()
		os.Exit(except.ErrorExitStatus)
	}
	if *dockerProto != "unix" && *dockerProto != "tcp" {
		flag.Usage()
		os.Exit(except.ErrorExitStatus)
	}
	requiredDirs := []string{config.BANYANDIR(), filepath.Dir(*imageList), filepath.Dir(*repoList), *config.BanyanOutDir, collector.DefaultScriptsDir, collector.UserScriptsDir, collector.BinDir}
	for _, dir := range requiredDirs {
		blog.Debug("Creating directory: " + dir)
		err := fsutil.CreateDirIfNotExist(dir)
		if err != nil {
			except.Fail(err, ": Error in creating a required directory: ", dir)
		}
	}
	collector.RegistrySpec = flag.Arg(0)
	// EqualFold: case insensitive comparison
	if strings.EqualFold(flag.Arg(0), "local.host") {
		collector.LocalHost = true
	}
	//nextMaxImages = *maxImages
}
Example #15
func main() {
	doFlags()

	setupLogging()

	//verifyVolumes()

	copyBanyanData()

	// setup connection to docker daemon's unix/tcp socket
	var e error
	collector.DockerTransport, e = collector.NewDockerTransport(*dockerProto, *dockerAddr)
	if e != nil {
		blog.Exit(e, ": Error in connecting to docker remote API socket")
	}

	authToken := RegisterCollector()

	// Set output writers
	SetOutputWriters(authToken)
	SetupBanyanStatus(authToken)

	checkConfigUpdate(true)
	if !collector.LocalHost && collector.RegistryAPIURL == "" {
		collector.RegistryAPIURL, collector.HubAPI, collector.BasicAuth, collector.XRegistryAuth = collector.GetRegistryURL()
		blog.Info("registry API URL: %s", collector.RegistryAPIURL)
	}

	// Images we have processed already
	processedImages := collector.NewImageSet()
	e = getImageList(processedImages)
	if e != nil {
		blog.Info("Fresh start: No previously collected images were found in %s", *imageList)
	}
	blog.Debug(processedImages)

	// Image Metadata we have already seen
	MetadataSet := collector.NewMetadataSet()
	PulledList := []collector.ImageMetadataInfo{}

	// Main infinite loop.
	InfLoop(authToken, processedImages, MetadataSet, PulledList)
}
Example #16
// WaitContainer makes a docker remote API call to wait for a container to finish running.
func WaitContainer(containerID string) (statusCode int, err error) {
	apipath := "/containers/" + containerID + "/wait"
	resp, err := DockerAPI(DockerTransport, "POST", apipath, []byte{}, "")
	if err != nil {
		except.Error(err, ": Error in Remote Docker API call: ", apipath)
		return
	}
	blog.Debug("Response from docker remote API call for wait: " + string(resp))
	var msg struct {
		StatusCode int
	}
	err = json.Unmarshal(resp, &msg)
	if err != nil {
		except.Error(err, "waitContainer resp", string(resp))
		return
	}
	blog.Info("Got StatusCode %d\n", msg.StatusCode)
	statusCode = msg.StatusCode
	return
}
Example #17
// getImageList reads the list of previously processed images from the imageList file.
func getImageList(processedImages collector.ImageSet) (e error) {
	f, e := os.Open(*imageList)
	if e != nil {
		except.Warn(e, ": Error in opening", *imageList, ": perhaps a fresh start?")
		return
	}
	defer f.Close()
	r := bufio.NewReader(f)
	data, e := ioutil.ReadAll(r)
	if e != nil {
		except.Error(e, ": Error in reading file ", *imageList)
		return
	}
	for _, str := range strings.Split(string(data), "\n") {
		if len(str) != 0 {
			blog.Debug("Previous image: %s", str)
			processedImages[collector.ImageIDType(str)] = true
		}
	}
	return
}
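getImageList expects a plain text file containing one image ID per line. A hypothetical counterpart that writes the same format (this is not the repo's persistImageList, only a sketch; it assumes collector.ImageSet is a map keyed by ImageIDType and that "os" is imported):

// Hypothetical sketch (not the repo's persistImageList): append image IDs,
// one per line, to the file that getImageList reads back on startup.
func appendImageList(path string, images collector.ImageSet) error {
	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	for imageID := range images {
		if _, err := f.WriteString(string(imageID) + "\n"); err != nil {
			return err
		}
	}
	return nil
}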
Example #18
// CreateContainer makes a docker remote API call to create a container.
func CreateContainer(containerSpec []byte) (containerID string, err error) {
	apipath := "/containers/create"
	resp, err := DockerAPI(DockerTransport, "POST", apipath, containerSpec, "")
	if err != nil {
		except.Error(err, ": Error in Remote Docker API call: ", apipath, string(containerSpec))
		return
	}
	blog.Debug("Response from docker remote API call for create: " + string(resp))
	var msg struct {
		Id       string
		Warnings string
	}
	err = json.Unmarshal(resp, &msg)
	if err != nil {
		except.Error(err, "createContainer resp", string(resp))
		return
	}
	blog.Info("Got ID %s Warnings %s\n", msg.Id, msg.Warnings)
	containerID = msg.Id
	return
}
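The containerSpec argument is the JSON document produced by createCmd in the Run example above; createCmd itself is not shown in these examples. Purely as an illustration of the /containers/create payload shape, a minimal hand-written spec might look like this (image name and command are placeholders):

// Hypothetical sketch (not the repo's createCmd): a minimal container spec
// for the Docker remote API /containers/create endpoint.
func createEchoContainer() (string, error) {
	spec := []byte(`{
		"Image": "busybox:latest",
		"Cmd": ["/bin/sh", "-c", "echo hello"],
		"AttachStdout": true,
		"AttachStderr": true
	}`)
	return CreateContainer(spec)
}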
Example #19
// GetLocalImages queries the local Docker daemon for list of images.
// The registry name is stripped from the repo name if stripRegistry is set to true.
// If checkRepo is set to true, only repos in the list of repos to process are included.
func GetLocalImages(stripRegistry bool, checkRepo bool) (imageMap ImageToRepoTagMap, e error) {

	// query a list of images from Docker daemon
	response, e := listImages()
	if e != nil {
		return nil, e
	}
	// parse JSON
	var localImageList []LocalImageStruct
	if e = json.Unmarshal(response, &localImageList); e != nil {
		return nil, e
	}

	// make map from each imageID to all of its aliases (repo+tag)
	imageMap = make(ImageToRepoTagMap)
	for _, localImage := range localImageList {
		imageID := ImageIDType(localImage.ID)
		for _, regRepoTag := range localImage.RepoTags {
			// skip images with no repo:tag
			if regRepoTag == "" || regRepoTag == "\u003cnone\u003e:\u003cnone\u003e" || regRepoTag == "<none>:<none>" {
				blog.Debug("Image %s has a <none>:<none> repository:tag.", string(imageID))
				continue
			}

			repoTag, e := ExtractRepoTag(regRepoTag, stripRegistry)
			if e != nil {
				return nil, e
			}

			if checkRepo {
				if CheckRepoToProcess(repoTag.Repo) {
					imageMap.Insert(imageID, repoTag)
				}
			} else {
				imageMap.Insert(imageID, repoTag)
			}
		}
	}
	return
}
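The json.Unmarshal call above relies on LocalImageStruct matching the /images/json response; the struct itself is not shown in these examples. Based only on the fields used above, its shape would be roughly the following (the json tags are assumptions taken from the Docker API field names):

// Assumed shape (not copied from the repo): only the fields of the
// /images/json response that GetLocalImages actually uses.
type LocalImageStruct struct {
	ID       string   `json:"Id"`
	RepoTags []string `json:"RepoTags"`
}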
Example #20
// getImageManifestHashList reads the list of previously processed images (manifest hash) from the imageList_ManifestHash file.
func getImageManifestHashList(processedImagesManifestHash collector.ImageSet) (e error) {
	filename := *imageList + "_ManifestHash"
	f, e := os.Open(filename)
	if e != nil {
		except.Warn(e, ": Error in opening", filename, ": perhaps a fresh start?")
		return
	}
	defer f.Close()
	r := bufio.NewReader(f)
	data, e := ioutil.ReadAll(r)
	if e != nil {
		except.Error(e, ": Error in reading file ", filename)
		return
	}
	for _, str := range strings.Split(string(data), "\n") {
		if len(str) != 0 {
			blog.Debug("Previous image: %s", str)
			processedImagesManifestHash.Insert(collector.ImageIDType(str))
		}
	}
	return
}
Example #21
// RegistryQueryV2 performs an HTTP GET operation from the registry and returns the response.
// If the initial response code is 401 Unauthorized and the response carries a
// WWW-Authenticate header, this function queries the indicated auth server for a
// token and then re-issues the initial call with that token to get the final response.
func RegistryQueryV2(client *http.Client, URL string) (response []byte, e error) {
	_, _, BasicAuth, XRegistryAuth = GetRegistryURL()
	req, e := http.NewRequest("GET", URL, nil)
	if e != nil {
		return nil, e
	}
	req.Header.Set("Authorization", "Basic "+BasicAuth)
	r, e := client.Do(req)
	if e != nil {
		return nil, e
	}
	if r.StatusCode == 401 {
		blog.Debug("Registry Query %s got 401", URL)
		// get the WWW-Authenticate header
		WWWAuth := r.Header.Get("WWW-Authenticate")
		if WWWAuth == "" {
			except.Error("Empty WWW-Authenticate", URL)
			return
		}
		arr := strings.Fields(WWWAuth)
		if len(arr) != 2 {
			e = errors.New("Invalid WWW-Authenticate format for " + WWWAuth)
			except.Error(e)
			return
		}
		authType := arr[0]
		blog.Debug("Authorization type: %s", authType)
		fieldMap := make(map[string]string)
		e = parseAuthenticateFields(arr[1], fieldMap)
		if e != nil {
			except.Error(e)
			return
		}
		r.Body.Close()
		// access the authentication server to get a token
		token, err := queryAuthServerV2(client, fieldMap, BasicAuth)
		if err != nil {
			except.Error(err)
			return nil, err
		}
		// re-issue the original request, this time using the token
		req, e = http.NewRequest("GET", URL, nil)
		if e != nil {
			return nil, e
		}
		req.Header.Set("Authorization", authType+" "+token)
		r, e = client.Do(req)
		if e != nil {
			return nil, e
		}
	}
	defer r.Body.Close()
	if r.StatusCode < 200 || r.StatusCode > 299 {
		e = &HTTPStatusCodeError{StatusCode: r.StatusCode}
		return
	}
	response, e = ioutil.ReadAll(r.Body)
	if e != nil {
		return
	}
	return
}
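parseAuthenticateFields belongs to the repo but is not shown in these examples. As an illustration only, a Bearer challenge such as realm="https://auth.example.com/token",service="registry.example.com",scope="repository:foo/bar:pull" could be split into the fieldMap used above roughly like this (parseAuthenticateFieldsSketch is a hypothetical stand-in; it assumes "strings" and "errors" are imported):

// Hypothetical stand-in (not the repo's parseAuthenticateFields): split the
// comma-separated key="value" fields of a WWW-Authenticate Bearer challenge.
func parseAuthenticateFieldsSketch(s string, fieldMap map[string]string) error {
	for _, field := range strings.Split(s, ",") {
		kv := strings.SplitN(field, "=", 2)
		if len(kv) != 2 {
			return errors.New("Invalid WWW-Authenticate field: " + field)
		}
		fieldMap[kv[0]] = strings.Trim(kv[1], `"`)
	}
	return nil
}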
Example #22
// DoIteration runs one iteration of the main loop to get new images, extract data from them,
// and save the results.
func DoIteration(ReposToLimit RepoSet, authToken string,
	processedImages collector.ImageSet, oldMetadataSet collector.MetadataSet,
	PulledList []collector.ImageMetadataInfo) (currentMetadataSet collector.MetadataSet,
	PulledNew []collector.ImageMetadataInfo) {
	blog.Debug("DoIteration: processedImages is %v", processedImages)
	PulledNew = PulledList
	_ /*tagSlice*/, metadataSlice, currentMetadataSet := collector.GetNewImageMetadata(oldMetadataSet)

	if len(metadataSlice) == 0 {
		blog.Info("No new metadata in this iteration")
		return
	}
	blog.Info("Obtained %d new metadata items in this iteration", len(metadataSlice))
	collector.SaveImageMetadata(metadataSlice)

	// number of images processed for each repository in this iteration
	imageCount := make(map[collector.RepoType]int)

	// Set of repos to stop limiting according to maxImages after this iteration completes.
	StopLimiting := NewRepoSet()

	// processed metadata
	processedMetadata := collector.NewMetadataSet()

	for {
		pulledImages := collector.NewImageSet()
		pullErrorMetadata := collector.NewMetadataSet()
		for _, metadata := range metadataSlice {
			processedMetadata.Insert(metadata)
			if config.FilterRepos && !collector.ReposToProcess[collector.RepoType(metadata.Repo)] {
				continue
			}
			// TODO: need to filter out images from ExcludedRepo also when collecting from local Docker host?
			if collector.ExcludeRepo[collector.RepoType(metadata.Repo)] {
				continue
			}
			if pulledImages[collector.ImageIDType(metadata.Image)] {
				continue
			}
			// TODO: need to consider maxImages limit also when collecting from local Docker host?
			repo := collector.RepoType(metadata.Repo)
			if _, ok := ReposToLimit[repo]; !ok {
				// new repo we haven't seen before; apply maxImages limit to repo
				blog.Info("Starting to apply maxImages limit to repo %s", string(repo))
				ReposToLimit[repo] = true
			}
			if ReposToLimit[repo] && *maxImages > 0 && imageCount[repo] >= *maxImages {
				blog.Info("Max image count %d reached for %s, skipping :%s",
					*maxImages, metadata.Repo, metadata.Tag)
				// stop applying the maxImages limit to repo
				StopLimiting[repo] = true
				continue
			}
			if processedImages[collector.ImageIDType(metadata.Image)] {
				continue
			}

			imageCount[collector.RepoType(metadata.Repo)]++

			// docker pull image
			if !collector.LocalHost {
				err := collector.PullImage(metadata)
				if err != nil {
					// docker pull failed for some reason, possibly a transient failure.
					// So we remove this metadata element from the current and processed sets,
					// and move on to process any remaining metadata elements.
					// In the next iteration, metadata
					// lookup may rediscover this deleted metadata element
					// and treat it as new, thus ensuring that the image pull will be retried.
					// TODO: If the registry is corrupted, this can lead to an infinite
					// loop in which the same image pull keeps getting tried and consistently fails.
					currentMetadataSet.Delete(metadata)
					processedMetadata.Delete(metadata)
					// remember this pull error in order to demote this metadata to the end of the slice.
					pullErrorMetadata.Insert(metadata)
					err = collector.RemoveDanglingImages()
					if err != nil {
						except.Error(err, ": RemoveDanglingImages")
					}
					continue
				}
			}
			PulledNew = append(PulledNew, metadata)
			excess := len(PulledNew) - *removeThresh
			if !collector.LocalHost && *removeThresh > 0 && excess > 0 {
				config.BanyanUpdate("Removing " + strconv.Itoa(excess) + " pulled images")
				collector.RemoveImages(PulledNew[0:excess])
				PulledNew = PulledNew[excess:]
			}
			pulledImages[collector.ImageIDType(metadata.Image)] = true
			if len(pulledImages) == IMAGEBATCH {
				break
			}
		}

		if len(pulledImages) == 0 {
			blog.Info("No pulled images left to process in this iteration")
			config.BanyanUpdate("No pulled images left to process in this iteration")
			break
		}

		// reorder metadataSlice by moving images that couldn't be pulled to the end of the list
		newMDSlice := []collector.ImageMetadataInfo{}
		for _, metadata := range metadataSlice {
			if !pullErrorMetadata.Exists(metadata) {
				newMDSlice = append(newMDSlice, metadata)
			}
		}
		for metadata := range pullErrorMetadata {
			newMDSlice = append(newMDSlice, metadata)
		}
		metadataSlice = newMDSlice

		// get and save image data for all the images in pulledimages
		outMapMap := collector.GetImageAllData(pulledImages)
		collector.SaveImageAllData(outMapMap)
		for imageID := range pulledImages {
			processedImages[imageID] = true
		}
		if e := persistImageList(pulledImages); e != nil {
			except.Error(e, "Failed to persist list of collected images")
		}
		if checkConfigUpdate(false) {
			// Config changed, and possibly did so before all current metadata was processed.
			// Thus, remember only the metadata that has already been processed, and forget
			// metadata that has not been processed yet.
			// That way, the next time DoIteration() is entered, the metadata lookup
			// will correctly schedule the forgotten metadata for processing, along with
			// any new metadata.
			currentMetadataSet = processedMetadata
			break
		}
	}

	for repo := range StopLimiting {
		blog.Info("No longer enforcing maxImages limit on repo %s", repo)
		ReposToLimit[repo] = false
	}
	return
}
Example #23
// DoIteration runs one iteration of the main loop to get new images, extract data from them,
// and save the results.
func DoIteration(ReposToLimit RepoSet, authToken string,
	processedImages collector.ImageSet, oldMetadataSet collector.MetadataSet,
	PulledList []collector.ImageMetadataInfo) (currentMetadataSet collector.MetadataSet,
	PulledNew []collector.ImageMetadataInfo) {
	blog.Debug("DoIteration: processedImages is %v", processedImages)
	PulledNew = PulledList
	_ /*tagSlice*/, metadataSlice, currentMetadataSet := collector.GetNewImageMetadata(oldMetadataSet)

	if len(metadataSlice) == 0 {
		blog.Info("No new metadata in this iteration")
		return
	}
	blog.Info("Obtained %d new metadata items in this iteration", len(metadataSlice))
	collector.SaveImageMetadata(metadataSlice)

	// number of images processed for each repository in this iteration
	imageCount := make(map[collector.RepoType]int)
	imageToMDMap := collector.GetImageToMDMap(metadataSlice)

	// Set of repos to stop limiting according to maxImages after this iteration completes.
	StopLimiting := NewRepoSet()

	for {
		pulledImages := collector.NewImageSet()
		for _, metadata := range metadataSlice {
			if config.FilterRepos && !collector.ReposToProcess[collector.RepoType(metadata.Repo)] {
				continue
			}
			// TODO: need to filter out images from ExcludedRepo also when collecting from local Docker host?
			if collector.ExcludeRepo[collector.RepoType(metadata.Repo)] {
				continue
			}
			if pulledImages[collector.ImageIDType(metadata.Image)] {
				continue
			}
			// TODO: need to consider maxImages limit also when collecting from local Docker host?
			repo := collector.RepoType(metadata.Repo)
			if _, ok := ReposToLimit[repo]; !ok {
				// new repo we haven't seen before; apply maxImages limit to repo
				blog.Info("Starting to apply maxImages limit to repo %s", string(repo))
				ReposToLimit[repo] = true
			}
			if ReposToLimit[repo] && *maxImages > 0 && imageCount[repo] >= *maxImages {
				blog.Info("Max image count %d reached for %s, skipping :%s",
					*maxImages, metadata.Repo, metadata.Tag)
				// stop applying the maxImages limit to repo
				StopLimiting[repo] = true
				continue
			}
			if processedImages[collector.ImageIDType(metadata.Image)] {
				continue
			}

			imageCount[collector.RepoType(metadata.Repo)]++

			// docker pull image
			if !collector.LocalHost {
				collector.PullImage(metadata)
			}
			PulledNew = append(PulledNew, metadata)
			if !collector.LocalHost && *removeThresh > 0 && len(PulledNew) > *removeThresh {
				collector.RemoveImages(PulledNew[0:*removeThresh], imageToMDMap)
				PulledNew = PulledNew[*removeThresh:]
			}
			pulledImages[collector.ImageIDType(metadata.Image)] = true
			if len(pulledImages) == IMAGEBATCH {
				break
			}
		}

		if len(pulledImages) == 0 {
			break
		}
		// get and save image data for all the images in pulledimages
		outMapMap := collector.GetImageAllData(pulledImages)
		collector.SaveImageAllData(outMapMap)
		for imageID := range pulledImages {
			processedImages[imageID] = true
		}
		if e := persistImageList(pulledImages); e != nil {
			blog.Error(e, "Failed to persist list of collected images")
		}
		if checkConfigUpdate(false) {
			break
		}
	}

	for repo := range StopLimiting {
		blog.Info("No longer enforcing maxImages limit on repo %s", repo)
		ReposToLimit[repo] = false
	}
	return
}