// GetImageMetadataTokenAuthV1 returns repositories/tags/image metadata from the Docker Hub
// or other registry using v1 token authorization.
// The user must have specified a set of repositories of interest.
// The function queries the index server, e.g., Docker Hub, to get the token and registry, and then uses
// the token to query the registry.
func GetImageMetadataTokenAuthV1(oldMetadataSet MetadataSet) (tagSlice []TagInfo, metadataSlice []ImageMetadataInfo) {
    if len(ReposToProcess) == 0 {
        return
    }
    client := &http.Client{}
    metadataMap := NewImageToMetadataMap(oldMetadataSet)

    for repo := range ReposToProcess {
        blog.Info("Get index and tag info for %s", string(repo))
        config.BanyanUpdate("Get index and tag info for", string(repo))
        var (
            indexInfo         IndexInfo
            e                 error
            repoTagSlice      []TagInfo
            repoMetadataSlice []ImageMetadataInfo
        )

        // loop until success
        for {
            indexInfo, e = getReposTokenAuthV1(repo, client)
            if e != nil {
                blog.Warn(e, ":index lookup failed for repo", string(repo), "- retrying.")
                config.BanyanUpdate(e.Error(), ":index lookup failed, repo", string(repo), "- retrying")
                time.Sleep(config.RETRYDURATION)
                continue
            }

            repoTagSlice, e = getTagsTokenAuthV1(repo, client, indexInfo)
            if e != nil {
                blog.Warn(e, ":tag lookup failed for repo", string(repo), "- retrying.")
                config.BanyanUpdate(e.Error(), ":tag lookup failed for repo", string(repo), "- retrying")
                time.Sleep(config.RETRYDURATION)
                continue
            }
            if len(repoTagSlice) != 1 {
                blog.Error("Incorrect length of repoTagSlice: expected length=1, got length=%d", len(repoTagSlice))
                config.BanyanUpdate("Incorrect length of repoTagSlice:", strconv.Itoa(len(repoTagSlice)), string(repo))
                time.Sleep(config.RETRYDURATION)
                continue
            }

            repoMetadataSlice, e = getMetadataTokenAuthV1(repoTagSlice[0], metadataMap, client, indexInfo)
            if e != nil {
                blog.Warn(e, ":metadata lookup failed for", string(repoTagSlice[0].Repo), "- retrying.")
                config.BanyanUpdate(e.Error(), ":metadata lookup failed for", string(repoTagSlice[0].Repo), "- retrying")
                time.Sleep(config.RETRYDURATION)
                continue
            }

            // success!
            break
        }
        tagSlice = append(tagSlice, repoTagSlice...)
        metadataSlice = append(metadataSlice, repoMetadataSlice...)
    }
    return
}
// RemoveImages removes least recently pulled docker images from the local docker host.
func RemoveImages(PulledImages []ImageMetadataInfo, imageToMDMap map[string][]ImageMetadataInfo) {
    numRemoved := 0
    for _, imageMD := range PulledImages {
        // Get all metadata (repo/tags) associated with that image
        for _, metadata := range imageToMDMap[imageMD.Image] {
            // basespec := RegistrySpec + "/" + string(t.Repo) + ":"
            if ExcludeRepo[RepoType(metadata.Repo)] {
                continue
            }
            blog.Debug("Removing the following registry/repo:tag: " + RegistrySpec + "/" + metadata.Repo + ":" + metadata.Tag)
            apipath := "/images/" + RegistrySpec + "/" + metadata.Repo + ":" + metadata.Tag
            blog.Info("RemoveImages %s", apipath)
            config.BanyanUpdate("Remove", apipath)
            _, err := DockerAPI(DockerTransport, "DELETE", apipath, []byte{}, "")
            if err != nil {
                blog.Error(err, "RemoveImages Repo:Tag", metadata.Repo, metadata.Tag, "image", metadata.Image)
            }
            numRemoved++
        }
    }

    blog.Info("Number of repo/tags removed this time around: %d", numRemoved)
    return
}
// InfLoop is the main collection loop: each iteration gathers new metadata and processes images
// via DoIteration, then sleeps for the configured poll interval, re-checks the configuration,
// and refreshes the authentication token.
func InfLoop(authToken string, processedImages collector.ImageSet, MetadataSet collector.MetadataSet,
    PulledList []collector.ImageMetadataInfo) {
    duration := time.Duration(*poll) * time.Second
    reposToLimit := NewRepoSet()
    for {
        config.BanyanUpdate("New iteration")
        MetadataSet, PulledList = DoIteration(reposToLimit, authToken, processedImages, MetadataSet, PulledList)

        blog.Info("Looping in %d seconds", *poll)
        config.BanyanUpdate("Sleeping for", strconv.FormatInt(*poll, 10), "seconds")
        time.Sleep(duration)
        checkConfigUpdate(false)
        authToken = refreshToken(authToken)
    }
}
// SaveImageAllData saves output of all the scripts.
func SaveImageAllData(outMapMap map[string]map[string]interface{} /*, dotfiles []DotFilesType*/) {
    config.BanyanUpdate("Save Image Data", statusMessageImageData(outMapMap))

    for _, writer := range WriterList {
        writer.WriteImageAllData(outMapMap)
    }

    return
}
// GetNewImageMetadata takes the set of existing images, queries the registry to find any changes,
// and then brings the Output Writer up to date by telling it the obsolete metadata to delete
// and the new metadata to add.
func GetNewImageMetadata(oldMetadataSet MetadataSet) (tagSlice []TagInfo,
    metadataSlice []ImageMetadataInfo, currentMetadataSet MetadataSet) {

    var currentMetadataSlice []ImageMetadataInfo
    //config.BanyanUpdate("Loading Registry Metadata")
    if LocalHost == true {
        blog.Info("Collect images from local Docker host")
        currentMetadataSlice = GetLocalImageMetadata(oldMetadataSet)
        // there is no tag API under Docker Remote API,
        // and the caller of GetNewImageMetadata ignores tagSlice
        tagSlice = nil
    } else {
        switch {
        case HubAPI == false:
            tagSlice, currentMetadataSlice = GetImageMetadata(oldMetadataSet)
        case HubAPI == true:
            tagSlice, currentMetadataSlice = GetImageMetadataTokenAuthV1(oldMetadataSet)
        }
    }

    // get only the new metadata from currentMetadataSlice
    currentMetadataSet = NewMetadataSet()
    for _, metadata := range currentMetadataSlice {
        currentMetadataSet[metadata] = true
        if _, ok := oldMetadataSet[metadata]; !ok {
            // metadata is not in old map
            metadataSlice = append(metadataSlice, metadata)
        }
    }

    // find entries in the old map that are not in the current map,
    // and remove those entries from the database
    obsolete := []ImageMetadataInfo{}
    for metadata := range oldMetadataSet {
        if _, ok := currentMetadataSet[metadata]; !ok {
            if len(ReposToProcess) > 0 {
                if _, present := ReposToProcess[RepoType(metadata.Repo)]; present {
                    obsolete = append(obsolete, metadata)
                    blog.Info("Need to remove ImageMetadata: %v", metadata)
                }
            } else {
                obsolete = append(obsolete, metadata)
                blog.Info("Need to remove ImageMetadata: %v", metadata)
            }
        }
    }
    if len(obsolete) > 0 {
        RemoveObsoleteMetadata(obsolete)
    }

    if len(metadataSlice) > 0 || len(obsolete) > 0 {
        config.BanyanUpdate("Detected changes in registry metadata")
    }

    // Sort image metadata from newest image to oldest image
    sort.Sort(ByDateTime(metadataSlice))
    return
}
// InfLoop is the main collection loop. It first initializes the set of image metadata that has
// already been seen, then repeatedly runs DoIteration, sleeps for the configured poll interval,
// re-checks the configuration, and refreshes the authentication token.
func InfLoop(authToken string, processedImages collector.ImageSet) {
    duration := time.Duration(*poll) * time.Second
    reposToLimit := NewRepoSet()

    // Image Metadata we have already seen
    metadataSet := collector.NewMetadataSet()
    initMetadataSet(authToken, metadataSet)

    pulledList := []collector.ImageMetadataInfo{}

    for {
        config.BanyanUpdate("New iteration")
        metadataSet, pulledList = DoIteration(reposToLimit, authToken, processedImages, metadataSet, pulledList)

        blog.Info("Looping in %d seconds", *poll)
        config.BanyanUpdate("Sleeping for", strconv.FormatInt(*poll, 10), "seconds")
        time.Sleep(duration)
        checkConfigUpdate(false)
        authToken = refreshToken(authToken)
    }
}
// RemoveImages removes least recently pulled docker images from the local docker host.
func RemoveImages(PulledImages []ImageMetadataInfo) {
    numRemoved := 0
    imageMap, err := GetLocalImages(false, false)
    if err != nil {
        except.Error(err, ": RemoveImages unable to list local images")
    }
    for _, metadata := range PulledImages {
        if strings.HasPrefix(metadata.Repo, "library/") {
            metadata.Repo = strings.Replace(metadata.Repo, "library/", "", 1)
        }
        imageID := ImageIDType(metadata.Image)
        if metadata.Image == "" {
            // unknown image ID. Search the repotags for a match
            var err error
            imageID, err = imageMap.Image(RepoType(metadata.Repo), TagType(metadata.Tag))
            if err != nil {
                except.Error(err, ": RemoveImages unable to find image ID")
                break
            }
        }
        // Get all repo:tags associated with the image
        repoTagSlice := imageMap.RepoTags(imageID)
        if len(repoTagSlice) == 0 {
            except.Error("RemoveImages unable to find expected repo:tag " + metadata.Repo + ":" + metadata.Tag +
                " for image ID=" + string(imageID))
            except.Error("imageMap is %v", imageMap)
            continue
        }
        for _, repotag := range repoTagSlice {
            // basespec := RegistrySpec + "/" + string(t.Repo) + ":"
            if ExcludeRepo[RepoType(repotag.Repo)] {
                continue
            }
            apipath := "/images/" + string(repotag.Repo) + ":" + string(repotag.Tag)
            blog.Info("RemoveImages %s", apipath)
            config.BanyanUpdate("Remove", apipath)
            _, err := DockerAPI(DockerTransport, "DELETE", apipath, []byte{}, "")
            if err != nil {
                except.Error(err, "RemoveImages Repo:Tag", repotag.Repo, repotag.Tag, "image", metadata.Image)
            }
            numRemoved++
        }
    }

    blog.Info("Number of repo/tags removed this time around: %d", numRemoved)

    RemoveDanglingImages()

    return
}
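// GetLocalImages above returns a value whose concrete type is declared elsewhere in the package;
// RemoveImages relies only on the two lookups it calls. The interface below is a hypothetical
// sketch of that dependency, inferred solely from the calls shown above (the name localImageIndex
// and the RepoTagType element type are assumptions, not the package's actual declarations).
type localImageIndex interface {
    // Image resolves a repo:tag pair to the corresponding image ID.
    Image(repo RepoType, tag TagType) (ImageIDType, error)
    // RepoTags returns every repo:tag currently pointing at the given image ID.
    RepoTags(imageID ImageIDType) []RepoTagType
}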
// RemoveObsoleteMetadata removes obsolete metadata from the Banyan service.
func RemoveObsoleteMetadata(obsolete []ImageMetadataInfo) {
    if len(obsolete) == 0 {
        blog.Warn("No obsolete image metadata to remove!")
        return
    }

    config.BanyanUpdate("Remove Metadata", statusMessageMD(obsolete))

    for _, writer := range WriterList {
        writer.RemoveImageMetadata(obsolete)
    }

    return
}
// SaveImageMetadata saves image metadata to selected storage location
// (standard output, Banyan service, etc.).
func SaveImageMetadata(metadataSlice []ImageMetadataInfo) {
    if len(metadataSlice) == 0 {
        blog.Warn("No image metadata to save!")
        return
    }

    config.BanyanUpdate("Save Image Metadata", statusMessageMD(metadataSlice))

    for _, writer := range WriterList {
        writer.AppendImageMetadata(metadataSlice)
    }

    return
}
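// The writer fan-out above (and in SaveImageAllData and RemoveObsoleteMetadata) implies that every
// element of WriterList satisfies a common writer interface. Its real definition lives elsewhere in
// the package; the sketch below is an assumption inferred only from the three method calls that
// appear in this section, not the authoritative declaration.
type Writer interface {
    // AppendImageMetadata records newly discovered image metadata.
    AppendImageMetadata(metadataSlice []ImageMetadataInfo)
    // RemoveImageMetadata deletes metadata that no longer exists in the registry.
    RemoveImageMetadata(metadataSlice []ImageMetadataInfo)
    // WriteImageAllData stores per-image script output (image ID -> script name -> output).
    WriteImageAllData(outMapMap map[string]map[string]interface{})
}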
// Warn logs a warning message and generates a config.BanyanUpdate.
func Warn(arg0 interface{}, args ...interface{}) {
    if len(args) == 0 {
        blog.Warn(arg0)
        s := fmt.Sprintf("WARN %v", arg0)
        config.BanyanUpdate(s)
    } else {
        var s string
        switch arg0.(type) {
        case string:
            blog.Warn(arg0.(string), args...)
            s = fmt.Sprintf("WARN %s", arg0.(string))
            s = fmt.Sprintf(s, args...)
        default:
            blog.Warn(arg0, args...)
            s = fmt.Sprintf("WARN %v", arg0)
            arr := []interface{}{s}
            arr = append(arr, args...)
            s = fmt.Sprintln(arr...)
        }
        s = strings.TrimRight(s, "\n")
        config.BanyanUpdate(s)
    }
}
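// exampleWarnUsage is a hypothetical caller, shown only to illustrate the two branches of Warn
// above: a string first argument is treated as a printf-style format for the remaining arguments,
// while any other first argument (e.g., an error) is logged as-is with the remaining arguments
// appended. The call sites and argument names are illustrative, not taken from the codebase.
func exampleWarnUsage(repo string, attempts int, err error) {
    Warn("tag lookup failed for repo %s after %d attempts", repo, attempts) // format-string branch
    Warn(err, ": metadata lookup failed, retrying")                         // non-string branch
}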
// GetNewImageMetadata takes the set of existing images, queries the registry to find any changes,
// and then brings the Output Writer up to date by telling it the obsolete metadata to delete
// and the new metadata to add.
func GetNewImageMetadata(oldMetadataSet MetadataSet) (metadataSlice []ImageMetadataInfo,
    currentMetadataSet MetadataSet) {

    var currentMetadataSlice []ImageMetadataInfo
    //config.BanyanUpdate("Loading Registry Metadata")
    if LocalHost == true {
        blog.Info("Collect images from local Docker host")
        currentMetadataSlice = GetLocalImageMetadata(oldMetadataSet)
    } else {
        currentMetadataSlice = GetImageMetadata(oldMetadataSet)
    }

    // get only the new metadata from currentMetadataSlice
    currentMetadataSet = NewMetadataSet()
    for _, metadata := range currentMetadataSlice {
        currentMetadataSet.Insert(metadata)
        if oldMetadataSet.Exists(metadata) == false {
            // metadata is not in old map
            metadataSlice = append(metadataSlice, metadata)
        }
    }

    // find entries in the old map that are not in the current map,
    // and remove those entries from the database
    obsolete := []ImageMetadataInfo{}
    for metadata := range oldMetadataSet {
        if !currentMetadataSet.Exists(metadata) {
            if len(ReposToProcess) > 0 {
                if _, present := ReposToProcess[RepoType(metadata.Repo)]; present {
                    obsolete = append(obsolete, metadata)
                    blog.Info("Need to remove ImageMetadata: %v", metadata)
                }
            } else {
                obsolete = append(obsolete, metadata)
                blog.Info("Need to remove ImageMetadata: %v", metadata)
            }
        }
    }
    if len(obsolete) > 0 {
        RemoveObsoleteMetadata(obsolete)
    }

    if len(metadataSlice) > 0 || len(obsolete) > 0 {
        config.BanyanUpdate("Detected changes in registry metadata")
    }

    // Sort image metadata from newest image to oldest image
    sort.Sort(ByDateTime(metadataSlice))
    return
}
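// The diffing above needs only set semantics from MetadataSet: insertion, removal, membership
// tests, and the ability to range over elements. One version of GetNewImageMetadata in this
// section indexes the set directly as a map while the other goes through Insert/Exists helpers,
// so the following is a plausible shape for the type, offered as an assumption rather than the
// package's actual declaration. It relies on ImageMetadataInfo being a comparable struct, which
// its use as a map key above already implies.
type MetadataSet map[ImageMetadataInfo]bool

// NewMetadataSet returns an empty set.
func NewMetadataSet() MetadataSet { return make(MetadataSet) }

// Insert adds metadata to the set.
func (s MetadataSet) Insert(metadata ImageMetadataInfo) { s[metadata] = true }

// Exists reports whether metadata is already in the set.
func (s MetadataSet) Exists(metadata ImageMetadataInfo) bool { _, ok := s[metadata]; return ok }

// Delete removes metadata from the set.
func (s MetadataSet) Delete(metadata ImageMetadataInfo) { delete(s, metadata) }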
func main() {
    doFlags()

    setupLogging()

    //verifyVolumes()

    copyBanyanData()

    // setup connection to docker daemon's unix/tcp socket
    var e error
    collector.DockerTransport, e = collector.NewDockerTransport(*dockerProto, *dockerAddr)
    if e != nil {
        except.Fail(e, ": Error in connecting to docker remote API socket")
    }

    authToken := RegisterCollector()

    // Set output writers
    SetOutputWriters(authToken)
    SetupBanyanStatus(authToken)

    checkConfigUpdate(true)
    if collector.LocalHost == false && collector.RegistryAPIURL == "" {
        collector.RegistryAPIURL, collector.HubAPI, collector.BasicAuth, collector.XRegistryAuth = collector.GetRegistryURL()
        blog.Info("registry API URL: %s", collector.RegistryAPIURL)
    }

    // Log the docker version
    major, minor, revision, e := collector.DockerVersion()
    if e != nil {
        except.Error(e, ": Could not identify Docker version")
    } else {
        blog.Info("Docker version %d.%d.%d", major, minor, revision)
        config.BanyanUpdate("Docker version", strconv.Itoa(major)+"."+strconv.Itoa(minor)+"."+strconv.Itoa(revision))
    }

    // Images we have processed already
    processedImages := collector.NewImageSet()

    e = getImageList(processedImages)
    if e != nil {
        blog.Info("Fresh start: No previously collected images were found in %s", *imageList)
    }

    _ = getImageManifestHashList(processedImages)

    blog.Debug(processedImages)

    // Main infinite loop.
    InfLoop(authToken, processedImages)
}
// PullImage performs a docker pull on an image specified by repo/tag.
// TODO: Detect if the pulled image has a different imageID than the value retrieved from
// metadata, and if so correct the metadata, or at least skip processing the image.
func PullImage(metadata ImageMetadataInfo) {
    tagspec := RegistrySpec + "/" + metadata.Repo + ":" + metadata.Tag
    apipath := "/images/create?fromImage=" + tagspec

    blog.Info("PullImage downloading %s, Image ID: %s", apipath, metadata.Image)
    config.BanyanUpdate("Pull", apipath, metadata.Image)

    resp, err := DockerAPI(DockerTransport, "POST", apipath, []byte{}, XRegistryAuth)
    if err != nil {
        blog.Error(err, "PullImage failed for", RegistrySpec, metadata.Repo, metadata.Tag, metadata.Image)
    }
    if strings.Contains(string(resp), `"error":`) {
        blog.Error("PullImage error for %s/%s/%s", RegistrySpec, metadata.Repo, metadata.Tag)
    }

    blog.Trace(string(resp))
    return
}
// GetImageAllData extracts content info from each pulled image. Currently it gets system package info.
func GetImageAllData(pulledImages ImageSet) (outMapMap map[string]map[string]interface{}) {
    // Map ImageID -> Script Map; Script Map: Script name -> output
    outMapMap = make(map[string]map[string]interface{})

    for imageID := range pulledImages {
        config.BanyanUpdate("Scripts", string(imageID))
        outMap, err := runAllScripts(imageID)
        if err != nil {
            blog.Error(err, ": Error processing image", string(imageID))
            continue
        }
        outMapMap[string(imageID)] = outMap
    }

    return
}
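// The nested map returned above is keyed by image ID at the outer level and by script name at the
// inner level, with each script's output as the value. printImageAllData is a hypothetical helper,
// not part of the package, included only to illustrate how a consumer would walk that structure.
func printImageAllData(outMapMap map[string]map[string]interface{}) {
    for imageID, outMap := range outMapMap {
        for scriptName, output := range outMap {
            blog.Info("image %s: script %s produced: %v", imageID, scriptName, output)
        }
    }
}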
// PullImage performs a docker pull on an image specified by repo/tag.
func PullImage(metadata *ImageMetadataInfo) (err error) {
    tagspec := metadata.Repo + ":" + metadata.Tag
    if RegistrySpec != config.DockerHub {
        tagspec = RegistrySpec + "/" + tagspec
    }
    apipath := "/images/create?fromImage=" + tagspec

    blog.Info("PullImage downloading %s, Image ID: %s", apipath, metadata.Image)
    config.BanyanUpdate("Pull", apipath, metadata.Image)

    resp, err := DockerAPI(DockerTransport, "POST", apipath, []byte{}, XRegistryAuth)
    if err != nil {
        except.Error(err, "PullImage failed for", RegistrySpec, metadata.Repo, metadata.Tag, metadata.Image)
        return
    }
    if strings.Contains(string(resp), `"error":`) {
        err = errors.New("PullImage error for " + RegistrySpec + "/" + metadata.Repo + "/" + metadata.Tag)
        except.Error(err)
        return
    }

    blog.Trace(string(resp))

    // get the Docker-calculated image ID
    calculatedID, err := dockerImageID(RegistrySpec, metadata)
    if err != nil {
        except.Error(err, "dockerImageID")
        return
    }
    if metadata.Image > "" && metadata.Image != calculatedID {
        newMetadata := *metadata
        newMetadata.Image = calculatedID
        RemoveImages([]ImageMetadataInfo{newMetadata})
        err = errors.New("PullImage " + metadata.Repo + ":" + metadata.Tag + " image ID " + calculatedID +
            " doesn't match metadata-derived ID " + metadata.Image)
        except.Error(err)
        return err
    }
    metadata.Image = calculatedID
    return
}
// SaveImageMetadata saves image metadata to selected storage location
// (standard output, Banyan service, etc.).
func SaveImageMetadata(metadataSlice []ImageMetadataInfo) {
    if len(metadataSlice) == 0 {
        except.Warn("No image metadata to save!")
        return
    }

    config.BanyanUpdate("Save Image Metadata", statusMessageMD(metadataSlice))

    // Keep only entries that have a known image ID.
    slice := []ImageMetadataInfo{}
    for _, metadata := range metadataSlice {
        if len(metadata.Image) > 0 {
            slice = append(slice, metadata)
        }
    }
    if len(slice) == 0 {
        return
    }
    for _, writer := range WriterList {
        writer.AppendImageMetadata(slice)
    }

    return
}
// DoIteration runs one iteration of the main loop to get new images, extract data from them,
// and save the results.
func DoIteration(ReposToLimit RepoSet, authToken string,
    processedImages collector.ImageSet, oldMetadataSet collector.MetadataSet,
    PulledList []collector.ImageMetadataInfo) (currentMetadataSet collector.MetadataSet,
    PulledNew []collector.ImageMetadataInfo) {

    blog.Debug("DoIteration: processedImages is %v", processedImages)
    PulledNew = PulledList
    _ /*tagSlice*/, metadataSlice, currentMetadataSet := collector.GetNewImageMetadata(oldMetadataSet)

    if len(metadataSlice) == 0 {
        blog.Info("No new metadata in this iteration")
        return
    }
    blog.Info("Obtained %d new metadata items in this iteration", len(metadataSlice))
    collector.SaveImageMetadata(metadataSlice)

    // number of images processed for each repository in this iteration
    imageCount := make(map[collector.RepoType]int)

    // Set of repos to stop limiting according to maxImages after this iteration completes.
    StopLimiting := NewRepoSet()

    // processed metadata
    processedMetadata := collector.NewMetadataSet()

    for {
        pulledImages := collector.NewImageSet()
        pullErrorMetadata := collector.NewMetadataSet()
        for _, metadata := range metadataSlice {
            processedMetadata.Insert(metadata)
            if config.FilterRepos && !collector.ReposToProcess[collector.RepoType(metadata.Repo)] {
                continue
            }
            // TODO: need to filter out images from ExcludedRepo also when collecting from local Docker host?
            if collector.ExcludeRepo[collector.RepoType(metadata.Repo)] {
                continue
            }
            if pulledImages[collector.ImageIDType(metadata.Image)] {
                continue
            }

            // TODO: need to consider maxImages limit also when collecting from local Docker host?
            repo := collector.RepoType(metadata.Repo)
            if _, ok := ReposToLimit[repo]; !ok {
                // new repo we haven't seen before; apply maxImages limit to repo
                blog.Info("Starting to apply maxImages limit to repo %s", string(repo))
                ReposToLimit[repo] = true
            }
            if ReposToLimit[repo] && *maxImages > 0 && imageCount[repo] >= *maxImages {
                blog.Info("Max image count %d reached for %s, skipping :%s", *maxImages, metadata.Repo, metadata.Tag)
                // stop applying the maxImages limit to repo
                StopLimiting[repo] = true
                continue
            }
            if processedImages[collector.ImageIDType(metadata.Image)] {
                continue
            }

            imageCount[collector.RepoType(metadata.Repo)]++

            // docker pull image
            if !collector.LocalHost {
                err := collector.PullImage(metadata)
                if err != nil {
                    // docker pull failed for some reason, possibly a transient failure.
                    // So we remove this metadata element from the current and processed sets,
                    // and move on to process any remaining metadata elements.
                    // In the next iteration, metadata lookup may rediscover this deleted
                    // metadata element and treat it as new, thus ensuring that the image pull
                    // will be retried.
                    // TODO: If the registry is corrupted, this can lead to an infinite
                    // loop in which the same image pull keeps getting tried and consistently fails.
                    currentMetadataSet.Delete(metadata)
                    processedMetadata.Delete(metadata)
                    // remember this pull error in order to demote this metadata to the end of the slice.
                    pullErrorMetadata.Insert(metadata)
                    err = collector.RemoveDanglingImages()
                    if err != nil {
                        except.Error(err, ": RemoveDanglingImages")
                    }
                    continue
                }
            }
            PulledNew = append(PulledNew, metadata)
            excess := len(PulledNew) - *removeThresh
            if !collector.LocalHost && *removeThresh > 0 && excess > 0 {
                config.BanyanUpdate("Removing " + strconv.Itoa(excess) + " pulled images")
                collector.RemoveImages(PulledNew[0:excess])
                PulledNew = PulledNew[excess:]
            }
            pulledImages[collector.ImageIDType(metadata.Image)] = true
            if len(pulledImages) == IMAGEBATCH {
                break
            }
        }

        if len(pulledImages) == 0 {
            blog.Info("No pulled images left to process in this iteration")
            config.BanyanUpdate("No pulled images left to process in this iteration")
            break
        }

        // reorder metadataSlice by moving images that couldn't be pulled to the end of the list
        newMDSlice := []collector.ImageMetadataInfo{}
        for _, metadata := range metadataSlice {
            if !pullErrorMetadata.Exists(metadata) {
                newMDSlice = append(newMDSlice, metadata)
            }
        }
        for metadata := range pullErrorMetadata {
            newMDSlice = append(newMDSlice, metadata)
        }
        metadataSlice = newMDSlice

        // get and save image data for all the images in pulledImages
        outMapMap := collector.GetImageAllData(pulledImages)
        collector.SaveImageAllData(outMapMap)
        for imageID := range pulledImages {
            processedImages[imageID] = true
        }
        if e := persistImageList(pulledImages); e != nil {
            except.Error(e, "Failed to persist list of collected images")
        }
        if checkConfigUpdate(false) == true {
            // Config changed, and possibly did so before all current metadata was processed.
            // Thus, remember only the metadata that has already been processed, and forget
            // metadata that has not been processed yet.
            // That way, the next time DoIteration() is entered, the metadata lookup
            // will correctly schedule the forgotten metadata for processing, along with
            // any new metadata.
            currentMetadataSet = processedMetadata
            break
        }
    }

    for repo := range StopLimiting {
        blog.Info("No longer enforcing maxImages limit on repo %s", repo)
        ReposToLimit[repo] = false
    }
    return
}
// GetImageMetadataTokenAuthV1 returns repositories/tags/image metadata from the Docker Hub
// or other registry using v1 token authorization.
// The user must have specified a set of repositories of interest.
// The function queries the index server, e.g., Docker Hub, to get the token and registry, and then uses
// the token to query the registry.
func GetImageMetadataTokenAuthV1(oldMetadataSet MetadataSet) (tagSlice []TagInfo, metadataSlice []ImageMetadataInfo) {
    if len(ReposToProcess) == 0 {
        return
    }
    client := &http.Client{}
    metadataMap := NewImageToMetadataMap(oldMetadataSet)

    allRepos := []RepoType{}
    // Check if we need to use the search API, i.e. only one repo given, and ends in wildcard "*".
    if searchTerm := NeedRegistrySearch(); searchTerm != "" {
        blog.Info("Using search API")
        var e error
        allRepos, e = registrySearchV1(client, searchTerm)
        if e != nil {
            blog.Error(e, ":registry search")
            return
        }
    }
    // If search wasn't needed, the repos were individually specified.
    if len(allRepos) == 0 {
        for repo := range ReposToProcess {
            allRepos = append(allRepos, repo)
        }
    }

    for _, repo := range allRepos {
        blog.Info("Get index and tag info for %s", string(repo))
        config.BanyanUpdate("Get index and tag info for", string(repo))
        var (
            indexInfo         IndexInfo
            e                 error
            repoTagSlice      []TagInfo
            repoMetadataSlice []ImageMetadataInfo
        )

        // loop until success
        for {
            indexInfo, e = getReposTokenAuthV1(repo, client)
            if e != nil {
                blog.Warn(e, ":index lookup failed for repo", string(repo), "- retrying.")
                config.BanyanUpdate(e.Error(), ":index lookup failed, repo", string(repo), "- retrying")
                time.Sleep(config.RETRYDURATION)
                continue
            }

            repoTagSlice, e = getTagsTokenAuthV1(repo, client, indexInfo)
            if e != nil {
                blog.Warn(e, ":tag lookup failed for repo", string(repo), "- retrying.")
                config.BanyanUpdate(e.Error(), ":tag lookup failed for repo", string(repo), "- retrying")
                time.Sleep(config.RETRYDURATION)
                continue
            }
            if len(repoTagSlice) != 1 {
                blog.Error("Incorrect length of repoTagSlice: expected length=1, got length=%d", len(repoTagSlice))
                config.BanyanUpdate("Incorrect length of repoTagSlice:", strconv.Itoa(len(repoTagSlice)), string(repo))
                time.Sleep(config.RETRYDURATION)
                continue
            }

            repoMetadataSlice, e = getMetadataTokenAuthV1(repoTagSlice[0], metadataMap, client, indexInfo)
            if e != nil {
                blog.Warn(e, ":metadata lookup failed for", string(repoTagSlice[0].Repo), "- retrying.")
                config.BanyanUpdate(e.Error(), ":metadata lookup failed for", string(repoTagSlice[0].Repo), "- retrying")
                time.Sleep(config.RETRYDURATION)
                continue
            }

            // success!
            break
        }
        tagSlice = append(tagSlice, repoTagSlice...)
        metadataSlice = append(metadataSlice, repoMetadataSlice...)
    }
    return
}
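// For context on what getReposTokenAuthV1 has to accomplish, the legacy Docker registry v1 token
// handshake works roughly as follows: the client asks the index server for the repository's image
// list with the header "X-Docker-Token: true", then reads the signed token and the registry
// endpoint back from the "X-Docker-Token" and "X-Docker-Endpoints" response headers; subsequent
// registry requests carry "Authorization: Token <token>". The function below is only a minimal
// sketch of that protocol flow, not the package's actual implementation; the index URL and the
// function name are assumptions made for illustration.
func exampleV1TokenHandshake(client *http.Client, repo string) (token, endpoint string, err error) {
    req, err := http.NewRequest("GET", "https://index.docker.io/v1/repositories/"+repo+"/images", nil)
    if err != nil {
        return
    }
    // Ask the index server to issue a token for this repository.
    req.Header.Set("X-Docker-Token", "true")
    resp, err := client.Do(req)
    if err != nil {
        return
    }
    defer resp.Body.Close()
    // The token and the registry endpoint come back as response headers.
    token = resp.Header.Get("X-Docker-Token")
    endpoint = resp.Header.Get("X-Docker-Endpoints")
    return
}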