// GetServerStatus fetches /status.json from the given keep server and records
// the request and response timestamps in the logger.
func GetServerStatus(arvLogger *logger.Logger,
	keepServer ServerAddress,
	client http.Client) {
	url := fmt.Sprintf("http://%s:%d/status.json",
		keepServer.Host,
		keepServer.Port)

	if arvLogger != nil {
		now := time.Now()
		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			keepInfo := logger.GetOrCreateMap(p, "keep_info")
			serverInfo := make(map[string]interface{})
			serverInfo["status_request_sent_at"] = now
			serverInfo["host"] = keepServer.Host
			serverInfo["port"] = keepServer.Port

			keepInfo[keepServer.Uuid] = serverInfo
		})
	}

	resp, err := client.Get(url)
	if err != nil {
		loggerutil.FatalWithMessage(arvLogger,
			fmt.Sprintf("Error getting keep status from %s: %v", url, err))
	} else if resp.StatusCode != 200 {
		loggerutil.FatalWithMessage(arvLogger,
			fmt.Sprintf("Received error code %d in response to request "+
				"for %s status: %s",
				resp.StatusCode, url, resp.Status))
	}

	var keepStatus map[string]interface{}
	decoder := json.NewDecoder(resp.Body)
	decoder.UseNumber()
	err = decoder.Decode(&keepStatus)
	if err != nil {
		loggerutil.FatalWithMessage(arvLogger,
			fmt.Sprintf("Error decoding keep status from %s: %v", url, err))
	}

	if arvLogger != nil {
		now := time.Now()
		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			keepInfo := logger.GetOrCreateMap(p, "keep_info")
			serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
			serverInfo["status_response_processed_at"] = now
			serverInfo["status"] = keepStatus
		})
	}
}
// A LogMutator that records the current memory usage. This is most useful as
// a logger write hook.
func LogMemoryAlloc(p map[string]interface{}, e map[string]interface{}) {
	runInfo := logger.GetOrCreateMap(p, "run_info")
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	runInfo["memory_bytes_in_use"] = memStats.Alloc
	runInfo["memory_bytes_reserved"] = memStats.Sys
}
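// registerMemoryHook is a minimal usage sketch (a hypothetical helper, not
// part of the original source) showing how LogMemoryAlloc is meant to be
// used: singlerun in datamanager.go registers it with AddWriteHook so the
// run_info memory fields are refreshed whenever the logger writes.
func registerMemoryHook(arvLogger *logger.Logger) {
	if arvLogger != nil {
		// Write hooks run on each log write; LogMemoryAlloc refreshes
		// run_info["memory_bytes_in_use"] and run_info["memory_bytes_reserved"].
		arvLogger.AddWriteHook(LogMemoryAlloc)
	}
}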
// FatalWithMessage records a fatal error in the log, forces a final log
// write, and exits.
func FatalWithMessage(arvLogger *logger.Logger, message string) {
	if arvLogger != nil {
		arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
			p["FATAL"] = message
			runInfo := logger.GetOrCreateMap(p, "run_info")
			runInfo["finished_at"] = time.Now()
		})
	}

	// Pass message as an argument, not as the format string, so any % verbs
	// it contains are not misinterpreted.
	log.Fatalf("%s", message)
}
// Summarize results from keep server
func (readServers *ReadServers) Summarize(arvLogger *logger.Logger) {
	readServers.BlockReplicationCounts = make(map[int]int)
	for _, infos := range readServers.BlockToServers {
		replication := len(infos)
		readServers.BlockReplicationCounts[replication]++
	}

	if arvLogger != nil {
		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			keepInfo := logger.GetOrCreateMap(p, "keep_info")
			keepInfo["distinct_blocks_stored"] = len(readServers.BlockToServers)
		})
	}
}
// Useful to call at the beginning of execution to log info about the
// current run.
func LogRunInfo(arvLogger *logger.Logger) {
	if arvLogger != nil {
		now := time.Now()
		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			runInfo := logger.GetOrCreateMap(p, "run_info")
			runInfo["started_at"] = now
			runInfo["args"] = os.Args
			hostname, err := os.Hostname()
			if err != nil {
				runInfo["hostname_error"] = err.Error()
			} else {
				runInfo["hostname"] = hostname
			}
			runInfo["pid"] = os.Getpid()
		})
	}
}
// Summarize the collections read
func (readCollections *ReadCollections) Summarize(arvLogger *logger.Logger) {
	readCollections.OwnerToCollectionSize = make(map[string]int)
	readCollections.BlockToDesiredReplication = make(map[blockdigest.DigestWithSize]int)
	numCollections := len(readCollections.UUIDToCollection)
	readCollections.CollectionUUIDToIndex = make(map[string]int, numCollections)
	readCollections.CollectionIndexToUUID = make([]string, 0, numCollections)
	readCollections.BlockToCollectionIndices = make(map[blockdigest.DigestWithSize][]int)

	for _, coll := range readCollections.UUIDToCollection {
		collectionIndex := len(readCollections.CollectionIndexToUUID)
		readCollections.CollectionIndexToUUID =
			append(readCollections.CollectionIndexToUUID, coll.UUID)
		readCollections.CollectionUUIDToIndex[coll.UUID] = collectionIndex

		readCollections.OwnerToCollectionSize[coll.OwnerUUID] =
			readCollections.OwnerToCollectionSize[coll.OwnerUUID] + coll.TotalSize

		for block, size := range coll.BlockDigestToSize {
			locator := blockdigest.DigestWithSize{Digest: block, Size: uint32(size)}
			readCollections.BlockToCollectionIndices[locator] =
				append(readCollections.BlockToCollectionIndices[locator], collectionIndex)
			storedReplication := readCollections.BlockToDesiredReplication[locator]
			if coll.ReplicationLevel > storedReplication {
				readCollections.BlockToDesiredReplication[locator] = coll.ReplicationLevel
			}
		}
	}

	if arvLogger != nil {
		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			collectionInfo := logger.GetOrCreateMap(p, "collection_info")
			// Since maps are shallow copied, we run a risk of concurrent
			// updates here. By copying readCollections.OwnerToCollectionSize
			// into the log, we're assuming that it won't be updated.
			collectionInfo["owner_to_collection_size"] =
				readCollections.OwnerToCollectionSize
			collectionInfo["distinct_blocks_named"] =
				len(readCollections.BlockToDesiredReplication)
		})
	}
}
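// copyOwnerSizes is a minimal sketch (a hypothetical helper, not in the
// original source) of one way to remove the shallow-copy risk noted in the
// comment above: snapshot OwnerToCollectionSize before handing it to the
// logger, so later writers cannot race with the log serializer.
func copyOwnerSizes(src map[string]int) map[string]int {
	dst := make(map[string]int, len(src))
	for owner, size := range src {
		dst[owner] = size
	}
	return dst
}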
// WritePullLists writes each pull list to a file.
// The filename is based on the hostname.
//
// This is just a hack for prototyping, it is not expected to be used
// in production.
func WritePullLists(arvLogger *logger.Logger,
	pullLists map[string]PullList,
	dryRun bool) error {
	r := strings.NewReplacer(":", ".")

	for host, list := range pullLists {
		if arvLogger != nil {
			// We need a local variable because Update doesn't call our mutator
			// func until later, when our list variable might have been reused
			// by the next loop iteration.
			host := host
			listLen := len(list)
			arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
				pullListInfo := logger.GetOrCreateMap(p, "pull_list_len")
				pullListInfo[host] = listLen
			})
		}

		if dryRun {
			log.Printf("dry run, not sending pull list to service %s with %d blocks",
				host, len(list))
			continue
		}

		filename := fmt.Sprintf("pull_list.%s", r.Replace(RemoveProtocolPrefix(host)))
		pullListFile, err := os.Create(filename)
		if err != nil {
			return err
		}
		defer pullListFile.Close()

		enc := json.NewEncoder(pullListFile)
		err = enc.Encode(list)
		if err != nil {
			return err
		}
		log.Printf("Wrote pull list to %s.", filename)
	}

	return nil
}
// CreateIndexRequest to the keep server
func CreateIndexRequest(arvLogger *logger.Logger,
	keepServer ServerAddress,
	arv arvadosclient.ArvadosClient) (req *http.Request, err error) {
	url := fmt.Sprintf("http://%s:%d/index", keepServer.Host, keepServer.Port)
	log.Println("About to fetch keep server contents from " + url)

	if arvLogger != nil {
		now := time.Now()
		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			keepInfo := logger.GetOrCreateMap(p, "keep_info")
			serverInfo := keepInfo[keepServer.UUID].(map[string]interface{})
			serverInfo["index_request_sent_at"] = now
		})
	}

	req, err = http.NewRequest("GET", url, nil)
	if err != nil {
		return req, fmt.Errorf("Error building http request for %s: %v", url, err)
	}

	req.Header.Add("Authorization", "OAuth2 "+arv.ApiToken)
	return req, err
}
func CreateIndexRequest(arvLogger *logger.Logger,
	keepServer ServerAddress) (req *http.Request) {
	url := fmt.Sprintf("http://%s:%d/index", keepServer.Host, keepServer.Port)
	log.Println("About to fetch keep server contents from " + url)

	if arvLogger != nil {
		now := time.Now()
		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			keepInfo := logger.GetOrCreateMap(p, "keep_info")
			serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
			serverInfo["index_request_sent_at"] = now
		})
	}

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		loggerutil.FatalWithMessage(arvLogger,
			fmt.Sprintf("Error building http request for %s: %v", url, err))
	}

	req.Header.Add("Authorization",
		fmt.Sprintf("OAuth2 %s", GetDataManagerToken(arvLogger)))
	return
}
// ReadServerResponse reads and parses the index response from a keep server,
// recording timing and duplicate statistics in the logger.
func ReadServerResponse(arvLogger *logger.Logger,
	keepServer ServerAddress,
	resp *http.Response) (response ServerResponse) {

	if resp.StatusCode != 200 {
		loggerutil.FatalWithMessage(arvLogger,
			fmt.Sprintf("Received error code %d in response to request "+
				"for %s index: %s",
				resp.StatusCode, keepServer.String(), resp.Status))
	}

	if arvLogger != nil {
		now := time.Now()
		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			keepInfo := logger.GetOrCreateMap(p, "keep_info")
			serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
			serverInfo["index_response_received_at"] = now
		})
	}

	response.Address = keepServer
	response.Contents.BlockDigestToInfo = make(map[blockdigest.DigestWithSize]BlockInfo)
	reader := bufio.NewReader(resp.Body)
	numLines, numDuplicates, numSizeDisagreements := 0, 0, 0
	for {
		numLines++
		line, err := reader.ReadString('\n')
		if err == io.EOF {
			loggerutil.FatalWithMessage(arvLogger,
				fmt.Sprintf("Index from %s truncated at line %d",
					keepServer.String(), numLines))
		} else if err != nil {
			loggerutil.FatalWithMessage(arvLogger,
				fmt.Sprintf("Error reading index response from %s at line %d: %v",
					keepServer.String(), numLines, err))
		}
		if line == "\n" {
			if _, err := reader.Peek(1); err == nil {
				extra, _ := reader.ReadString('\n')
				loggerutil.FatalWithMessage(arvLogger,
					fmt.Sprintf("Index from %s had trailing data at line %d after EOF marker: %s",
						keepServer.String(), numLines+1, extra))
			} else if err != io.EOF {
				loggerutil.FatalWithMessage(arvLogger,
					fmt.Sprintf("Index from %s had read error after EOF marker at line %d: %v",
						keepServer.String(), numLines, err))
			}
			numLines--
			break
		}

		blockInfo, err := parseBlockInfoFromIndexLine(line)
		if err != nil {
			loggerutil.FatalWithMessage(arvLogger,
				fmt.Sprintf("Error parsing BlockInfo from index line "+
					"received from %s: %v",
					keepServer.String(), err))
		}

		if storedBlock, ok := response.Contents.BlockDigestToInfo[blockInfo.Digest]; ok {
			// This server returned multiple lines containing the same block digest.
			numDuplicates += 1
			// Keep the block that's newer.
			if storedBlock.Mtime < blockInfo.Mtime {
				response.Contents.BlockDigestToInfo[blockInfo.Digest] = blockInfo
			}
		} else {
			response.Contents.BlockDigestToInfo[blockInfo.Digest] = blockInfo
		}
	}

	log.Printf("%s index contained %d lines with %d duplicates with "+
		"%d size disagreements",
		keepServer.String(), numLines, numDuplicates, numSizeDisagreements)

	if arvLogger != nil {
		now := time.Now()
		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			keepInfo := logger.GetOrCreateMap(p, "keep_info")
			serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
			serverInfo["processing_finished_at"] = now
			serverInfo["lines_received"] = numLines
			serverInfo["duplicates_seen"] = numDuplicates
			serverInfo["size_disagreements_seen"] = numSizeDisagreements
		})
	}
	resp.Body.Close()
	return
}
func GetKeepServers(params GetKeepServersParams) (results ReadServers) {
	if &params.Client == nil {
		log.Fatalf("params.Client passed to GetKeepServers() should " +
			"contain a valid ArvadosClient, but instead it is nil.")
	}

	sdkParams := arvadosclient.Dict{
		"filters": [][]string{[]string{"service_type", "=", "disk"}},
	}
	if params.Limit > 0 {
		sdkParams["limit"] = params.Limit
	}

	var sdkResponse KeepServiceList
	err := params.Client.List("keep_services", sdkParams, &sdkResponse)
	if err != nil {
		loggerutil.FatalWithMessage(params.Logger,
			fmt.Sprintf("Error requesting keep disks from API server: %v", err))
	}

	if params.Logger != nil {
		params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			keepInfo := logger.GetOrCreateMap(p, "keep_info")
			keepInfo["num_keep_servers_available"] = sdkResponse.ItemsAvailable
			keepInfo["num_keep_servers_received"] = len(sdkResponse.KeepServers)
			keepInfo["keep_servers"] = sdkResponse.KeepServers
		})
	}

	log.Printf("Received keep services list: %+v", sdkResponse)

	if len(sdkResponse.KeepServers) < sdkResponse.ItemsAvailable {
		loggerutil.FatalWithMessage(params.Logger,
			fmt.Sprintf("Did not receive all available keep servers: %+v", sdkResponse))
	}

	results.KeepServerIndexToAddress = sdkResponse.KeepServers
	results.KeepServerAddressToIndex = make(map[ServerAddress]int)
	for i, address := range results.KeepServerIndexToAddress {
		results.KeepServerAddressToIndex[address] = i
	}

	log.Printf("Got Server Addresses: %v", results)

	// This is safe for concurrent use
	client := http.Client{}

	// Send off all the index requests concurrently
	responseChan := make(chan ServerResponse)
	for _, keepServer := range sdkResponse.KeepServers {
		// The above keepServer variable is reused for each iteration, so it
		// would be shared across all goroutines. This would result in us
		// querying one server n times instead of n different servers as we
		// intended. To avoid this we add it as an explicit parameter which
		// gets copied. This bug and solution is described in
		// https://golang.org/doc/effective_go.html#channels
		go func(keepServer ServerAddress) {
			responseChan <- GetServerContents(params.Logger, keepServer, client)
		}(keepServer)
	}

	results.ServerToContents = make(map[ServerAddress]ServerContents)
	results.BlockToServers = make(map[blockdigest.DigestWithSize][]BlockServerInfo)

	// Read all the responses
	for i := range sdkResponse.KeepServers {
		_ = i // Here to prevent go from complaining.
		response := <-responseChan
		log.Printf("Received channel response from %v containing %d files",
			response.Address, len(response.Contents.BlockDigestToInfo))
		results.ServerToContents[response.Address] = response.Contents
		serverIndex := results.KeepServerAddressToIndex[response.Address]
		for _, blockInfo := range response.Contents.BlockDigestToInfo {
			results.BlockToServers[blockInfo.Digest] = append(
				results.BlockToServers[blockInfo.Digest],
				BlockServerInfo{ServerIndex: serverIndex, Mtime: blockInfo.Mtime})
		}
	}
	return
}
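// queryAllServers is a self-contained sketch (illustrative only, not from the
// original source) of the loop-variable capture pattern described in the
// comment above: passing the loop variable as a goroutine parameter gives each
// goroutine its own copy, and reading one value per server from the channel
// acts as the barrier, exactly as GetKeepServers does.
func queryAllServers(servers []ServerAddress,
	query func(ServerAddress) ServerResponse) []ServerResponse {
	responseChan := make(chan ServerResponse)
	for _, keepServer := range servers {
		go func(keepServer ServerAddress) {
			// keepServer here is the goroutine's own copy, not the shared
			// loop variable.
			responseChan <- query(keepServer)
		}(keepServer)
	}

	responses := make([]ServerResponse, 0, len(servers))
	for range servers {
		responses = append(responses, <-responseChan)
	}
	return responses
}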
// GetCollections gets collections from api
func GetCollections(params GetCollectionsParams) (results ReadCollections) {
	if &params.Client == nil {
		log.Fatalf("params.Client passed to GetCollections() should " +
			"contain a valid ArvadosClient, but instead it is nil.")
	}

	fieldsWanted := []string{"manifest_text",
		"owner_uuid",
		"uuid",
		"redundancy",
		"modified_at"}

	sdkParams := arvadosclient.Dict{
		"select":  fieldsWanted,
		"order":   []string{"modified_at ASC"},
		"filters": [][]string{[]string{"modified_at", ">=", "1900-01-01T00:00:00Z"}}}

	if params.BatchSize > 0 {
		sdkParams["limit"] = params.BatchSize
	}

	var defaultReplicationLevel int
	{
		value, err := params.Client.Discovery("defaultCollectionReplication")
		if err != nil {
			loggerutil.FatalWithMessage(params.Logger,
				fmt.Sprintf("Error querying default collection replication: %v", err))
		}

		defaultReplicationLevel = int(value.(float64))
		if defaultReplicationLevel <= 0 {
			loggerutil.FatalWithMessage(params.Logger,
				fmt.Sprintf("Default collection replication returned by arvados SDK "+
					"should be a positive integer but instead it was %d.",
					defaultReplicationLevel))
		}
	}

	initialNumberOfCollectionsAvailable, err :=
		util.NumberItemsAvailable(params.Client, "collections")
	if err != nil {
		loggerutil.FatalWithMessage(params.Logger,
			fmt.Sprintf("Error querying collection count: %v", err))
	}
	// Include a 1% margin for collections added while we're reading so
	// that we don't have to grow the map in most cases.
	maxExpectedCollections := int(
		float64(initialNumberOfCollectionsAvailable) * 1.01)
	results.UUIDToCollection = make(map[string]Collection, maxExpectedCollections)

	if params.Logger != nil {
		params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			collectionInfo := logger.GetOrCreateMap(p, "collection_info")
			collectionInfo["num_collections_at_start"] = initialNumberOfCollectionsAvailable
			collectionInfo["batch_size"] = params.BatchSize
			collectionInfo["default_replication_level"] = defaultReplicationLevel
		})
	}

	// These values are just for getting the loop to run the first time,
	// afterwards they'll be set to real values.
	previousTotalCollections := -1
	totalCollections := 0
	for totalCollections > previousTotalCollections {
		// We're still finding new collections

		// Write the heap profile for examining memory usage
		WriteHeapProfile()

		// Get next batch of collections.
		var collections SdkCollectionList
		err := params.Client.List("collections", sdkParams, &collections)
		if err != nil {
			loggerutil.FatalWithMessage(params.Logger,
				fmt.Sprintf("Error querying collections: %v", err))
		}

		// Process collection and update our date filter.
		sdkParams["filters"].([][]string)[0][2] =
			ProcessCollections(params.Logger,
				collections.Items,
				defaultReplicationLevel,
				results.UUIDToCollection).Format(time.RFC3339)

		// update counts
		previousTotalCollections = totalCollections
		totalCollections = len(results.UUIDToCollection)

		log.Printf("%d collections read, %d new in last batch, "+
			"%s latest modified date, %.0f %d %d avg,max,total manifest size",
			totalCollections,
			totalCollections-previousTotalCollections,
			sdkParams["filters"].([][]string)[0][2],
			float32(totalManifestSize)/float32(totalCollections),
			maxManifestSize, totalManifestSize)

		if params.Logger != nil {
			params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
				collectionInfo := logger.GetOrCreateMap(p, "collection_info")
				collectionInfo["collections_read"] = totalCollections
				collectionInfo["latest_modified_date_seen"] = sdkParams["filters"].([][]string)[0][2]
				collectionInfo["total_manifest_size"] = totalManifestSize
				collectionInfo["max_manifest_size"] = maxManifestSize
			})
		}
	}

	// Write the heap profile for examining memory usage
	WriteHeapProfile()

	return
}
// singlerun performs one Data Manager pass: it reads collection and keep
// server data, summarizes replication, logs the results, and sends pull and
// trash lists to the keep servers.
func singlerun(arv arvadosclient.ArvadosClient) error {
	var err error
	if isAdmin, err := util.UserIsAdmin(arv); err != nil {
		return errors.New("Error verifying admin token: " + err.Error())
	} else if !isAdmin {
		return errors.New("Current user is not an admin. Datamanager requires a privileged token.")
	}

	if logEventTypePrefix != "" {
		arvLogger, err = logger.NewLogger(logger.LoggerParams{
			Client:          arv,
			EventTypePrefix: logEventTypePrefix,
			WriteInterval:   time.Second * time.Duration(logFrequencySeconds)})
	}

	loggerutil.LogRunInfo(arvLogger)
	if arvLogger != nil {
		arvLogger.AddWriteHook(loggerutil.LogMemoryAlloc)
	}

	var (
		dataFetcher     summary.DataFetcher
		readCollections collection.ReadCollections
		keepServerInfo  keep.ReadServers
	)

	if summary.ShouldReadData() {
		dataFetcher = summary.ReadData
	} else {
		dataFetcher = BuildDataFetcher(arv)
	}

	err = dataFetcher(arvLogger, &readCollections, &keepServerInfo)
	if err != nil {
		return err
	}

	err = summary.MaybeWriteData(arvLogger, readCollections, keepServerInfo)
	if err != nil {
		return err
	}

	buckets := summary.BucketReplication(readCollections, keepServerInfo)
	bucketCounts := buckets.Counts()

	replicationSummary := buckets.SummarizeBuckets(readCollections)
	replicationCounts := replicationSummary.ComputeCounts()

	log.Printf("Blocks In Collections: %d, "+
		"\nBlocks In Keep: %d.",
		len(readCollections.BlockToDesiredReplication),
		len(keepServerInfo.BlockToServers))
	log.Println(replicationCounts.PrettyPrint())

	log.Printf("Blocks Histogram:")
	for _, rlbss := range bucketCounts {
		log.Printf("%+v: %10d",
			rlbss.Levels,
			rlbss.Count)
	}

	kc, err := keepclient.MakeKeepClient(&arv)
	if err != nil {
		return fmt.Errorf("Error setting up keep client %v", err.Error())
	}

	// Log that we're finished. We force the recording, since go will
	// not wait for the write timer before exiting.
	if arvLogger != nil {
		defer arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
			summaryInfo := logger.GetOrCreateMap(p, "summary_info")
			summaryInfo["block_replication_counts"] = bucketCounts
			summaryInfo["replication_summary"] = replicationCounts
			p["summary_info"] = summaryInfo

			p["run_info"].(map[string]interface{})["finished_at"] = time.Now()
		})
	}

	pullServers := summary.ComputePullServers(kc,
		&keepServerInfo,
		readCollections.BlockToDesiredReplication,
		replicationSummary.UnderReplicatedBlocks)

	pullLists := summary.BuildPullLists(pullServers)

	trashLists, trashErr := summary.BuildTrashLists(kc,
		&keepServerInfo,
		replicationSummary.KeepBlocksNotInCollections)

	err = summary.WritePullLists(arvLogger, pullLists, dryRun)
	if err != nil {
		return err
	}

	if trashErr != nil {
		return trashErr
	}
	keep.SendTrashLists(arvLogger, kc, trashLists, dryRun)

	return nil
}
// GetCollections gets collections from api
func GetCollections(params GetCollectionsParams) (results ReadCollections, err error) {
	if &params.Client == nil {
		err = fmt.Errorf("params.Client passed to GetCollections() should " +
			"contain a valid ArvadosClient, but instead it is nil.")
		return
	}

	fieldsWanted := []string{"manifest_text",
		"owner_uuid",
		"uuid",
		"replication_desired",
		"modified_at"}

	sdkParams := arvadosclient.Dict{
		"select":  fieldsWanted,
		"order":   []string{"modified_at ASC", "uuid ASC"},
		"filters": [][]string{[]string{"modified_at", ">=", "1900-01-01T00:00:00Z"}},
		"offset":  0}

	if params.BatchSize > 0 {
		sdkParams["limit"] = params.BatchSize
	}

	var defaultReplicationLevel int
	{
		var value interface{}
		value, err = params.Client.Discovery("defaultCollectionReplication")
		if err != nil {
			return
		}

		defaultReplicationLevel = int(value.(float64))
		if defaultReplicationLevel <= 0 {
			err = fmt.Errorf("Default collection replication returned by arvados SDK "+
				"should be a positive integer but instead it was %d.",
				defaultReplicationLevel)
			return
		}
	}

	initialNumberOfCollectionsAvailable, err :=
		util.NumberItemsAvailable(params.Client, "collections")
	if err != nil {
		return
	}
	// Include a 1% margin for collections added while we're reading so
	// that we don't have to grow the map in most cases.
	maxExpectedCollections := int(
		float64(initialNumberOfCollectionsAvailable) * 1.01)
	results.UUIDToCollection = make(map[string]Collection, maxExpectedCollections)

	if params.Logger != nil {
		params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			collectionInfo := logger.GetOrCreateMap(p, "collection_info")
			collectionInfo["num_collections_at_start"] = initialNumberOfCollectionsAvailable
			collectionInfo["batch_size"] = params.BatchSize
			collectionInfo["default_replication_level"] = defaultReplicationLevel
		})
	}

	// These values are just for getting the loop to run the first time,
	// afterwards they'll be set to real values.
	remainingCollections := 1
	var totalCollections int
	var previousTotalCollections int
	for remainingCollections > 0 {
		// We're still finding new collections

		// Write the heap profile for examining memory usage
		err = WriteHeapProfile()
		if err != nil {
			return
		}

		// Get next batch of collections.
		var collections SdkCollectionList
		err = params.Client.List("collections", sdkParams, &collections)
		if err != nil {
			return
		}
		batchCollections := len(collections.Items)

		// We must always have at least one collection in the batch
		if batchCollections < 1 {
			err = fmt.Errorf("API query returned no collections for %+v", sdkParams)
			return
		}

		// Update count of remaining collections
		remainingCollections = collections.ItemsAvailable - sdkParams["offset"].(int) - batchCollections

		// Process collection and update our date filter.
		latestModificationDate, maxManifestSize, totalManifestSize, err := ProcessCollections(params.Logger,
			collections.Items,
			defaultReplicationLevel,
			results.UUIDToCollection)
		if err != nil {
			return results, err
		}
		if sdkParams["filters"].([][]string)[0][2] != latestModificationDate.Format(time.RFC3339) {
			sdkParams["filters"].([][]string)[0][2] = latestModificationDate.Format(time.RFC3339)
			sdkParams["offset"] = 0
		} else {
			sdkParams["offset"] = sdkParams["offset"].(int) + batchCollections
		}

		// update counts
		previousTotalCollections = totalCollections
		totalCollections = len(results.UUIDToCollection)

		log.Printf("%d collections read, %d (%d new) in last batch, "+
			"%d remaining, "+
			"%s latest modified date, %.0f %d %d avg,max,total manifest size",
			totalCollections,
			batchCollections,
			totalCollections-previousTotalCollections,
			remainingCollections,
			sdkParams["filters"].([][]string)[0][2],
			float32(totalManifestSize)/float32(totalCollections),
			maxManifestSize, totalManifestSize)

		if params.Logger != nil {
			params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
				collectionInfo := logger.GetOrCreateMap(p, "collection_info")
				collectionInfo["collections_read"] = totalCollections
				collectionInfo["latest_modified_date_seen"] = sdkParams["filters"].([][]string)[0][2]
				collectionInfo["total_manifest_size"] = totalManifestSize
				collectionInfo["max_manifest_size"] = maxManifestSize
			})
		}
	}

	// Make one final API request to verify that we have processed all
	// collections available up to the latest modification date
	var collections SdkCollectionList
	sdkParams["filters"].([][]string)[0][1] = "<="
	sdkParams["limit"] = 0
	err = params.Client.List("collections", sdkParams, &collections)
	if err != nil {
		return
	}
	finalNumberOfCollectionsAvailable, err :=
		util.NumberItemsAvailable(params.Client, "collections")
	if err != nil {
		return
	}
	if totalCollections < finalNumberOfCollectionsAvailable {
		err = fmt.Errorf("API server indicates a total of %d collections "+
			"available up to %v, but we only retrieved %d. "+
			"Refusing to continue as this could indicate an "+
			"otherwise undetected failure.",
			finalNumberOfCollectionsAvailable,
			sdkParams["filters"].([][]string)[0][2],
			totalCollections)
		return
	}

	// Write the heap profile for examining memory usage
	err = WriteHeapProfile()

	return
}
// SendTrashLists to trash queue
func SendTrashLists(arvLogger *logger.Logger, kc *keepclient.KeepClient, spl map[string]TrashList, dryRun bool) (errs []error) {
	count := 0
	barrier := make(chan error)

	client := kc.Client

	for url, v := range spl {
		if arvLogger != nil {
			// We need a local variable because Update doesn't call our mutator
			// func until later, when our list variable might have been reused
			// by the next loop iteration.
			url := url
			trashLen := len(v)
			arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
				trashListInfo := logger.GetOrCreateMap(p, "trash_list_len")
				trashListInfo[url] = trashLen
			})
		}

		if dryRun {
			log.Printf("dry run, not sending trash list to service %s with %d blocks",
				url, len(v))
			continue
		}

		count++
		log.Printf("Sending trash list to %v", url)

		go (func(url string, v TrashList) {
			pipeReader, pipeWriter := io.Pipe()
			go (func() {
				enc := json.NewEncoder(pipeWriter)
				enc.Encode(v)
				pipeWriter.Close()
			})()

			req, err := http.NewRequest("PUT", fmt.Sprintf("%s/trash", url), pipeReader)
			if err != nil {
				log.Printf("Error creating trash list request for %v error: %v", url, err.Error())
				barrier <- err
				return
			}

			req.Header.Add("Authorization", "OAuth2 "+kc.Arvados.ApiToken)

			// Make the request
			var resp *http.Response
			if resp, err = client.Do(req); err != nil {
				log.Printf("Error sending trash list to %v error: %v", url, err.Error())
				barrier <- err
				return
			}

			log.Printf("Sent trash list to %v: response was HTTP %v", url, resp.Status)

			io.Copy(ioutil.Discard, resp.Body)
			resp.Body.Close()

			if resp.StatusCode != 200 {
				barrier <- fmt.Errorf("Got HTTP code %v", resp.StatusCode)
			} else {
				barrier <- nil
			}
		})(url, v)
	}

	for i := 0; i < count; i++ {
		b := <-barrier
		if b != nil {
			errs = append(errs, b)
		}
	}

	return errs
}
// GetKeepServers from api server
func GetKeepServers(params GetKeepServersParams) (results ReadServers, err error) {
	sdkParams := arvadosclient.Dict{
		"filters": [][]string{[]string{"service_type", "!=", "proxy"}},
	}
	if params.Limit > 0 {
		sdkParams["limit"] = params.Limit
	}

	var sdkResponse ServiceList
	err = params.Client.List("keep_services", sdkParams, &sdkResponse)
	if err != nil {
		return
	}

	var keepServers []ServerAddress
	for _, server := range sdkResponse.KeepServers {
		if server.ServiceType == serviceType {
			keepServers = append(keepServers, server)
		} else {
			log.Printf("Skipping keep_service %q because its service_type %q does not match -service-type=%q",
				server, server.ServiceType, serviceType)
		}
	}

	if len(keepServers) == 0 {
		return results, fmt.Errorf("Found no keepservices with the service type %v", serviceType)
	}

	if params.Logger != nil {
		params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			keepInfo := logger.GetOrCreateMap(p, "keep_info")
			keepInfo["num_keep_servers_available"] = sdkResponse.ItemsAvailable
			keepInfo["num_keep_servers_received"] = len(sdkResponse.KeepServers)
			keepInfo["keep_servers"] = sdkResponse.KeepServers
			keepInfo["indexable_keep_servers"] = keepServers
		})
	}

	log.Printf("Received keep services list: %+v", sdkResponse)

	if len(sdkResponse.KeepServers) < sdkResponse.ItemsAvailable {
		return results, fmt.Errorf("Did not receive all available keep servers: %+v", sdkResponse)
	}

	results.KeepServerIndexToAddress = keepServers
	results.KeepServerAddressToIndex = make(map[ServerAddress]int)
	for i, address := range results.KeepServerIndexToAddress {
		results.KeepServerAddressToIndex[address] = i
	}

	log.Printf("Got Server Addresses: %v", results)

	// Send off all the index requests concurrently
	responseChan := make(chan ServerResponse)
	for _, keepServer := range results.KeepServerIndexToAddress {
		// The above keepServer variable is reused for each iteration, so it
		// would be shared across all goroutines. This would result in us
		// querying one server n times instead of n different servers as we
		// intended. To avoid this we add it as an explicit parameter which
		// gets copied. This bug and solution is described in
		// https://golang.org/doc/effective_go.html#channels
		go func(keepServer ServerAddress) {
			responseChan <- GetServerContents(params.Logger, keepServer, params.Client)
		}(keepServer)
	}

	results.ServerToContents = make(map[ServerAddress]ServerContents)
	results.BlockToServers = make(map[blockdigest.DigestWithSize][]BlockServerInfo)

	// Read all the responses
	for i := range results.KeepServerIndexToAddress {
		_ = i // Here to prevent go from complaining.
		response := <-responseChan

		// Check if there were any errors during GetServerContents
		if response.Err != nil {
			return results, response.Err
		}

		log.Printf("Received channel response from %v containing %d files",
			response.Address, len(response.Contents.BlockDigestToInfo))
		results.ServerToContents[response.Address] = response.Contents
		serverIndex := results.KeepServerAddressToIndex[response.Address]
		for _, blockInfo := range response.Contents.BlockDigestToInfo {
			results.BlockToServers[blockInfo.Digest] = append(
				results.BlockToServers[blockInfo.Digest],
				BlockServerInfo{ServerIndex: serverIndex, Mtime: blockInfo.Mtime})
		}
	}
	return
}