func main() { flag.Parse() if minutesBetweenRuns == 0 { arv, err := arvadosclient.MakeArvadosClient() if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error making arvados client: %v", err)) } err = singlerun(arv) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("singlerun: %v", err)) } } else { waitTime := time.Minute * time.Duration(minutesBetweenRuns) for { log.Println("Beginning Run") arv, err := arvadosclient.MakeArvadosClient() if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error making arvados client: %v", err)) } err = singlerun(arv) if err != nil { log.Printf("singlerun: %v", err) } log.Printf("Sleeping for %d minutes", minutesBetweenRuns) time.Sleep(waitTime) } } }
// ReadData reads data that we've written to a file. // // This is useful for development, so that we don't need to read all // our data from the network every time we tweak something. // // This should not be used outside of development, since you'll be // working with stale data. func ReadData(arvLogger *logger.Logger, readCollections *collection.ReadCollections, keepServerInfo *keep.ReadServers) { if readDataFrom == "" { loggerutil.FatalWithMessage(arvLogger, "ReadData() called with empty filename.") } else { summaryFile, err := os.Open(readDataFrom) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Failed to open %s: %v", readDataFrom, err)) } defer summaryFile.Close() dec := gob.NewDecoder(summaryFile) data := serializedData{} err = dec.Decode(&data) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Failed to read summary data: %v", err)) } // re-summarize data, so that we can update our summarizing // functions without needing to do all our network i/o data.ReadCollections.Summarize(arvLogger) data.KeepServerInfo.Summarize(arvLogger) *readCollections = data.ReadCollections *keepServerInfo = data.KeepServerInfo log.Printf("Read summary data from: %s", readDataFrom) } }
// MaybeWriteData writes data we've read to a file. // // This is useful for development, so that we don't need to read all // our data from the network every time we tweak something. // // This should not be used outside of development, since you'll be // working with stale data. func MaybeWriteData(arvLogger *logger.Logger, readCollections collection.ReadCollections, keepServerInfo keep.ReadServers) bool { if writeDataTo == "" { return false } summaryFile, err := os.Create(writeDataTo) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Failed to open %s: %v", writeDataTo, err)) } defer summaryFile.Close() enc := gob.NewEncoder(summaryFile) data := serializedData{ ReadCollections: readCollections, KeepServerInfo: keepServerInfo} err = enc.Encode(data) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Failed to write summary data: %v", err)) } log.Printf("Wrote summary data to: %s", writeDataTo) return true }
func GetServerStatus(arvLogger *logger.Logger, keepServer ServerAddress, client http.Client) { url := fmt.Sprintf("http://%s:%d/status.json", keepServer.Host, keepServer.Port) if arvLogger != nil { now := time.Now() arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) { keepInfo := logger.GetOrCreateMap(p, "keep_info") serverInfo := make(map[string]interface{}) serverInfo["status_request_sent_at"] = now serverInfo["host"] = keepServer.Host serverInfo["port"] = keepServer.Port keepInfo[keepServer.Uuid] = serverInfo }) } resp, err := client.Get(url) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error getting keep status from %s: %v", url, err)) } else if resp.StatusCode != 200 { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Received error code %d in response to request "+ "for %s status: %s", resp.StatusCode, url, resp.Status)) } var keepStatus map[string]interface{} decoder := json.NewDecoder(resp.Body) decoder.UseNumber() err = decoder.Decode(&keepStatus) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error decoding keep status from %s: %v", url, err)) } if arvLogger != nil { now := time.Now() arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) { keepInfo := logger.GetOrCreateMap(p, "keep_info") serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{}) serverInfo["status_response_processed_at"] = now serverInfo["status"] = keepStatus }) } }
func GetDataManagerToken(arvLogger *logger.Logger) string { readDataManagerToken := func() { if dataManagerTokenFile == "" { flag.Usage() loggerutil.FatalWithMessage(arvLogger, "Data Manager Token needed, but data manager token file not specified.") } else { rawRead, err := ioutil.ReadFile(dataManagerTokenFile) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Unexpected error reading token file %s: %v", dataManagerTokenFile, err)) } dataManagerToken = strings.TrimSpace(string(rawRead)) } } dataManagerTokenFileReadOnce.Do(readDataManagerToken) return dataManagerToken }
// WritePullLists writes each pull list to a file. // The filename is based on the hostname. // // This is just a hack for prototyping, it is not expected to be used // in production. func WritePullLists(arvLogger *logger.Logger, pullLists map[string]PullList) { r := strings.NewReplacer(":", ".") for host, list := range pullLists { filename := fmt.Sprintf("pull_list.%s", r.Replace(RemoveProtocolPrefix(host))) pullListFile, err := os.Create(filename) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Failed to open %s: %v", filename, err)) } defer pullListFile.Close() enc := json.NewEncoder(pullListFile) err = enc.Encode(list) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Failed to write pull list to %s: %v", filename, err)) } log.Printf("Wrote pull list to %s.", filename) } }
func GetServerContents(arvLogger *logger.Logger, keepServer ServerAddress, client http.Client) (response ServerResponse) { GetServerStatus(arvLogger, keepServer, client) req := CreateIndexRequest(arvLogger, keepServer) resp, err := client.Do(req) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error fetching %s: %v. Response was %+v", req.URL.String(), err, resp)) } return ReadServerResponse(arvLogger, keepServer, resp) }
func CreateIndexRequest(arvLogger *logger.Logger, keepServer ServerAddress) (req *http.Request) { url := fmt.Sprintf("http://%s:%d/index", keepServer.Host, keepServer.Port) log.Println("About to fetch keep server contents from " + url) if arvLogger != nil { now := time.Now() arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) { keepInfo := logger.GetOrCreateMap(p, "keep_info") serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{}) serverInfo["index_request_sent_at"] = now }) } req, err := http.NewRequest("GET", url, nil) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error building http request for %s: %v", url, err)) } req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", GetDataManagerToken(arvLogger))) return }
// ReadServerResponse parses a keep server's /index response into a
// ServerResponse, keeping the newest entry when a block digest appears
// more than once, and records parse statistics in arvLogger.
//
// The index format this expects: one block-info line per entry,
// terminated by a single bare "\n" line acting as an EOF marker. A
// truncated index, a read error, data after the EOF marker, or an
// unparseable line is fatal.
//
// NOTE(review): numSizeDisagreements is declared and reported but never
// incremented anywhere in this function.
func ReadServerResponse(arvLogger *logger.Logger,
	keepServer ServerAddress,
	resp *http.Response) (response ServerResponse) {

	if resp.StatusCode != 200 {
		loggerutil.FatalWithMessage(arvLogger,
			fmt.Sprintf("Received error code %d in response to request "+
				"for %s index: %s",
				resp.StatusCode, keepServer.String(), resp.Status))
	}

	if arvLogger != nil {
		now := time.Now()
		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			keepInfo := logger.GetOrCreateMap(p, "keep_info")
			serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
			serverInfo["index_response_received_at"] = now
		})
	}

	response.Address = keepServer
	response.Contents.BlockDigestToInfo =
		make(map[blockdigest.DigestWithSize]BlockInfo)
	reader := bufio.NewReader(resp.Body)
	numLines, numDuplicates, numSizeDisagreements := 0, 0, 0
	for {
		numLines++
		line, err := reader.ReadString('\n')
		// EOF before the bare-"\n" marker means the index was cut short.
		if err == io.EOF {
			loggerutil.FatalWithMessage(arvLogger,
				fmt.Sprintf("Index from %s truncated at line %d",
					keepServer.String(), numLines))
		} else if err != nil {
			loggerutil.FatalWithMessage(arvLogger,
				fmt.Sprintf("Error reading index response from %s at line %d: %v",
					keepServer.String(), numLines, err))
		}
		if line == "\n" {
			// Reached the EOF marker; anything still readable after it
			// (or any error other than a clean EOF) is fatal.
			if _, err := reader.Peek(1); err == nil {
				extra, _ := reader.ReadString('\n')
				loggerutil.FatalWithMessage(arvLogger,
					fmt.Sprintf("Index from %s had trailing data at line %d after EOF marker: %s",
						keepServer.String(), numLines+1, extra))
			} else if err != io.EOF {
				loggerutil.FatalWithMessage(arvLogger,
					fmt.Sprintf("Index from %s had read error after EOF marker at line %d: %v",
						keepServer.String(), numLines, err))
			}
			// The marker line itself doesn't count toward lines received.
			numLines--
			break
		}
		blockInfo, err := parseBlockInfoFromIndexLine(line)
		if err != nil {
			loggerutil.FatalWithMessage(arvLogger,
				fmt.Sprintf("Error parsing BlockInfo from index line "+
					"received from %s: %v",
					keepServer.String(),
					err))
		}

		if storedBlock, ok := response.Contents.BlockDigestToInfo[blockInfo.Digest]; ok {
			// This server returned multiple lines containing the same block
			// digest.
			numDuplicates += 1
			// Keep the block that's newer.
			if storedBlock.Mtime < blockInfo.Mtime {
				response.Contents.BlockDigestToInfo[blockInfo.Digest] = blockInfo
			}
		} else {
			response.Contents.BlockDigestToInfo[blockInfo.Digest] = blockInfo
		}
	}

	log.Printf("%s index contained %d lines with %d duplicates with "+
		"%d size disagreements",
		keepServer.String(),
		numLines,
		numDuplicates,
		numSizeDisagreements)

	if arvLogger != nil {
		now := time.Now()
		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			keepInfo := logger.GetOrCreateMap(p, "keep_info")
			serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})

			serverInfo["processing_finished_at"] = now
			serverInfo["lines_received"] = numLines
			serverInfo["duplicates_seen"] = numDuplicates
			serverInfo["size_disagreements_seen"] = numSizeDisagreements
		})
	}
	resp.Body.Close()
	return
}
// GetKeepServers lists the disk-type keep services known to the API
// server, then fetches every server's block index concurrently and
// aggregates the results into a ReadServers (server->contents and
// block->servers maps).
//
// It is fatal if the API request fails or returns fewer servers than it
// reports available.
func GetKeepServers(params GetKeepServersParams) (results ReadServers) {
	// NOTE(review): &params.Client is the address of a struct field and
	// by the Go spec can never be nil, so this guard never fires —
	// confirm what validity check was actually intended here.
	if &params.Client == nil {
		log.Fatalf("params.Client passed to GetKeepServers() should " +
			"contain a valid ArvadosClient, but instead it is nil.")
	}

	// Only disk-backed keep services are indexed.
	sdkParams := arvadosclient.Dict{
		"filters": [][]string{[]string{"service_type", "=", "disk"}},
	}
	if params.Limit > 0 {
		sdkParams["limit"] = params.Limit
	}

	var sdkResponse KeepServiceList
	err := params.Client.List("keep_services", sdkParams, &sdkResponse)

	if err != nil {
		loggerutil.FatalWithMessage(params.Logger,
			fmt.Sprintf("Error requesting keep disks from API server: %v", err))
	}

	if params.Logger != nil {
		params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			keepInfo := logger.GetOrCreateMap(p, "keep_info")
			keepInfo["num_keep_servers_available"] = sdkResponse.ItemsAvailable
			keepInfo["num_keep_servers_received"] = len(sdkResponse.KeepServers)
			keepInfo["keep_servers"] = sdkResponse.KeepServers
		})
	}

	log.Printf("Received keep services list: %+v", sdkResponse)

	// Replication accounting needs the complete server set, so a
	// partial listing is fatal rather than silently tolerated.
	if len(sdkResponse.KeepServers) < sdkResponse.ItemsAvailable {
		loggerutil.FatalWithMessage(params.Logger,
			fmt.Sprintf("Did not receive all available keep servers: %+v", sdkResponse))
	}

	results.KeepServerIndexToAddress = sdkResponse.KeepServers
	results.KeepServerAddressToIndex = make(map[ServerAddress]int)
	for i, address := range results.KeepServerIndexToAddress {
		results.KeepServerAddressToIndex[address] = i
	}

	log.Printf("Got Server Addresses: %v", results)

	// This is safe for concurrent use
	client := http.Client{}

	// Send off all the index requests concurrently
	responseChan := make(chan ServerResponse)
	for _, keepServer := range sdkResponse.KeepServers {
		// The above keepsServer variable is reused for each iteration, so
		// it would be shared across all goroutines. This would result in
		// us querying one server n times instead of n different servers
		// as we intended. To avoid this we add it as an explicit
		// parameter which gets copied. This bug and solution is described
		// in https://golang.org/doc/effective_go.html#channels
		go func(keepServer ServerAddress) {
			responseChan <- GetServerContents(params.Logger,
				keepServer,
				client)
		}(keepServer)
	}

	results.ServerToContents = make(map[ServerAddress]ServerContents)
	results.BlockToServers = make(map[blockdigest.DigestWithSize][]BlockServerInfo)

	// Read all the responses (one per server, in completion order).
	for i := range sdkResponse.KeepServers {
		_ = i // Here to prevent go from complaining.
		response := <-responseChan

		log.Printf("Received channel response from %v containing %d files",
			response.Address,
			len(response.Contents.BlockDigestToInfo))
		results.ServerToContents[response.Address] = response.Contents
		serverIndex := results.KeepServerAddressToIndex[response.Address]
		for _, blockInfo := range response.Contents.BlockDigestToInfo {
			results.BlockToServers[blockInfo.Digest] = append(
				results.BlockToServers[blockInfo.Digest],
				BlockServerInfo{ServerIndex: serverIndex,
					Mtime: blockInfo.Mtime})
		}
	}
	return
}
// ProcessCollections converts one batch of SDK collection records into
// Collection entries in UUIDToCollection, and returns the latest
// modification date seen in the batch (the caller uses it to advance
// its "modified_at >=" paging filter).
//
// Side effects: updates the package-level totalManifestSize and
// maxManifestSize counters. A zero modification date or conflicting
// sizes for the same block within a collection are fatal.
func ProcessCollections(arvLogger *logger.Logger,
	receivedCollections []SdkCollectionInfo,
	defaultReplicationLevel int,
	UUIDToCollection map[string]Collection) (latestModificationDate time.Time) {
	for _, sdkCollection := range receivedCollections {
		collection := Collection{UUID: StrCopy(sdkCollection.UUID),
			OwnerUUID:         StrCopy(sdkCollection.OwnerUUID),
			ReplicationLevel:  sdkCollection.Redundancy,
			BlockDigestToSize: make(map[blockdigest.BlockDigest]int)}

		// A zero ModifiedAt would stall the caller's date-based paging,
		// so treat it as a parse/API failure.
		if sdkCollection.ModifiedAt.IsZero() {
			loggerutil.FatalWithMessage(arvLogger,
				fmt.Sprintf(
					"Arvados SDK collection returned with unexpected zero "+
						"modification date. This probably means that either we failed to "+
						"parse the modification date or the API server has changed how "+
						"it returns modification dates: %+v",
					collection))
		}

		if sdkCollection.ModifiedAt.After(latestModificationDate) {
			latestModificationDate = sdkCollection.ModifiedAt
		}

		// Redundancy 0 means the field was absent; fall back to the
		// cluster-wide default supplied by the caller.
		if collection.ReplicationLevel == 0 {
			collection.ReplicationLevel = defaultReplicationLevel
		}

		// NOTE(review): this local shadows the imported manifest package
		// for the remainder of the loop body.
		manifest := manifest.Manifest{sdkCollection.ManifestText}
		manifestSize := uint64(len(sdkCollection.ManifestText))

		// Only count each collection's manifest once toward the total.
		if _, alreadySeen := UUIDToCollection[collection.UUID]; !alreadySeen {
			totalManifestSize += manifestSize
		}
		if manifestSize > maxManifestSize {
			maxManifestSize = manifestSize
		}

		blockChannel := manifest.BlockIterWithDuplicates()
		for block := range blockChannel {
			// The same digest appearing with two different sizes inside
			// one collection is irreconcilable.
			if storedSize, stored := collection.BlockDigestToSize[block.Digest]; stored && storedSize != block.Size {
				message := fmt.Sprintf(
					"Collection %s contains multiple sizes (%d and %d) for block %s",
					collection.UUID,
					storedSize,
					block.Size,
					block.Digest)
				loggerutil.FatalWithMessage(arvLogger, message)
			}
			collection.BlockDigestToSize[block.Digest] = block.Size
		}
		collection.TotalSize = 0
		for _, size := range collection.BlockDigestToSize {
			collection.TotalSize += size
		}
		UUIDToCollection[collection.UUID] = collection

		// Clear out all the manifest strings that we don't need anymore.
		// These hopefully form the bulk of our memory usage.
		manifest.Text = ""
		// NOTE(review): sdkCollection is a per-iteration copy, so this
		// assignment does not clear the string held by the
		// receivedCollections slice element — verify whether that was
		// the intent.
		sdkCollection.ManifestText = ""
	}

	return
}
// GetCollections fetches all collections from the api server in
// batches ordered by modification date, advancing a "modified_at >="
// filter after each batch until a batch adds no new collections, and
// returns them in a ReadCollections keyed by UUID.
//
// It is fatal if the default replication level cannot be discovered,
// the collection count cannot be read, or any list request fails.
func GetCollections(params GetCollectionsParams) (results ReadCollections) {
	// NOTE(review): &params.Client is the address of a struct field and
	// by the Go spec can never be nil, so this guard never fires —
	// confirm what validity check was actually intended here.
	if &params.Client == nil {
		log.Fatalf("params.Client passed to GetCollections() should " +
			"contain a valid ArvadosClient, but instead it is nil.")
	}

	// Restrict the response to the fields we actually use.
	fieldsWanted := []string{"manifest_text",
		"owner_uuid",
		"uuid",
		"redundancy",
		"modified_at"}

	// The date filter starts far in the past and is advanced after each
	// batch to the latest modification date processed so far.
	sdkParams := arvadosclient.Dict{
		"select":  fieldsWanted,
		"order":   []string{"modified_at ASC"},
		"filters": [][]string{[]string{"modified_at", ">=", "1900-01-01T00:00:00Z"}}}

	if params.BatchSize > 0 {
		sdkParams["limit"] = params.BatchSize
	}

	var defaultReplicationLevel int
	{
		value, err := params.Client.Discovery("defaultCollectionReplication")
		if err != nil {
			loggerutil.FatalWithMessage(params.Logger,
				fmt.Sprintf("Error querying default collection replication: %v", err))
		}

		defaultReplicationLevel = int(value.(float64))
		if defaultReplicationLevel <= 0 {
			loggerutil.FatalWithMessage(params.Logger,
				fmt.Sprintf("Default collection replication returned by arvados SDK "+
					"should be a positive integer but instead it was %d.",
					defaultReplicationLevel))
		}
	}

	initialNumberOfCollectionsAvailable, err :=
		util.NumberItemsAvailable(params.Client, "collections")
	if err != nil {
		loggerutil.FatalWithMessage(params.Logger,
			fmt.Sprintf("Error querying collection count: %v", err))
	}
	// Include a 1% margin for collections added while we're reading so
	// that we don't have to grow the map in most cases.
	maxExpectedCollections := int(
		float64(initialNumberOfCollectionsAvailable) * 1.01)
	results.UUIDToCollection = make(map[string]Collection, maxExpectedCollections)

	if params.Logger != nil {
		params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
			collectionInfo := logger.GetOrCreateMap(p, "collection_info")
			collectionInfo["num_collections_at_start"] = initialNumberOfCollectionsAvailable
			collectionInfo["batch_size"] = params.BatchSize
			collectionInfo["default_replication_level"] = defaultReplicationLevel
		})
	}

	// These values are just for getting the loop to run the first time,
	// afterwards they'll be set to real values.
	previousTotalCollections := -1
	totalCollections := 0
	// Keep paging while each batch adds at least one new collection.
	for totalCollections > previousTotalCollections {
		// We're still finding new collections

		// Write the heap profile for examining memory usage
		WriteHeapProfile()

		// Get next batch of collections.
		var collections SdkCollectionList
		err := params.Client.List("collections", sdkParams, &collections)
		if err != nil {
			loggerutil.FatalWithMessage(params.Logger,
				fmt.Sprintf("Error querying collections: %v", err))
		}

		// Process collection and update our date filter.
		sdkParams["filters"].([][]string)[0][2] =
			ProcessCollections(params.Logger,
				collections.Items,
				defaultReplicationLevel,
				results.UUIDToCollection).Format(time.RFC3339)

		// update counts
		previousTotalCollections = totalCollections
		totalCollections = len(results.UUIDToCollection)

		log.Printf("%d collections read, %d new in last batch, "+
			"%s latest modified date, %.0f %d %d avg,max,total manifest size",
			totalCollections,
			totalCollections-previousTotalCollections,
			sdkParams["filters"].([][]string)[0][2],
			float32(totalManifestSize)/float32(totalCollections),
			maxManifestSize, totalManifestSize)

		if params.Logger != nil {
			params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
				collectionInfo := logger.GetOrCreateMap(p, "collection_info")
				collectionInfo["collections_read"] = totalCollections
				collectionInfo["latest_modified_date_seen"] = sdkParams["filters"].([][]string)[0][2]
				collectionInfo["total_manifest_size"] = totalManifestSize
				collectionInfo["max_manifest_size"] = maxManifestSize
			})
		}
	}

	// Write the heap profile for examining memory usage
	WriteHeapProfile()

	return
}
func singlerun() error { arv, err := arvadosclient.MakeArvadosClient() if err != nil { log.Fatalf("Error setting up arvados client %s", err.Error()) } if is_admin, err := util.UserIsAdmin(arv); err != nil { log.Fatalf("Error querying current arvados user %s", err.Error()) } else if !is_admin { log.Fatalf("Current user is not an admin. Datamanager can only be run by admins.") } var arvLogger *logger.Logger if logEventTypePrefix != "" { arvLogger = logger.NewLogger(logger.LoggerParams{ Client: arv, EventTypePrefix: logEventTypePrefix, WriteInterval: time.Second * time.Duration(logFrequencySeconds)}) } loggerutil.LogRunInfo(arvLogger) if arvLogger != nil { arvLogger.AddWriteHook(loggerutil.LogMemoryAlloc) } var ( dataFetcher summary.DataFetcher readCollections collection.ReadCollections keepServerInfo keep.ReadServers ) if summary.ShouldReadData() { dataFetcher = summary.ReadData } else { dataFetcher = BuildDataFetcher(arv) } dataFetcher(arvLogger, &readCollections, &keepServerInfo) summary.MaybeWriteData(arvLogger, readCollections, keepServerInfo) buckets := summary.BucketReplication(readCollections, keepServerInfo) bucketCounts := buckets.Counts() replicationSummary := buckets.SummarizeBuckets(readCollections) replicationCounts := replicationSummary.ComputeCounts() log.Printf("Blocks In Collections: %d, "+ "\nBlocks In Keep: %d.", len(readCollections.BlockToDesiredReplication), len(keepServerInfo.BlockToServers)) log.Println(replicationCounts.PrettyPrint()) log.Printf("Blocks Histogram:") for _, rlbss := range bucketCounts { log.Printf("%+v: %10d", rlbss.Levels, rlbss.Count) } kc, err := keepclient.MakeKeepClient(&arv) if err != nil { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error setting up keep client %s", err.Error())) } // Log that we're finished. We force the recording, since go will // not wait for the write timer before exiting. 
if arvLogger != nil { defer arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) { summaryInfo := logger.GetOrCreateMap(p, "summary_info") summaryInfo["block_replication_counts"] = bucketCounts summaryInfo["replication_summary"] = replicationCounts p["summary_info"] = summaryInfo p["run_info"].(map[string]interface{})["finished_at"] = time.Now() }) } pullServers := summary.ComputePullServers(kc, &keepServerInfo, readCollections.BlockToDesiredReplication, replicationSummary.UnderReplicatedBlocks) pullLists := summary.BuildPullLists(pullServers) trashLists, trashErr := summary.BuildTrashLists(kc, &keepServerInfo, replicationSummary.KeepBlocksNotInCollections) summary.WritePullLists(arvLogger, pullLists) if trashErr != nil { return err } else { keep.SendTrashLists(keep.GetDataManagerToken(arvLogger), kc, trashLists) } return nil }