// Initialize will initialize the munger
func (p *FlakeManager) Initialize(config *github.Config, features *features.Features) error {
	// TODO: don't get the mungers from the global list, they should be passed in...
	for _, m := range GetAllMungers() {
		if m.Name() == "issue-cacher" {
			p.finder = m.(*IssueCacher)
		}
		if m.Name() == "submit-queue" {
			p.sq = m.(*SubmitQueue)
		}
	}
	if p.finder == nil {
		return fmt.Errorf("issue-cacher not found")
	}
	if p.sq == nil {
		return fmt.Errorf("submit-queue not found")
	}
	p.config = config
	p.googleGCSBucketUtils = utils.NewUtils(utils.KubekinsBucket, utils.LogDir)

	var owner sync.OwnerMapper
	var err error
	if p.ownerPath != "" {
		owner, err = testowner.NewReloadingOwnerList(p.ownerPath)
		if err != nil {
			return err
		}
	}
	p.syncer = sync.NewIssueSyncer(config, p.finder, owner)
	return nil
}
func makeTestFlakeManager() *FlakeManager {
	bucketUtils := utils.NewUtils("bucket", "logs")
	return &FlakeManager{
		sq:                   nil,
		config:               nil,
		googleGCSBucketUtils: bucketUtils,
	}
}
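// A minimal sketch (not taken from the original repo) of how the helper above
// might be exercised in a test. It only touches fields visible in the snippet
// and assumes nothing about FlakeManager's other methods.
func TestMakeTestFlakeManager(t *testing.T) {
	fm := makeTestFlakeManager()
	if fm.googleGCSBucketUtils == nil {
		t.Fatalf("expected makeTestFlakeManager to set googleGCSBucketUtils")
	}
}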
// internalInitialize will initialize the munger for the given GCS bucket url.
func (sq *SubmitQueue) internalInitialize(config *github.Config, features *features.Features, GCSBucketUrl string) error {
	sq.Lock()
	defer sq.Unlock()

	sq.githubConfig = config
	if len(sq.JenkinsHost) == 0 {
		glog.Fatalf("--jenkins-host is required.")
	}

	if sq.FakeE2E {
		sq.e2e = &fake_e2e.FakeE2ETester{
			JobNames:           sq.JobNames,
			WeakStableJobNames: sq.WeakStableJobNames,
		}
	} else {
		sq.e2e = (&e2e.RealE2ETester{
			JobNames:             sq.JobNames,
			JenkinsHost:          sq.JenkinsHost,
			WeakStableJobNames:   sq.WeakStableJobNames,
			BuildStatus:          map[string]e2e.BuildInfo{},
			GoogleGCSBucketUtils: utils.NewUtils(GCSBucketUrl),
		}).Init()
	}

	if len(config.Address) > 0 {
		if len(config.WWWRoot) > 0 {
			http.Handle("/", gziphandler.GzipHandler(http.FileServer(http.Dir(config.WWWRoot))))
		}
		http.Handle("/prs", gziphandler.GzipHandler(http.HandlerFunc(sq.servePRs)))
		http.Handle("/history", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHistory)))
		http.Handle("/users", gziphandler.GzipHandler(http.HandlerFunc(sq.serveUsers)))
		http.Handle("/github-e2e-queue", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGithubE2EStatus)))
		http.Handle("/google-internal-ci", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGoogleInternalStatus)))
		http.Handle("/merge-info", gziphandler.GzipHandler(http.HandlerFunc(sq.serveMergeInfo)))
		http.Handle("/priority-info", gziphandler.GzipHandler(http.HandlerFunc(sq.servePriorityInfo)))
		http.Handle("/health", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHealth)))
		http.Handle("/sq-stats", gziphandler.GzipHandler(http.HandlerFunc(sq.serveSQStats)))
		http.Handle("/flakes", gziphandler.GzipHandler(http.HandlerFunc(sq.serveFlakes)))
		config.ServeDebugStats("/stats")
		go http.ListenAndServe(config.Address, nil)
	}

	if sq.githubE2EPollTime == 0 {
		sq.githubE2EPollTime = githubE2EPollTime
	}

	sq.health.StartTime = sq.clock.Now()
	sq.healthHistory = make([]healthRecord, 0)

	go sq.handleGithubE2EAndMerge()
	go sq.updateGoogleE2ELoop()
	return nil
}
// Initialize will initialize the munger
func (p *FlakeManager) Initialize(config *github.Config, features *features.Features) error {
	// TODO: don't get the mungers from the global list, they should be passed in...
	for _, m := range GetAllMungers() {
		if m.Name() == "issue-cacher" {
			p.finder = m.(*IssueCacher)
		}
		if m.Name() == "submit-queue" {
			p.sq = m.(*SubmitQueue)
		}
	}
	if p.finder == nil {
		return fmt.Errorf("issue-cacher not found")
	}
	if p.sq == nil {
		return fmt.Errorf("submit-queue not found")
	}
	p.oldestTime = time.Now().Add(-time.Hour * 24)
	p.alreadySyncedFlakes = map[cache.Flake]int{}
	p.config = config
	p.googleGCSBucketUtils = utils.NewUtils(URLTestStorageBucket)
	return nil
}
func TestCheckGCSWeakBuilds(t *testing.T) {
	latestBuildNumberFoo := 42
	latestBuildNumberBar := 44

	tests := []struct {
		paths             map[string][]byte
		expectStable      bool
		expectedLastBuild int
		expectedStatus    map[string]BuildInfo
	}{
		// Simple case - both succeed
		{
			paths: map[string][]byte{
				"/foo/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberFoo)),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
				"/bar/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberBar)),
				fmt.Sprintf("/bar/%v/finished.json", latestBuildNumberBar): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
			},
			expectStable: true,
			expectedStatus: map[string]BuildInfo{
				"foo": {Status: "Stable", ID: "42"},
				"bar": {Status: "Stable", ID: "44"},
			},
		},
		// If the last build was successful we shouldn't be looking any further.
		{
			paths: map[string][]byte{
				"/foo/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberFoo)),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo-1): marshalOrDie(utils.FinishedFile{
					Result:    "UNSTABLE",
					Timestamp: 1234,
				}, t),
				"/bar/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberBar)),
				fmt.Sprintf("/bar/%v/finished.json", latestBuildNumberBar): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
				fmt.Sprintf("/bar/%v/finished.json", latestBuildNumberBar-1): marshalOrDie(utils.FinishedFile{
					Result:    "FAILURE",
					Timestamp: 1234,
				}, t),
			},
			expectStable: true,
			expectedStatus: map[string]BuildInfo{
				"foo": {Status: "Stable", ID: "42"},
				"bar": {Status: "Stable", ID: "44"},
			},
		},
		// If the last build was unsuccessful but there are no failures in the JUnit file, we assume
		// it was an infrastructure failure. The build should succeed if at least one of the two
		// previous builds was fully successful.
		{
			paths: map[string][]byte{
				"/foo/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberFoo)),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo): marshalOrDie(utils.FinishedFile{
					Result:    "UNSTABLE",
					Timestamp: 1234,
				}, t),
				fmt.Sprintf("/foo/%v/artifacts/junit_01.xml", latestBuildNumberFoo): getJUnit(5, 0),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo-1): marshalOrDie(utils.FinishedFile{
					Result:    "UNSTABLE",
					Timestamp: 1233,
				}, t),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo-2): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1232,
				}, t),
				"/bar/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberBar)),
				fmt.Sprintf("/bar/%v/finished.json", latestBuildNumberBar): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
			},
			expectStable: true,
			expectedStatus: map[string]BuildInfo{
				"foo": {Status: "Stable", ID: "42"},
				"bar": {Status: "Stable", ID: "44"},
			},
		},
		// If the last build was unsuccessful but there are no failures in the JUnit file, we assume
		// it was an infrastructure failure. The build should fail if both of the previous builds
		// failed as well.
		{
			paths: map[string][]byte{
				"/foo/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberFoo)),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo): marshalOrDie(utils.FinishedFile{
					Result:    "UNSTABLE",
					Timestamp: 1234,
				}, t),
				fmt.Sprintf("/foo/%v/artifacts/junit_01.xml", latestBuildNumberFoo): getJUnit(5, 0),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo-1): marshalOrDie(utils.FinishedFile{
					Result:    "UNSTABLE",
					Timestamp: 1233,
				}, t),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo-2): marshalOrDie(utils.FinishedFile{
					Result:    "UNSTABLE",
					Timestamp: 1232,
				}, t),
				"/bar/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberBar)),
				fmt.Sprintf("/bar/%v/finished.json", latestBuildNumberBar): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
			},
			expectStable: false,
			expectedStatus: map[string]BuildInfo{
				"foo": {Status: "Not Stable", ID: "42"},
				"bar": {Status: "Stable", ID: "44"},
			},
		},
		// If the last build was unsuccessful and there's a failed test in a JUnit file we should fail.
		{
			paths: map[string][]byte{
				"/foo/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberFoo)),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo): marshalOrDie(utils.FinishedFile{
					Result:    "UNSTABLE",
					Timestamp: 1234,
				}, t),
				fmt.Sprintf("/foo/%v/artifacts/junit_01.xml", latestBuildNumberFoo): getJUnit(5, 0),
				fmt.Sprintf("/foo/%v/artifacts/junit_02.xml", latestBuildNumberFoo): getJUnit(5, 1),
				fmt.Sprintf("/foo/%v/artifacts/junit_03.xml", latestBuildNumberFoo): getJUnit(5, 0),
				"/bar/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberBar)),
				fmt.Sprintf("/bar/%v/finished.json", latestBuildNumberBar): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
			},
			expectStable: false,
			expectedStatus: map[string]BuildInfo{
				"foo": {Status: "Not Stable", ID: "42"},
				"bar": {Status: "Stable", ID: "44"},
			},
		},
		// Result shouldn't depend on order.
		{
			paths: map[string][]byte{
				"/foo/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberFoo)),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
				"/bar/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberBar)),
				fmt.Sprintf("/bar/%v/finished.json", latestBuildNumberBar): marshalOrDie(utils.FinishedFile{
					Result:    "FAILURE",
					Timestamp: 1234,
				}, t),
				fmt.Sprintf("/bar/%v/artifacts/junit_01.xml", latestBuildNumberBar): getJUnit(5, 0),
				fmt.Sprintf("/bar/%v/artifacts/junit_02.xml", latestBuildNumberBar): getJUnit(5, 1),
				fmt.Sprintf("/bar/%v/artifacts/junit_03.xml", latestBuildNumberBar): getJUnit(5, 1),
			},
			expectStable: false,
			expectedStatus: map[string]BuildInfo{
				"foo": {Status: "Stable", ID: "42"},
				"bar": {Status: "Not Stable", ID: "44"},
			},
		},
	}

	for _, test := range tests {
		server := httptest.NewServer(&testHandler{
			handler: func(res http.ResponseWriter, req *http.Request) {
				data, found := test.paths[req.URL.Path]
				if !found {
					res.WriteHeader(http.StatusNotFound)
					fmt.Fprintf(res, "Unknown path: %s", req.URL.Path)
					return
				}
				res.WriteHeader(http.StatusOK)
				res.Write(data)
			},
		})
		e2e := &RealE2ETester{
			JenkinsHost: server.URL,
			WeakStableJobNames: []string{
				"foo",
				"bar",
			},
			BuildStatus:          map[string]BuildInfo{},
			GoogleGCSBucketUtils: utils.NewUtils(server.URL),
		}
		stable := e2e.GCSWeakStable()
		if stable != test.expectStable {
			t.Errorf("expected: %v, saw: %v", test.expectStable, stable)
		}
		if !reflect.DeepEqual(test.expectedStatus, e2e.BuildStatus) {
			t.Errorf("expected: %v, saw: %v", test.expectedStatus, e2e.BuildStatus)
		}
	}
}
func TestCheckGCSBuilds(t *testing.T) {
	latestBuildNumberFoo := 42
	latestBuildNumberBar := 44

	tests := []struct {
		paths             map[string][]byte
		expectStable      bool
		expectedLastBuild int
		expectedStatus    map[string]BuildInfo
	}{
		{
			paths: map[string][]byte{
				"/foo/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberFoo)),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
				"/bar/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberBar)),
				fmt.Sprintf("/bar/%v/finished.json", latestBuildNumberBar): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
			},
			expectStable: true,
			expectedStatus: map[string]BuildInfo{
				"foo": {Status: "Stable", ID: "42"},
				"bar": {Status: "Stable", ID: "44"},
			},
		},
		{
			paths: map[string][]byte{
				"/foo/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberFoo)),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
				"/bar/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberBar)),
				fmt.Sprintf("/bar/%v/finished.json", latestBuildNumberBar): marshalOrDie(utils.FinishedFile{
					Result:    "UNSTABLE",
					Timestamp: 1234,
				}, t),
			},
			expectStable: false,
			expectedStatus: map[string]BuildInfo{
				"foo": {Status: "Stable", ID: "42"},
				"bar": {Status: "Not Stable", ID: "44"},
			},
		},
		{
			paths: map[string][]byte{
				"/foo/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberFoo)),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
				"/bar/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberBar)),
				fmt.Sprintf("/bar/%v/finished.json", latestBuildNumberBar): marshalOrDie(utils.FinishedFile{
					Result:    "FAILURE",
					Timestamp: 1234,
				}, t),
			},
			expectStable: false,
			expectedStatus: map[string]BuildInfo{
				"foo": {Status: "Stable", ID: "42"},
				"bar": {Status: "Not Stable", ID: "44"},
			},
		},
		{
			paths: map[string][]byte{
				"/foo/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberFoo)),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo): marshalOrDie(utils.FinishedFile{
					Result:    "FAILURE",
					Timestamp: 1234,
				}, t),
				"/bar/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberBar)),
				fmt.Sprintf("/bar/%v/finished.json", latestBuildNumberBar): marshalOrDie(utils.FinishedFile{
					Result:    "UNSTABLE",
					Timestamp: 1234,
				}, t),
			},
			expectStable: false,
			expectedStatus: map[string]BuildInfo{
				"foo": {Status: "Not Stable", ID: "42"},
				"bar": {Status: "Not Stable", ID: "44"},
			},
		},
		{
			paths: map[string][]byte{
				"/foo/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberFoo)),
				fmt.Sprintf("/foo/%v/finished.json", latestBuildNumberFoo): marshalOrDie(utils.FinishedFile{
					Result:    "UNSTABLE",
					Timestamp: 1234,
				}, t),
				"/bar/latest-build.txt": []byte(strconv.Itoa(latestBuildNumberBar)),
				fmt.Sprintf("/bar/%v/finished.json", latestBuildNumberBar): marshalOrDie(utils.FinishedFile{
					Result:    "SUCCESS",
					Timestamp: 1234,
				}, t),
			},
			expectStable: false,
			expectedStatus: map[string]BuildInfo{
				"foo": {Status: "Not Stable", ID: "42"},
				"bar": {Status: "Stable", ID: "44"},
			},
		},
	}

	for _, test := range tests {
		server := httptest.NewServer(&testHandler{
			handler: func(res http.ResponseWriter, req *http.Request) {
				data, found := test.paths[req.URL.Path]
				if !found {
					res.WriteHeader(http.StatusNotFound)
					fmt.Fprintf(res, "Unknown path: %s", req.URL.Path)
					return
				}
				res.WriteHeader(http.StatusOK)
				res.Write(data)
			},
		})
		e2e := &RealE2ETester{
			JenkinsHost: server.URL,
			JobNames: []string{
				"foo",
				"bar",
			},
			BuildStatus:          map[string]BuildInfo{},
			GoogleGCSBucketUtils: utils.NewUtils(server.URL),
		}
		stable := e2e.GCSBasedStable()
		if stable != test.expectStable {
			t.Errorf("expected: %v, saw: %v", test.expectStable, stable)
		}
		if !reflect.DeepEqual(test.expectedStatus, e2e.BuildStatus) {
			t.Errorf("expected: %v, saw: %v", test.expectedStatus, e2e.BuildStatus)
		}
	}
}
func main() {
	registerFlags(pflag.CommandLine)
	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
	pflag.Parse()
	if leftBuildNumber == 0 || rightBuildNumber == 0 {
		fmt.Fprintf(os.Stderr, "Need both left and right build numbers\n")
		return
	}

	googleGCSBucketUtils := utils.NewUtils(utils.KubekinsBucket, utils.LogDir)

	leftResp, err := googleGCSBucketUtils.GetFileFromJenkinsGoogleBucket(job, leftBuildNumber, buildFilePath)
	if err != nil {
		panic(err)
	}
	leftBody := leftResp.Body
	defer leftBody.Close()
	leftBodyScanner := bufio.NewScanner(leftBody)
	leftLogs, leftResources, leftMetrics := src.ProcessSingleTest(leftBodyScanner, leftBuildNumber)

	rightResp, err := googleGCSBucketUtils.GetFileFromJenkinsGoogleBucket(job, rightBuildNumber, buildFilePath)
	if err != nil {
		panic(err)
	}
	rightBody := rightResp.Body
	defer rightBody.Close()
	rightBodyScanner := bufio.NewScanner(rightBody)
	rightLogs, rightResources, rightMetrics := src.ProcessSingleTest(rightBodyScanner, rightBuildNumber)

	if len(leftLogs) != 0 && len(rightLogs) != 0 {
		for k := range leftLogs {
			if _, ok := rightLogs[k]; !ok {
				fmt.Printf("Right logs missing for test %v\n", k)
				continue
			}
			violatingLogs := src.CompareLogGenerationSpeed(leftLogs[k], rightLogs[k])
			if len(violatingLogs) == 0 {
				continue
			}
			if enableOutputColoring {
				src.ChangeColor(ct.Cyan, os.Stdout)
			}
			fmt.Printf("Differences for test %v", k)
			if enableOutputColoring {
				src.ResetColor(os.Stdout)
			}
			fmt.Print("\n")
			violatingLogs.PrintToStdout(leftBuildNumber, rightBuildNumber, enableOutputColoring)
		}
	}
	fmt.Println("")

	if len(leftResources) != 0 && len(rightResources) != 0 {
		for k := range leftResources {
			if _, ok := rightResources[k]; !ok {
				fmt.Printf("Right resources missing for test %v\n", k)
				continue
			}
			violatingResources := src.CompareResourceUsages(leftResources[k], rightResources[k])
			if len(violatingResources) == 0 {
				continue
			}
			if enableOutputColoring {
				src.ChangeColor(ct.Cyan, os.Stdout)
			}
			fmt.Printf("Differences for test %v", k)
			if enableOutputColoring {
				src.ResetColor(os.Stdout)
			}
			fmt.Print("\n")
			violatingResources.PrintToStdout(leftBuildNumber, rightBuildNumber, enableOutputColoring)
		}
	}
	fmt.Println("")

	if len(leftMetrics) != 0 && len(rightMetrics) != 0 {
		// Compare metrics for every test seen on the left side, reporting tests
		// that are missing from the right side.
		for k := range leftMetrics {
			if _, ok := rightMetrics[k]; !ok {
				fmt.Printf("Right metrics missing for test %v\n", k)
				continue
			}
			violatingMetrics := src.CompareMetrics(leftMetrics[k], rightMetrics[k])
			if len(violatingMetrics) == 0 {
				continue
			}
			if enableOutputColoring {
				src.ChangeColor(ct.Cyan, os.Stdout)
			}
			fmt.Printf("Differences for test %v", k)
			if enableOutputColoring {
				src.ResetColor(os.Stdout)
			}
			fmt.Print("\n")
			violatingMetrics.PrintToStdout(leftBuildNumber, rightBuildNumber, enableOutputColoring)
		}
	}
}
// internalInitialize will initialize the munger.
// If overrideUrl is specified, it will create test utils.
func (sq *SubmitQueue) internalInitialize(config *github.Config, features *features.Features, overrideUrl string) error {
	sq.Lock()
	defer sq.Unlock()

	// Clean up all of our flags which we wish --flag="" to mean []string{}
	sq.BlockingJobNames = cleanStringSlice(sq.BlockingJobNames)
	sq.NonBlockingJobNames = cleanStringSlice(sq.NonBlockingJobNames)
	sq.WeakStableJobNames = cleanStringSlice(sq.WeakStableJobNames)
	sq.RequiredStatusContexts = cleanStringSlice(sq.RequiredStatusContexts)
	sq.RequiredRetestContexts = cleanStringSlice(sq.RequiredRetestContexts)
	sq.doNotMergeMilestones = cleanStringSlice(sq.doNotMergeMilestones)

	sq.githubConfig = config

	// TODO: This is not how injection for tests should work.
	if sq.FakeE2E {
		sq.e2e = &fake_e2e.FakeE2ETester{
			JobNames:           sq.BlockingJobNames,
			WeakStableJobNames: sq.WeakStableJobNames,
		}
	} else {
		var gcs *utils.Utils
		if overrideUrl != "" {
			gcs = utils.NewTestUtils(overrideUrl)
		} else {
			gcs = utils.NewUtils(utils.KubekinsBucket, utils.LogDir)
		}
		sq.e2e = (&e2e.RealE2ETester{
			BlockingJobNames:     sq.BlockingJobNames,
			NonBlockingJobNames:  sq.NonBlockingJobNames,
			WeakStableJobNames:   sq.WeakStableJobNames,
			BuildStatus:          map[string]e2e.BuildInfo{},
			GoogleGCSBucketUtils: gcs,
		}).Init(admin.Mux)
	}

	if len(config.Address) > 0 {
		if len(config.WWWRoot) > 0 {
			http.Handle("/", gziphandler.GzipHandler(http.FileServer(http.Dir(config.WWWRoot))))
		}
		http.Handle("/prs", gziphandler.GzipHandler(http.HandlerFunc(sq.servePRs)))
		http.Handle("/history", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHistory)))
		http.Handle("/github-e2e-queue", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGithubE2EStatus)))
		http.Handle("/google-internal-ci", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGoogleInternalStatus)))
		http.Handle("/merge-info", gziphandler.GzipHandler(http.HandlerFunc(sq.serveMergeInfo)))
		http.Handle("/priority-info", gziphandler.GzipHandler(http.HandlerFunc(sq.servePriorityInfo)))
		http.Handle("/health", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHealth)))
		http.Handle("/health.svg", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHealthSVG)))
		http.Handle("/sq-stats", gziphandler.GzipHandler(http.HandlerFunc(sq.serveSQStats)))
		http.Handle("/flakes", gziphandler.GzipHandler(http.HandlerFunc(sq.serveFlakes)))
		config.ServeDebugStats("/stats")
		go http.ListenAndServe(config.Address, nil)
	}

	admin.Mux.HandleFunc("/api/emergency/stop", sq.EmergencyStopHTTP)
	admin.Mux.HandleFunc("/api/emergency/resume", sq.EmergencyStopHTTP)
	admin.Mux.HandleFunc("/api/emergency/status", sq.EmergencyStopHTTP)

	if sq.githubE2EPollTime == 0 {
		sq.githubE2EPollTime = githubE2EPollTime
	}

	sq.healthHistory = make([]healthRecord, 0)

	go sq.handleGithubE2EAndMerge()
	go sq.updateGoogleE2ELoop()

	if sq.adminPort != 0 {
		go http.ListenAndServe(fmt.Sprintf("0.0.0.0:%v", sq.adminPort), admin.Mux)
	}
	return nil
}
// NewGoogleGCSDownloader creates a new GoogleGCSDownloader
func NewGoogleGCSDownloader(builds int) *GoogleGCSDownloader {
	return &GoogleGCSDownloader{
		Builds:               builds,
		GoogleGCSBucketUtils: utils.NewUtils(utils.KubekinsBucket, utils.LogDir),
	}
}
// NewGoogleGCSDownloader creates a new GoogleGCSDownloader
func NewGoogleGCSDownloader() *GoogleGCSDownloader {
	return &GoogleGCSDownloader{
		GoogleGCSBucketUtils: utils.NewUtils(utils.KubekinsBucket, utils.LogDir),
	}
}
// NewGoogleGCSDownloader creates a new GoogleGCSDownloader
func NewGoogleGCSDownloader(builds int) *GoogleGCSDownloader {
	return &GoogleGCSDownloader{
		Builds:               builds,
		GoogleGCSBucketUtils: utils.NewUtils(utils.GoogleBucketURL),
	}
}
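// A hypothetical call site for the constructor above, shown only to illustrate its
// signature; the build count and variable names are invented for the example, and
// only the Builds field visible in the snippet is referenced.
func exampleNewGoogleGCSDownloader() {
	// Download data for the last 10 builds from the configured GCS bucket.
	downloader := NewGoogleGCSDownloader(10)
	_ = downloader.Builds
}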