// This is a silly test that looks for the latest mozilla-central buildbot
// linux64 l10n build and asserts that it must have a created time between a
// year ago and an hour in the future.
//
// Could easily break at a point in the future, at which point we can change
// to something else.
//
// Note, no credentials are needed, so this can be run even on
// travis-ci.org, for example.
func TestFindLatestBuildbotTask(t *testing.T) {
	Index := index.New("", "")
	Queue := queue.New("", "")
	itr, cs1 := Index.FindTask("buildbot.branches.mozilla-central.linux64.l10n")
	if cs1.Error != nil {
		t.Fatalf("%v\n", cs1.Error)
	}
	taskId := itr.TaskId
	td, cs2 := Queue.Task(taskId)
	if cs2.Error != nil {
		t.Fatalf("%v\n", cs2.Error)
	}
	created := time.Time(td.Created).Local()

	// calculate time an hour in the future to allow for clock drift
	now := time.Now().Local()
	inAnHour := now.Add(time.Hour * 1)
	aYearAgo := now.AddDate(-1, 0, 0)

	t.Log("")
	t.Log("  => Task " + taskId + " was created on " + created.Format("Mon, 2 Jan 2006 at 15:04:00 -0700"))
	t.Log("")
	if created.After(inAnHour) {
		t.Log("Current time: " + now.Format("Mon, 2 Jan 2006 at 15:04:00 -0700"))
		t.Error("Task " + taskId + " has a creation date that is over an hour in the future")
	}
	if created.Before(aYearAgo) {
		t.Log("Current time: " + now.Format("Mon, 2 Jan 2006 at 15:04:00 -0700"))
		t.Error("Task " + taskId + " has a creation date that is over a year old")
	}
}
func runWorker() {
	// Any custom startup per platform...
	err := startup()
	// any errors are fatal
	if err != nil {
		panic(err)
	}

	// Queue is the object we will use for accessing queue api
	Queue = queue.New(config.TaskclusterClientId, config.TaskclusterAccessToken)

	// Start the SignedURLsManager in a dedicated go routine, to take care of
	// keeping signed urls up-to-date (i.e. refreshing as old urls expire).
	signedURLsRequestChan, signedURLsResponseChan = SignedURLsManager()

	// Start the TaskStatusHandler in a dedicated go routine, to take care of
	// all communication with Queue regarding the status of a TaskRun.
	taskStatusUpdate, taskStatusUpdateErr = TaskStatusHandler()

	// loop forever claiming and running tasks!
	for {
		// make sure at least 1 second passes between iterations
		waitASec := time.NewTimer(time.Second * 1)
		taskFound := FindAndRunTask()
		if !taskFound {
			debug("No task claimed from any Azure queue...")
		} else {
			taskCleanup()
		}
		// To avoid hammering queue, make sure there is at least a second
		// between consecutive requests. Note we do this even if a task ran,
		// since a task could complete in less than a second.
		<-waitASec.C
	}
}
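// The timer above is started before the work rather than after it, so a fast
// iteration still waits out the remainder of the second, while a slow one
// proceeds immediately. A distilled sketch of the pattern, assuming a
// hypothetical doWork callback standing in for FindAndRunTask and friends:
func pollAtMostOncePerSecond(doWork func()) {
	for {
		// Start the timer before the work, so the wait below only covers
		// whatever remains of the one-second budget.
		minInterval := time.NewTimer(time.Second)
		doWork()
		// Returns immediately if the work already took longer than a second.
		<-minInterval.C
	}
}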
func (t *TaskRun) reclaim(until time.Time) {
	for {
		duration := until.Sub(time.Now()).Seconds()
		// Using a reclaim divisor of 1.3 with the default reclaim deadline
		// (20 minutes) means that a reclaim event will happen with a few
		// minutes left of the original claim.
		nextReclaim := duration / 1.3
		select {
		case <-t.stopReclaims:
			close(t.reclaimsDone)
			return
		case <-time.After(time.Duration(nextReclaim * 1e+9)):
			client := t.controller.Queue()
			claim, err := reclaimTask(client, t.TaskID, t.RunID, t.log)
			if err != nil {
				t.log.WithError(err).Error("Error reclaiming task")
				t.Abort()
				close(t.reclaimsDone)
				return
			}
			queueClient := queue.New(&tcclient.Credentials{
				ClientID:    claim.Credentials.ClientID,
				AccessToken: claim.Credentials.AccessToken,
				Certificate: claim.Credentials.Certificate,
			})
			queueClient.BaseURL = t.queueURL
			t.controller.SetQueueClient(queueClient)
			until = time.Time(claim.TakenUntil)
		}
	}
}
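// A sketch of how a caller might shut this loop down cleanly, assuming
// TaskRun owns the stopReclaims and reclaimsDone channels shown above (the
// method name stopReclaimLoop is hypothetical, not part of the original code):
func (t *TaskRun) stopReclaimLoop() {
	// Closing stopReclaims makes the first case of the select above ready,
	// so the reclaim goroutine closes reclaimsDone and returns.
	close(t.stopReclaims)
	// Wait for the goroutine to acknowledge before tearing anything down.
	<-t.reclaimsDone
}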
func (tsm *TaskStatusManager) Reclaim() error {
	return tsm.updateStatus(
		reclaimed,
		func(task *TaskRun) error {
			log.Printf("Reclaiming task %v...", task.TaskID)
			tcrsp, err := Queue.ReclaimTask(task.TaskID, fmt.Sprintf("%d", task.RunID))
			// check if an error occurred...
			if err != nil {
				// the task was probably cancelled - in any case, we should
				// kill the running task...
				log.Printf("%v", err)
				task.kill()
				return err
			}
			task.TaskReclaimResponse = *tcrsp
			// TODO: probably should use a mutex here
			task.Queue = queue.New(&tcclient.Credentials{
				ClientID:    tcrsp.Credentials.ClientID,
				AccessToken: tcrsp.Credentials.AccessToken,
				Certificate: tcrsp.Credentials.Certificate,
			})
			log.Printf("Reclaimed task %v successfully.", task.TaskID)
			return nil
		},
		claimed,
		reclaimed,
	)
}
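// One way to resolve the TODO above would be to guard the shared queue client
// behind a setter; a minimal sketch, assuming a hypothetical queueMutex field
// on TaskRun (neither setQueue nor queueMutex exist in the original code):
func (task *TaskRun) setQueue(q *queue.Queue) {
	task.queueMutex.Lock()
	defer task.queueMutex.Unlock()
	// Readers of task.Queue would take the same lock, so the swap to the
	// freshly-credentialed client is never observed half-written.
	task.Queue = q
}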
func getTaskScopes(taskId string) ([]string, error) {
	// We do not need auth for this operation
	q := queue.New("", "")
	q.Authenticate = false
	task, callSummary := q.Task(taskId)
	if callSummary.Error != nil {
		return nil, callSummary.Error
	}
	return task.Scopes, nil
}
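// A minimal usage sketch for the helper above; the task ID is illustrative
// only:
func printTaskScopes() {
	scopes, err := getTaskScopes("dtwuF2n9S-i83G37V9eBuQ")
	if err != nil {
		log.Fatalf("Failed to fetch task scopes: %v", err)
	}
	for _, scope := range scopes {
		log.Println("Task has scope:", scope)
	}
}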
func newTaskRun(
	config *configType,
	claim *taskClaim,
	environment *runtime.Environment,
	engine engines.Engine,
	plugin plugins.Plugin,
	log *logrus.Entry,
) (*TaskRun, error) {
	tp := environment.TemporaryStorage.NewFilePath()
	info := runtime.TaskInfo{
		TaskID:   claim.taskClaim.Status.TaskID,
		RunID:    claim.taskClaim.RunID,
		Created:  claim.taskClaim.Task.Created,
		Deadline: claim.taskClaim.Task.Deadline,
		Expires:  claim.taskClaim.Task.Expires,
	}
	ctxt, ctxtctl, err := runtime.NewTaskContext(tp, info, environment.WebHookServer)
	// fail fast if the task context could not be created, before the
	// controller is used below
	if err != nil {
		return nil, err
	}

	queueClient := queue.New(&tcclient.Credentials{
		ClientID:    claim.taskClaim.Credentials.ClientID,
		AccessToken: claim.taskClaim.Credentials.AccessToken,
		Certificate: claim.taskClaim.Credentials.Certificate,
	})
	if config.QueueBaseURL != "" {
		queueClient.BaseURL = config.QueueBaseURL
	}
	ctxtctl.SetQueueClient(queueClient)

	t := &TaskRun{
		TaskID:       claim.taskID,
		RunID:        claim.runID,
		definition:   claim.definition,
		log:          log,
		context:      ctxt,
		controller:   ctxtctl,
		engine:       engine,
		plugin:       plugin,
		queueURL:     config.QueueBaseURL,
		stopReclaims: make(chan struct{}),
		reclaimsDone: make(chan struct{}),
	}
	go t.reclaim(time.Time(claim.taskClaim.TakenUntil))
	return t, nil
}
func (tsm *TaskStatusManager) Claim() error {
	return tsm.updateStatus(
		claimed,
		func(task *TaskRun) error {
			log.Printf("Claiming task %v...", task.TaskID)
			task.TaskClaimRequest = queue.TaskClaimRequest{
				WorkerGroup: config.WorkerGroup,
				WorkerID:    config.WorkerID,
			}
			// Using the taskId and runId from the <MessageText> tag, the
			// worker must call queue.claimTask().
			tcrsp, err := Queue.ClaimTask(task.TaskID, fmt.Sprintf("%d", task.RunID), &task.TaskClaimRequest)
			// check if an error occurred...
			if err != nil {
				// If the queue.claimTask() operation fails with a 4xx error,
				// the worker must delete the messages from the Azure queue
				// (except 401).
				switch err := err.(type) {
				case httpbackoff.BadHttpResponseCode:
					switch {
					case err.HttpResponseCode == 401:
						log.Printf("Whoops - not authorized to claim task %v, *not* deleting it from Azure queue!", task.TaskID)
					case err.HttpResponseCode/100 == 4:
						// attempt to delete, but if it fails, log and continue
						// nothing we can do, and better to return the first 4xx error
						errDelete := task.deleteFromAzure()
						if errDelete != nil {
							log.Printf("Not able to delete task %v from Azure after receiving http status code %v when claiming it.", task.TaskID, err.HttpResponseCode)
							log.Printf("%v", errDelete)
						}
					}
				}
				log.Print(task.String())
				log.Printf("%v", err)
				return err
			}
			task.TaskClaimResponse = *tcrsp
			// note we don't need to worry about a mutex here since either old
			// value or new value can be used for some crossover time, and the
			// update should be atomic
			task.Queue = queue.New(&tcclient.Credentials{
				ClientID:    tcrsp.Credentials.ClientID,
				AccessToken: tcrsp.Credentials.AccessToken,
				Certificate: tcrsp.Credentials.Certificate,
			})
			// don't report failure if this fails, as it is already logged
			// and failure => task.deleteFromAzure()
			return nil
		},
		unclaimed,
	)
}
func main() {
	myQueue := queue.New(
		&tcclient.Credentials{
			ClientID:    os.Getenv("TASKCLUSTER_CLIENT_ID"),
			AccessToken: os.Getenv("TASKCLUSTER_ACCESS_TOKEN"),
			Certificate: os.Getenv("TASKCLUSTER_CERTIFICATE"),
		},
	)
	taskID := os.Getenv("TASK_ID")
	_, err := myQueue.CancelTask(taskID)
	if err != nil {
		log.Fatalf("Failed to cancel task: %v", err)
	}
	log.Printf("Cancelled task %v successfully", taskID)
}
// Create a new instance of the task manager that will be responsible for
// claiming, executing, and resolving units of work (tasks).
func newTaskManager(
	config *configType,
	engine engines.Engine,
	pluginManager plugins.Plugin,
	environment *runtime.Environment,
	log *logrus.Entry,
	gc *gc.GarbageCollector,
) (*Manager, error) {
	queue := tcqueue.New(
		&tcclient.Credentials{
			ClientID:    config.Credentials.ClientID,
			AccessToken: config.Credentials.AccessToken,
			Certificate: config.Credentials.Certificate,
		},
	)
	if config.QueueBaseURL != "" {
		queue.BaseURL = config.QueueBaseURL
	}

	service := &queueService{
		capacity:         config.Capacity,
		interval:         config.PollingInterval,
		client:           queue,
		provisionerID:    config.ProvisionerID,
		workerGroup:      config.WorkerGroup,
		workerID:         config.WorkerID,
		workerType:       config.WorkerType,
		log:              log.WithField("component", "Queue Service"),
		expirationOffset: config.ReclaimOffset,
	}

	m := &Manager{
		tasks:         make(map[string]*TaskRun),
		done:          make(chan struct{}),
		config:        config,
		engine:        engine,
		environment:   environment,
		log:           log,
		queue:         service,
		provisionerID: config.ProvisionerID,
		workerGroup:   config.WorkerGroup,
		workerID:      config.WorkerID,
		gc:            gc,
	}
	m.pluginManager = pluginManager
	return m, nil
}
func main() {
	// Parse the docopt string and exit on any error or help message.
	arguments, err := docopt.Parse(usage, nil, true, version, false, true)
	if err != nil {
		log.Fatalf("Failed to parse command line arguments: %v", err)
	}

	taskId := arguments["<taskId>"].(string)
	port, err := strconv.Atoi(arguments["--port"].(string))
	if err != nil {
		log.Fatalf("Failed to convert port to integer")
	}

	// Parse out additional scopes to add...
	var additionalScopes []string
	if arguments["<scope>"] != nil {
		additionalScopes = arguments["<scope>"].([]string)
	} else {
		additionalScopes = make([]string, 0)
	}

	// Client ID is required but has a default.
	clientId := arguments["--client-id"]
	if clientId == nil || clientId == "" {
		clientId = os.Getenv("TASKCLUSTER_CLIENT_ID")
	}

	// Access token is also required but has a default.
	accessToken := arguments["--access-token"]
	if accessToken == nil || accessToken == "" {
		accessToken = os.Getenv("TASKCLUSTER_ACCESS_TOKEN")
	}

	certificate := arguments["--certificate"]
	if certificate == nil || certificate == "" {
		certificate = os.Getenv("TASKCLUSTER_CERTIFICATE")
	}

	log.Printf("clientId: '%v'\naccessToken: '%v'\ncertificate: '%v'\n", clientId, accessToken, certificate)

	// Ensure we have credentials; our auth proxy is pretty much useless
	// without them.
	if accessToken == "" || clientId == "" {
		log.Fatalf(
			"Credentials must be passed via environment variables or flags...",
		)
	}
	if certificate == "" {
		log.Println("Warning - no taskcluster certificate set - assuming permanent credentials are being used")
	}

	creds := &tcclient.Credentials{
		ClientId:    clientId.(string),
		AccessToken: accessToken.(string),
		Certificate: certificate.(string),
	}
	myQueue := queue.New(creds)

	// Fetch the task to get the scopes we should be using...
	task, _, err := myQueue.Task(taskId)
	if err != nil {
		log.Fatalf("Could not fetch taskcluster task '%s' : %s", taskId, err)
	}

	creds.AuthorizedScopes = append(additionalScopes, task.Scopes...)
	log.Println("Proxy with scopes: ", creds.AuthorizedScopes)

	routes := Routes{
		ConnectionData: tcclient.ConnectionData{
			Authenticate: true,
			Credentials:  creds,
		},
	}

	startError := http.ListenAndServe(fmt.Sprintf(":%d", port), &routes)
	if startError != nil {
		log.Fatal(startError)
	}
}
// Tests whether it is possible to define a task against the production Queue.
func TestDefineTask(t *testing.T) {
	clientId := os.Getenv("TASKCLUSTER_CLIENT_ID")
	accessToken := os.Getenv("TASKCLUSTER_ACCESS_TOKEN")
	certificate := os.Getenv("TASKCLUSTER_CERTIFICATE")
	if clientId == "" || accessToken == "" {
		t.Skip("Skipping test TestDefineTask since TASKCLUSTER_CLIENT_ID and/or TASKCLUSTER_ACCESS_TOKEN env vars not set")
	}
	myQueue := queue.New(clientId, accessToken)
	myQueue.Certificate = certificate

	taskId := slugid.Nice()
	created := time.Now()
	deadline := created.AddDate(0, 0, 1)
	expires := deadline

	td := &queue.TaskDefinition{
		Created:  queue.Time(created),
		Deadline: queue.Time(deadline),
		Expires:  queue.Time(expires),
		Extra:    json.RawMessage(`{"index":{"rank":12345}}`),
		Metadata: struct {
			Description string `json:"description"`
			Name        string `json:"name"`
			Owner       string `json:"owner"`
			Source      string `json:"source"`
		}{
			Description: "Stuff",
			Name:        "[TC] Pete",
			Owner:       "*****@*****.**",
			Source:      "http://everywhere.com/",
		},
		Payload:       json.RawMessage(`{"features":{"relengApiProxy":true}}`),
		ProvisionerId: "win-provisioner",
		Retries:       5,
		Routes: []string{
			"tc-treeherder.mozilla-inbound.bcf29c305519d6e120b2e4d3b8aa33baaf5f0163",
			"tc-treeherder-stage.mozilla-inbound.bcf29c305519d6e120b2e4d3b8aa33baaf5f0163",
		},
		SchedulerId: "go-test-test-scheduler",
		Scopes: []string{
			"test-worker:image:toastposter/pumpkin:0.5.6",
		},
		Tags:        json.RawMessage(`{"createdForUser":"******"}`),
		Priority:    json.RawMessage(`"high"`),
		TaskGroupId: "dtwuF2n9S-i83G37V9eBuQ",
		WorkerType:  "win2008-worker",
	}

	tsr, cs := myQueue.DefineTask(taskId, td)

	//////////////////////////////////
	// And now validate results.... //
	//////////////////////////////////

	if cs.Error != nil {
		b := bytes.Buffer{}
		cs.HttpRequest.Header.Write(&b)
		headers := regexp.MustCompile(`(mac|nonce)="[^"]*"`).ReplaceAllString(b.String(), `$1="***********"`)
		t.Logf("\n\nRequest sent:\n\nURL: %s\nMethod: %s\nHeaders:\n%v\nBody: %s", cs.HttpRequest.URL, cs.HttpRequest.Method, headers, cs.HttpRequestBody)
		t.Fatalf("\n\nResponse received:\n\n%s", cs.Error)
	}

	t.Logf("Task https://queue.taskcluster.net/v1/task/%v created successfully", taskId)

	if provisionerId := cs.HttpRequestObject.(*queue.TaskDefinition).ProvisionerId; provisionerId != "win-provisioner" {
		t.Errorf("provisionerId 'win-provisioner' expected but got %s", provisionerId)
	}
	if schedulerId := tsr.Status.SchedulerId; schedulerId != "go-test-test-scheduler" {
		t.Errorf("schedulerId 'go-test-test-scheduler' expected but got %s", schedulerId)
	}
	if retriesLeft := tsr.Status.RetriesLeft; retriesLeft != 5 {
		t.Errorf("Expected 'retriesLeft' to be 5, but got %v", retriesLeft)
	}
	if state := tsr.Status.State; state != "unscheduled" {
		t.Errorf("Expected 'state' to be 'unscheduled', but got %s", state)
	}

	submittedPayload := cs.HttpRequestBody

	// only the content is relevant below - the formatting and order of
	// properties does not matter, since a json comparison is done, not a
	// string comparison...
	expectedJson := []byte(`
	{
	  "created": "` + created.UTC().Format("2006-01-02T15:04:05.000Z") + `",
	  "deadline": "` + deadline.UTC().Format("2006-01-02T15:04:05.000Z") + `",
	  "expires": "` + expires.UTC().Format("2006-01-02T15:04:05.000Z") + `",
	  "taskGroupId": "dtwuF2n9S-i83G37V9eBuQ",
	  "workerType": "win2008-worker",
	  "schedulerId": "go-test-test-scheduler",
	  "payload": {
	    "features": {
	      "relengApiProxy": true
	    }
	  },
	  "priority": "high",
	  "provisionerId": "win-provisioner",
	  "retries": 5,
	  "routes": [
	    "tc-treeherder.mozilla-inbound.bcf29c305519d6e120b2e4d3b8aa33baaf5f0163",
	    "tc-treeherder-stage.mozilla-inbound.bcf29c305519d6e120b2e4d3b8aa33baaf5f0163"
	  ],
	  "scopes": [
	    "test-worker:image:toastposter/pumpkin:0.5.6"
	  ],
	  "tags": {
	    "createdForUser": "******"
	  },
	  "extra": {
	    "index": {
	      "rank": 12345
	    }
	  },
	  "metadata": {
	    "description": "Stuff",
	    "name": "[TC] Pete",
	    "owner": "*****@*****.**",
	    "source": "http://everywhere.com/"
	  }
	}
	`)

	jsonCorrect, formattedExpected, formattedActual, err := jsonEqual(expectedJson, []byte(submittedPayload))
	if err != nil {
		t.Fatalf("Exception thrown formatting json data!\n%s\n\nStruggled to format either:\n%s\n\nor:\n\n%s", err, string(expectedJson), submittedPayload)
	}

	if !jsonCorrect {
		t.Log("Anticipated json not generated. Expected:")
		t.Logf("%s", formattedExpected)
		t.Log("Actual:")
		t.Errorf("%s", formattedActual)
	}
}
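// For reference, a minimal sketch of what a jsonEqual helper like the one
// used above might look like; this is an assumption, and the helper actually
// used by the test suite may differ:
func jsonEqual(a, b []byte) (equal bool, formattedA, formattedB string, err error) {
	var objA, objB interface{}
	// Unmarshalling into interface{} normalises key order and whitespace,
	// so the comparison is semantic rather than textual.
	if err = json.Unmarshal(a, &objA); err != nil {
		return
	}
	if err = json.Unmarshal(b, &objB); err != nil {
		return
	}
	// Pretty-print both documents for readable test failure output.
	bytesA, err := json.MarshalIndent(objA, "", "  ")
	if err != nil {
		return
	}
	bytesB, err := json.MarshalIndent(objB, "", "  ")
	if err != nil {
		return
	}
	return reflect.DeepEqual(objA, objB), string(bytesA), string(bytesB), nil
}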