Code Example #1
File: aws.go Project: petemoore/generic-worker
func queryMetaData(url string) (string, error) {
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html#instancedata-data-retrieval
	// call http://169.254.169.254/latest/meta-data/instance-id with httpbackoff
	resp, _, err := httpbackoff.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	content, err := ioutil.ReadAll(resp.Body)
	return string(content), err
}
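Below is a hypothetical caller sketch (not taken from the project) showing how queryMetaData might be used against the instance-id endpoint referenced in the comment above; the function name logInstanceID and its behaviour are illustrative assumptions.

// Hypothetical caller (illustrative only, not project code): fetch the EC2
// instance id via the metadata endpoint referenced in the comment above.
// Assumes the same package as queryMetaData and that "log" is imported.
func logInstanceID() {
	instanceID, err := queryMetaData("http://169.254.169.254/latest/meta-data/instance-id")
	if err != nil {
		log.Printf("Could not query EC2 instance metadata: %v", err)
		return
	}
	log.Printf("Running on EC2 instance %v", instanceID)
}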
Code Example #2
File: aws.go Project: gregarndt/generic-worker
// for when running in aws
func queryUserData() (*UserData, error) {
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html#instancedata-user-data-retrieval
	resp, _, err := httpbackoff.Get("http://169.254.169.254/latest/user-data")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	userData := new(UserData)
	decoder := json.NewDecoder(resp.Body)
	err = decoder.Decode(userData)
	return userData, err
}
Code Example #3
File: aws.go Project: mrrrgn/generic-worker
// for when running in aws
func queryUserData() (*UserData, error) {
	// TODO: currently assuming UserData is json, need to work out with jhford how this will work with provisioner
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html#instancedata-user-data-retrieval
	// call http://169.254.169.254/latest/user-data with httpbackoff
	resp, _, err := httpbackoff.Get("http://169.254.169.254/latest/user-data")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	userData := new(UserData)
	// decoder := json.NewDecoder(resp.Body)
	// err = decoder.Decode(userData)
	return userData, err
}
Code Example #4
func TestBewit(t *testing.T) {
	test := func(t *testing.T, creds *tcclient.Credentials) *httptest.ResponseRecorder {

		// Test setup
		routes := Routes{
			ConnectionData: tcclient.ConnectionData{
				Credentials: creds,
			},
		}
		req, err := http.NewRequest(
			"POST",
			"http://localhost:60024/bewit",
			bytes.NewBufferString("https://queue.taskcluster.net/v1/task/DD1kmgFiRMWTjyiNoEJIMA/runs/0/artifacts/private%2Fbuild%2Fsources.xml"),
		)
		if err != nil {
			t.Fatal(err)
		}
		res := httptest.NewRecorder()

		// Function to test
		routes.ServeHTTP(res, req)

		// Validate results
		bewitUrl := res.Header().Get("Location")
		_, err = url.Parse(bewitUrl)
		if err != nil {
			t.Fatalf("Bewit URL returned is invalid: %q", bewitUrl)
		}
		resp, _, err := httpbackoff.Get(bewitUrl)
		if err != nil {
			t.Fatalf("Exception thrown:\n%s", err)
		}
		defer resp.Body.Close()
		respBody, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			t.Fatalf("Exception thrown:\n%s", err)
		}
		if len(respBody) != 18170 {
			t.Logf("Response received:\n%s", string(respBody))
			t.Fatalf("Expected response body to be 18170 bytes, but was %v bytes", len(respBody))
		}
		return res
	}
	testWithPermCreds(t, test, 303)
	testWithTempCreds(t, test, 303)
}
Code Example #5
File: mounts.go Project: petemoore/generic-worker
// Utility function to aggressively download a url to a file location
func downloadURLToFile(url, file string) error {
	log.Printf("Downloading url %v to %v", url, file)
	err := os.MkdirAll(filepath.Dir(file), 0777)
	if err != nil {
		return err
	}
	resp, _, err := httpbackoff.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	f, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, resp.Body)
	if err != nil {
		return err
	}
	return nil
}
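A hypothetical usage sketch for downloadURLToFile; the URL, destination path and wrapper function below are placeholders for illustration, not values from the project.

// Hypothetical usage (illustrative only): download a file into a local
// "downloads" directory. Assumes the same package as downloadURLToFile and
// that "log" and "path/filepath" are imported.
func fetchToolchain() {
	url := "https://example.com/toolchain.tar.gz" // placeholder URL
	dest := filepath.Join("downloads", "toolchain.tar.gz")
	if err := downloadURLToFile(url, dest); err != nil {
		log.Fatalf("Download of %v failed: %v", url, err)
	}
}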
Code Example #6
File: main_test.go Project: petemoore/generic-worker
func TestAbortAfterMaxRunTime(t *testing.T) {
	setup(t)
	payload := GenericWorkerPayload{
		Command:    sleep(4),
		MaxRunTime: 3,
	}
	td := testTask()
	taskID, myQueue := submitTask(t, td, payload)
	runWorker()

	tsr, err := myQueue.Status(taskID)
	if err != nil {
		t.Fatalf("Could not retrieve task status")
	}
	if tsr.Status.State != "failed" {
		t.Fatalf("Was expecting state %q but got %q", "failed", tsr.Status.State)
	}
	// check that the uploaded log mentions the task was aborted
	// note: we check the uploaded log rather than the local log, so we also
	// verify the log got uploaded - the failure path requires the task to be
	// resolved before logs are uploaded
	url, err := myQueue.GetLatestArtifact_SignedURL(taskID, "public/logs/live_backing.log", 10*time.Minute)
	if err != nil {
		t.Fatalf("Cannot retrieve url for live_backing.log: %v", err)
	}
	resp, _, err := httpbackoff.Get(url.String())
	if err != nil {
		t.Fatalf("Could not download log: %v", err)
	}
	defer resp.Body.Close()
	bytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("Error when trying to read log file over http: %v", err)
	}
	logtext := string(bytes)
	if !strings.Contains(logtext, "max run time exceeded") {
		t.Fatalf("Was expecting log file to mention task abortion, but it doesn't")
	}
}
Code Example #7
// Retrieves up to the requested number of task messages from the Azure queue.
func (q *queueService) pollTaskURL(messageQueue *messageQueue, ntasks int) ([]*taskMessage, error) {
	taskMessages := []*taskMessage{}
	var r queueMessagesList
	// To poll an Azure Queue the worker must do a `GET` request to the
	// `SignedPollURL` from the object, representing the Azure queue. To
	// receive multiple messages at once the parameter `&numofmessages=N`
	// may be appended to `SignedPollURL`. The parameter `N` is the
	// maximum number of messages desired, `N` can be up to 32.
	n := int(math.Min(32, float64(ntasks)))
	u := fmt.Sprintf("%s%s%d", messageQueue.SignedPollURL, "&numofmessages=", n)
	resp, _, err := httpbackoff.Get(u)
	if err != nil {
		return nil, err
	}
	// When executing a `GET` request to `SignedPollURL` from an Azure queue object,
	// the request will return an XML document on the form:
	//
	// ```xml
	// <QueueMessagesList>
	//     <QueueMessage>
	//       <MessageId>...</MessageId>
	//       <InsertionTime>...</InsertionTime>
	//       <ExpirationTime>...</ExpirationTime>
	//       <PopReceipt>...</PopReceipt>
	//       <TimeNextVisible>...</TimeNextVisible>
	//       <DequeueCount>...</DequeueCount>
	//       <MessageText>...</MessageText>
	//     </QueueMessage>
	//     ...
	// </QueueMessagesList>
	// ```
	// We unmarshal the response into go objects, using the go xml decoder.
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if err := xml.Unmarshal(data, &r); err != nil {
		q.log.WithFields(logrus.Fields{
			"body":  string(data),
			"error": err.Error(),
		}).Debugf("Not able to xml decode the response from the Azure queue")
		return nil, fmt.Errorf("Not able to xml decode the response from the Azure queue. Body: %s, Error: %s", string(data), err)
	}

	if len(r.QueueMessages) == 0 {
		q.log.Debug("Zero tasks returned in Azure XML queueMessagesList")
		return []*taskMessage{}, nil
	}

	// Utility method for replacing a placeholder within a uri with
	// a string value which first must be uri encoded...
	detokeniseURI := func(URI, placeholder, rawValue string) string {
		return strings.Replace(URI, placeholder, strings.Replace(url.QueryEscape(rawValue), "+", "%20", -1), -1)
	}

	for _, qm := range r.QueueMessages {

		// Before using the SignedDeleteURL the worker must replace the placeholder
		// {{messageId}} with the contents of the <MessageId> tag. It is also
		// necessary to replace the placeholder {{popReceipt}} with the URI encoded
		// contents of the <PopReceipt> tag.  Notice, that the worker must URI
		// encode the contents of <PopReceipt> before substituting into the
		// SignedDeleteURL. Otherwise, the worker will experience intermittent
		// failures.

		signedDeleteURL := detokeniseURI(
			detokeniseURI(
				messageQueue.SignedDeleteURL,
				"{{messageId}}",
				qm.MessageID,
			),
			"{{popReceipt}}",
			qm.PopReceipt,
		)

		// Workers should read the value of the `<DequeueCount>` and log messages
		// that alert the operator if a message has been dequeued a significant
		// number of times, for example 15 or more.
		if qm.DequeueCount >= 15 {
			q.log.Warnf("Queue Message with message id %v has been dequeued %v times!", qm.MessageID, qm.DequeueCount)
			err := q.deleteFromAzure(signedDeleteURL)
			if err != nil {
				q.log.Warnf("Not able to call Azure delete URL %v. %v", signedDeleteURL, err)
			}
		}

		// To find the task referenced in a message the worker must base64
		// decode and JSON parse the contents of the <MessageText> tag. This
		// would return an object on the form: {taskId, runId}.
		m, err := base64.StdEncoding.DecodeString(qm.MessageText)
		if err != nil {
			// try to delete from Azure, if it fails, nothing we can do about it
			// not very serious - another worker will try to delete it
			q.log.WithField("messageText", qm.MessageText).Errorf("Not able to base64 decode the Message Text in Azure message response.")
			q.log.WithField("messageID", qm.MessageID).Info("Deleting from Azure queue as other workers will have the same problem.")
			err2 := q.deleteFromAzure(signedDeleteURL)
			if err2 != nil {
				q.log.WithFields(logrus.Fields{
					"messageID": qm.MessageID,
					"url":       signedDeleteURL,
					"error":     err2,
				}).Warn("Not able to call Azure delete URL")
			}
			return nil, err
		}

		// initialise fields of TaskRun not contained in json string m
		tm := &taskMessage{
			signedDeleteURL: signedDeleteURL,
		}

		// now populate remaining json fields of TaskMessage from json string m
		err = json.Unmarshal(m, &tm)
		if err != nil {
			q.log.WithFields(logrus.Fields{
				"error":   err,
				"message": m,
			}).Warn("Not able to unmarshal json from base64 decoded MessageText")
			err := q.deleteFromAzure(signedDeleteURL)
			if err != nil {
				q.log.WithFields(logrus.Fields{
					"url":   signedDeleteURL,
					"error": err,
				}).Warn("Not able to call Azure delete URL")
			}
			continue
		}
		taskMessages = append(taskMessages, tm)
	}

	return taskMessages, nil
}
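For reference, here is a sketch of what the Go types consumed by the xml.Unmarshal and json.Unmarshal calls above could look like, reconstructed from the XML document shown in the comments and the {taskId, runId} JSON shape described there; the actual definitions in the project may differ.

// Sketch only - plausible type definitions reconstructed from the comments
// above; the project's real queueMessagesList/taskMessage types may differ.
// Assumes "encoding/xml" is imported (it already is, for xml.Unmarshal).
type queueMessagesList struct {
	XMLName       xml.Name       `xml:"QueueMessagesList"`
	QueueMessages []queueMessage `xml:"QueueMessage"`
}

type queueMessage struct {
	MessageID    string `xml:"MessageId"`
	PopReceipt   string `xml:"PopReceipt"`
	DequeueCount int    `xml:"DequeueCount"`
	MessageText  string `xml:"MessageText"`
}

type taskMessage struct {
	TaskID          string `json:"taskId"`
	RunID           uint   `json:"runId"`
	signedDeleteURL string // filled in by the worker, not part of the JSON
}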
Code Example #8
func TestUpload(t *testing.T) {

	setup(t)

	expires := tcclient.Time(time.Now().Add(time.Minute * 30))

	payload := GenericWorkerPayload{
		Command:    helloGoodbye(),
		MaxRunTime: 7200,
		Artifacts: []struct {
			Expires tcclient.Time `json:"expires"`
			Path    string        `json:"path"`
			Type    string        `json:"type"`
		}{
			{
				Path:    "SampleArtifacts/_/X.txt",
				Expires: expires,
				Type:    "file",
			},
		},
		Features: struct {
			ChainOfTrust bool `json:"chainOfTrust,omitempty"`
		}{
			ChainOfTrust: true,
		},
	}

	td := testTask()

	taskID, myQueue := submitTask(t, td, payload)
	runWorker()

	// some required substrings - not all, just a selection
	expectedArtifacts := map[string]struct {
		extracts        []string
		contentEncoding string
		expires         tcclient.Time
	}{
		"public/logs/live_backing.log": {
			extracts: []string{
				"hello world!",
				"goodbye world!",
				`"instance-type": "p3.enormous"`,
			},
			contentEncoding: "gzip",
			expires:         td.Expires,
		},
		"public/logs/live.log": {
			extracts: []string{
				"hello world!",
				"goodbye world!",
				"=== Task Finished ===",
				"Exit Code: 0",
			},
			contentEncoding: "gzip",
			expires:         td.Expires,
		},
		"public/logs/certified.log": {
			extracts: []string{
				"hello world!",
				"goodbye world!",
				"=== Task Finished ===",
				"Exit Code: 0",
			},
			contentEncoding: "gzip",
			expires:         td.Expires,
		},
		"public/logs/chainOfTrust.json.asc": {
			// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855  ./%%%/v/X
			// 8308d593eb56527137532595a60255a3fcfbe4b6b068e29b22d99742bad80f6f  ./_/X.txt
			// a0ed21ab50992121f08da55365da0336062205fd6e7953dbff781a7de0d625b7  ./b/c/d.jpg
			extracts: []string{
				"8308d593eb56527137532595a60255a3fcfbe4b6b068e29b22d99742bad80f6f",
			},
			contentEncoding: "gzip",
			expires:         td.Expires,
		},
		"SampleArtifacts/_/X.txt": {
			extracts: []string{
				"test artifact",
			},
			contentEncoding: "",
			expires:         payload.Artifacts[0].Expires,
		},
	}

	artifacts, err := myQueue.ListArtifacts(taskID, "0", "", "")

	if err != nil {
		t.Fatalf("Error listing artifacts: %v", err)
	}

	actualArtifacts := make(map[string]struct {
		ContentType string        `json:"contentType"`
		Expires     tcclient.Time `json:"expires"`
		Name        string        `json:"name"`
		StorageType string        `json:"storageType"`
	}, len(artifacts.Artifacts))

	for _, actualArtifact := range artifacts.Artifacts {
		actualArtifacts[actualArtifact.Name] = actualArtifact
	}

	for artifact := range expectedArtifacts {
		if a, ok := actualArtifacts[artifact]; ok {
			if a.ContentType != "text/plain; charset=utf-8" {
				t.Errorf("Artifact %s should have mime type 'text/plain; charset=utf-8' but has '%s'", artifact, a.ContentType)
			}
			if a.Expires.String() != expectedArtifacts[artifact].expires.String() {
				t.Errorf("Artifact %s should have expiry '%s' but has '%s'", artifact, expires, a.Expires)
			}
		} else {
			t.Errorf("Artifact '%s' not created", artifact)
		}
	}

	// now check content was uploaded to Amazon, and is correct

	// signer of public/logs/chainOfTrust.json.asc
	signer := &openpgp.Entity{}
	cotCert := &ChainOfTrustData{}

	for artifact, content := range expectedArtifacts {
		url, err := myQueue.GetLatestArtifact_SignedURL(taskID, artifact, 10*time.Minute)
		if err != nil {
			t.Fatalf("Error trying to fetch artifacts from Amazon...\n%s", err)
		}
		// disable transparent gzip handling, so the Content-Encoding header
		// isn't swallowed by Go's http client before we test it later on
		tr := &http.Transport{
			DisableCompression: true,
		}
		client := &http.Client{Transport: tr}
		rawResp, _, err := httpbackoff.ClientGet(client, url.String())
		if err != nil {
			t.Fatalf("Error trying to fetch decompressed artifact from signed URL %s ...\n%s", url.String(), err)
		}
		resp, _, err := httpbackoff.Get(url.String())
		if err != nil {
			t.Fatalf("Error trying to fetch artifact from signed URL %s ...\n%s", url.String(), err)
		}
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			t.Fatalf("Error trying to read response body of artifact from signed URL %s ...\n%s", url.String(), err)
		}
		for _, requiredSubstring := range content.extracts {
			if strings.Index(string(b), requiredSubstring) < 0 {
				t.Errorf("Artifact '%s': Could not find substring %q in '%s'", artifact, requiredSubstring, string(b))
			}
		}
		if actualContentEncoding := rawResp.Header.Get("Content-Encoding"); actualContentEncoding != content.contentEncoding {
			t.Fatalf("Expected Content-Encoding %q but got Content-Encoding %q for artifact %q from url %v", content.contentEncoding, actualContentEncoding, artifact, url)
		}
		if actualContentType := resp.Header.Get("Content-Type"); actualContentType != "text/plain; charset=utf-8" {
			t.Fatalf("Content-Type in Signed URL response does not match Content-Type of artifact")
		}
		// check openpgp signature is valid
		if artifact == "public/logs/chainOfTrust.json.asc" {
			pubKey, err := os.Open(filepath.Join("testdata", "public-openpgp-key"))
			if err != nil {
				t.Fatalf("Error opening public key file")
			}
			defer pubKey.Close()
			entityList, err := openpgp.ReadArmoredKeyRing(pubKey)
			if err != nil {
				t.Fatalf("Error decoding public key file")
			}
			block, _ := clearsign.Decode(b)
			signer, err = openpgp.CheckDetachedSignature(entityList, bytes.NewBuffer(block.Bytes), block.ArmoredSignature.Body)
			if err != nil {
				t.Fatalf("Not able to validate openpgp signature of public/logs/chainOfTrust.json.asc")
			}
			err = json.Unmarshal(block.Plaintext, cotCert)
			if err != nil {
				t.Fatalf("Could not interpret public/logs/chainOfTrust.json as json")
			}
		}
	}
	if signer == nil {
		t.Fatalf("Signer of public/logs/chainOfTrust.json.asc could not be established (is nil)")
	}
	if signer.Identities["Generic-Worker <*****@*****.**>"] == nil {
		t.Fatalf("Did not get correct signer identity in public/logs/chainOfTrust.json.asc - %#v", signer.Identities)
	}

	// This trickery is to convert a TaskDefinitionResponse into a
	// TaskDefinitionRequest in order that we can compare. We cannot cast, so
	// need to transform to json as an intermediary step.
	b, err := json.Marshal(cotCert.Task)
	if err != nil {
		t.Fatalf("Cannot marshal task into json - %#v\n%v", cotCert.Task, err)
	}
	cotCertTaskRequest := &queue.TaskDefinitionRequest{}
	err = json.Unmarshal(b, cotCertTaskRequest)
	if err != nil {
		t.Fatalf("Cannot unmarshal json into task request - %#v\n%v", string(b), err)
	}

	// The Payload, Tags and Extra fields are raw bytes, so differences may not
	// be valid. Since we are comparing the rest, let's skip these three fields,
	// as the rest should give us good enough coverage already
	cotCertTaskRequest.Payload = nil
	cotCertTaskRequest.Tags = nil
	cotCertTaskRequest.Extra = nil
	td.Payload = nil
	td.Tags = nil
	td.Extra = nil
	if !reflect.DeepEqual(cotCertTaskRequest, td) {
		t.Fatalf("Did not get back expected task definition in chain of trust certificate:\n%#v\n ** vs **\n%#v", cotCertTaskRequest, td)
	}
	if len(cotCert.Artifacts) != 2 {
		t.Fatalf("Expected 2 artifact hashes to be listed")
	}
	if cotCert.TaskID != taskID {
		t.Fatalf("Expected taskId to be %q but was %q", taskID, cotCert.TaskID)
	}
	if cotCert.RunID != 0 {
		t.Fatalf("Expected runId to be 0 but was %v", cotCert.RunID)
	}
	if cotCert.WorkerGroup != "test-worker-group" {
		t.Fatalf("Expected workerGroup to be \"test-worker-group\" but was %q", cotCert.WorkerGroup)
	}
	if cotCert.WorkerID != "test-worker-id" {
		t.Fatalf("Expected workerGroup to be \"test-worker-id\" but was %q", cotCert.WorkerID)
	}
	if cotCert.Environment.PublicIPAddress != "12.34.56.78" {
		t.Fatalf("Expected publicIpAddress to be 12.34.56.78 but was %v", cotCert.Environment.PublicIPAddress)
	}
	if cotCert.Environment.PrivateIPAddress != "87.65.43.21" {
		t.Fatalf("Expected privateIpAddress to be 87.65.43.21 but was %v", cotCert.Environment.PrivateIPAddress)
	}
	if cotCert.Environment.InstanceID != "test-instance-id" {
		t.Fatalf("Expected instanceId to be \"test-instance-id\" but was %v", cotCert.Environment.InstanceID)
	}
	if cotCert.Environment.InstanceType != "p3.enormous" {
		t.Fatalf("Expected instanceType to be \"p3.enormous\" but was %v", cotCert.Environment.InstanceType)
	}
	if cotCert.Environment.Region != "outer-space" {
		t.Fatalf("Expected region to be \"outer-space\" but was %v", cotCert.Environment.Region)
	}
}
Code Example #9
File: main.go Project: mrrrgn/generic-worker
// Queries the given Azure Queue signed url pair (poll url/delete url) and
// translates the Azure response into a Task object
func (urlPair SignedURLPair) Poll() (*TaskRun, error) {
	queueMessagesList := new(QueueMessagesList)
	// To poll an Azure Queue the worker must do a `GET` request to the
	// `signedPollUrl` from the object, representing the Azure queue. To
	// receive multiple messages at once the parameter `&numofmessages=N`
	// may be appended to `signedPollUrl`. The parameter `N` is the
	// maximum number of messages desired, `N` can be up to 32.
	// Since we can only process one task at a time, grab only one.
	resp, _, err := httpbackoff.Get(urlPair.SignedPollUrl + "&numofmessages=1")
	if err != nil {
		debug("%v", err)
		return nil, err
	}
	defer resp.Body.Close()
	// When executing a `GET` request to `signedPollUrl` from an Azure queue object,
	// the request will return an XML document on the form:
	//
	// ```xml
	// <QueueMessagesList>
	//     <QueueMessage>
	//       <MessageId>...</MessageId>
	//       <InsertionTime>...</InsertionTime>
	//       <ExpirationTime>...</ExpirationTime>
	//       <PopReceipt>...</PopReceipt>
	//       <TimeNextVisible>...</TimeNextVisible>
	//       <DequeueCount>...</DequeueCount>
	//       <MessageText>...</MessageText>
	//     </QueueMessage>
	//     ...
	// </QueueMessagesList>
	// ```
	// We unmarshal the response into go objects, using the go xml decoder.
	fullBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	reader := strings.NewReader(string(fullBody))
	dec := xml.NewDecoder(reader)
	err = dec.Decode(&queueMessagesList)
	if err != nil {
		debug("ERROR: not able to xml decode the response from the azure Queue:")
		debug(string(fullBody))
		return nil, err
	}
	if len(queueMessagesList.QueueMessages) == 0 {
		debug("Zero tasks returned in Azure XML QueueMessagesList")
		return nil, nil
	}
	if size := len(queueMessagesList.QueueMessages); size > 1 {
		return nil, fmt.Errorf("%v tasks returned in Azure XML QueueMessagesList, even though &numofmessages=1 was specified in poll url", size)
	}

	// at this point we know there is precisely one QueueMessage (== task)
	qm := queueMessagesList.QueueMessages[0]

	// Utility method for replacing a placeholder within a uri with
	// a string value which first must be uri encoded...
	detokeniseUri := func(uri, placeholder, rawValue string) string {
		return strings.Replace(uri, placeholder, strings.Replace(url.QueryEscape(rawValue), "+", "%20", -1), -1)
	}

	// Before using the signedDeleteUrl the worker must replace the placeholder
	// {{messageId}} with the contents of the <MessageId> tag. It is also
	// necessary to replace the placeholder {{popReceipt}} with the URI encoded
	// contents of the <PopReceipt> tag.  Notice, that the worker must URI
	// encode the contents of <PopReceipt> before substituting into the
	// signedDeleteUrl. Otherwise, the worker will experience intermittent
	// failures.

	// Since urlPair is a value, not a pointer, we can update this copy which
	// is associated only with this particular task
	urlPair.SignedDeleteUrl = detokeniseUri(
		detokeniseUri(
			urlPair.SignedDeleteUrl,
			"{{messageId}}",
			qm.MessageId,
		),
		"{{popReceipt}}",
		qm.PopReceipt,
	)

	// Workers should read the value of the `<DequeueCount>` and log messages
	// that alert the operator if a message has been dequeued a significant
	// number of times, for example 15 or more.
	if qm.DequeueCount >= 15 {
		debug("WARN: Queue Message with message id %v has been dequeued %v times!", qm.MessageId, qm.DequeueCount)
		deleteErr := deleteFromAzure(urlPair.SignedDeleteUrl)
		if deleteErr != nil {
			debug("WARN: Not able to call Azure delete URL %v" + urlPair.SignedDeleteUrl)
			debug("%v", deleteErr)
		}
	}

	// To find the task referenced in a message the worker must base64
	// decode and JSON parse the contents of the <MessageText> tag. This
	// would return an object on the form: {taskId, runId}.
	m, err := base64.StdEncoding.DecodeString(qm.MessageText)
	if err != nil {
		// try to delete from Azure, if it fails, nothing we can do about it
		// not very serious - another worker will try to delete it
		debug("ERROR: Not able to base64 decode the Message Text '" + qm.MessageText + "' in Azure QueueMessage response.")
		debug("Deleting from Azure queue as other workers will have the same problem.")
		deleteErr := deleteFromAzure(urlPair.SignedDeleteUrl)
		if deleteErr != nil {
			debug("WARN: Not able to call Azure delete URL %v" + urlPair.SignedDeleteUrl)
			debug("%v", deleteErr)
		}
		return nil, err
	}

	// initialise fields of TaskRun not contained in json string m
	taskRun := TaskRun{
		QueueMessage:  qm,
		SignedURLPair: urlPair,
	}

	// now populate remaining json fields of TaskRun from json string m
	err = json.Unmarshal(m, &taskRun)
	if err != nil {
		debug("Not able to unmarshal json from base64 decoded MessageText '%v'", m)
		debug("%v", err)
		deleteErr := deleteFromAzure(urlPair.SignedDeleteUrl)
		if deleteErr != nil {
			debug("WARN: Not able to call Azure delete URL %v" + urlPair.SignedDeleteUrl)
			debug("%v", deleteErr)
		}
		return nil, err
	}

	return &taskRun, nil
}
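The nested strings.Replace inside detokeniseUri reflects the fact that url.QueryEscape encodes a space as "+", whereas the SignedDeleteUrl substitution needs the "%20" form; below is a small standalone illustration with a hypothetical PopReceipt value, not project code.

// Standalone illustration (hypothetical PopReceipt value, not project code):
// url.QueryEscape turns spaces into "+", so the extra Replace is needed to
// produce "%20" before substituting into the SignedDeleteUrl - otherwise the
// intermittent failures mentioned in the comments above can occur.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	popReceipt := "AgAAAAMAAAAAAAAA 1example" // hypothetical value containing a space
	escaped := url.QueryEscape(popReceipt)
	fixed := strings.Replace(escaped, "+", "%20", -1)
	fmt.Println(escaped) // AgAAAAMAAAAAAAAA+1example
	fmt.Println(fixed)   // AgAAAAMAAAAAAAAA%201example
}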