func TestTaskContextConcurrentLogging(t *testing.T) {
	t.Parallel()
	path := filepath.Join(os.TempDir(), slugid.Nice())
	context, control, err := NewTaskContext(path, TaskInfo{}, nil)
	nilOrPanic(err, "Failed to create context")

	wg := sync.WaitGroup{}
	wg.Add(5) // Logging from 5 goroutines below should trip the race detector if Log isn't goroutine-safe
	go func() { context.Log("Hello World 2"); wg.Done() }()
	go func() { context.Log("Hello World 1"); wg.Done() }()
	go func() { context.Log("Hello World 3 - Cheese"); wg.Done() }()
	go func() { context.Log("Hello World 4"); wg.Done() }()
	go func() { context.Log("Hello World 5"); wg.Done() }()
	wg.Wait()
	err = control.CloseLog()
	nilOrPanic(err, "Failed to close log file")

	reader, err := context.NewLogReader()
	nilOrPanic(err, "Failed to open log file")
	data, err := ioutil.ReadAll(reader)
	nilOrPanic(err, "Failed to read log file")

	if !strings.Contains(string(data), "Cheese") {
		panic("Couldn't find 'Cheese' in the log")
	}
	nilOrPanic(reader.Close(), "Failed to close log file")
	err = context.logStream.Remove()
	nilOrPanic(err, "Failed to remove logStream")
}
// New starts a livelog OS process using the executable specified, and returns
// a *LiveLog. The *LiveLog provides access to GetURL, which multiple
// consumers can use in parallel to tail the log, together with an
// io.WriteCloser to which the logs should be written. It is envisaged that
// the io.WriteCloser is passed on to the executing process.
//
// sslCert and sslKey should be used to specify the file location of a
// suitable certificate and key on the local filesystem that can be used
// for hosting the livelog service over https. If either is an empty string
// the livelog will resort to running over http transport instead.
//
// Please note the GetURL is served on the loopback interface. Transforming
// this localhost URL into a URL with a fully qualified hostname, e.g. using
// package github.com/taskcluster/stateless-dns-go/hostname, is beyond the
// scope of this library, since this package can be used independently of
// that one.
func New(liveLogExecutable, sslCert, sslKey string) (*LiveLog, error) {
	l := &LiveLog{
		secret:  slugid.Nice(),
		command: exec.Command(liveLogExecutable),
		sslCert: sslCert,
		sslKey:  sslKey,
	}
	l.command.Env = append(
		os.Environ(),
		"ACCESS_TOKEN="+l.secret,
		"SERVER_CRT_FILE="+l.sslCert,
		"SERVER_KEY_FILE="+l.sslKey,
	)
	err := l.command.Start()
	// TODO: we need to make sure that the livelog process we just started
	// doesn't simply exit, which can happen if the port is already in use!!!
	// Note, this is really bad, since another livelog will be using a
	// different secret. Also note we get a 0 exit code when the process
	// exits because another process was listening on the port(s).
	if err != nil {
		return nil, err
	}
	l.setRequestURLs()
	err = l.connectInputStream()
	// Note we can't wait for the GET port to be active before returning,
	// since livelog will only serve from that port once some content is
	// sent - so it is no good to execute waitForPortToBeActive(60023) here...
	// We would need to fix this in the livelog codebase, not here...
	return l, err
}
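// A minimal usage sketch for New, illustrative only: it assumes a `livelog`
// executable on PATH, and passes empty cert/key paths so the service runs
// over plain http. Imports are omitted, as in the surrounding snippets.
func exampleLiveLog() {
	ll, err := New("livelog", "", "")
	if err != nil {
		panic(fmt.Sprint("Failed to start livelog, error: ", err))
	}
	// Hand the input stream (io.WriteCloser) to the executing process, and
	// share the GetURL with consumers that want to tail the log in parallel.
	_ = ll
}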
func (a artifactTestCase) Test() {
	taskID := slugid.Nice()
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "Hello, client.")
	}))
	defer ts.Close()

	s3resp, _ := json.Marshal(queue.S3ArtifactResponse{
		PutURL: ts.URL,
	})
	resp := queue.PostArtifactResponse(s3resp)
	mockedQueue := &client.MockQueue{}
	for _, path := range a.Artifacts {
		mockedQueue.On(
			"CreateArtifact",
			taskID,
			"0",
			path,
			client.PostS3ArtifactRequest,
		).Return(&resp, nil)
	}

	a.Case.QueueMock = mockedQueue
	a.Case.TaskID = taskID
	a.Case.Test()
	mockedQueue.AssertExpectations(a.Case.TestStruct)
}
// CreateUser will create a new user with the given homeFolder, set the user
// as owner of the homeFolder, and assign the user membership of the given
// groups.
func CreateUser(homeFolder string, groups []*Group) (*User, error) {
	// Prepare arguments
	args := formatArgs(map[string]string{
		"-d": homeFolder,   // Set home folder
		"-c": "task user",  // Comment
		"-s": defaultShell, // Set default shell
	})
	args = append(args, "-M") // Don't create home, ignoring any global settings
	args = append(args, "-U") // Create primary user-group with same name
	if len(groups) > 0 {
		gids := []string{}
		for _, g := range groups {
			gids = append(gids, strconv.Itoa(g.gid))
		}
		args = append(args, "-G", strings.Join(gids, ","))
	}

	// Generate a random username
	name := slugid.Nice()
	args = append(args, name)

	// Run useradd command
	_, err := exec.Command(systemUserAdd, args...).Output()
	if err != nil {
		if e, ok := err.(*exec.ExitError); ok {
			return nil, fmt.Errorf(
				"Failed to create user with useradd, stderr: '%s'", string(e.Stderr),
			)
		}
		return nil, fmt.Errorf("Failed to run useradd, error: %s", err)
	}

	// Lookup user to get the uid
	u, err := user.Lookup(name)
	if err != nil {
		panic(fmt.Sprintf(
			"Failed to lookup newly created user: '******', error: %s",
			name, err,
		))
	}

	// Parse uid/gid
	uid, err := strconv.ParseUint(u.Uid, 10, 32)
	if err != nil {
		panic(fmt.Sprintf("user.Uid should be an integer on POSIX systems"))
	}
	gid, err := strconv.ParseUint(u.Gid, 10, 32)
	if err != nil {
		panic(fmt.Sprintf("user.Gid should be an integer on POSIX systems"))
	}
	debug("Created user with uid: %d, gid: %d, name: %s", uid, gid, name)

	// Set user as owner of home folder
	err = os.Chown(homeFolder, int(uid), int(gid))
	if err != nil {
		return nil, fmt.Errorf("Failed to chown homeFolder, error: %s", err)
	}

	return &User{uint32(uid), uint32(gid), name, homeFolder}, nil
}
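// A minimal usage sketch for CreateUser, mirroring TestSystem further below;
// the group name is an illustrative assumption and error handling is
// trimmed. Imports are omitted, as in the surrounding snippets.
func exampleCreateUser() {
	homeDir := filepath.Join(os.TempDir(), slugid.Nice())
	if err := os.MkdirAll(homeDir, 0777); err != nil {
		panic(err)
	}
	defer os.RemoveAll(homeDir)

	g, err := FindGroup("staff") // FindGroup as exercised in TestSystem; group name assumed
	if err != nil {
		panic(err)
	}
	u, err := CreateUser(homeDir, []*Group{g})
	if err != nil {
		panic(err)
	}
	defer u.Remove() // Remove as exercised in TestSystem
}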
func TestNon200HasErrorBody(t *testing.T) {
	test := func(t *testing.T, creds *tcclient.Credentials) *httptest.ResponseRecorder {

		// Test setup
		routes := Routes{
			ConnectionData: tcclient.ConnectionData{
				Authenticate: true,
				Credentials:  creds,
			},
		}
		taskId := slugid.Nice()

		req, err := http.NewRequest(
			"POST",
			"http://localhost:60024/queue/v1/task/"+taskId+"/define",
			bytes.NewBufferString(
				`{"comment": "Valid json so that we hit endpoint, but should not result in http 200"}`,
			),
		)
		if err != nil {
			log.Fatal(err)
		}
		res := httptest.NewRecorder()

		// Function to test
		routes.ServeHTTP(res, req)

		// Validate results
		return res

	}
	testWithPermCreds(t, test, 400)
	testWithTempCreds(t, test, 400)
}
// Instance will return an Instance of the image with imageID. If no such
// image exists in the cache, download() will be called to download it to a
// temporary filename.
//
// This method will insert the downloaded image into the cache, and ensures
// that it won't be downloaded twice if another invocation is already
// downloading an image with the same imageID.
//
// It is the responsibility of the caller to make sure that imageID is a
// string that uniquely identifies the image. Sane patterns include
// "url:<url>" or "taskId:<taskId>/<runId>/<artifact>". It is also the
// caller's responsibility to enforce any sort of access control.
func (m *Manager) Instance(imageID string, download Downloader) (*Instance, error) {
	m.m.Lock()

	// Get image from cache and insert it if not present
	img := m.images[imageID]
	if img == nil {
		imageDone := make(chan struct{})
		img = &image{
			imageID: imageID,
			folder:  filepath.Join(m.imageFolder, slugid.Nice()),
			done:    imageDone,
			manager: m,
		}
		m.images[imageID] = img
		// Start loading the image
		go img.loadImage(download, imageDone)
	}

	// Acquire the image, so we can release the lock without risking that the
	// image gets garbage collected.
	img.Acquire()
	m.m.Unlock() // Release the lock; we don't need it anymore

	// Wait for image to be done, then either return the error, or return an
	// instance of the image.
	<-img.done
	if img.err != nil {
		img.Release()
		return nil, img.err
	}
	return img.instance()
}
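// A usage sketch for Instance, combining helpers that appear elsewhere in
// these snippets (DownloadImage, DiskFile, Release); the image URL is an
// illustrative assumption. The instance must be released when no longer
// needed, so the garbage collector can dispose of the underlying image.
func exampleInstance(m *Manager) {
	url := "https://example.com/test-image.tar.zst"
	inst, err := m.Instance("url:"+url, DownloadImage(url))
	if err != nil {
		panic(fmt.Sprint("Failed to create instance, error: ", err))
	}
	defer inst.Release()
	fmt.Println("instance disk file:", inst.DiskFile())
}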
func newMockDisplay() io.ReadWriteCloser {
	// Create listener for randomly generated addr
	addr := slugid.Nice()
	l, err := mocknet.Listen(addr)
	if err != nil {
		// This shouldn't be possible
		panic(fmt.Sprintf("mocknet.Listen failed using random addr, error: %s", err))
	}

	// Create and start server
	s := rfb.NewServer(mockDisplayWidth, mockDisplayHeight)
	go s.Serve(l)

	// Dial up to server
	conn, err := mocknet.Dial(addr)
	if err != nil {
		// This shouldn't happen either
		panic(fmt.Sprintf("mocknet.Dial failed, error: %s", err))
	}

	// Handle display when we get a connection from the server
	go handleDisplay(<-s.Conns) // This works because Conns is buffered (size 16)

	// Stop listener, we'll create one for each mock display connection
	l.Close()

	return conn
}
func ensureEnvironment(t *testing.T) (*runtime.Environment, engines.Engine, plugins.Plugin) {
	tempPath := filepath.Join(os.TempDir(), slugid.Nice())
	tempStorage, err := runtime.NewTemporaryStorage(tempPath)
	if err != nil {
		t.Fatal(err)
	}

	environment := &runtime.Environment{
		TemporaryStorage: tempStorage,
	}
	engineProvider := engines.Engines()["mock"]
	engine, err := engineProvider.NewEngine(engines.EngineOptions{
		Environment: environment,
		Log:         logger.WithField("engine", "mock"),
	})
	if err != nil {
		t.Fatal(err.Error())
	}

	pluginOptions := plugins.PluginOptions{
		Environment: environment,
		Engine:      engine,
		Log:         logger.WithField("component", "Plugin Manager"),
	}

	pm, err := plugins.Plugins()["success"].NewPlugin(pluginOptions)
	if err != nil {
		t.Fatalf("Error creating task manager. Could not create plugin manager. %s", err)
	}

	return environment, engine, pm
}
// NewUserNetwork returns a Network implementation using the QEMU user-space
// network stack. This doesn't provide the same level of isolation, but the
// meta-data service should be sufficiently isolated.
func NewUserNetwork(socketFolder string) (*UserNetwork, error) {
	n := &UserNetwork{
		socketFile: filepath.Join(socketFolder, "meta-"+slugid.Nice()+".sock"),
	}
	n.server = &graceful.Server{
		Timeout: 35 * time.Second,
		Server: &http.Server{
			Addr:    metaDataIP + ":80",
			Handler: http.HandlerFunc(n.dispatchRequest),
		},
		NoSignalHandling: true,
	}

	// Start listening (we handle listener error as a special thing)
	listener, err := net.ListenUnix("unix", &net.UnixAddr{
		Name: n.socketFile,
		Net:  "unix",
	})
	if err != nil {
		return nil, fmt.Errorf("Failed to listen on %s error: %s", n.socketFile, err)
	}

	// Start serving
	serverDone := make(chan struct{})
	n.serverDone = serverDone
	go func(n *UserNetwork, done chan<- struct{}) {
		err := n.server.Serve(listener)
		close(done)
		if err != nil {
			panic(fmt.Sprint("Fatal: meta-data service listener failed, error: ", err))
		}
	}(n, serverDone)

	return n, nil
}
func taskWithPayload(payload string) TaskRun {
	return TaskRun{
		TaskID: slugid.Nice(),
		Definition: queue.TaskDefinitionResponse{
			Payload: json.RawMessage(payload),
		},
		logWriter: &bytes.Buffer{},
	}
}
// Download fetches the ArtifactContent to a file inside the downloads
// directory specified in the global config file. The filename is a random
// slugid, and the absolute path of the file is returned.
func (ac *ArtifactContent) Download() (string, error) {
	basename := slugid.Nice()
	file := filepath.Join(config.DownloadsDir, basename)
	signedURL, err := Queue.GetLatestArtifact_SignedURL(ac.TaskID, ac.Artifact, time.Minute*30)
	if err != nil {
		return "", err
	}
	return file, downloadURLToFile(signedURL.String(), file)
}
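// A usage sketch for Download; the task ID and artifact name are
// illustrative placeholders - in practice a real task with that artifact
// must exist for the signed URL to resolve. Imports omitted as elsewhere.
func exampleArtifactDownload() {
	ac := &ArtifactContent{
		TaskID:   slugid.Nice(),             // assumption: a real taskId in practice
		Artifact: "public/build/target.zip", // assumption: illustrative artifact name
	}
	file, err := ac.Download()
	if err != nil {
		panic(err)
	}
	defer os.Remove(file)
}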
func TestBuildImage(t *testing.T) {
	// Setup logging
	logger, _ := runtime.CreateLogger("info")
	log := logger.WithField("component", "qemu-build")

	inputImageFile, err := filepath.Abs("../../engines/qemu/test-image/tinycore-setup.tar.zst")
	if err != nil {
		panic(err)
	}
	outputFile := filepath.Join(os.TempDir(), slugid.Nice())
	defer os.Remove(outputFile)
	novnc := true
	cdrom := ""

	// Create ISO file to play with
	datadir := filepath.Join(os.TempDir(), slugid.Nice())
	defer os.RemoveAll(datadir)
	err = os.Mkdir(datadir, 0700)
	if err != nil {
		panic(err)
	}
	err = ioutil.WriteFile(filepath.Join(datadir, "setup.sh"),
		[]byte("#!/bin/sh\necho 'started';\nsudo poweroff;\n"), 0755)
	if err != nil {
		panic(err)
	}
	isofile := filepath.Join(os.TempDir(), slugid.Nice())
	defer os.Remove(isofile)
	err = exec.Command("genisoimage", "-vJrV", "DATA_VOLUME", "-input-charset", "utf-8", "-o", isofile, datadir).Run()
	if err != nil {
		panic(err)
	}

	err = buildImage(
		log, inputImageFile, outputFile,
		true, novnc, isofile, cdrom, 1,
	)
	if err != nil {
		panic(err)
	}
}
// instance returns a new instance of the image for use in a virtual machine.
// You must have called image.Acquire() first to prevent garbage collection.
func (img *image) instance() (*Instance, error) {
	// Create a copy of layer.qcow2
	diskFile := filepath.Join(img.folder, slugid.Nice()+".qcow2")
	err := copyFile(filepath.Join(img.folder, "layer.qcow2"), diskFile)
	if err != nil {
		return nil, fmt.Errorf("Failed to make copy of layer.qcow2, error: %s", err)
	}

	return &Instance{
		image:    img,
		diskFile: diskFile,
	}, nil
}
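// For reference, the acquire-before-instance contract enforced by
// Manager.Instance above is, in sketch form (a summary of the internal call
// sequence, not new API):
//
//	img.Acquire()     // take a reference so GC can't dispose the image
//	<-img.done        // wait for loadImage to finish
//	if img.err != nil {
//		img.Release() // drop the reference on failure
//	} else {
//		instance, err := img.instance()
//		// use instance; it holds the reference until Release()
//	}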
func (img *image) loadImage(download Downloader, done chan<- struct{}) {
	imageFile := filepath.Join(img.manager.imageFolder, slugid.Nice()+".tar.zst")

	// Create image folder
	err := os.Mkdir(img.folder, 0777)
	if err != nil {
		goto cleanup
	}

	// Download image to temporary file
	err = download(imageFile)
	if err != nil {
		goto cleanup
	}

	// Extract image and validate image
	img.machine, err = extractImage(imageFile, img.folder)
	if err != nil {
		goto cleanup
	}

	// Cleanup: reached on success, and via goto on any error above
cleanup:
	// Delete the temporary image file in all cases
	e := os.RemoveAll(imageFile)
	if e != nil {
		eventID := img.manager.sentry.CaptureError(e, nil) // TODO: Severity level warning
		img.manager.log.Warning("Failed to delete image file, err: ", e, " sentry eventId: ", eventID)
	}

	// If there was an err, set img.err and remove the image from the cache
	if err != nil {
		img.err = err
		// We should always remove a failed attempt from the cache
		img.manager.m.Lock()
		delete(img.manager.images, img.imageID)
		img.manager.m.Unlock()

		// Delete the image folder
		e := os.RemoveAll(img.folder)
		if e != nil {
			eventID := img.manager.sentry.CaptureError(e, nil) // TODO: Severity level warning
			img.manager.log.Warning("Failed to delete image folder, err: ", e, " sentry eventId: ", eventID)
		}
	} else {
		img.manager.gc.Register(img)
	}
	close(done)
}
// handlePoll handles GET /engine/v1/poll
func (s *MetaService) handlePoll(w http.ResponseWriter, r *http.Request) {
	if !forceMethod(w, r, http.MethodGet) {
		return
	}

	debug("GET /engine/v1/poll")
	select {
	case <-s.haltPolling:
		reply(w, http.StatusOK, Action{
			ID:   slugid.Nice(),
			Type: "none",
		})
	case <-time.After(PollTimeout):
		reply(w, http.StatusOK, Action{
			ID:   slugid.Nice(),
			Type: "none",
		})
	case action := <-s.actionOut:
		debug(" -> Sending action with id=%s", action.ID)
		if reply(w, http.StatusOK, action) != nil {
			debug("Failed to send action id=%s", action.ID)

			// Take the asyncRecord record out of the dictionary
			s.mPendingRecords.Lock()
			rec := s.pendingRecords[action.ID]
			delete(s.pendingRecords, action.ID)
			s.mPendingRecords.Unlock()

			// If nil, then the request is already being handled, and there is no need
			// to abort (presumably the action was received on the other side)
			if rec != nil {
				close(rec.Done)
			}
		}
	}
}
func (w *WritableDirectoryCache) Mount() error {
	// cache already there?
	if _, dirCacheExists := directoryCaches[w.CacheName]; dirCacheExists {
		// bump counter
		directoryCaches[w.CacheName].Hits++
		// move it into place...
		src := directoryCaches[w.CacheName].Location
		target := filepath.Join(taskContext.TaskDir, w.Directory)
		err := RenameCrossDevice(src, target)
		if err != nil {
			return fmt.Errorf("Not able to rename dir %v as %v: %v", src, target, err)
		}
		err = makeDirReadable(filepath.Join(taskContext.TaskDir, w.Directory))
		if err != nil {
			return fmt.Errorf("Not able to make cache %v writable to task user: %v", w.CacheName, err)
		}
		return nil
	}
	// new cache, let's initialise it...
	basename := slugid.Nice()
	file := filepath.Join(config.CachesDir, basename)
	directoryCaches[w.CacheName] = &Cache{
		Hits:     1,
		Created:  time.Now(),
		Location: file,
		Owner:    directoryCaches,
		Key:      w.CacheName,
	}
	// preloaded content?
	if w.Content != nil {
		c, err := w.Content.FSContent()
		if err != nil {
			return fmt.Errorf("Not able to retrieve FSContent: %v", err)
		}
		err = extract(c, w.Format, filepath.Join(taskContext.TaskDir, w.Directory))
		if err != nil {
			return err
		}
		return nil
	}
	// no preloaded content => just create dir in place
	err := os.MkdirAll(filepath.Join(taskContext.TaskDir, w.Directory), 0777)
	if err != nil {
		return fmt.Errorf("Not able to create dir: %v", err)
	}
	return nil
}
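// A usage sketch for Mount; field values are illustrative, and the global
// taskContext/config used by Mount are assumed to be initialized.
func exampleMount() error {
	w := &WritableDirectoryCache{
		CacheName: "build-cache", // assumption: any stable cache key
		Directory: "build",       // directory relative to the task directory
	}
	return w.Mount()
}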
// AttachHook sets up handler such that it gets called when a request arrives
// at the returned URL.
func (s *LocalServer) AttachHook(handler http.Handler) (url string, detach func()) {
	s.m.Lock()
	defer s.m.Unlock()

	// Add hook
	id := slugid.Nice()
	s.hooks[id] = handler

	// Create url and detach function
	url = s.url + id + "/"
	detach = func() {
		s.m.Lock()
		defer s.m.Unlock()
		delete(s.hooks, id)
	}
	return
}
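// A usage sketch for AttachHook, assuming an initialized *LocalServer:
// attach a handler, hand out the returned URL, and detach when done.
func exampleAttachHook(s *LocalServer) {
	url, detach := s.AttachHook(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "webhook received")
	}))
	defer detach()
	fmt.Println("deliver webhooks to:", url)
}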
// asyncRequest will return action to the current (or next) request to
// GET /engine/v1/poll, then it'll wait for a POST request to /engine/v1/reply
// with matching id in querystring and forward this request to cb.
func (s *MetaService) asyncRequest(action Action, cb asyncCallback) {
	// Ensure the action has a unique id
	action.ID = slugid.Nice()

	// Create channel to track when the callback has been called
	isDone := make(chan struct{})
	rec := asyncRecord{
		Callback: cb,
		Done:     isDone,
	}

	// Insert asyncRecord into the pending set
	s.mPendingRecords.Lock()
	s.pendingRecords[action.ID] = &rec
	s.mPendingRecords.Unlock()

	// Send action
	select {
	case <-time.After(30 * time.Second):
		// If sending times out we delete the record
		s.mPendingRecords.Lock()
		delete(s.pendingRecords, action.ID)
		s.mPendingRecords.Unlock()
		return
	case s.actionOut <- action:
	}

	// Wait for async callback to have been called
	select {
	case <-time.After(30 * time.Second):
		// if we timeout, we take the async record
		s.mPendingRecords.Lock()
		rec := s.pendingRecords[action.ID]
		delete(s.pendingRecords, action.ID)
		s.mPendingRecords.Unlock()

		// if there was a record, we've removed it and we're done...
		if rec != nil {
			return
		}
		// if there was no record, we have to wait for isDone, as a request must
		// be in the process of executing the callback
		<-isDone
	case <-isDone:
	}
}
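// A sketch of the guest-side counterpart to asyncRequest, following the
// protocol described above: poll for an action, then reply with the matching
// id in the querystring. The base URL and the JSON shape of Action are
// assumptions beyond what these snippets show.
func pollOnce(base string) error {
	res, err := http.Get(base + "/engine/v1/poll")
	if err != nil {
		return err
	}
	defer res.Body.Close()

	var action Action
	if err := json.NewDecoder(res.Body).Decode(&action); err != nil {
		return err
	}
	if action.Type == "none" {
		return nil // idle poll, nothing to do
	}

	// Handle the action here, then reply with the matching id
	reply, err := http.Post(base+"/engine/v1/reply?id="+action.ID, "application/json", nil)
	if err != nil {
		return err
	}
	return reply.Body.Close()
}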
func TestDownloadImageOK(t *testing.T) {
	// Setup a testserver we can test against
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("hello world"))
	}))
	defer s.Close()

	// Create temporary file
	targetFile := filepath.Join(os.TempDir(), slugid.Nice())
	defer os.Remove(targetFile)

	// Download test url to the target file
	err := DownloadImage(s.URL)(targetFile)
	nilOrPanic(err, "Failed to download from testserver")

	result, err := ioutil.ReadFile(targetFile)
	nilOrPanic(err, "Failed to read targetFile, error: ", err)
	text := string(result)
	assert(text == "hello world", "Expected hello world, got ", text)
}
func setupArtifactTest(name string, artifactResp queue.PostArtifactRequest) (*TaskContext, *client.MockQueue) {
	resp := queue.PostArtifactResponse(artifactResp)
	taskID := slugid.Nice()
	context := &TaskContext{
		TaskInfo: TaskInfo{
			TaskID: taskID,
			RunID:  0,
		},
	}
	controller := &TaskContextController{context}
	mockedQueue := &client.MockQueue{}
	mockedQueue.On(
		"CreateArtifact",
		taskID,
		"0",
		name,
		client.PostAnyArtifactRequest,
	).Return(&resp, nil)
	controller.SetQueueClient(mockedQueue)
	return context, mockedQueue
}
func TestDownloadImageRetry(t *testing.T) {
	// Setup a testserver we can test against
	count := 0
	m := sync.Mutex{}
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		m.Lock()
		defer m.Unlock()
		count++
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("hello world"))
	}))
	defer s.Close()

	// Create temporary file
	targetFile := filepath.Join(os.TempDir(), slugid.Nice())
	defer os.Remove(targetFile)

	// Download test url to the target file
	err := DownloadImage(s.URL)(targetFile)
	assert(err != nil, "Expected an error")
	assert(count == 7, "Expected 7 attempts, got: ", count)
}
func TestTaskContextLogging(t *testing.T) {
	t.Parallel()
	path := filepath.Join(os.TempDir(), slugid.Nice())
	context, control, err := NewTaskContext(path, TaskInfo{}, nil)
	nilOrPanic(err, "Failed to create context")

	context.Log("Hello World")
	err = control.CloseLog()
	nilOrPanic(err, "Failed to close log file")

	reader, err := context.NewLogReader()
	nilOrPanic(err, "Failed to open log file")
	data, err := ioutil.ReadAll(reader)
	nilOrPanic(err, "Failed to read log file")

	if !strings.Contains(string(data), "Hello World") {
		panic("Couldn't find 'Hello World' in the log")
	}
	nilOrPanic(reader.Close(), "Failed to close log file")
	err = context.logStream.Remove()
	nilOrPanic(err, "Failed to remove logStream")
}
func (u *user) create(groups []string) error {
	var err error
	defer func() {
		if err != nil {
			u.delete()
		}
	}()

	uid, err := u.getMaxUID()
	if err != nil {
		return err
	}

	name := "worker-" + slugid.Nice()
	userPath := path.Join("/Users", name)

	err = u.d.create(userPath)
	if err != nil {
		return err
	}

	// We set the uid, then check that it is unique; if not, we
	// increment and try again
	duplicated := true
	for duplicated {
		uid++
		strUID := strconv.Itoa(uid)

		// set user uid
		err = u.d.create(userPath, "uid", strUID)
		if err != nil {
			return err
		}

		// check if uid has been used for another user
		duplicated, err = u.isDuplicatedID("uid", uid)
		if err != nil {
			return err
		}
	}

	if len(groups) > 0 {
		var primaryGroupID string
		primaryGroupID, err = u.d.read("/Groups/"+groups[0], "PrimaryGroupID")
		if err != nil {
			return err
		}

		err = u.d.create(userPath, "PrimaryGroupID", primaryGroupID)
		if err != nil {
			return err
		}

		for _, group := range groups[1:] {
			if err = u.d.append("/Groups/"+group, "GroupMembership", name); err != nil {
				return err
			}
			u.supplementaryGroups = append(u.supplementaryGroups, group)
		}
	}

	err = u.d.create(userPath, "NFSHomeDirectory", userPath)
	if err != nil {
		return err
	}

	err = os.MkdirAll(userPath, 0700)
	if err != nil {
		return err
	}

	err = os.Chown(userPath, uid, 0)
	if err != nil {
		return err
	}

	u.name = name

	return nil
}
func TestSystem(t *testing.T) {
	// Setup temporary home directory
	homeDir := filepath.Join(os.TempDir(), slugid.Nice())
	require.NoError(t, os.MkdirAll(homeDir, 0777))
	defer os.RemoveAll(homeDir)

	var u *User
	var err error

	t.Run("CreateUser", func(t *testing.T) {
		u, err = CreateUser(homeDir, nil)
		require.NoError(t, err)
	})

	t.Run("FindGroup", func(t *testing.T) {
		g, err := FindGroup(testGroup)
		require.NoError(t, err)
		require.NotNil(t, g)
	})

	t.Run("StartProcess True", func(t *testing.T) {
		p, err := StartProcess(ProcessOptions{
			Arguments: testTrue,
		})
		require.NoError(t, err)
		require.True(t, p.Wait())
	})

	t.Run("StartProcess False", func(t *testing.T) {
		p, err := StartProcess(ProcessOptions{
			Arguments: testFalse,
		})
		require.NoError(t, err)
		require.False(t, p.Wait())
	})

	t.Run("StartProcess True with TTY", func(t *testing.T) {
		p, err := StartProcess(ProcessOptions{
			Arguments: testTrue,
			TTY:       true,
		})
		require.NoError(t, err)
		require.True(t, p.Wait())
	})

	t.Run("StartProcess False with TTY", func(t *testing.T) {
		p, err := StartProcess(ProcessOptions{
			Arguments: testFalse,
			TTY:       true,
		})
		require.NoError(t, err)
		require.False(t, p.Wait())
	})

	t.Run("StartProcess Cat", func(t *testing.T) {
		var out bytes.Buffer
		p, err := StartProcess(ProcessOptions{
			Arguments: testCat,
			Stdin:     ioutil.NopCloser(bytes.NewBufferString("hello-world")),
			Stdout:    ioext.WriteNopCloser(&out),
		})
		require.NoError(t, err)
		require.True(t, p.Wait())
		require.Equal(t, "hello-world", out.String())
	})

	t.Run("StartProcess Cat with TTY", func(t *testing.T) {
		p, err := StartProcess(ProcessOptions{
			Arguments: testCat,
			TTY:       true,
			Stdin:     ioutil.NopCloser(bytes.NewBufferString("hello-world")),
			// We can't reliably read output, as we kill the process when stdin
			// is closed (EOF)
		})
		require.NoError(t, err)
		assert.False(t, p.Wait())
	})

	t.Run("StartProcess Print Dir", func(t *testing.T) {
		var out bytes.Buffer
		p, err := StartProcess(ProcessOptions{
			Arguments:     testPrintDir,
			Stdout:        ioext.WriteNopCloser(&out),
			WorkingFolder: homeDir,
		})
		require.NoError(t, err)
		require.True(t, p.Wait())
		require.Contains(t, out.String(), homeDir)
	})

	t.Run("StartProcess TTY Print Dir", func(t *testing.T) {
		var out bytes.Buffer
		p, err := StartProcess(ProcessOptions{
			Arguments:     testPrintDir,
			Stdout:        ioext.WriteNopCloser(&out),
			WorkingFolder: homeDir,
			TTY:           true,
		})
		require.NoError(t, err)
		require.True(t, p.Wait())
		require.Contains(t, out.String(), homeDir)
	})

	t.Run("StartProcess Owner and Print Dir", func(t *testing.T) {
		var out bytes.Buffer
		p, err := StartProcess(ProcessOptions{
			Arguments: testPrintDir,
			Stdout:    ioext.WriteNopCloser(&out),
			Owner:     u,
		})
		require.NoError(t, err)
		require.True(t, p.Wait())
		require.Contains(t, out.String(), homeDir)
	})

	t.Run("StartProcess TTY, Owner and Print Dir", func(t *testing.T) {
		var out bytes.Buffer
		p, err := StartProcess(ProcessOptions{
			Arguments: testPrintDir,
			Stdout:    ioext.WriteNopCloser(&out),
			Owner:     u,
			TTY:       true,
		})
		require.NoError(t, err)
		require.True(t, p.Wait())
		require.Contains(t, out.String(), homeDir)
	})

	t.Run("StartProcess Owner and True", func(t *testing.T) {
		p, err := StartProcess(ProcessOptions{
			Arguments: testTrue,
			Owner:     u,
		})
		require.NoError(t, err)
		require.True(t, p.Wait())
	})

	t.Run("StartProcess TTY, Owner and True", func(t *testing.T) {
		p, err := StartProcess(ProcessOptions{
			Arguments: testTrue,
			Owner:     u,
			TTY:       true,
		})
		require.NoError(t, err)
		require.True(t, p.Wait())
	})

	t.Run("StartProcess Owner and False", func(t *testing.T) {
		p, err := StartProcess(ProcessOptions{
			Arguments: testFalse,
			Owner:     u,
		})
		require.NoError(t, err)
		require.False(t, p.Wait())
	})

	t.Run("StartProcess TTY, Owner and False", func(t *testing.T) {
		p, err := StartProcess(ProcessOptions{
			Arguments: testFalse,
			Owner:     u,
			TTY:       true,
		})
		require.NoError(t, err)
		require.False(t, p.Wait())
	})

	t.Run("StartProcess Kill", func(t *testing.T) {
		p, err := StartProcess(ProcessOptions{
			Arguments: testSleep,
		})
		require.NoError(t, err)
		var waited atomics.Bool
		done := make(chan bool)
		go func() {
			p.Wait()
			done <- waited.Get()
		}()
		time.Sleep(100 * time.Millisecond)
		waited.Set(true)
		p.Kill()
		require.False(t, p.Wait())
		require.True(t, <-done, "p.Wait was done before p.Kill() was called!")
	})

	t.Run("KillByOwner", func(t *testing.T) {
		p, err := StartProcess(ProcessOptions{
			Arguments: testSleep,
			Owner:     u,
		})
		require.NoError(t, err)
		var waited atomics.Bool
		done := make(chan bool)
		go func() {
			p.Wait()
			done <- waited.Get()
		}()
		time.Sleep(100 * time.Millisecond)
		waited.Set(true)
		require.NoError(t, KillByOwner(u))
		require.True(t, <-done, "p.Wait was done before KillByOwner was called!")
	})

	t.Run("user.Remove", func(t *testing.T) {
		u.Remove()
	})
}
func TestImageManager(t *testing.T) {
	fmt.Println(" - Setup environment needed to test")
	gc := &gc.GarbageCollector{}
	log := logrus.StandardLogger()
	sentry, _ := raven.New("")
	imageFolder := filepath.Join("/tmp", slugid.Nice())

	fmt.Println(" - Create manager")
	manager, err := NewManager(imageFolder, gc, log.WithField("subsystem", "image-manager"), sentry)
	nilOrPanic(err, "Failed to create image manager")

	fmt.Println(" - Test parallel download")
	// Check that download can return an error, and that we won't download
	// twice if a second call arrives before the first returns...
	downloadError := errors.New("test error")
	var err1 error
	wg := sync.WaitGroup{}
	wg.Add(2)
	go func() {
		_, err1 = manager.Instance("url:test-image-1", func(target string) error {
			time.Sleep(100 * time.Millisecond) // Sleep giving the second call time
			return downloadError
		})
		wg.Done()
	}()
	time.Sleep(50 * time.Millisecond) // Give the first call time to start downloading
	instance, err2 := manager.Instance("url:test-image-1", func(target string) error {
		panic("We shouldn't get here, as the previous download haven't returned")
	})
	wg.Done()
	wg.Wait()
	assert(err1 == err2, "Expected the same errors: ", err1, err2)
	assert(downloadError == err1, "Expected the downloadError: ", err1)
	assert(instance == nil, "Expected instance to be nil, when we have an error")

	fmt.Println(" - Test instantiation of image")
	instance, err = manager.Instance("url:test-image-1", func(target string) error {
		return copyFile(testImageFile, target)
	})
	nilOrPanic(err, "Failed to loadImage")
	assert(instance != nil, "Expected an instance")

	fmt.Println(" - Get the diskImage path so we can check it gets deleted")
	diskImage := instance.DiskFile()

	fmt.Println(" - Inspect file for sanity check: ", diskImage)
	info := inspectImageFile(diskImage, imageQCOW2Format)
	assert(info != nil, "Expected a qcow2 file")
	assert(info.Format == formatQCOW2)
	assert(!info.DirtyFlag)
	assert(info.BackingFile != "", "Missing backing file in qcow2")

	fmt.Println(" - Check that backing file exists")
	backingFile := filepath.Join(filepath.Dir(diskImage), info.BackingFile)
	_, err = os.Lstat(backingFile)
	nilOrPanic(err, "backingFile missing")

	fmt.Println(" - Garbage collect and test that image is still there")
	nilOrPanic(gc.CollectAll(), "gc.CollectAll() failed")
	_, err = os.Lstat(backingFile)
	nilOrPanic(err, "backingFile missing after GC")
	info = inspectImageFile(diskImage, imageQCOW2Format)
	assert(info != nil, "diskImage for instance deleted after GC")

	fmt.Println(" - Make a new instance")
	instance2, err := manager.Instance("url:test-image-1", func(target string) error {
		panic("We shouldn't get here, as it is currently in the cache")
	})
	nilOrPanic(err, "Failed to create new instance")
	diskImage2 := instance2.DiskFile()
	assert(diskImage2 != diskImage, "Expected a new disk image")
	info = inspectImageFile(diskImage2, imageQCOW2Format)
	assert(info != nil, "diskImage2 missing initially")

	fmt.Println(" - Release the first instance")
	instance.Release()
	_, err = os.Lstat(diskImage)
	assert(os.IsNotExist(err), "first instance diskImage shouldn't exist!")
	info = inspectImageFile(diskImage2, imageQCOW2Format)
	assert(info != nil, "diskImage2 missing after first instance release")

	fmt.Println(" - Garbage collect and test that image is still there")
	nilOrPanic(gc.CollectAll(), "gc.CollectAll() failed")
	_, err = os.Lstat(backingFile)
	nilOrPanic(err, "backingFile missing after second GC")
	_, err = os.Lstat(diskImage)
	assert(os.IsNotExist(err), "first instance diskImage shouldn't exist!")
	info = inspectImageFile(diskImage2, imageQCOW2Format)
	assert(info != nil, "diskImage2 missing after first instance release")

	fmt.Println(" - Release the second instance")
	instance2.Release()
	_, err = os.Lstat(diskImage2)
	assert(os.IsNotExist(err), "second instance diskImage shouldn't exist!")
	_, err = os.Lstat(backingFile)
	nilOrPanic(err, "backingFile missing after release, this shouldn't be...")

	fmt.Println(" - Garbage collect everything") // this should dispose the image
	nilOrPanic(gc.CollectAll(), "gc.CollectAll() failed")
	_, err = os.Lstat(backingFile)
	assert(os.IsNotExist(err), "Expected backingFile to be deleted after GC, file: ", backingFile)

	fmt.Println(" - Check that we can indeed reload the image")
	_, err = manager.Instance("url:test-image-1", func(target string) error {
		return downloadError
	})
	assert(err == downloadError, "Expected a downloadError", err)
}
func (s *temporaryFolder) NewFilePath() string {
	return filepath.Join(s.path, slugid.Nice())
}
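// A usage sketch for NewFilePath: it only allocates a fresh, unique path
// inside the temporary folder - creating and removing the file is up to the
// caller. The receiver name below is illustrative:
//
//	path := tmp.NewFilePath() // e.g. <tempPath>/<slugid>, not yet created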
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/taskcluster/httpbackoff"
	"github.com/taskcluster/slugid-go/slugid"
	"github.com/taskcluster/taskcluster-client-go"
	"github.com/taskcluster/taskcluster-client-go/queue"
	"github.com/taskcluster/taskcluster-worker/runtime"
	"github.com/taskcluster/taskcluster-worker/runtime/client"
)

const ProvisionerID = "dummy-provisioner"

var WorkerType = (fmt.Sprintf("dummy-type-%s", slugid.Nice()))[0:22]
var WorkerID = fmt.Sprintf("dummy-worker-%s", slugid.Nice())

func TestRetrievePollTaskUrls(t *testing.T) {
	logger, _ := runtime.CreateLogger("")
	mockedQueue := &client.MockQueue{}
	service := queueService{
		client:           mockedQueue,
		provisionerID:    ProvisionerID,
		workerType:       WorkerType,
		log:              logger.WithField("component", "Queue Service"),
		expirationOffset: 300,
	}
	mockedQueue.On(
		"PollTaskUrls",
		ProvisionerID,
func (cmd) Execute(arguments map[string]interface{}) bool {
	// Read arguments
	imageFile := arguments["<image>"].(string)
	command := arguments["<command>"].([]string)
	vnc := arguments["--vnc"].(bool)

	// Create temporary storage and environment
	storage, err := runtime.NewTemporaryStorage(os.TempDir())
	if err != nil {
		panic("Failed to create TemporaryStorage")
	}
	environment := &runtime.Environment{
		TemporaryStorage: storage,
	}

	// Create a temporary folder
	tempFolder := filepath.Join("/tmp", slugid.Nice())
	if err = os.Mkdir(tempFolder, 0777); err != nil {
		log.Fatal("Failed to create temporary folder in /tmp, error: ", err)
	}

	// Create the necessary runtime setup
	gc := &gc.GarbageCollector{}
	logger, _ := runtime.CreateLogger("info")
	log := logger.WithField("component", "qemu-run")

	// Create image manager
	log.Info("Creating image manager")
	manager, err := image.NewManager(filepath.Join(tempFolder, "/images/"), gc, logger.WithField("component", "image-manager"), nil)
	if err != nil {
		log.Fatal("Failed to create image manager", err)
	}

	// Get an instance of the image
	log.Info("Creating instance of image")
	image, err := manager.Instance("image", func(target string) error {
		return cp.CopyFile(target, imageFile)
	})
	if err != nil {
		log.Fatal("Failed to create instance of image, error: ", err)
	}

	// Setup a user-space network
	log.Info("Creating user-space network")
	net, err := network.NewUserNetwork(tempFolder)
	if err != nil {
		log.Fatal("Failed to create user-space network, error: ", err)
	}

	// Create virtual machine
	log.Info("Creating virtual machine")
	vm, err := vm.NewVirtualMachine(
		image.Machine().Options(), image, net, tempFolder,
		"", "", logger.WithField("component", "vm"),
	)
	if err != nil {
		log.Fatal("Failed to create virtual-machine, error: ", err)
	}

	// Create meta-data service
	log.Info("Creating meta-data service")
	var shellServer *interactive.ShellServer
	var displayServer *interactive.DisplayServer
	ms := metaservice.New(command, make(map[string]string), os.Stdout, func(result bool) {
		fmt.Println("### Task Completed, result = ", result)
		shellServer.WaitAndClose()
		displayServer.Abort()
		vm.Kill()
	}, environment)

	// Setup http handler for network
	vm.SetHTTPHandler(ms)

	// Create ShellServer
	shellServer = interactive.NewShellServer(
		ms.ExecShell, log.WithField("component", "shell-server"),
	)

	// Create displayServer
	displayServer = interactive.NewDisplayServer(
		&socketDisplayProvider{socket: vm.VNCSocket()},
		log.WithField("component", "display-server"),
	)

	interactiveHandler := http.NewServeMux()
	interactiveHandler.Handle("/shell/", shellServer)
	interactiveHandler.Handle("/display/", displayServer)
	interactiveServer := graceful.Server{
		Timeout: 30 * time.Second,
		Server: &http.Server{
			Addr:    "localhost:8080",
			Handler: interactiveHandler,
		},
		NoSignalHandling: true,
	}
	go interactiveServer.ListenAndServe()

	// Start the virtual machine
	log.Info("Start the virtual machine")
	vm.Start()

	// Start vncviewer
	done := make(chan struct{})
	if vnc {
		go StartVNCViewer(vm.VNCSocket(), done)
	}

	// Wait for SIGINT/SIGKILL or vm.Done
	c := make(chan os.Signal, 2)
	signal.Notify(c, os.Interrupt, os.Kill) // This pattern leaks, acceptable here
	select {
	case <-c:
		signal.Stop(c)
		fmt.Println("### Terminating QEMU")
		vm.Kill()
	case <-vm.Done:
		fmt.Println("### QEMU terminated")
	}
	close(done)

	// Ensure that QEMU has terminated before we continue
	<-vm.Done
	interactiveServer.Stop(100 * time.Millisecond)

	// Clean up anything left in the garbage collector
	gc.CollectAll()
	return true
}
func TestTaskManagerRunTask(t *testing.T) {
	resolved := false
	var serverURL string
	var handler = func(w http.ResponseWriter, r *http.Request) {
		if strings.Contains(r.URL.Path, "/artifacts/public/logs/live_backing.log") {
			json.NewEncoder(w).Encode(&queue.S3ArtifactResponse{
				PutURL: serverURL,
			})
			return
		}

		if strings.Contains(r.URL.Path, "/artifacts/public/logs/live.log") {
			json.NewEncoder(w).Encode(&queue.RedirectArtifactResponse{})
			return
		}

		if strings.Contains(r.URL.Path, "/task/abc/runs/1/completed") {
			resolved = true
			w.Header().Set("Content-Type", "application/json; charset=UTF-8")
			json.NewEncoder(w).Encode(&queue.TaskStatusResponse{})
		}
	}

	s := httptest.NewServer(http.HandlerFunc(handler))
	serverURL = s.URL
	defer s.Close()

	tempPath := filepath.Join(os.TempDir(), slugid.Nice())
	tempStorage, err := runtime.NewTemporaryStorage(tempPath)
	if err != nil {
		t.Fatal(err)
	}

	localServer, err := webhookserver.NewLocalServer(
		[]byte{127, 0, 0, 1}, 60000,
		"", 0,
		"example.com",
		"",
		"",
		"",
		10*time.Minute,
	)
	if err != nil {
		t.Error(err)
	}

	gc := &gc.GarbageCollector{}
	environment := &runtime.Environment{
		GarbageCollector: gc,
		TemporaryStorage: tempStorage,
		WebHookServer:    localServer,
	}
	engineProvider := engines.Engines()["mock"]
	engine, err := engineProvider.NewEngine(engines.EngineOptions{
		Environment: environment,
		Log:         logger.WithField("engine", "mock"),
	})
	if err != nil {
		t.Fatal(err.Error())
	}

	cfg := &configType{
		QueueBaseURL: serverURL,
	}

	tm, err := newTaskManager(cfg, engine, MockPlugin{}, environment, logger.WithField("test", "TestTaskManagerRunTask"), gc)
	if err != nil {
		t.Fatal(err)
	}

	claim := &taskClaim{
		taskID: "abc",
		runID:  1,
		taskClaim: &queue.TaskClaimResponse{
			Credentials: struct {
				AccessToken string `json:"accessToken"`
				Certificate string `json:"certificate"`
				ClientID    string `json:"clientId"`
			}{
				AccessToken: "123",
				ClientID:    "abc",
				Certificate: "",
			},
			TakenUntil: tcclient.Time(time.Now().Add(time.Minute * 5)),
		},
		definition: &queue.TaskDefinitionResponse{
			Payload: []byte(`{"delay": 1,"function": "write-log","argument": "Hello World"}`),
		},
	}
	tm.run(claim)
	assert.True(t, resolved, "Task was not resolved")
}
func TestWorkerShutdown(t *testing.T) {
	var resCount int32
	var serverURL string

	var handler = func(w http.ResponseWriter, r *http.Request) {
		if strings.Contains(r.URL.Path, "/artifacts/public/logs/live_backing.log") {
			json.NewEncoder(w).Encode(&queue.S3ArtifactResponse{
				PutURL: serverURL,
			})
			return
		}

		if strings.Contains(r.URL.Path, "/artifacts/public/logs/live.log") {
			json.NewEncoder(w).Encode(&queue.RedirectArtifactResponse{})
			return
		}

		if strings.Contains(r.URL.Path, "exception") {
			var exception queue.TaskExceptionRequest
			err := json.NewDecoder(r.Body).Decode(&exception)
			// Ignore errors for now
			if err != nil {
				return
			}

			assert.Equal(t, "worker-shutdown", exception.Reason)
			atomic.AddInt32(&resCount, 1)

			w.Header().Set("Content-Type", "application/json; charset=UTF-8")
			json.NewEncoder(w).Encode(&queue.TaskStatusResponse{})
		}
	}

	s := httptest.NewServer(http.HandlerFunc(handler))
	serverURL = s.URL
	defer s.Close()

	tempPath := filepath.Join(os.TempDir(), slugid.Nice())
	tempStorage, err := runtime.NewTemporaryStorage(tempPath)
	if err != nil {
		t.Fatal(err)
	}

	localServer, err := webhookserver.NewLocalServer(
		[]byte{127, 0, 0, 1}, 60000,
		"", 0,
		"example.com",
		"",
		"",
		"",
		10*time.Minute,
	)
	if err != nil {
		t.Error(err)
	}

	gc := &gc.GarbageCollector{}
	environment := &runtime.Environment{
		GarbageCollector: gc,
		TemporaryStorage: tempStorage,
		WebHookServer:    localServer,
	}
	engineProvider := engines.Engines()["mock"]
	engine, err := engineProvider.NewEngine(engines.EngineOptions{
		Environment: environment,
		Log:         logger.WithField("engine", "mock"),
	})
	if err != nil {
		t.Fatal(err.Error())
	}

	cfg := &configType{
		QueueBaseURL: serverURL,
	}
	tm, err := newTaskManager(cfg, engine, MockPlugin{}, environment, logger.WithField("test", "TestRunTask"), gc)
	if err != nil {
		t.Fatal(err)
	}

	claims := []*taskClaim{
		&taskClaim{
			taskID: "abc",
			runID:  1,
			definition: &queue.TaskDefinitionResponse{
				Payload: []byte(`{"delay": 5000,"function": "write-log","argument": "Hello World"}`),
			},
			taskClaim: &queue.TaskClaimResponse{
				Credentials: struct {
					AccessToken string `json:"accessToken"`
					Certificate string `json:"certificate"`
					ClientID    string `json:"clientId"`
				}{
					AccessToken: "123",
					ClientID:    "abc",
					Certificate: "",
				},
				TakenUntil: tcclient.Time(time.Now().Add(time.Minute * 5)),
			},
		},
		&taskClaim{
			taskID: "def",
			runID:  0,
			definition: &queue.TaskDefinitionResponse{
				Payload: []byte(`{"delay": 5000,"function": "write-log","argument": "Hello World"}`),
			},
			taskClaim: &queue.TaskClaimResponse{
				Credentials: struct {
					AccessToken string `json:"accessToken"`
					Certificate string `json:"certificate"`
					ClientID    string `json:"clientId"`
				}{
					AccessToken: "123",
					ClientID:    "abc",
					Certificate: "",
				},
				TakenUntil: tcclient.Time(time.Now().Add(time.Minute * 5)),
			},
		},
	}

	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		for _, c := range claims {
			go func(claim *taskClaim) {
				tm.run(claim)
				wg.Done()
			}(c)
		}
	}()

	time.Sleep(500 * time.Millisecond)
	assert.Equal(t, 2, len(tm.RunningTasks()))
	close(tm.done)
	tm.Stop()

	wg.Wait()
	assert.Equal(t, 0, len(tm.RunningTasks()))
	assert.Equal(t, int32(2), atomic.LoadInt32(&resCount))
}