Example #1
// AddTask submits a new task
// method:		POST
// path:		/api/tasks
func (h *Handler) AddTask() martini.Handler {
	return func(w http.ResponseWriter, r *http.Request) {
		task := &registry.Task{}
		if err := json.NewDecoder(r.Body).Decode(task); err != nil {
			log.Errorf("Cannot decode json: %v", err)
			writeError(w, err)
			return
		}

		// generate task id
		randID, err := utils.Encode(6)
		if err != nil {
			writeError(w, err)
			return
		}

		task.ID = "task-" + randID
		task.CreateTime = time.Now().UnixNano()
		task.State = "TASK_WAITING"
		log.Debugf("Receive task: %v", task)

		err = h.core.AddTask(task.ID, task)
		if err != nil {
			writeError(w, err)
			return
		}

		writeResponse(w, http.StatusOK, task.ID)
	}
}
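For context, here is a minimal, hedged sketch of how such a handler could be wired into a martini application; NewHandler and the listen address are assumptions, not part of the example above.

func main() {
	m := martini.Classic()
	h := NewHandler() // hypothetical constructor for *Handler
	m.Post("/api/tasks", h.AddTask())
	m.RunOnAddr(":8080") // assumed listen address
}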
Example #2
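// InitBasicParams generates the job ID and initializes the job's
// bookkeeping fields, total task count, and splitter.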
func (job *Job) InitBasicParams() error {
	// generate job id
	randID, err := utils.Encode(6)
	if err != nil {
		return err
	}
	job.ID = randID
	job.CreateTime = time.Now().UnixNano()
	job.SLAOffers = make(map[string]string)
	job.UsedResources = make(map[string]*mesosproto.Resource)
	job.Health = Healthy
	job.Status = StatusRunning

	// init task
	if job.ContextDir != "" {
		job.TotalTaskLen = job.BuildNodeNumber()
	} else {
		job.TotalTaskLen = 0
	}
	for _, task := range job.Tasks {
		job.TotalTaskLen += task.Scale
	}

	// init splitter
	if job.SplitterType == FileSplitterType {
		job.Splitter = splitterImpl.NewFileSplitter()
	} else if job.SplitterType == LineSplitterType {
		job.Splitter = splitterImpl.NewLineSplitter()
	}
	return nil
}
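A hedged usage sketch for the method above, assuming a Job value built from a submitted spec; the literal fields and the Task element type are assumptions based on the examples here.

func submitJob() error { // hypothetical caller
	job := &Job{
		Name:         "demo-job",
		SplitterType: LineSplitterType,
		Tasks:        []*Task{{Scale: 3}}, // element type assumed
	}
	if err := job.InitBasicParams(); err != nil {
		return err
	}
	// job.ID, CreateTime, TotalTaskLen, Status and Splitter are now populated.
	return nil
}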
Example #3
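// addTask expands every task definition of the job into scaled task
// instances, mounts the input/workspace/output volumes, and registers
// each instance with the core.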
func (core *Core) addTask(job *registry.Job, scale int, inputs []string) {
	for _, task := range job.Tasks {
		randID, err := utils.Encode(6)
		if err != nil {
			log.Errorf("Error when generate id to task %s of job %v", task.ID, job.ID)
			continue
		}

		if task.Scale <= 0 {
			task.Scale = 1
		}

		if scale == 0 {
			scale = task.Scale
		}

		job.TotalTaskLen = scale
		for index := 1; index <= scale; index++ {
			// To avoid sharing the same port pointers across task instances,
			// build a new slice of ports for each instance
			var ports []*registry.Port
			for _, port := range task.Ports {
				ports = append(ports, &registry.Port{
					ContainerPort: port.ContainerPort,
					HostPort:      port.HostPort,
				})
			}

			taskInstance := &registry.Task{
				JobID:       job.ID,
				ID:          "task-" + job.ID + "-" + randID + "-" + strconv.Itoa(index),
				Name:        job.Name,
				DockerImage: job.Image,
				Cpus:        task.Cpus,
				Mem:         task.Mem,
				Disk:        task.Disk,
				Ports:       ports,
				Command:     task.Command,
				Volumes:     task.Volumes,
				Resources:   task.Resources,
				Attributes:  task.Attributes,
				CreateTime:  time.Now().UnixNano(),
				Type:        registry.TaskTypeTest,
				State:       "TASK_WAITING",
			}

			// mount the input path for this instance, if one was provided
			if index <= len(inputs) {
				taskInstance.Volumes = append(taskInstance.Volumes, &registry.Volume{
					HostPath:      fs.NormalizePath(inputs[index-1]),
					ContainerPath: "/input",
				})
			}

			// mount work directory
			taskInstance.Volumes = append(taskInstance.Volumes, &registry.Volume{
				HostPath:      fs.NormalizePath(job.WorkDirectory),
				ContainerPath: "/workspace",
			})

			// mount output
			taskInstance.Volumes = append(taskInstance.Volumes, &registry.Volume{
				HostPath:      fs.NormalizePath(job.OutputPath),
				ContainerPath: "/output",
			})

			// if the task was built from a Dockerfile,
			// add an image attribute to the task instance
			if job.ContextDir != "" {
				taskInstance.Attributes = append(taskInstance.Attributes, &mesosproto.Attribute{
					Name: proto.String("Image"),
					Text: &mesosproto.Value_Text{
						Value: proto.String(job.Image),
					},
				})
			}

			// Copy the arguments slice so task instances do not share
			// the same backing array
			var taskArguments []string
			for _, arg := range task.Arguments {
				taskArguments = append(taskArguments, arg)
			}
			taskInstance.Arguments = taskArguments

			if err := core.AddTask(taskInstance.ID, taskInstance); err != nil {
				taskInstance.State = "TASK_FAILED"
				log.Errorf("Error when adding task %v: %v", taskInstance.ID, err)
				continue
			}
			job.PushTask(taskInstance)
		}
	}
}
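As a final hedged sketch, the helper above would typically be called once per submitted job from elsewhere in the Core; the wrapper name below is an assumption. Passing scale as 0 lets each task definition's own Scale take effect, and inputs, when non-empty, is expected to supply one host path per generated instance.

// Hypothetical call site in the same package.
func (core *Core) submitJob(job *registry.Job, inputs []string) {
	// scale == 0 means "use each task definition's own Scale"
	core.addTask(job, 0, inputs)
}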