Example #1
// validateJob validates a Job and task drivers and returns an error if there is
// a validation problem or if the Job is of a type a user is not allowed to
// submit.
func validateJob(job *structs.Job) error {
	validationErrors := new(multierror.Error)
	if err := job.Validate(); err != nil {
		multierror.Append(validationErrors, err)
	}

	// Validate the driver configurations.
	for _, tg := range job.TaskGroups {
		for _, task := range tg.Tasks {
			d, err := driver.NewDriver(
				task.Driver,
				driver.NewEmptyDriverContext(),
			)
			if err != nil {
				msg := "failed to create driver for task %q in group %q for validation: %v"
				multierror.Append(validationErrors, fmt.Errorf(msg, task.Name, tg.Name, err))
				continue
			}

			if err := d.Validate(task.Config); err != nil {
				formatted := fmt.Errorf("group %q -> task %q -> config: %v", tg.Name, task.Name, err)
				multierror.Append(validationErrors, formatted)
			}
		}
	}

	if job.Type == structs.JobTypeCore {
		multierror.Append(validationErrors, fmt.Errorf("job type cannot be core"))
	}

	return validationErrors.ErrorOrNil()
}
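
A minimal sketch of how validateJob might be driven from the same package (the function is unexported, so the caller has to live alongside it). The wrapper name and the job literal are hypothetical; only fields and constants that already appear in the example above are used.

// Hypothetical helper: build an intentionally invalid job and collect the
// validation errors. The core job type is rejected by validateJob above.
func exampleValidateJob() error {
	job := &structs.Job{
		ID:   "example",
		Name: "example",
		Type: structs.JobTypeCore, // deliberately invalid
	}
	return validateJob(job) // non-nil: at least the job-type check fails
}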
Example #2
// UpsertJob is used to register a job or update a job definition
func (s *StateStore) UpsertJob(index uint64, job *structs.Job) error {
	txn := s.db.Txn(true)
	defer txn.Abort()

	// Check if the job already exists
	existing, err := txn.First("jobs", "id", job.ID)
	if err != nil {
		return fmt.Errorf("job lookup failed: %v", err)
	}

	// Setup the indexes correctly
	if existing != nil {
		job.CreateIndex = existing.(*structs.Job).CreateIndex
		job.ModifyIndex = index
	} else {
		job.CreateIndex = index
		job.ModifyIndex = index
	}

	// Insert the job
	if err := txn.Insert("jobs", job); err != nil {
		return fmt.Errorf("job insert failed: %v", err)
	}
	if err := txn.Insert("index", &IndexEntry{"jobs", index}); err != nil {
		return fmt.Errorf("index update failed: %v", err)
	}

	txn.Commit()
	return nil
}
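
A hedged usage sketch for UpsertJob, assuming a StateStore built with state.NewStateStore as it appears in the fsm.go example later in this listing. The wrapper function, the log writer parameter, and the literal index value are illustrative only; imports (io, state, structs) are elided as in the surrounding excerpts.

// Hypothetical helper: create a fresh state store and register a job at a
// given Raft index. In the real server the index comes from the applied log entry.
func exampleUpsertJob(logOutput io.Writer, job *structs.Job) error {
	store, err := state.NewStateStore(logOutput)
	if err != nil {
		return err
	}
	return store.UpsertJob(1000, job)
}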
Example #3
// deriveJob instantiates a new job based on the passed periodic job and the
// launch time.
func (p *PeriodicDispatch) deriveJob(periodicJob *structs.Job, time time.Time) (
	derived *structs.Job, err error) {

	// Have to recover in case the job copy panics.
	defer func() {
		if r := recover(); r != nil {
			p.logger.Printf("[ERR] nomad.periodic: deriving job from"+
				" periodic job %v failed; deregistering from periodic runner: %v",
				periodicJob.ID, r)
			p.Remove(periodicJob.ID)
			derived = nil
			err = fmt.Errorf("Failed to create a copy of the periodic job %v: %v", periodicJob.ID, r)
		}
	}()

	// Create a copy of the periodic job, give it a derived ID/Name and make it
	// non-periodic.
	derived = periodicJob.Copy()
	derived.ParentID = periodicJob.ID
	derived.ID = p.derivedJobID(periodicJob, time)
	derived.Name = derived.ID
	derived.Periodic = nil
	derived.GC = true
	return
}
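
The defer/recover-into-named-returns pattern used here is easy to isolate. Below is a self-contained, standard-library-only sketch of the same idea; names and strings are placeholders, not Nomad code.

package main

import "fmt"

// derive converts a panic inside the function body into ordinary return
// values via a deferred recover, mirroring the structure of deriveJob.
func derive(input string) (result string, err error) {
	defer func() {
		if r := recover(); r != nil {
			result = ""
			err = fmt.Errorf("deriving from %q panicked: %v", input, r)
		}
	}()

	if input == "" {
		panic("empty input") // stands in for the job copy panicking
	}
	return "derived-" + input, nil
}

func main() {
	fmt.Println(derive("job1")) // "derived-job1" <nil>
	fmt.Println(derive(""))     // "" plus the error built in the deferred func
}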
Example #4
// UpsertJob is used to register a job or update a job definition
func (s *StateStore) UpsertJob(index uint64, job *structs.Job) error {
	txn := s.db.Txn(true)
	defer txn.Abort()

	watcher := watch.NewItems()
	watcher.Add(watch.Item{Table: "jobs"})
	watcher.Add(watch.Item{Job: job.ID})

	// Check if the job already exists
	existing, err := txn.First("jobs", "id", job.ID)
	if err != nil {
		return fmt.Errorf("job lookup failed: %v", err)
	}

	// Setup the indexes correctly
	if existing != nil {
		job.CreateIndex = existing.(*structs.Job).CreateIndex
		job.ModifyIndex = index
		job.JobModifyIndex = index

		// Compute the job status
		var err error
		job.Status, err = s.getJobStatus(txn, job, false)
		if err != nil {
			return fmt.Errorf("setting job status for %q failed: %v", job.ID, err)
		}
	} else {
		job.CreateIndex = index
		job.ModifyIndex = index
		job.JobModifyIndex = index

		// If we are inserting the job for the first time, we don't need to
		// calculate the job's status as it is known.
		if job.IsPeriodic() {
			job.Status = structs.JobStatusRunning
		} else {
			job.Status = structs.JobStatusPending
		}
	}

	if err := s.updateSummaryWithJob(index, job, watcher, txn); err != nil {
		return fmt.Errorf("unable to create job summary: %v", err)
	}

	// Create the LocalDisk if it's nil by adding up DiskMB from task resources.
	// COMPAT 0.4.1 -> 0.5
	s.addLocalDiskToTaskGroups(job)

	// Insert the job
	if err := txn.Insert("jobs", job); err != nil {
		return fmt.Errorf("job insert failed: %v", err)
	}
	if err := txn.Insert("index", &IndexEntry{"jobs", index}); err != nil {
		return fmt.Errorf("index update failed: %v", err)
	}

	txn.Defer(func() { s.watch.notify(watcher) })
	txn.Commit()
	return nil
}
Example #5
// setImplicitConstraints adds implicit constraints to the job based on the
// features it is requesting.
func setImplicitConstraints(j *structs.Job) {
	// Get the required Vault Policies
	policies := j.VaultPolicies()

	// Get the required signals
	signals := j.RequiredSignals()

	// Hot path
	if len(signals) == 0 && len(policies) == 0 {
		return
	}

	// Add Vault constraints
	for _, tg := range j.TaskGroups {
		_, ok := policies[tg.Name]
		if !ok {
			// Not requesting Vault
			continue
		}

		found := false
		for _, c := range tg.Constraints {
			if c.Equal(vaultConstraint) {
				found = true
				break
			}
		}

		if !found {
			tg.Constraints = append(tg.Constraints, vaultConstraint)
		}
	}

	// Add signal constraints
	for _, tg := range j.TaskGroups {
		tgSignals, ok := signals[tg.Name]
		if !ok {
			// Not requesting signals
			continue
		}

		// Flatten the signals
		required := structs.MapStringStringSliceValueSet(tgSignals)
		sigConstraint := getSignalConstraint(required)

		found := false
		for _, c := range tg.Constraints {
			if c.Equal(sigConstraint) {
				found = true
				break
			}
		}

		if !found {
			tg.Constraints = append(tg.Constraints, sigConstraint)
		}
	}
}
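
For context, a hedged sketch of where a call like this sits: the constraints are injected in place before the job is validated. The wrapper below is hypothetical; it simply chains setImplicitConstraints with the validateJob shown elsewhere in this listing.

// Hypothetical registration step: add implicit constraints first, then run
// the normal validation over the mutated job.
func prepareAndValidate(job *structs.Job) error {
	setImplicitConstraints(job)
	return validateJob(job)
}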
Example #6
// validateJob validates a Job and task drivers and returns an error if there is
// a validation problem or if the Job is of a type a user is not allowed to
// submit.
func validateJob(job *structs.Job) error {
	validationErrors := new(multierror.Error)
	if err := job.Validate(); err != nil {
		multierror.Append(validationErrors, err)
	}

	// Get the signals required
	signals := job.RequiredSignals()

	// Validate the driver configurations.
	for _, tg := range job.TaskGroups {
		// Get the signals for the task group
		tgSignals, tgOk := signals[tg.Name]

		for _, task := range tg.Tasks {
			d, err := driver.NewDriver(
				task.Driver,
				driver.NewEmptyDriverContext(),
			)
			if err != nil {
				msg := "failed to create driver for task %q in group %q for validation: %v"
				multierror.Append(validationErrors, fmt.Errorf(msg, task.Name, tg.Name, err))
				continue
			}

			if err := d.Validate(task.Config); err != nil {
				formatted := fmt.Errorf("group %q -> task %q -> config: %v", tg.Name, task.Name, err)
				multierror.Append(validationErrors, formatted)
			}

			// The task group didn't have any task that required signals
			if !tgOk {
				continue
			}

			// This task requires signals. Ensure the driver is capable
			if required, ok := tgSignals[task.Name]; ok {
				abilities := d.Abilities()
				if !abilities.SendSignals {
					formatted := fmt.Errorf("group %q -> task %q: driver %q doesn't support sending signals. Requested signals are %v",
						tg.Name, task.Name, task.Driver, strings.Join(required, ", "))
					multierror.Append(validationErrors, formatted)
				}
			}
		}
	}

	if job.Type == structs.JobTypeCore {
		multierror.Append(validationErrors, fmt.Errorf("job type cannot be core"))
	}

	return validationErrors.ErrorOrNil()
}
Example #7
// Add begins tracking of a periodic job. If it is already tracked, it acts as
// an update to the job's periodic spec.
func (p *PeriodicDispatch) Add(job *structs.Job) error {
	p.l.Lock()
	defer p.l.Unlock()

	// Do nothing if not enabled
	if !p.enabled {
		return nil
	}

	// If we were tracking a job and it has been disabled or made non-periodic, remove it.
	disabled := !job.IsPeriodic() || !job.Periodic.Enabled
	_, tracked := p.tracked[job.ID]
	if disabled {
		if tracked {
			p.removeLocked(job.ID)
		}

		// If the job is disabled and we aren't tracking it, do nothing.
		return nil
	}

	// Add or update the job.
	p.tracked[job.ID] = job
	next := job.Periodic.Next(time.Now())
	if tracked {
		if err := p.heap.Update(job, next); err != nil {
			return fmt.Errorf("failed to update job %v launch time: %v", job.ID, err)
		}
		p.logger.Printf("[DEBUG] nomad.periodic: updated periodic job %q", job.ID)
	} else {
		if err := p.heap.Push(job, next); err != nil {
			return fmt.Errorf("failed to add job %v: %v", job.ID, err)
		}
		p.logger.Printf("[DEBUG] nomad.periodic: registered periodic job %q", job.ID)
	}

	// Signal an update.
	if p.running {
		select {
		case p.updateCh <- struct{}{}:
		default:
		}
	}

	return nil
}
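
The final select with a default case is the standard non-blocking-send idiom. Here is a standalone, standard-library-only sketch of just that piece; channel and function names are placeholders.

package main

import "fmt"

// notify performs a non-blocking send, mirroring how Add signals updateCh:
// if the buffered channel is already full, the signal is dropped rather than
// blocking the caller that holds the dispatcher's lock.
func notify(ch chan struct{}) bool {
	select {
	case ch <- struct{}{}:
		return true
	default:
		return false
	}
}

func main() {
	updateCh := make(chan struct{}, 1)
	fmt.Println(notify(updateCh)) // true: buffer had room
	fmt.Println(notify(updateCh)) // false: a signal is already pending
}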
Example #8
func (s *StateStore) getJobStatus(txn *memdb.Txn, job *structs.Job, evalDelete bool) (string, error) {
	allocs, err := txn.Get("allocs", "job", job.ID)
	if err != nil {
		return "", err
	}

	// If there is a non-terminal allocation, the job is running.
	hasAlloc := false
	for alloc := allocs.Next(); alloc != nil; alloc = allocs.Next() {
		hasAlloc = true
		if !alloc.(*structs.Allocation).TerminalStatus() {
			return structs.JobStatusRunning, nil
		}
	}

	evals, err := txn.Get("evals", "job", job.ID)
	if err != nil {
		return "", err
	}

	hasEval := false
	for eval := evals.Next(); eval != nil; eval = evals.Next() {
		hasEval = true
		if !eval.(*structs.Evaluation).TerminalStatus() {
			return structs.JobStatusPending, nil
		}
	}

	// The job is dead if all the allocations and evals are terminal or if there
	// are no evals because of garbage collection.
	if evalDelete || hasEval || hasAlloc {
		return structs.JobStatusDead, nil
	}

	// If there are no allocations or evaluations it is a new job. If the job is
	// periodic, we mark it as running as it will never have an
	// allocation/evaluation against it.
	if job.IsPeriodic() {
		return structs.JobStatusRunning, nil
	}
	return structs.JobStatusPending, nil
}
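
The decision above reduces to a small, ordered set of checks. The following standard-library-only sketch restates that precedence with plain booleans and placeholder status strings (not the structs constants), purely for illustration.

package main

import "fmt"

// jobStatus mirrors the ordering in getJobStatus: a non-terminal alloc means
// running, a non-terminal eval means pending, any terminal history (or a
// deleted eval) means dead, and an untouched job is running if periodic,
// otherwise pending.
func jobStatus(nonTerminalAlloc, nonTerminalEval, hasHistory, isPeriodic bool) string {
	switch {
	case nonTerminalAlloc:
		return "running"
	case nonTerminalEval:
		return "pending"
	case hasHistory:
		return "dead"
	case isPeriodic:
		return "running"
	default:
		return "pending"
	}
}

func main() {
	fmt.Println(jobStatus(false, false, true, false)) // dead
	fmt.Println(jobStatus(false, false, false, true)) // running: new periodic job
}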
Example #9
// setJobStatus sets the status of the job by looking up associated evaluations
// and allocations. evalDelete should be set to true if setJobStatus is being
// called because an evaluation is being deleted (potentially because of garbage
// collection). If forceStatus is non-empty, the job's status will be set to the
// passed status.
func (s *StateStore) setJobStatus(index uint64, watcher watch.Items, txn *memdb.Txn,
	job *structs.Job, evalDelete bool, forceStatus string) error {

	// Capture the current status so we can check if there is a change
	oldStatus := job.Status
	newStatus := forceStatus

	// If forceStatus is not set, compute the jobs status.
	if forceStatus == "" {
		var err error
		newStatus, err = s.getJobStatus(txn, job, evalDelete)
		if err != nil {
			return err
		}
	}

	// Fast-path if nothing has changed.
	if oldStatus == newStatus {
		return nil
	}

	// The job has changed, so add to watcher.
	watcher.Add(watch.Item{Table: "jobs"})
	watcher.Add(watch.Item{Job: job.ID})

	// Copy and update the existing job
	updated := job.Copy()
	updated.Status = newStatus
	updated.ModifyIndex = index

	// Insert the job
	if err := txn.Insert("jobs", updated); err != nil {
		return fmt.Errorf("job insert failed: %v", err)
	}
	if err := txn.Insert("index", &IndexEntry{"jobs", index}); err != nil {
		return fmt.Errorf("index update failed: %v", err)
	}
	return nil
}
Example #10
func TestDiffSystemAllocs(t *testing.T) {
	job := mock.SystemJob()

	drainNode := mock.Node()
	drainNode.Drain = true

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		deadNode.ID:  deadNode,
		drainNode.ID: drainNode,
	}

	// Create four alive nodes plus the draining and dead nodes.
	nodes := []*structs.Node{{ID: "foo"}, {ID: "bar"}, {ID: "baz"},
		{ID: "pipe"}, {ID: drainNode.ID}, {ID: deadNode.ID}}

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	allocs := []*structs.Allocation{
		// Update allocation on baz
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "baz",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore allocation on bar
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "bar",
			Name:   "my-job.web[0]",
			Job:    job,
		},

		// Stop allocation on draining node.
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: drainNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
		// Mark as lost on a dead node
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: deadNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
	}

	// Have one terminal alloc
	terminalAllocs := map[string]*structs.Allocation{
		"my-job.web[0]": &structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "pipe",
			Name:   "my-job.web[0]",
			Job:    job,
		},
	}

	diff := diffSystemAllocs(job, nodes, tainted, allocs, terminalAllocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the third alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// There should be no migrates.
	if len(migrate) != 0 {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should mark the 4th alloc as lost
	if len(lost) != 1 || lost[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", lost)
	}

	// We should place 2
	if l := len(place); l != 2 {
		t.Fatalf("bad: %#v", l)
	}

	// Ensure that the allocations which are replacements of terminal allocs are
	// annotated
	for _, alloc := range terminalAllocs {
		for _, allocTuple := range diff.place {
			if alloc.NodeID == allocTuple.Alloc.NodeID {
				if !reflect.DeepEqual(alloc, allocTuple.Alloc) {
					t.Fatalf("expected: %#v, actual: %#v", alloc, allocTuple.Alloc)
				}
			}
		}
	}
}
Example #11
func parseGroups(result *structs.Job, list *ast.ObjectList) error {
	list = list.Children()
	if len(list.Items) == 0 {
		return nil
	}

	// Go through each object and turn it into an actual result.
	collection := make([]*structs.TaskGroup, 0, len(list.Items))
	seen := make(map[string]struct{})
	for _, item := range list.Items {
		n := item.Keys[0].Token.Value().(string)

		// Make sure we haven't already found this
		if _, ok := seen[n]; ok {
			return fmt.Errorf("group '%s' defined more than once", n)
		}
		seen[n] = struct{}{}

		// We need this later
		var listVal *ast.ObjectList
		if ot, ok := item.Val.(*ast.ObjectType); ok {
			listVal = ot.List
		} else {
			return fmt.Errorf("group '%s': should be an object", n)
		}

		// Check for invalid keys
		valid := []string{
			"count",
			"constraint",
			"restart",
			"meta",
			"task",
			"local_disk",
		}
		if err := checkHCLKeys(listVal, valid); err != nil {
			return multierror.Prefix(err, fmt.Sprintf("'%s' ->", n))
		}

		var m map[string]interface{}
		if err := hcl.DecodeObject(&m, item.Val); err != nil {
			return err
		}
		delete(m, "constraint")
		delete(m, "meta")
		delete(m, "task")
		delete(m, "restart")
		delete(m, "local_disk")

		// Default count to 1 if not specified
		if _, ok := m["count"]; !ok {
			m["count"] = 1
		}

		// Build the group with the basic decode
		var g structs.TaskGroup
		g.Name = n
		if err := mapstructure.WeakDecode(m, &g); err != nil {
			return err
		}

		// Parse constraints
		if o := listVal.Filter("constraint"); len(o.Items) > 0 {
			if err := parseConstraints(&g.Constraints, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', constraint ->", n))
			}
		}

		// Parse restart policy
		if o := listVal.Filter("restart"); len(o.Items) > 0 {
			if err := parseRestartPolicy(&g.RestartPolicy, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', restart ->", n))
			}
		}

		// Parse local disk
		g.LocalDisk = structs.DefaultLocalDisk()
		if o := listVal.Filter("local_disk"); len(o.Items) > 0 {
			if err := parseLocalDisk(&g.LocalDisk, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', local_disk ->", n))
			}
		}

		// Parse out meta fields. These are in HCL as a list so we need
		// to iterate over them and merge them.
		if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 {
			for _, o := range metaO.Elem().Items {
				var m map[string]interface{}
				if err := hcl.DecodeObject(&m, o.Val); err != nil {
					return err
				}
				if err := mapstructure.WeakDecode(m, &g.Meta); err != nil {
					return err
				}
			}
		}

		// Parse tasks
		if o := listVal.Filter("task"); len(o.Items) > 0 {
			if err := parseTasks(result.Name, g.Name, &g.Tasks, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', task:", n))
			}
		}

		collection = append(collection, &g)
	}

	result.TaskGroups = append(result.TaskGroups, collection...)
	return nil
}
Example #12
func parseJob(result *structs.Job, list *ast.ObjectList) error {
	list = list.Children()
	if len(list.Items) != 1 {
		return fmt.Errorf("only one 'job' block allowed")
	}

	// Get our job object
	obj := list.Items[0]

	// Decode the full thing into a map[string]interface for ease
	var m map[string]interface{}
	if err := hcl.DecodeObject(&m, obj.Val); err != nil {
		return err
	}
	delete(m, "constraint")
	delete(m, "meta")
	delete(m, "update")
	delete(m, "periodic")

	// Set the ID and name to the object key
	result.ID = obj.Keys[0].Token.Value().(string)
	result.Name = result.ID

	// Defaults
	result.Priority = 50
	result.Region = "global"
	result.Type = "service"

	// Decode the rest
	if err := mapstructure.WeakDecode(m, result); err != nil {
		return err
	}

	// Value should be an object
	var listVal *ast.ObjectList
	if ot, ok := obj.Val.(*ast.ObjectType); ok {
		listVal = ot.List
	} else {
		return fmt.Errorf("job '%s' value: should be an object", result.ID)
	}

	// Check for invalid keys
	valid := []string{
		"id",
		"name",
		"region",
		"all_at_once",
		"type",
		"priority",
		"datacenters",
		"constraint",
		"update",
		"periodic",
		"meta",
		"task",
		"group",
		"vault_token",
	}
	if err := checkHCLKeys(listVal, valid); err != nil {
		return multierror.Prefix(err, "job:")
	}

	// Parse constraints
	if o := listVal.Filter("constraint"); len(o.Items) > 0 {
		if err := parseConstraints(&result.Constraints, o); err != nil {
			return multierror.Prefix(err, "constraint ->")
		}
	}

	// If we have an update strategy, then parse that
	if o := listVal.Filter("update"); len(o.Items) > 0 {
		if err := parseUpdate(&result.Update, o); err != nil {
			return multierror.Prefix(err, "update ->")
		}
	}

	// If we have a periodic definition, then parse that
	if o := listVal.Filter("periodic"); len(o.Items) > 0 {
		if err := parsePeriodic(&result.Periodic, o); err != nil {
			return multierror.Prefix(err, "periodic ->")
		}
	}

	// Parse out meta fields. These are in HCL as a list so we need
	// to iterate over them and merge them.
	if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 {
		for _, o := range metaO.Elem().Items {
			var m map[string]interface{}
			if err := hcl.DecodeObject(&m, o.Val); err != nil {
				return err
			}
			if err := mapstructure.WeakDecode(m, &result.Meta); err != nil {
				return err
			}
		}
	}

	// If we have tasks outside, create TaskGroups for them
	if o := listVal.Filter("task"); len(o.Items) > 0 {
		var tasks []*structs.Task
		if err := parseTasks(result.Name, "", &tasks, o); err != nil {
			return multierror.Prefix(err, "task:")
		}

		result.TaskGroups = make([]*structs.TaskGroup, len(tasks), len(tasks)*2)
		for i, t := range tasks {
			result.TaskGroups[i] = &structs.TaskGroup{
				Name:      t.Name,
				Count:     1,
				LocalDisk: structs.DefaultLocalDisk(),
				Tasks:     []*structs.Task{t},
			}
		}
	}

	// Parse the task groups
	if o := listVal.Filter("group"); len(o.Items) > 0 {
		if err := parseGroups(result, o); err != nil {
			return multierror.Prefix(err, "group:")
		}
	}

	return nil
}
Example #13
func parseGroups(result *structs.Job, obj *hclobj.Object) error {
	// Get all the maps of keys to the actual object
	objects := make(map[string]*hclobj.Object)
	for _, o1 := range obj.Elem(false) {
		for _, o2 := range o1.Elem(true) {
			if _, ok := objects[o2.Key]; ok {
				return fmt.Errorf(
					"group '%s' defined more than once",
					o2.Key)
			}

			objects[o2.Key] = o2
		}
	}

	if len(objects) == 0 {
		return nil
	}

	// Go through each object and turn it into an actual result.
	collection := make([]*structs.TaskGroup, 0, len(objects))
	for n, o := range objects {
		var m map[string]interface{}
		if err := hcl.DecodeObject(&m, o); err != nil {
			return err
		}
		delete(m, "constraint")
		delete(m, "meta")
		delete(m, "task")

		// Default count to 1 if not specified
		if _, ok := m["count"]; !ok {
			m["count"] = 1
		}

		// Build the group with the basic decode
		var g structs.TaskGroup
		g.Name = n
		if err := mapstructure.WeakDecode(m, &g); err != nil {
			return err
		}

		// Parse constraints
		if o := o.Get("constraint", false); o != nil {
			if err := parseConstraints(&g.Constraints, o); err != nil {
				return err
			}
		}

		// Parse out meta fields. These are in HCL as a list so we need
		// to iterate over them and merge them.
		if metaO := o.Get("meta", false); metaO != nil {
			for _, o := range metaO.Elem(false) {
				var m map[string]interface{}
				if err := hcl.DecodeObject(&m, o); err != nil {
					return err
				}
				if err := mapstructure.WeakDecode(m, &g.Meta); err != nil {
					return err
				}
			}
		}

		// Parse tasks
		if o := o.Get("task", false); o != nil {
			if err := parseTasks(&g.Tasks, o); err != nil {
				return err
			}
		}

		collection = append(collection, &g)
	}

	result.TaskGroups = append(result.TaskGroups, collection...)
	return nil
}
Example #14
func parseJob(result *structs.Job, obj *hclobj.Object) error {
	if obj.Len() > 1 {
		return fmt.Errorf("only one 'job' block allowed")
	}

	// Get our job object
	obj = obj.Elem(true)[0]

	// Decode the full thing into a map[string]interface for ease
	var m map[string]interface{}
	if err := hcl.DecodeObject(&m, obj); err != nil {
		return err
	}
	delete(m, "constraint")
	delete(m, "meta")
	delete(m, "update")

	// Set the ID and name to the object key
	result.ID = obj.Key
	result.Name = obj.Key

	// Defaults
	result.Priority = 50
	result.Region = "global"
	result.Type = "service"

	// Decode the rest
	if err := mapstructure.WeakDecode(m, result); err != nil {
		return err
	}

	// Parse constraints
	if o := obj.Get("constraint", false); o != nil {
		if err := parseConstraints(&result.Constraints, o); err != nil {
			return err
		}
	}

	// If we have an update strategy, then parse that
	if o := obj.Get("update", false); o != nil {
		if err := parseUpdate(&result.Update, o); err != nil {
			return err
		}
	}

	// Parse out meta fields. These are in HCL as a list so we need
	// to iterate over them and merge them.
	if metaO := obj.Get("meta", false); metaO != nil {
		for _, o := range metaO.Elem(false) {
			var m map[string]interface{}
			if err := hcl.DecodeObject(&m, o); err != nil {
				return err
			}
			if err := mapstructure.WeakDecode(m, &result.Meta); err != nil {
				return err
			}
		}
	}

	// If we have tasks outside, do those
	if o := obj.Get("task", false); o != nil {
		var tasks []*structs.Task
		if err := parseTasks(&tasks, o); err != nil {
			return err
		}

		result.TaskGroups = make([]*structs.TaskGroup, len(tasks), len(tasks)*2)
		for i, t := range tasks {
			result.TaskGroups[i] = &structs.TaskGroup{
				Name:  t.Name,
				Count: 1,
				Tasks: []*structs.Task{t},
			}
		}
	}

	// Parse the task groups
	if o := obj.Get("group", false); o != nil {
		if err := parseGroups(result, o); err != nil {
			return fmt.Errorf("error parsing 'group': %s", err)
		}
	}

	return nil
}
Example #15
func TestDiffAllocs(t *testing.T) {
	job := mock.Job()
	required := materializeTaskGroups(job)

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.ModifyIndex -= 1

	tainted := map[string]bool{
		"dead": true,
		"zip":  false,
	}

	allocs := []*structs.Allocation{
		// Update the 1st
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore the 2nd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[1]",
			Job:    job,
		},

		// Evict 11th
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[10]",
		},

		// Migrate the 3rd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[2]",
		},
	}

	diff := diffAllocs(job, tainted, required, allocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the 3rd alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// We should migrate the 4th alloc
	if len(migrate) != 1 || migrate[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should place 7
	if len(place) != 7 {
		t.Fatalf("bad: %#v", place)
	}
}
Example #16
func TestDiffSystemAllocs(t *testing.T) {
	job := mock.SystemJob()

	// Create three alive nodes.
	nodes := []*structs.Node{{ID: "foo"}, {ID: "bar"}, {ID: "baz"}}

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.ModifyIndex -= 1

	tainted := map[string]bool{
		"dead": true,
		"baz":  false,
	}

	allocs := []*structs.Allocation{
		// Update allocation on baz
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "baz",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore allocation on bar
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "bar",
			Name:   "my-job.web[0]",
			Job:    job,
		},

		// Stop allocation on dead.
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[0]",
		},
	}

	diff := diffSystemAllocs(job, nodes, tainted, allocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the third alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// There should be no migrates.
	if len(migrate) != 0 {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should place 1
	if len(place) != 1 {
		t.Fatalf("bad: %#v", place)
	}
}
Example #17
File: fsm.go Project: zanella/nomad
func (n *nomadFSM) Restore(old io.ReadCloser) error {
	defer old.Close()

	// Create a new state store
	newState, err := state.NewStateStore(n.logOutput)
	if err != nil {
		return err
	}
	n.state = newState

	// Start the state restore
	restore, err := newState.Restore()
	if err != nil {
		return err
	}
	defer restore.Abort()

	// Create a decoder
	dec := codec.NewDecoder(old, structs.MsgpackHandle)

	// Read in the header
	var header snapshotHeader
	if err := dec.Decode(&header); err != nil {
		return err
	}

	// Populate the new state
	msgType := make([]byte, 1)
	for {
		// Read the message type
		_, err := old.Read(msgType)
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}

		// Decode
		switch SnapshotType(msgType[0]) {
		case TimeTableSnapshot:
			if err := n.timetable.Deserialize(dec); err != nil {
				return fmt.Errorf("time table deserialize failed: %v", err)
			}

		case NodeSnapshot:
			node := new(structs.Node)
			if err := dec.Decode(node); err != nil {
				return err
			}
			if err := restore.NodeRestore(node); err != nil {
				return err
			}

		case JobSnapshot:
			job := new(structs.Job)
			if err := dec.Decode(job); err != nil {
				return err
			}

			// COMPAT: Remove in 0.5
			// Empty maps and slices should be treated as nil to avoid
			// unintended destructive updates in the scheduler since we use
			// reflect.DeepEqual. Starting with Nomad 0.4.1, job submission sanitizes
			// the incoming job.
			job.Canonicalize()

			if err := restore.JobRestore(job); err != nil {
				return err
			}

		case EvalSnapshot:
			eval := new(structs.Evaluation)
			if err := dec.Decode(eval); err != nil {
				return err
			}
			if err := restore.EvalRestore(eval); err != nil {
				return err
			}

		case AllocSnapshot:
			alloc := new(structs.Allocation)
			if err := dec.Decode(alloc); err != nil {
				return err
			}
			if err := restore.AllocRestore(alloc); err != nil {
				return err
			}

		case IndexSnapshot:
			idx := new(state.IndexEntry)
			if err := dec.Decode(idx); err != nil {
				return err
			}
			if err := restore.IndexRestore(idx); err != nil {
				return err
			}

		case PeriodicLaunchSnapshot:
			launch := new(structs.PeriodicLaunch)
			if err := dec.Decode(launch); err != nil {
				return err
			}
			if err := restore.PeriodicLaunchRestore(launch); err != nil {
				return err
			}

		case JobSummarySnapshot:
			summary := new(structs.JobSummary)
			if err := dec.Decode(summary); err != nil {
				return err
			}
			if err := restore.JobSummaryRestore(summary); err != nil {
				return err
			}

		case VaultAccessorSnapshot:
			accessor := new(structs.VaultAccessor)
			if err := dec.Decode(accessor); err != nil {
				return err
			}
			if err := restore.VaultAccessorRestore(accessor); err != nil {
				return err
			}

		default:
			return fmt.Errorf("Unrecognized snapshot type: %v", msgType)
		}
	}

	restore.Commit()

	// Create Job Summaries
	// COMPAT 0.4 -> 0.4.1
	// We can remove this in 0.5. This exists so that the server creates job
	// summaries if they were not present previously. When users upgrade to 0.5
	// from 0.4.1, the snapshot will contain job summaries so it will be safe to
	// remove this block.
	index, err := n.state.Index("job_summary")
	if err != nil {
		return fmt.Errorf("couldn't fetch index of job summary table: %v", err)
	}

	// If the index is 0 that means there is no job summary in the snapshot so
	// we will have to create them
	if index == 0 {
		// query the latest index
		latestIndex, err := n.state.LatestIndex()
		if err != nil {
			return fmt.Errorf("unable to query latest index: %v", index)
		}
		if err := n.state.ReconcileJobSummaries(latestIndex); err != nil {
			return fmt.Errorf("error reconciling summaries: %v", err)
		}
	}

	return nil
}
Example #18
func TestDiffSystemAllocs(t *testing.T) {
	job := mock.SystemJob()

	// Create three alive nodes.
	nodes := []*structs.Node{{ID: "foo"}, {ID: "bar"}, {ID: "baz"}}

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	drainNode := mock.Node()
	drainNode.Drain = true

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		"dead":      deadNode,
		"drainNode": drainNode,
	}

	allocs := []*structs.Allocation{
		// Update allocation on baz
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "baz",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore allocation on bar
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "bar",
			Name:   "my-job.web[0]",
			Job:    job,
		},

		// Stop allocation on draining node.
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "drainNode",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
		// Mark as lost on a dead node
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
	}

	diff := diffSystemAllocs(job, nodes, tainted, allocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the third alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// There should be no migrates.
	if len(migrate) != 0 {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should mark the 4th alloc as lost
	if len(lost) != 1 || lost[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", lost)
	}

	// We should place 1
	if len(place) != 1 {
		t.Fatalf("bad: %#v", place)
	}
}
Example #19
func TestDiffAllocs(t *testing.T) {
	job := mock.Job()
	required := materializeTaskGroups(job)

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	drainNode := mock.Node()
	drainNode.Drain = true

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		"dead":      deadNode,
		"drainNode": drainNode,
	}

	allocs := []*structs.Allocation{
		// Update the 1st
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore the 2nd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[1]",
			Job:    job,
		},

		// Evict 11th
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[10]",
			Job:    oldJob,
		},

		// Migrate the 3rd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "drainNode",
			Name:   "my-job.web[2]",
			Job:    oldJob,
		},
		// Mark the 4th lost
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[3]",
			Job:    oldJob,
		},
	}

	diff := diffAllocs(job, tainted, required, allocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the 3rd alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// We should migrate the 4th alloc
	if len(migrate) != 1 || migrate[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should mark the 5th alloc as lost
	if len(lost) != 1 || lost[0].Alloc != allocs[4] {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should place 6
	if len(place) != 6 {
		t.Fatalf("bad: %#v", place)
	}
}
Example #20
func TestDiffAllocs(t *testing.T) {
	job := mock.Job()
	required := materializeTaskGroups(job)

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	drainNode := mock.Node()
	drainNode.Drain = true

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		"dead":      deadNode,
		"drainNode": drainNode,
	}

	allocs := []*structs.Allocation{
		// Update the 1st
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore the 2nd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[1]",
			Job:    job,
		},

		// Evict 11th
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[10]",
			Job:    oldJob,
		},

		// Migrate the 3rd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "drainNode",
			Name:   "my-job.web[2]",
			Job:    oldJob,
		},
		// Mark the 4th lost
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[3]",
			Job:    oldJob,
		},
	}

	// Have three terminal allocs
	terminalAllocs := map[string]*structs.Allocation{
		"my-job.web[4]": &structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[4]",
			Job:    job,
		},
		"my-job.web[5]": &structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[5]",
			Job:    job,
		},
		"my-job.web[6]": &structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[6]",
			Job:    job,
		},
	}

	diff := diffAllocs(job, tainted, required, allocs, terminalAllocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the 3rd alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// We should migrate the 4th alloc
	if len(migrate) != 1 || migrate[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should mark the 5th alloc as lost
	if len(lost) != 1 || lost[0].Alloc != allocs[4] {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should place 6
	if len(place) != 6 {
		t.Fatalf("bad: %#v", place)
	}

	// Ensure that the allocations which are replacements of terminal allocs are
	// annotated
	for name, alloc := range terminalAllocs {
		for _, allocTuple := range diff.place {
			if name == allocTuple.Name {
				if !reflect.DeepEqual(alloc, allocTuple.Alloc) {
					t.Fatalf("expected: %#v, actual: %#v", alloc, allocTuple.Alloc)
				}
			}
		}
	}
}
Example #21
func parseJob(result *structs.Job, list *ast.ObjectList) error {
	list = list.Children()
	if len(list.Items) != 1 {
		return fmt.Errorf("only one 'job' block allowed")
	}

	// Get our job object
	obj := list.Items[0]

	// Decode the full thing into a map[string]interface for ease
	var m map[string]interface{}
	if err := hcl.DecodeObject(&m, obj.Val); err != nil {
		return err
	}
	delete(m, "constraint")
	delete(m, "meta")
	delete(m, "update")

	// Set the ID and name to the object key
	result.ID = obj.Keys[0].Token.Value().(string)
	result.Name = result.ID

	// Defaults
	result.Priority = 50
	result.Region = "global"
	result.Type = "service"

	// Decode the rest
	if err := mapstructure.WeakDecode(m, result); err != nil {
		return err
	}

	// Value should be an object
	var listVal *ast.ObjectList
	if ot, ok := obj.Val.(*ast.ObjectType); ok {
		listVal = ot.List
	} else {
		return fmt.Errorf("job '%s' value: should be an object", result.ID)
	}

	// Parse constraints
	if o := listVal.Filter("constraint"); len(o.Items) > 0 {
		if err := parseConstraints(&result.Constraints, o); err != nil {
			return err
		}
	}

	// If we have an update strategy, then parse that
	if o := listVal.Filter("update"); len(o.Items) > 0 {
		if err := parseUpdate(&result.Update, o); err != nil {
			return err
		}
	}

	// Parse out meta fields. These are in HCL as a list so we need
	// to iterate over them and merge them.
	if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 {
		for _, o := range metaO.Elem().Items {
			var m map[string]interface{}
			if err := hcl.DecodeObject(&m, o.Val); err != nil {
				return err
			}
			if err := mapstructure.WeakDecode(m, &result.Meta); err != nil {
				return err
			}
		}
	}

	// If we have tasks outside, create TaskGroups for them
	if o := listVal.Filter("task"); len(o.Items) > 0 {
		var tasks []*structs.Task
		if err := parseTasks(result.Name, "", &tasks, o); err != nil {
			return err
		}

		result.TaskGroups = make([]*structs.TaskGroup, len(tasks), len(tasks)*2)
		for i, t := range tasks {
			result.TaskGroups[i] = &structs.TaskGroup{
				Name:          t.Name,
				Count:         1,
				Tasks:         []*structs.Task{t},
				RestartPolicy: structs.NewRestartPolicy(result.Type),
			}
		}
	}

	// Parse the task groups
	if o := listVal.Filter("group"); len(o.Items) > 0 {
		if err := parseGroups(result, o); err != nil {
			return fmt.Errorf("error parsing 'group': %s", err)
		}
	}

	return nil
}