Example #1
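// SystemJob returns a sample system-type job: a single "web" task group
// containing one exec task that runs /bin/date, with default local disk
// and log configuration and a Linux kernel constraint.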
func SystemJob() *structs.Job {
	job := &structs.Job{
		Region:      "global",
		ID:          structs.GenerateUUID(),
		Name:        "my-job",
		Type:        structs.JobTypeSystem,
		Priority:    100,
		AllAtOnce:   false,
		Datacenters: []string{"dc1"},
		Constraints: []*structs.Constraint{
			&structs.Constraint{
				LTarget: "${attr.kernel.name}",
				RTarget: "linux",
				Operand: "=",
			},
		},
		TaskGroups: []*structs.TaskGroup{
			&structs.TaskGroup{
				Name:  "web",
				Count: 1,
				RestartPolicy: &structs.RestartPolicy{
					Attempts: 3,
					Interval: 10 * time.Minute,
					Delay:    1 * time.Minute,
					Mode:     structs.RestartPolicyModeDelay,
				},
				LocalDisk: structs.DefaultLocalDisk(),
				Tasks: []*structs.Task{
					&structs.Task{
						Name:   "web",
						Driver: "exec",
						Config: map[string]interface{}{
							"command": "/bin/date",
						},
						Env: map[string]string{},
						Resources: &structs.Resources{
							CPU:      500,
							MemoryMB: 256,
							Networks: []*structs.NetworkResource{
								&structs.NetworkResource{
									MBits:        50,
									DynamicPorts: []structs.Port{{Label: "http"}},
								},
							},
						},
						LogConfig: structs.DefaultLogConfig(),
					},
				},
			},
		},
		Meta: map[string]string{
			"owner": "armon",
		},
		Status:      structs.JobStatusPending,
		CreateIndex: 42,
		ModifyIndex: 99,
	}
	return job
}
Example #2
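// parseJob decodes the single top-level "job" block from the HCL AST into
// result: it applies defaults (priority 50, region "global", type "service"),
// validates the allowed keys, and delegates the constraint, update, periodic,
// meta, task, and group sub-blocks to their dedicated parsers.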
func parseJob(result *structs.Job, list *ast.ObjectList) error {
	list = list.Children()
	if len(list.Items) != 1 {
		return fmt.Errorf("only one 'job' block allowed")
	}

	// Get our job object
	obj := list.Items[0]

	// Decode the full thing into a map[string]interface{} for ease of use
	var m map[string]interface{}
	if err := hcl.DecodeObject(&m, obj.Val); err != nil {
		return err
	}
	delete(m, "constraint")
	delete(m, "meta")
	delete(m, "update")
	delete(m, "periodic")

	// Set the ID and name to the object key
	result.ID = obj.Keys[0].Token.Value().(string)
	result.Name = result.ID

	// Defaults
	result.Priority = 50
	result.Region = "global"
	result.Type = "service"

	// Decode the rest
	if err := mapstructure.WeakDecode(m, result); err != nil {
		return err
	}

	// Value should be an object
	var listVal *ast.ObjectList
	if ot, ok := obj.Val.(*ast.ObjectType); ok {
		listVal = ot.List
	} else {
		return fmt.Errorf("job '%s' value: should be an object", result.ID)
	}

	// Check for invalid keys
	valid := []string{
		"id",
		"name",
		"region",
		"all_at_once",
		"type",
		"priority",
		"datacenters",
		"constraint",
		"update",
		"periodic",
		"meta",
		"task",
		"group",
		"vault_token",
	}
	if err := checkHCLKeys(listVal, valid); err != nil {
		return multierror.Prefix(err, "job:")
	}

	// Parse constraints
	if o := listVal.Filter("constraint"); len(o.Items) > 0 {
		if err := parseConstraints(&result.Constraints, o); err != nil {
			return multierror.Prefix(err, "constraint ->")
		}
	}

	// If we have an update strategy, then parse that
	if o := listVal.Filter("update"); len(o.Items) > 0 {
		if err := parseUpdate(&result.Update, o); err != nil {
			return multierror.Prefix(err, "update ->")
		}
	}

	// If we have a periodic definition, then parse that
	if o := listVal.Filter("periodic"); len(o.Items) > 0 {
		if err := parsePeriodic(&result.Periodic, o); err != nil {
			return multierror.Prefix(err, "periodic ->")
		}
	}

	// Parse out meta fields. These are in HCL as a list so we need
	// to iterate over them and merge them.
	if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 {
		for _, o := range metaO.Elem().Items {
			var m map[string]interface{}
			if err := hcl.DecodeObject(&m, o.Val); err != nil {
				return err
			}
			if err := mapstructure.WeakDecode(m, &result.Meta); err != nil {
				return err
			}
		}
	}

	// If we have tasks outside, create TaskGroups for them
	if o := listVal.Filter("task"); len(o.Items) > 0 {
		var tasks []*structs.Task
		if err := parseTasks(result.Name, "", &tasks, o); err != nil {
			return multierror.Prefix(err, "task:")
		}

		result.TaskGroups = make([]*structs.TaskGroup, len(tasks), len(tasks)*2)
		for i, t := range tasks {
			result.TaskGroups[i] = &structs.TaskGroup{
				Name:      t.Name,
				Count:     1,
				LocalDisk: structs.DefaultLocalDisk(),
				Tasks:     []*structs.Task{t},
			}
		}
	}

	// Parse the task groups
	if o := listVal.Filter("group"); len(o.Items) > 0 {
		if err := parseGroups(result, o); err != nil {
			return multierror.Prefix(err, "group:")
		}
	}

	return nil
}
Example #3
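// parseGroups converts each "group" block into a structs.TaskGroup, rejecting
// duplicate group names and unknown keys, defaulting count to 1, and parsing
// nested constraint, restart, local_disk, meta, and task blocks before
// appending the groups to result.TaskGroups.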
func parseGroups(result *structs.Job, list *ast.ObjectList) error {
	list = list.Children()
	if len(list.Items) == 0 {
		return nil
	}

	// Go through each object and turn it into an actual result.
	collection := make([]*structs.TaskGroup, 0, len(list.Items))
	seen := make(map[string]struct{})
	for _, item := range list.Items {
		n := item.Keys[0].Token.Value().(string)

		// Make sure we haven't already found this
		if _, ok := seen[n]; ok {
			return fmt.Errorf("group '%s' defined more than once", n)
		}
		seen[n] = struct{}{}

		// We need this later
		var listVal *ast.ObjectList
		if ot, ok := item.Val.(*ast.ObjectType); ok {
			listVal = ot.List
		} else {
			return fmt.Errorf("group '%s': should be an object", n)
		}

		// Check for invalid keys
		valid := []string{
			"count",
			"constraint",
			"restart",
			"meta",
			"task",
			"local_disk",
		}
		if err := checkHCLKeys(listVal, valid); err != nil {
			return multierror.Prefix(err, fmt.Sprintf("'%s' ->", n))
		}

		var m map[string]interface{}
		if err := hcl.DecodeObject(&m, item.Val); err != nil {
			return err
		}
		delete(m, "constraint")
		delete(m, "meta")
		delete(m, "task")
		delete(m, "restart")
		delete(m, "local_disk")

		// Default count to 1 if not specified
		if _, ok := m["count"]; !ok {
			m["count"] = 1
		}

		// Build the group with the basic decode
		var g structs.TaskGroup
		g.Name = n
		if err := mapstructure.WeakDecode(m, &g); err != nil {
			return err
		}

		// Parse constraints
		if o := listVal.Filter("constraint"); len(o.Items) > 0 {
			if err := parseConstraints(&g.Constraints, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', constraint ->", n))
			}
		}

		// Parse restart policy
		if o := listVal.Filter("restart"); len(o.Items) > 0 {
			if err := parseRestartPolicy(&g.RestartPolicy, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', restart ->", n))
			}
		}

		// Parse local disk
		g.LocalDisk = structs.DefaultLocalDisk()
		if o := listVal.Filter("local_disk"); len(o.Items) > 0 {
			if err := parseLocalDisk(&g.LocalDisk, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', local_disk ->", n))
			}
		}

		// Parse out meta fields. These are in HCL as a list so we need
		// to iterate over them and merge them.
		if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 {
			for _, o := range metaO.Elem().Items {
				var m map[string]interface{}
				if err := hcl.DecodeObject(&m, o.Val); err != nil {
					return err
				}
				if err := mapstructure.WeakDecode(m, &g.Meta); err != nil {
					return err
				}
			}
		}

		// Parse tasks
		if o := listVal.Filter("task"); len(o.Items) > 0 {
			if err := parseTasks(result.Name, g.Name, &g.Tasks, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', task:", n))
			}
		}

		collection = append(collection, &g)
	}

	result.TaskGroups = append(result.TaskGroups, collection...)
	return nil
}
Example #4
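// TestParse is a table-driven test: each case names an HCL fixture under
// ./test-fixtures together with the expected *structs.Job (or an expected
// parse error), and the parsed result is compared with reflect.DeepEqual.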
func TestParse(t *testing.T) {
	cases := []struct {
		File   string
		Result *structs.Job
		Err    bool
	}{
		{
			"basic.hcl",
			&structs.Job{
				ID:          "binstore-storagelocker",
				Name:        "binstore-storagelocker",
				Type:        "service",
				Priority:    50,
				AllAtOnce:   true,
				Datacenters: []string{"us2", "eu1"},
				Region:      "global",
				VaultToken:  "foo",

				Meta: map[string]string{
					"foo": "bar",
				},

				Constraints: []*structs.Constraint{
					&structs.Constraint{
						LTarget: "kernel.os",
						RTarget: "windows",
						Operand: "=",
					},
				},

				Update: structs.UpdateStrategy{
					Stagger:     60 * time.Second,
					MaxParallel: 2,
				},

				TaskGroups: []*structs.TaskGroup{
					&structs.TaskGroup{
						Name:      "outside",
						Count:     1,
						LocalDisk: structs.DefaultLocalDisk(),
						Tasks: []*structs.Task{
							&structs.Task{
								Name:   "outside",
								Driver: "java",
								Config: map[string]interface{}{
									"jar_path": "s3://my-cool-store/foo.jar",
								},
								Meta: map[string]string{
									"my-cool-key": "foobar",
								},
								LogConfig: structs.DefaultLogConfig(),
							},
						},
					},

					&structs.TaskGroup{
						Name:  "binsl",
						Count: 5,
						Constraints: []*structs.Constraint{
							&structs.Constraint{
								LTarget: "kernel.os",
								RTarget: "linux",
								Operand: "=",
							},
						},
						Meta: map[string]string{
							"elb_mode":     "tcp",
							"elb_interval": "10",
							"elb_checks":   "3",
						},
						RestartPolicy: &structs.RestartPolicy{
							Interval: 10 * time.Minute,
							Attempts: 5,
							Delay:    15 * time.Second,
							Mode:     "delay",
						},
						LocalDisk: &structs.LocalDisk{
							Sticky: true,
							DiskMB: 150,
						},
						Tasks: []*structs.Task{
							&structs.Task{
								Name:   "binstore",
								Driver: "docker",
								User:   "******",
								Config: map[string]interface{}{
									"image": "hashicorp/binstore",
									"labels": []map[string]interface{}{
										map[string]interface{}{
											"FOO": "bar",
										},
									},
								},
								Services: []*structs.Service{
									{
										Name:      "binstore-storagelocker-binsl-binstore",
										Tags:      []string{"foo", "bar"},
										PortLabel: "http",
										Checks: []*structs.ServiceCheck{
											{
												Name:      "check-name",
												Type:      "tcp",
												PortLabel: "admin",
												Interval:  10 * time.Second,
												Timeout:   2 * time.Second,
											},
										},
									},
								},
								Env: map[string]string{
									"HELLO": "world",
									"LOREM": "ipsum",
								},
								Resources: &structs.Resources{
									CPU:      500,
									MemoryMB: 128,
									IOPS:     0,
									Networks: []*structs.NetworkResource{
										&structs.NetworkResource{
											MBits:         100,
											ReservedPorts: []structs.Port{{"one", 1}, {"two", 2}, {"three", 3}},
											DynamicPorts:  []structs.Port{{"http", 0}, {"https", 0}, {"admin", 0}},
										},
									},
								},
								KillTimeout: 22 * time.Second,
								LogConfig: &structs.LogConfig{
									MaxFiles:      10,
									MaxFileSizeMB: 100,
								},
								Artifacts: []*structs.TaskArtifact{
									{
										GetterSource: "http://foo.com/artifact",
										RelativeDest: "local/",
										GetterOptions: map[string]string{
											"checksum": "md5:b8a4f3f72ecab0510a6a31e997461c5f",
										},
									},
									{
										GetterSource: "http://bar.com/artifact",
										RelativeDest: "local/",
										GetterOptions: map[string]string{
											"checksum": "md5:ff1cc0d3432dad54d607c1505fb7245c",
										},
									},
								},
								Vault: &structs.Vault{
									Policies: []string{"foo", "bar"},
								},
							},
							&structs.Task{
								Name:   "storagelocker",
								Driver: "docker",
								User:   "",
								Config: map[string]interface{}{
									"image": "hashicorp/storagelocker",
								},
								Resources: &structs.Resources{
									CPU:      500,
									MemoryMB: 128,
									IOPS:     30,
								},
								Constraints: []*structs.Constraint{
									&structs.Constraint{
										LTarget: "kernel.arch",
										RTarget: "amd64",
										Operand: "=",
									},
								},
								LogConfig: structs.DefaultLogConfig(),
							},
						},
					},
				},
			},
			false,
		},

		{
			"multi-network.hcl",
			nil,
			true,
		},

		{
			"multi-resource.hcl",
			nil,
			true,
		},

		{
			"multi-vault.hcl",
			nil,
			true,
		},

		{
			"default-job.hcl",
			&structs.Job{
				ID:       "foo",
				Name:     "foo",
				Priority: 50,
				Region:   "global",
				Type:     "service",
			},
			false,
		},

		{
			"version-constraint.hcl",
			&structs.Job{
				ID:       "foo",
				Name:     "foo",
				Priority: 50,
				Region:   "global",
				Type:     "service",
				Constraints: []*structs.Constraint{
					&structs.Constraint{
						LTarget: "$attr.kernel.version",
						RTarget: "~> 3.2",
						Operand: structs.ConstraintVersion,
					},
				},
			},
			false,
		},

		{
			"regexp-constraint.hcl",
			&structs.Job{
				ID:       "foo",
				Name:     "foo",
				Priority: 50,
				Region:   "global",
				Type:     "service",
				Constraints: []*structs.Constraint{
					&structs.Constraint{
						LTarget: "$attr.kernel.version",
						RTarget: "[0-9.]+",
						Operand: structs.ConstraintRegex,
					},
				},
			},
			false,
		},

		{
			"distinctHosts-constraint.hcl",
			&structs.Job{
				ID:       "foo",
				Name:     "foo",
				Priority: 50,
				Region:   "global",
				Type:     "service",
				Constraints: []*structs.Constraint{
					&structs.Constraint{
						Operand: structs.ConstraintDistinctHosts,
					},
				},
			},
			false,
		},

		{
			"periodic-cron.hcl",
			&structs.Job{
				ID:       "foo",
				Name:     "foo",
				Priority: 50,
				Region:   "global",
				Type:     "service",
				Periodic: &structs.PeriodicConfig{
					Enabled:         true,
					SpecType:        structs.PeriodicSpecCron,
					Spec:            "*/5 * * *",
					ProhibitOverlap: true,
				},
			},
			false,
		},

		{
			"specify-job.hcl",
			&structs.Job{
				ID:       "job1",
				Name:     "My Job",
				Priority: 50,
				Region:   "global",
				Type:     "service",
			},
			false,
		},

		{
			"task-nested-config.hcl",
			&structs.Job{
				Region:   "global",
				ID:       "foo",
				Name:     "foo",
				Type:     "service",
				Priority: 50,

				TaskGroups: []*structs.TaskGroup{
					&structs.TaskGroup{
						Name:      "bar",
						Count:     1,
						LocalDisk: structs.DefaultLocalDisk(),
						Tasks: []*structs.Task{
							&structs.Task{
								Name:   "bar",
								Driver: "docker",
								Config: map[string]interface{}{
									"image": "hashicorp/image",
									"port_map": []map[string]interface{}{
										map[string]interface{}{
											"db": 1234,
										},
									},
								},
								LogConfig: &structs.LogConfig{
									MaxFiles:      10,
									MaxFileSizeMB: 10,
								},
							},
						},
					},
				},
			},
			false,
		},

		{
			"bad-artifact.hcl",
			nil,
			true,
		},

		{
			"artifacts.hcl",
			&structs.Job{
				ID:       "binstore-storagelocker",
				Name:     "binstore-storagelocker",
				Type:     "service",
				Priority: 50,
				Region:   "global",

				TaskGroups: []*structs.TaskGroup{
					&structs.TaskGroup{
						Name:      "binsl",
						Count:     1,
						LocalDisk: structs.DefaultLocalDisk(),
						Tasks: []*structs.Task{
							&structs.Task{
								Name:   "binstore",
								Driver: "docker",
								Resources: &structs.Resources{
									CPU:      100,
									MemoryMB: 10,
									IOPS:     0,
								},
								LogConfig: &structs.LogConfig{
									MaxFiles:      10,
									MaxFileSizeMB: 10,
								},
								Artifacts: []*structs.TaskArtifact{
									{
										GetterSource:  "http://foo.com/bar",
										GetterOptions: map[string]string{"foo": "bar"},
										RelativeDest:  "",
									},
									{
										GetterSource:  "http://foo.com/baz",
										GetterOptions: nil,
										RelativeDest:  "local/",
									},
									{
										GetterSource:  "http://foo.com/bam",
										GetterOptions: nil,
										RelativeDest:  "var/foo",
									},
								},
							},
						},
					},
				},
			},
			false,
		},
		{
			"service-check-initial-status.hcl",
			&structs.Job{
				ID:       "check_initial_status",
				Name:     "check_initial_status",
				Type:     "service",
				Priority: 50,
				Region:   "global",
				TaskGroups: []*structs.TaskGroup{
					&structs.TaskGroup{
						Name:      "group",
						Count:     1,
						LocalDisk: structs.DefaultLocalDisk(),
						Tasks: []*structs.Task{
							&structs.Task{
								Name: "task",
								Services: []*structs.Service{
									{
										Name:      "check_initial_status-group-task",
										Tags:      []string{"foo", "bar"},
										PortLabel: "http",
										Checks: []*structs.ServiceCheck{
											{
												Name:          "check-name",
												Type:          "http",
												Interval:      10 * time.Second,
												Timeout:       2 * time.Second,
												InitialStatus: api.HealthPassing,
											},
										},
									},
								},
								LogConfig: structs.DefaultLogConfig(),
							},
						},
					},
				},
			},
			false,
		},
	}

	for _, tc := range cases {
		t.Logf("Testing parse: %s", tc.File)

		path, err := filepath.Abs(filepath.Join("./test-fixtures", tc.File))
		if err != nil {
			t.Fatalf("file: %s\n\n%s", tc.File, err)
		}

		actual, err := ParseFile(path)
		if (err != nil) != tc.Err {
			t.Fatalf("file: %s\n\n%s", tc.File, err)
		}

		if !reflect.DeepEqual(actual, tc.Result) {
			t.Fatalf("file: %s\n\n%#v\n\n%#v", tc.File, actual, tc.Result)
		}
	}
}