func SystemJob() *structs.Job { job := &structs.Job{ Region: "global", ID: structs.GenerateUUID(), Name: "my-job", Type: structs.JobTypeSystem, Priority: 100, AllAtOnce: false, Datacenters: []string{"dc1"}, Constraints: []*structs.Constraint{ &structs.Constraint{ LTarget: "${attr.kernel.name}", RTarget: "linux", Operand: "=", }, }, TaskGroups: []*structs.TaskGroup{ &structs.TaskGroup{ Name: "web", Count: 1, RestartPolicy: &structs.RestartPolicy{ Attempts: 3, Interval: 10 * time.Minute, Delay: 1 * time.Minute, Mode: structs.RestartPolicyModeDelay, }, EphemeralDisk: structs.DefaultEphemeralDisk(), Tasks: []*structs.Task{ &structs.Task{ Name: "web", Driver: "exec", Config: map[string]interface{}{ "command": "/bin/date", }, Env: map[string]string{}, Resources: &structs.Resources{ CPU: 500, MemoryMB: 256, Networks: []*structs.NetworkResource{ &structs.NetworkResource{ MBits: 50, DynamicPorts: []structs.Port{{Label: "http"}}, }, }, }, LogConfig: structs.DefaultLogConfig(), }, }, }, }, Meta: map[string]string{ "owner": "armon", }, Status: structs.JobStatusPending, CreateIndex: 42, ModifyIndex: 99, } return job }
func testExecutorContext(t *testing.T) *ExecutorContext { taskEnv := env.NewTaskEnvironment(mock.Node()) taskName, allocDir := mockAllocDir(t) ctx := &ExecutorContext{ TaskEnv: taskEnv, TaskName: taskName, AllocDir: allocDir, TaskResources: constraint, LogConfig: structs.DefaultLogConfig(), } return ctx }
func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, list *ast.ObjectList) error { list = list.Children() if len(list.Items) == 0 { return nil } // Go through each object and turn it into an actual result. seen := make(map[string]struct{}) for _, item := range list.Items { n := item.Keys[0].Token.Value().(string) // Make sure we haven't already found this if _, ok := seen[n]; ok { return fmt.Errorf("task '%s' defined more than once", n) } seen[n] = struct{}{} // We need this later var listVal *ast.ObjectList if ot, ok := item.Val.(*ast.ObjectType); ok { listVal = ot.List } else { return fmt.Errorf("group '%s': should be an object", n) } // Check for invalid keys valid := []string{ "artifact", "config", "constraint", "driver", "env", "kill_timeout", "logs", "meta", "resources", "service", "user", "vault", } if err := checkHCLKeys(listVal, valid); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s' ->", n)) } var m map[string]interface{} if err := hcl.DecodeObject(&m, item.Val); err != nil { return err } delete(m, "artifact") delete(m, "config") delete(m, "constraint") delete(m, "env") delete(m, "logs") delete(m, "meta") delete(m, "resources") delete(m, "service") delete(m, "vault") // Build the task var t structs.Task t.Name = n if taskGroupName == "" { taskGroupName = n } dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), WeaklyTypedInput: true, Result: &t, }) if err != nil { return err } if err := dec.Decode(m); err != nil { return err } // If we have env, then parse them if o := listVal.Filter("env"); len(o.Items) > 0 { for _, o := range o.Elem().Items { var m map[string]interface{} if err := hcl.DecodeObject(&m, o.Val); err != nil { return err } if err := mapstructure.WeakDecode(m, &t.Env); err != nil { return err } } } if o := listVal.Filter("service"); len(o.Items) > 0 { if err := parseServices(jobName, taskGroupName, &t, o); err != nil { return 
multierror.Prefix(err, fmt.Sprintf("'%s',", n)) } } // If we have config, then parse that if o := listVal.Filter("config"); len(o.Items) > 0 { for _, o := range o.Elem().Items { var m map[string]interface{} if err := hcl.DecodeObject(&m, o.Val); err != nil { return err } if err := mapstructure.WeakDecode(m, &t.Config); err != nil { return err } } // Instantiate a driver to validate the configuration d, err := driver.NewDriver( t.Driver, driver.NewEmptyDriverContext(), ) if err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', config ->", n)) } if err := d.Validate(t.Config); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', config ->", n)) } } // Parse constraints if o := listVal.Filter("constraint"); len(o.Items) > 0 { if err := parseConstraints(&t.Constraints, o); err != nil { return multierror.Prefix(err, fmt.Sprintf( "'%s', constraint ->", n)) } } // Parse out meta fields. These are in HCL as a list so we need // to iterate over them and merge them. if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 { for _, o := range metaO.Elem().Items { var m map[string]interface{} if err := hcl.DecodeObject(&m, o.Val); err != nil { return err } if err := mapstructure.WeakDecode(m, &t.Meta); err != nil { return err } } } // If we have resources, then parse that if o := listVal.Filter("resources"); len(o.Items) > 0 { var r structs.Resources if err := parseResources(&r, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s',", n)) } t.Resources = &r } // If we have logs then parse that logConfig := structs.DefaultLogConfig() if o := listVal.Filter("logs"); len(o.Items) > 0 { if len(o.Items) > 1 { return fmt.Errorf("only one logs block is allowed in a Task. 
Number of logs block found: %d", len(o.Items)) } var m map[string]interface{} logsBlock := o.Items[0] // Check for invalid keys valid := []string{ "max_files", "max_file_size", } if err := checkHCLKeys(logsBlock.Val, valid); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', logs ->", n)) } if err := hcl.DecodeObject(&m, logsBlock.Val); err != nil { return err } if err := mapstructure.WeakDecode(m, &logConfig); err != nil { return err } } t.LogConfig = logConfig // Parse artifacts if o := listVal.Filter("artifact"); len(o.Items) > 0 { if err := parseArtifacts(&t.Artifacts, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', artifact ->", n)) } } // If we have a vault block, then parse that if o := listVal.Filter("vault"); len(o.Items) > 0 { var v structs.Vault if err := parseVault(&v, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', vault ->", n)) } t.Vault = &v } *result = append(*result, &t) } return nil }
// TestParse is a table-driven test: each case names an HCL fixture under
// ./test-fixtures, the expected *structs.Job it should parse into (nil when
// parsing must fail), and Err indicating whether an error is expected. The
// parsed result is compared with reflect.DeepEqual.
func TestParse(t *testing.T) { cases := []struct { File string Result *structs.Job Err bool }{ { "basic.hcl", &structs.Job{ ID: "binstore-storagelocker", Name: "binstore-storagelocker", Type: "service", Priority: 50, AllAtOnce: true, Datacenters: []string{"us2", "eu1"}, Region: "global", Meta: map[string]string{ "foo": "bar", }, Constraints: []*structs.Constraint{ &structs.Constraint{ LTarget: "kernel.os", RTarget: "windows", Operand: "=", }, }, Update: structs.UpdateStrategy{ Stagger: 60 * time.Second, MaxParallel: 2, }, TaskGroups: []*structs.TaskGroup{ &structs.TaskGroup{ Name: "outside", Count: 1, Tasks: []*structs.Task{ &structs.Task{ Name: "outside", Driver: "java", Config: map[string]interface{}{ "jar_path": "s3://my-cool-store/foo.jar", }, Meta: map[string]string{ "my-cool-key": "foobar", }, LogConfig: structs.DefaultLogConfig(), }, }, }, &structs.TaskGroup{ Name: "binsl", Count: 5, Constraints: []*structs.Constraint{ &structs.Constraint{ LTarget: "kernel.os", RTarget: "linux", Operand: "=", }, }, Meta: map[string]string{ "elb_mode": "tcp", "elb_interval": "10", "elb_checks": "3", }, RestartPolicy: &structs.RestartPolicy{ Interval: 10 * time.Minute, Attempts: 5, Delay: 15 * time.Second, Mode: "delay", }, Tasks: []*structs.Task{ &structs.Task{ Name: "binstore", Driver: "docker", User: "******", Config: map[string]interface{}{ "image": "hashicorp/binstore", }, Services: []*structs.Service{ { Name: "binstore-storagelocker-binsl-binstore", Tags: []string{"foo", "bar"}, PortLabel: "http", Checks: []*structs.ServiceCheck{ { Name: "check-name", Type: "tcp", Interval: 10 * time.Second, Timeout: 2 * time.Second, }, }, }, }, Env: map[string]string{ "HELLO": "world", "LOREM": "ipsum", }, Resources: &structs.Resources{ CPU: 500, MemoryMB: 128, DiskMB: 300, IOPS: 0, Networks: []*structs.NetworkResource{ &structs.NetworkResource{ MBits: 100, ReservedPorts: []structs.Port{{"one", 1}, {"two", 2}, {"three", 3}}, DynamicPorts: []structs.Port{{"http", 0}, {"https", 0}, 
{"admin", 0}}, }, }, }, KillTimeout: 22 * time.Second, LogConfig: &structs.LogConfig{ MaxFiles: 10, MaxFileSizeMB: 100, }, Artifacts: []*structs.TaskArtifact{ { GetterSource: "http://foo.com/artifact", RelativeDest: "local/", GetterOptions: map[string]string{ "checksum": "md5:b8a4f3f72ecab0510a6a31e997461c5f", }, }, { GetterSource: "http://bar.com/artifact", RelativeDest: "local/", GetterOptions: map[string]string{ "checksum": "md5:ff1cc0d3432dad54d607c1505fb7245c", }, }, }, }, &structs.Task{ Name: "storagelocker", Driver: "docker", User: "", Config: map[string]interface{}{ "image": "hashicorp/storagelocker", }, Resources: &structs.Resources{ CPU: 500, MemoryMB: 128, DiskMB: 300, IOPS: 30, }, Constraints: []*structs.Constraint{ &structs.Constraint{ LTarget: "kernel.arch", RTarget: "amd64", Operand: "=", }, }, LogConfig: structs.DefaultLogConfig(), }, }, }, }, }, false, }, { "multi-network.hcl", nil, true, }, { "multi-resource.hcl", nil, true, }, { "default-job.hcl", &structs.Job{ ID: "foo", Name: "foo", Priority: 50, Region: "global", Type: "service", }, false, }, { "version-constraint.hcl", &structs.Job{ ID: "foo", Name: "foo", Priority: 50, Region: "global", Type: "service", Constraints: []*structs.Constraint{ &structs.Constraint{ LTarget: "$attr.kernel.version", RTarget: "~> 3.2", Operand: structs.ConstraintVersion, }, }, }, false, }, { "regexp-constraint.hcl", &structs.Job{ ID: "foo", Name: "foo", Priority: 50, Region: "global", Type: "service", Constraints: []*structs.Constraint{ &structs.Constraint{ LTarget: "$attr.kernel.version", RTarget: "[0-9.]+", Operand: structs.ConstraintRegex, }, }, }, false, }, { "distinctHosts-constraint.hcl", &structs.Job{ ID: "foo", Name: "foo", Priority: 50, Region: "global", Type: "service", Constraints: []*structs.Constraint{ &structs.Constraint{ Operand: structs.ConstraintDistinctHosts, }, }, }, false, }, { "periodic-cron.hcl", &structs.Job{ ID: "foo", Name: "foo", Priority: 50, Region: "global", Type: "service", Periodic: 
&structs.PeriodicConfig{ Enabled: true, SpecType: structs.PeriodicSpecCron, Spec: "*/5 * * *", ProhibitOverlap: true, }, }, false, }, { "specify-job.hcl", &structs.Job{ ID: "job1", Name: "My Job", Priority: 50, Region: "global", Type: "service", }, false, }, { "task-nested-config.hcl", &structs.Job{ Region: "global", ID: "foo", Name: "foo", Type: "service", Priority: 50, TaskGroups: []*structs.TaskGroup{ &structs.TaskGroup{ Name: "bar", Count: 1, Tasks: []*structs.Task{ &structs.Task{ Name: "bar", Driver: "docker", Config: map[string]interface{}{ "image": "hashicorp/image", "port_map": []map[string]interface{}{ map[string]interface{}{ "db": 1234, }, }, }, LogConfig: &structs.LogConfig{ MaxFiles: 10, MaxFileSizeMB: 10, }, }, }, }, }, }, false, }, { "bad-artifact.hcl", nil, true, }, { "artifacts.hcl", &structs.Job{ ID: "binstore-storagelocker", Name: "binstore-storagelocker", Type: "service", Priority: 50, Region: "global", TaskGroups: []*structs.TaskGroup{ &structs.TaskGroup{ Name: "binsl", Count: 1, Tasks: []*structs.Task{ &structs.Task{ Name: "binstore", Driver: "docker", Resources: &structs.Resources{ CPU: 100, MemoryMB: 10, DiskMB: 300, IOPS: 0, }, LogConfig: &structs.LogConfig{ MaxFiles: 10, MaxFileSizeMB: 10, }, Artifacts: []*structs.TaskArtifact{ { GetterSource: "http://foo.com/bar", GetterOptions: map[string]string{}, RelativeDest: "", }, { GetterSource: "http://foo.com/baz", GetterOptions: map[string]string{}, RelativeDest: "local/", }, { GetterSource: "http://foo.com/bam", GetterOptions: map[string]string{}, RelativeDest: "var/foo", }, }, }, }, }, }, }, false, }, }
// Walk the table: resolve each fixture path, parse it, and deep-compare.
// NOTE(review): each `continue` below is unreachable — t.Fatalf calls
// runtime.Goexit and never returns; these look like leftovers from an
// earlier t.Errorf-based version. Consider removing them or reverting to
// t.Errorf so the remaining cases still run after a failure.
for _, tc := range cases { t.Logf("Testing parse: %s", tc.File) path, err := filepath.Abs(filepath.Join("./test-fixtures", tc.File)) if err != nil { t.Fatalf("file: %s\n\n%s", tc.File, err) continue } actual, err := ParseFile(path) if (err != nil) != tc.Err { t.Fatalf("file: %s\n\n%s", tc.File, err) continue } if !reflect.DeepEqual(actual, tc.Result) { t.Fatalf("file: %s\n\n%#v\n\n%#v", tc.File, 
actual, tc.Result) } } }
// Job returns a canonical mock service job used by tests: one "web" task
// group (count 10) running an exec task ("/bin/date") with services and a
// script check using runtime interpolation (${TASK}, ${meta.*}), default
// log config, resources with dynamic "http"/"admin" ports, and an
// ephemeral disk; Canonicalize is called before returning.
// NOTE(review): this chunk also contains a second function named Job (the
// older InitFields variant) — presumably from a different file revision;
// only one can exist in a single package. Confirm which is current.
func Job() *structs.Job { job := &structs.Job{ Region: "global", ID: structs.GenerateUUID(), Name: "my-job", Type: structs.JobTypeService, Priority: 50, AllAtOnce: false, Datacenters: []string{"dc1"}, Constraints: []*structs.Constraint{ &structs.Constraint{ LTarget: "${attr.kernel.name}", RTarget: "linux", Operand: "=", }, }, TaskGroups: []*structs.TaskGroup{ &structs.TaskGroup{ Name: "web", Count: 10, EphemeralDisk: &structs.EphemeralDisk{ SizeMB: 150, }, RestartPolicy: &structs.RestartPolicy{ Attempts: 3, Interval: 10 * time.Minute, Delay: 1 * time.Minute, Mode: structs.RestartPolicyModeDelay, }, Tasks: []*structs.Task{ &structs.Task{ Name: "web", Driver: "exec", Config: map[string]interface{}{ "command": "/bin/date", }, Env: map[string]string{ "FOO": "bar", }, Services: []*structs.Service{ { Name: "${TASK}-frontend", PortLabel: "http", Tags: []string{"pci:${meta.pci-dss}", "datacenter:${node.datacenter}"}, Checks: []*structs.ServiceCheck{ { Name: "check-table", Type: structs.ServiceCheckScript, Command: "/usr/local/check-table-${meta.database}", Args: []string{"${meta.version}"}, Interval: 30 * time.Second, Timeout: 5 * time.Second, }, }, }, { Name: "${TASK}-admin", PortLabel: "admin", }, }, LogConfig: structs.DefaultLogConfig(), Resources: &structs.Resources{ CPU: 500, MemoryMB: 256, Networks: []*structs.NetworkResource{ &structs.NetworkResource{ MBits: 50, DynamicPorts: []structs.Port{{Label: "http"}, {Label: "admin"}}, }, }, }, Meta: map[string]string{ "foo": "bar", }, }, }, Meta: map[string]string{ "elb_check_type": "http", "elb_check_interval": "30s", "elb_check_min": "3", }, }, }, Meta: map[string]string{ "owner": "armon", }, Status: structs.JobStatusPending, CreateIndex: 42, ModifyIndex: 99, JobModifyIndex: 99, } job.Canonicalize() return job }
// Job returns a mock service job: one "web" task group (count 10) running
// an exec task ("/bin/date") with two interpolated services, default log
// config, and resources with dynamic "http"/"admin" ports; InitFields is
// called before returning.
// NOTE(review): this is the second definition of Job in this chunk — an
// older variant (InitFields rather than Canonicalize; DiskMB on Resources
// rather than a group EphemeralDisk). Presumably the two come from
// different file revisions; only one can exist per package. Confirm which
// is current before merging.
func Job() *structs.Job { job := &structs.Job{ Region: "global", ID: structs.GenerateUUID(), Name: "my-job", Type: structs.JobTypeService, Priority: 50, AllAtOnce: false, Datacenters: []string{"dc1"}, Constraints: []*structs.Constraint{ &structs.Constraint{ LTarget: "${attr.kernel.name}", RTarget: "linux", Operand: "=", }, }, TaskGroups: []*structs.TaskGroup{ &structs.TaskGroup{ Name: "web", Count: 10, RestartPolicy: &structs.RestartPolicy{ Attempts: 3, Interval: 10 * time.Minute, Delay: 1 * time.Minute, Mode: structs.RestartPolicyModeDelay, }, Tasks: []*structs.Task{ &structs.Task{ Name: "web", Driver: "exec", Config: map[string]interface{}{ "command": "/bin/date", }, Env: map[string]string{ "FOO": "bar", }, Services: []*structs.Service{ { Name: "${TASK}-frontend", PortLabel: "http", }, { Name: "${TASK}-admin", PortLabel: "admin", }, }, LogConfig: structs.DefaultLogConfig(), Resources: &structs.Resources{ CPU: 500, MemoryMB: 256, DiskMB: 150, Networks: []*structs.NetworkResource{ &structs.NetworkResource{ MBits: 50, DynamicPorts: []structs.Port{{Label: "http"}, {Label: "admin"}}, }, }, }, }, }, Meta: map[string]string{ "elb_check_type": "http", "elb_check_interval": "30s", "elb_check_min": "3", }, }, }, Meta: map[string]string{ "owner": "armon", }, Status: structs.JobStatusPending, CreateIndex: 42, ModifyIndex: 99, JobModifyIndex: 99, } job.InitFields() return job }