Example #1: testConfig
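// testConfig returns a default client configuration with temporary state and
// alloc directories and a raised MaxKillTimeout, suitable for use in tests.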
func testConfig() *config.Config {
	conf := config.DefaultConfig()
	conf.StateDir = os.TempDir()
	conf.AllocDir = os.TempDir()
	conf.MaxKillTimeout = 10 * time.Second
	return conf
}
Example #2: TestConsulFingerprint
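// TestConsulFingerprint points the fingerprinter at a stubbed Consul HTTP
// endpoint and verifies that the expected consul.* node attributes and the
// consul node link are set.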
func TestConsulFingerprint(t *testing.T) {
	fp := NewConsulFingerprint(testLogger())
	node := &structs.Node{
		Attributes: make(map[string]string),
	}

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprintln(w, mockConsulResponse)
	}))
	defer ts.Close()

	config := config.DefaultConfig()
	config.ConsulConfig.Addr = strings.TrimPrefix(ts.URL, "http://")

	ok, err := fp.Fingerprint(config, node)
	if err != nil {
		t.Fatalf("Failed to fingerprint: %s", err)
	}
	if !ok {
		t.Fatalf("Failed to apply node attributes")
	}

	assertNodeAttributeContains(t, node, "consul.server")
	assertNodeAttributeContains(t, node, "consul.version")
	assertNodeAttributeContains(t, node, "consul.revision")
	assertNodeAttributeContains(t, node, "unique.consul.name")
	assertNodeAttributeContains(t, node, "consul.datacenter")

	if _, ok := node.Links["consul"]; !ok {
		t.Errorf("Expected a link to consul, none found")
	}
}
Example #3: TestVaultFingerprint
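// TestVaultFingerprint runs the Vault fingerprinter against a test Vault
// server and verifies that the vault.* node attributes are populated.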
func TestVaultFingerprint(t *testing.T) {
	tv := testutil.NewTestVault(t).Start()
	defer tv.Stop()

	fp := NewVaultFingerprint(testLogger())
	node := &structs.Node{
		Attributes: make(map[string]string),
	}

	config := config.DefaultConfig()
	config.VaultConfig = tv.Config

	ok, err := fp.Fingerprint(config, node)
	if err != nil {
		t.Fatalf("Failed to fingerprint: %s", err)
	}
	if !ok {
		t.Fatalf("Failed to apply node attributes")
	}

	assertNodeAttributeContains(t, node, "vault.accessible")
	assertNodeAttributeContains(t, node, "vault.version")
	assertNodeAttributeContains(t, node, "vault.cluster_id")
	assertNodeAttributeContains(t, node, "vault.cluster_name")
}
Example #4: testAllocRunnerFromAlloc
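// testAllocRunnerFromAlloc builds an AllocRunner for the given allocation
// using a default test configuration; when restarts is false, the task
// group's restart policy is disabled and the job is treated as a batch job.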
func testAllocRunnerFromAlloc(alloc *structs.Allocation, restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
	logger := testLogger()
	conf := config.DefaultConfig()
	conf.StateDir = os.TempDir()
	conf.AllocDir = os.TempDir()
	upd := &MockAllocStateUpdater{}
	if !restarts {
		*alloc.Job.LookupTaskGroup(alloc.TaskGroup).RestartPolicy = structs.RestartPolicy{Attempts: 0}
		alloc.Job.Type = structs.JobTypeBatch
	}
	ar := NewAllocRunner(logger, conf, upd.Update, alloc)
	return upd, ar
}
Example #5: testClient
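// testClient creates a dev-mode client for testing, applying the optional
// callback to the configuration before the client is constructed.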
func testClient(t *testing.T, cb func(c *config.Config)) *Client {
	conf := config.DefaultConfig()
	conf.DevMode = true
	if cb != nil {
		cb(conf)
	}

	shutdownCh := make(chan struct{})
	consulSyncer, err := consul.NewSyncer(conf.ConsulConfig, shutdownCh, log.New(os.Stderr, "", log.LstdFlags))
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	client, err := NewClient(conf, consulSyncer)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return client
}
Example #6: testTaskRunnerFromAlloc
// Creates a mock task runner using the first task in the first task group of
// the passed allocation.
func testTaskRunnerFromAlloc(restarts bool, alloc *structs.Allocation) (*MockTaskStateUpdater, *TaskRunner) {
	logger := testLogger()
	conf := config.DefaultConfig()
	conf.StateDir = os.TempDir()
	conf.AllocDir = os.TempDir()
	upd := &MockTaskStateUpdater{}
	task := alloc.Job.TaskGroups[0].Tasks[0]
	// Initialize the port listing. This should be done by the offer process but
	// we have a mock so that doesn't happen.
	task.Resources.Networks[0].ReservedPorts = []structs.Port{{Label: "", Value: 80}}

	allocDir := allocdir.NewAllocDir(filepath.Join(conf.AllocDir, alloc.ID))
	allocDir.Build([]*structs.Task{task})

	ctx := driver.NewExecContext(allocDir, alloc.ID)
	tr := NewTaskRunner(logger, conf, upd.Update, ctx, alloc, task)
	if !restarts {
		tr.restartTracker = noRestartsTracker()
	}
	return upd, tr
}
Example #7: (*Agent) clientConfig
// clientConfig is used to generate a new client configuration struct
// for initializing a Nomad client.
func (a *Agent) clientConfig() (*clientconfig.Config, error) {
	// Setup the configuration
	conf := a.config.ClientConfig
	if conf == nil {
		conf = clientconfig.DefaultConfig()
	}
	if a.server != nil {
		conf.RPCHandler = a.server
	}
	conf.LogOutput = a.logOutput
	conf.DevMode = a.config.DevMode
	if a.config.Region != "" {
		conf.Region = a.config.Region
	}
	if a.config.DataDir != "" {
		conf.StateDir = filepath.Join(a.config.DataDir, "client")
		conf.AllocDir = filepath.Join(a.config.DataDir, "alloc")
	}
	if a.config.Client.StateDir != "" {
		conf.StateDir = a.config.Client.StateDir
	}
	if a.config.Client.AllocDir != "" {
		conf.AllocDir = a.config.Client.AllocDir
	}
	conf.Servers = a.config.Client.Servers
	if a.config.Client.NetworkInterface != "" {
		conf.NetworkInterface = a.config.Client.NetworkInterface
	}
	conf.ChrootEnv = a.config.Client.ChrootEnv
	conf.Options = a.config.Client.Options
	// Logging deprecation messages about consul related configuration in client
	// options
	var invalidConsulKeys []string
	for key := range conf.Options {
		if strings.HasPrefix(key, "consul") {
			invalidConsulKeys = append(invalidConsulKeys, fmt.Sprintf("options.%s", key))
		}
	}
	if len(invalidConsulKeys) > 0 {
		a.logger.Printf("[WARN] agent: Invalid keys: %v", strings.Join(invalidConsulKeys, ","))
		a.logger.Printf(`Nomad client ignores consul related configuration in client options. 
		Please refer to the guide https://www.nomadproject.io/docs/agent/config.html#consul_options 
		to configure Nomad to work with Consul.`)
	}

	if a.config.Client.NetworkSpeed != 0 {
		conf.NetworkSpeed = a.config.Client.NetworkSpeed
	}
	if a.config.Client.MaxKillTimeout != "" {
		dur, err := time.ParseDuration(a.config.Client.MaxKillTimeout)
		if err != nil {
			return nil, fmt.Errorf("Error parsing retry interval: %s", err)
		}
		conf.MaxKillTimeout = dur
	}
	conf.ClientMaxPort = uint(a.config.Client.ClientMaxPort)
	conf.ClientMinPort = uint(a.config.Client.ClientMinPort)

	// Setup the node
	conf.Node = new(structs.Node)
	conf.Node.Datacenter = a.config.Datacenter
	conf.Node.Name = a.config.NodeName
	conf.Node.Meta = a.config.Client.Meta
	conf.Node.NodeClass = a.config.Client.NodeClass

	// Resolve the Client's HTTP address
	if a.config.AdvertiseAddrs.HTTP != "" {
		a.clientHTTPAddr = a.config.AdvertiseAddrs.HTTP
	} else if a.config.Addresses.HTTP != "" {
		a.clientHTTPAddr = net.JoinHostPort(a.config.Addresses.HTTP, strconv.Itoa(a.config.Ports.HTTP))
	} else if a.config.BindAddr != "" {
		a.clientHTTPAddr = net.JoinHostPort(a.config.BindAddr, strconv.Itoa(a.config.Ports.HTTP))
	} else {
		a.clientHTTPAddr = net.JoinHostPort("127.0.0.1", strconv.Itoa(a.config.Ports.HTTP))
	}
	addr, err := net.ResolveTCPAddr("tcp", a.clientHTTPAddr)
	if err != nil {
		return nil, fmt.Errorf("error resolving HTTP addr %+q: %v", a.clientHTTPAddr, err)
	}
	httpAddr := net.JoinHostPort(addr.IP.String(), strconv.Itoa(addr.Port))

	conf.Node.HTTPAddr = httpAddr
	a.clientHTTPAddr = httpAddr

	// Reserve resources on the node.
	r := conf.Node.Reserved
	if r == nil {
		r = new(structs.Resources)
		conf.Node.Reserved = r
	}
	r.CPU = a.config.Client.Reserved.CPU
	r.MemoryMB = a.config.Client.Reserved.MemoryMB
	r.DiskMB = a.config.Client.Reserved.DiskMB
	r.IOPS = a.config.Client.Reserved.IOPS
	conf.GloballyReservedPorts = a.config.Client.Reserved.ParsedReservedPorts

	conf.Version = fmt.Sprintf("%s%s", a.config.Version, a.config.VersionPrerelease)
	conf.Revision = a.config.Revision

	if a.config.Consul.AutoAdvertise && a.config.Consul.ClientServiceName == "" {
		return nil, fmt.Errorf("client_service_name must be set when auto_advertise is enabled")
	}

	conf.ConsulConfig = a.config.Consul
	conf.VaultConfig = a.config.Vault
	conf.StatsCollectionInterval = a.config.Telemetry.collectionInterval
	conf.PublishNodeMetrics = a.config.Telemetry.PublishNodeMetrics
	conf.PublishAllocationMetrics = a.config.Telemetry.PublishAllocationMetrics
	return conf, nil
}
Example #8: TestVaultClient_Heap
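// TestVaultClient_Heap exercises the vault client's renewal heap: renewal
// requests are pushed with different renewal times, popped in order of the
// earliest renewal, rescheduled via Update, and finally removed with
// StopRenewToken.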
func TestVaultClient_Heap(t *testing.T) {
	conf := config.DefaultConfig()
	conf.VaultConfig.Enabled = true
	conf.VaultConfig.Token = "testvaulttoken"
	conf.VaultConfig.TaskTokenTTL = "10s"

	logger := log.New(os.Stderr, "TEST: ", log.Lshortfile|log.LstdFlags)
	c, err := NewVaultClient(conf.VaultConfig, logger, nil)
	if err != nil {
		t.Fatal(err)
	}
	if c == nil {
		t.Fatal("failed to create vault client")
	}

	now := time.Now()

	renewalReq1 := &vaultClientRenewalRequest{
		errCh:     make(chan error, 1),
		id:        "id1",
		increment: 10,
	}
	if err := c.heap.Push(renewalReq1, now.Add(50*time.Second)); err != nil {
		t.Fatal(err)
	}
	if !c.isTracked("id1") {
		t.Fatalf("id1 should have been tracked")
	}

	renewalReq2 := &vaultClientRenewalRequest{
		errCh:     make(chan error, 1),
		id:        "id2",
		increment: 10,
	}
	if err := c.heap.Push(renewalReq2, now.Add(40*time.Second)); err != nil {
		t.Fatal(err)
	}
	if !c.isTracked("id2") {
		t.Fatalf("id2 should have been tracked")
	}

	renewalReq3 := &vaultClientRenewalRequest{
		errCh:     make(chan error, 1),
		id:        "id3",
		increment: 10,
	}
	if err := c.heap.Push(renewalReq3, now.Add(60*time.Second)); err != nil {
		t.Fatal(err)
	}
	if !c.isTracked("id3") {
		t.Fatalf("id3 should have been tracked")
	}

	// Reading elements should yield id2, id1 and id3 in order
	req, _ := c.nextRenewal()
	if req != renewalReq2 {
		t.Fatalf("bad: expected: %#v, actual: %#v", renewalReq2, req)
	}
	if err := c.heap.Update(req, now.Add(70*time.Second)); err != nil {
		t.Fatal(err)
	}

	req, _ = c.nextRenewal()
	if req != renewalReq1 {
		t.Fatalf("bad: expected: %#v, actual: %#v", renewalReq1, req)
	}
	if err := c.heap.Update(req, now.Add(80*time.Second)); err != nil {
		t.Fatal(err)
	}

	req, _ = c.nextRenewal()
	if req != renewalReq3 {
		t.Fatalf("bad: expected: %#v, actual: %#v", renewalReq3, req)
	}
	if err := c.heap.Update(req, now.Add(90*time.Second)); err != nil {
		t.Fatal(err)
	}

	if err := c.StopRenewToken("id1"); err != nil {
		t.Fatal(err)
	}

	if err := c.StopRenewToken("id2"); err != nil {
		t.Fatal(err)
	}

	if err := c.StopRenewToken("id3"); err != nil {
		t.Fatal(err)
	}

	if c.isTracked("id1") {
		t.Fatalf("id1 should not have been tracked")
	}

	if c.isTracked("id1") {
		t.Fatalf("id1 should not have been tracked")
	}

	if c.isTracked("id1") {
		t.Fatalf("id1 should not have been tracked")
	}

}
Example #9: NewSyncer
// NewSyncer returns a new consul.Syncer
func NewSyncer(consulConfig *config.ConsulConfig, shutdownCh chan struct{}, logger *log.Logger) (*Syncer, error) {
	var err error
	var c *consul.Client

	cfg := consul.DefaultConfig()

	// If a nil consulConfig was provided, fall back to the default config
	if consulConfig == nil {
		consulConfig = cconfig.DefaultConfig().ConsulConfig
	}

	if consulConfig.Addr != "" {
		cfg.Address = consulConfig.Addr
	}
	if consulConfig.Token != "" {
		cfg.Token = consulConfig.Token
	}
	if consulConfig.Auth != "" {
		var username, password string
		if strings.Contains(consulConfig.Auth, ":") {
			split := strings.SplitN(consulConfig.Auth, ":", 2)
			username = split[0]
			password = split[1]
		} else {
			username = consulConfig.Auth
		}

		cfg.HttpAuth = &consul.HttpBasicAuth{
			Username: username,
			Password: password,
		}
	}
	if consulConfig.EnableSSL {
		cfg.Scheme = "https"
		tlsCfg := consul.TLSConfig{
			Address:            cfg.Address,
			CAFile:             consulConfig.CAFile,
			CertFile:           consulConfig.CertFile,
			KeyFile:            consulConfig.KeyFile,
			InsecureSkipVerify: !consulConfig.VerifySSL,
		}
		tlsClientCfg, err := consul.SetupTLSConfig(&tlsCfg)
		if err != nil {
			return nil, fmt.Errorf("error creating tls client config for consul: %v", err)
		}
		cfg.HttpClient.Transport = &http.Transport{
			TLSClientConfig: tlsClientCfg,
		}
	}
	if consulConfig.EnableSSL && !consulConfig.VerifySSL {
		cfg.HttpClient.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		}
	}
	if c, err = consul.NewClient(cfg); err != nil {
		return nil, err
	}
	consulSyncer := Syncer{
		client:            c,
		logger:            logger,
		consulAvailable:   true,
		shutdownCh:        shutdownCh,
		servicesGroups:    make(map[ServiceDomain]map[ServiceKey]*consul.AgentServiceRegistration),
		checkGroups:       make(map[ServiceDomain]map[ServiceKey][]*consul.AgentCheckRegistration),
		trackedServices:   make(map[consulServiceID]*consul.AgentServiceRegistration),
		trackedChecks:     make(map[consulCheckID]*consul.AgentCheckRegistration),
		checkRunners:      make(map[consulCheckID]*CheckRunner),
		periodicCallbacks: make(map[string]types.PeriodicCallback),
	}

	return &consulSyncer, nil
}
Example #10: (*Agent) clientConfig
// clientConfig is used to generate a new client configuration struct
// for initializing a Nomad client.
func (a *Agent) clientConfig() (*clientconfig.Config, error) {
	// Setup the configuration
	conf := a.config.ClientConfig
	if conf == nil {
		conf = clientconfig.DefaultConfig()
	}
	if a.server != nil {
		conf.RPCHandler = a.server
	}
	conf.LogOutput = a.logOutput
	conf.DevMode = a.config.DevMode
	if a.config.Region != "" {
		conf.Region = a.config.Region
	}
	if a.config.DataDir != "" {
		conf.StateDir = filepath.Join(a.config.DataDir, "client")
		conf.AllocDir = filepath.Join(a.config.DataDir, "alloc")
	}
	if a.config.Client.StateDir != "" {
		conf.StateDir = a.config.Client.StateDir
	}
	if a.config.Client.AllocDir != "" {
		conf.AllocDir = a.config.Client.AllocDir
	}
	conf.Servers = a.config.Client.Servers
	if a.config.Client.NetworkInterface != "" {
		conf.NetworkInterface = a.config.Client.NetworkInterface
	}
	conf.Options = a.config.Client.Options
	if a.config.Client.NetworkSpeed != 0 {
		conf.NetworkSpeed = a.config.Client.NetworkSpeed
	}
	if a.config.Client.MaxKillTimeout != "" {
		dur, err := time.ParseDuration(a.config.Client.MaxKillTimeout)
		if err != nil {
			return nil, fmt.Errorf("Error parsing retry interval: %s", err)
		}
		conf.MaxKillTimeout = dur
	}
	conf.ClientMaxPort = uint(a.config.Client.ClientMaxPort)
	conf.ClientMinPort = uint(a.config.Client.ClientMinPort)

	// Setup the node
	conf.Node = new(structs.Node)
	conf.Node.Datacenter = a.config.Datacenter
	conf.Node.Name = a.config.NodeName
	conf.Node.Meta = a.config.Client.Meta
	conf.Node.NodeClass = a.config.Client.NodeClass

	// Resolve the Client's HTTP address
	if a.config.AdvertiseAddrs.HTTP != "" {
		a.clientHTTPAddr = a.config.AdvertiseAddrs.HTTP
	} else if a.config.Addresses.HTTP != "" {
		a.clientHTTPAddr = fmt.Sprintf("%v:%v", a.config.Addresses.HTTP, a.config.Ports.HTTP)
	} else if a.config.BindAddr != "" {
		a.clientHTTPAddr = fmt.Sprintf("%v:%v", a.config.BindAddr, a.config.Ports.HTTP)
	} else {
		a.clientHTTPAddr = fmt.Sprintf("%v:%v", "127.0.0.1", a.config.Ports.HTTP)
	}
	addr, err := net.ResolveTCPAddr("tcp", a.clientHTTPAddr)
	if err != nil {
		return nil, fmt.Errorf("error resolving HTTP addr %+q: %v", a.clientHTTPAddr, err)
	}
	httpAddr := fmt.Sprintf("%s:%d", addr.IP.String(), addr.Port)
	conf.Node.HTTPAddr = httpAddr
	a.clientHTTPAddr = httpAddr

	// Resolve the Client's RPC address
	if a.config.AdvertiseAddrs.RPC != "" {
		a.clientRPCAddr = a.config.AdvertiseAddrs.RPC
	} else if a.config.Addresses.RPC != "" {
		a.clientRPCAddr = fmt.Sprintf("%v:%v", a.config.Addresses.RPC, a.config.Ports.RPC)
	} else if a.config.BindAddr != "" {
		a.clientRPCAddr = fmt.Sprintf("%v:%v", a.config.BindAddr, a.config.Ports.RPC)
	} else {
		a.clientRPCAddr = fmt.Sprintf("%v:%v", "127.0.0.1", a.config.Ports.RPC)
	}
	addr, err = net.ResolveTCPAddr("tcp", a.clientRPCAddr)
	if err != nil {
		return nil, fmt.Errorf("error resolving RPC addr %+q: %v", a.clientRPCAddr, err)
	}
	a.clientRPCAddr = fmt.Sprintf("%s:%d", addr.IP.String(), addr.Port)

	// Reserve resources on the node.
	r := conf.Node.Reserved
	if r == nil {
		r = new(structs.Resources)
		conf.Node.Reserved = r
	}
	r.CPU = a.config.Client.Reserved.CPU
	r.MemoryMB = a.config.Client.Reserved.MemoryMB
	r.DiskMB = a.config.Client.Reserved.DiskMB
	r.IOPS = a.config.Client.Reserved.IOPS
	conf.GloballyReservedPorts = a.config.Client.Reserved.ParsedReservedPorts

	conf.Version = fmt.Sprintf("%s%s", a.config.Version, a.config.VersionPrerelease)
	conf.Revision = a.config.Revision

	if a.config.Consul.AutoAdvertise && a.config.Consul.ClientServiceName == "" {
		return nil, fmt.Errorf("client_service_name must be set when auto_advertise is enabled")
	}

	conf.ConsulConfig = a.config.Consul

	conf.StatsCollectionInterval = a.config.Client.StatsConfig.collectionInterval

	return conf, nil
}