Example #1
func testServer(t *testing.T, cb func(*Config)) *Server {
	// Setup the default settings
	config := DefaultConfig()
	config.Build = "unittest"
	config.DevMode = true
	config.RPCAddr = &net.TCPAddr{
		IP:   []byte{127, 0, 0, 1},
		Port: getPort(),
	}
	nodeNum := atomic.AddUint32(&nodeNumber, 1)
	config.NodeName = fmt.Sprintf("nomad-%03d", nodeNum)

	// Tighten the Serf timing
	config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfConfig.MemberlistConfig.BindPort = getPort()
	config.SerfConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfConfig.MemberlistConfig.RetransmitMult = 2
	config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond

	// Tighten the Raft timing
	config.RaftConfig.LeaderLeaseTimeout = 50 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 50 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 50 * time.Millisecond
	config.RaftTimeout = 500 * time.Millisecond

	// Disable Vault
	f := false
	config.VaultConfig.Enabled = &f

	// Invoke the callback if any
	if cb != nil {
		cb(config)
	}

	// Enable raft as leader if we have bootstrap on
	config.RaftConfig.StartAsLeader = !config.DevDisableBootstrap

	shutdownCh := make(chan struct{})
	logger := log.New(config.LogOutput, "", log.LstdFlags)
	consulSyncer, err := consul.NewSyncer(config.ConsulConfig, shutdownCh, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create server
	server, err := NewServer(config, consulSyncer, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return server
}
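As a minimal sketch of how this helper is typically driven, a test can pass a callback that tweaks the config before the server starts and then wait for leadership. The NumSchedulers override and the testutil.WaitForLeader helper are assumptions for illustration (the helper does appear in the later client examples), not part of the example above.

func TestServer_Sketch(t *testing.T) {
	// Hypothetical usage of the testServer helper defined above.
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // assumed field; run without schedulers for a pure RPC test
	})
	defer s1.Shutdown()

	// Wait until the single node elects itself leader.
	testutil.WaitForLeader(t, s1.RPC)
}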
Example #2
func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
	f := false

	// Setup the default settings
	config := nomad.DefaultConfig()
	config.VaultConfig.Enabled = &f
	config.Build = "unittest"
	config.DevMode = true
	config.RPCAddr = &net.TCPAddr{
		IP:   []byte{127, 0, 0, 1},
		Port: getPort(),
	}
	config.NodeName = fmt.Sprintf("Node %d", config.RPCAddr.Port)

	// Tighten the Serf timing
	config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfConfig.MemberlistConfig.BindPort = getPort()
	config.SerfConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfConfig.MemberlistConfig.RetransmitMult = 2
	config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond

	// Tighten the Raft timing
	config.RaftConfig.LeaderLeaseTimeout = 20 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 40 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 40 * time.Millisecond
	config.RaftConfig.StartAsLeader = true
	config.RaftTimeout = 500 * time.Millisecond

	// Invoke the callback if any
	if cb != nil {
		cb(config)
	}

	shutdownCh := make(chan struct{})
	logger := log.New(config.LogOutput, "", log.LstdFlags)
	consulSyncer, err := consul.NewSyncer(config.ConsulConfig, shutdownCh, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create server
	server, err := nomad.NewServer(config, consulSyncer, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return server, config.RPCAddr.String()
}
Example #3
// SyncServices syncs the services of the task that the executor is running with
// Consul
func (e *UniversalExecutor) SyncServices(ctx *ConsulContext) error {
	e.logger.Printf("[INFO] executor: registering services")
	e.consulCtx = ctx
	if e.consulSyncer == nil {
		cs, err := consul.NewSyncer(ctx.ConsulConfig, e.shutdownCh, e.logger)
		if err != nil {
			return err
		}
		e.consulSyncer = cs
		go e.consulSyncer.Run()
	}
	e.interpolateServices(e.ctx.Task)
	e.consulSyncer.SetDelegatedChecks(e.createCheckMap(), e.createCheck)
	e.consulSyncer.SetAddrFinder(e.ctx.Task.FindHostAndPortFor)
	domain := consul.NewExecutorDomain(e.ctx.AllocID, e.ctx.Task.Name)
	serviceMap := generateServiceKeys(e.ctx.AllocID, e.ctx.Task.Services)
	e.consulSyncer.SetServices(domain, serviceMap)
	return nil
}
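Note that the executor creates the Syncer lazily on the first SyncServices call and starts its Run loop in a separate goroutine; later calls reuse the same syncer and only refresh the delegated checks, the address finder, and the service map for the task.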
Example #4
func testClient(t *testing.T, cb func(c *config.Config)) *Client {
	conf := config.DefaultConfig()
	conf.DevMode = true
	if cb != nil {
		cb(conf)
	}

	shutdownCh := make(chan struct{})
	consulSyncer, err := consul.NewSyncer(conf.ConsulConfig, shutdownCh, log.New(os.Stderr, "", log.LstdFlags))
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	client, err := NewClient(conf, consulSyncer)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return client
}
Example #5
File: agent.go Project: nak3/nomad
// setupConsulSyncer creates the Consul tasks used by this Nomad Agent
// (either Client or Server mode).
func (a *Agent) setupConsulSyncer() error {
	var err error
	a.consulSyncer, err = consul.NewSyncer(a.config.Consul, a.shutdownCh, a.logger)
	if err != nil {
		return err
	}

	a.consulSyncer.SetAddrFinder(func(portLabel string) (string, int) {
		host, port, err := net.SplitHostPort(portLabel)
		if err != nil {
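			// portLabel was not in "host:port" form, so port is empty here;
			// the Atoi below then fails and we return ("", 0).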
			p, err := strconv.Atoi(port)
			if err != nil {
				return "", 0
			}
			return "", p
		}

		// If the addr for the service is ":port", then we fall back
		// to Nomad's default address resolution protocol.
		//
		// TODO(sean@): This should poll Consul to figure out what
		// its advertise address is and use that in order to handle
		// the case where there is something funky like NAT on this
		// host.  For now we just use the BindAddr if set, otherwise
		// we fall back to a loopback addr.
		if host == "" {
			if a.config.BindAddr != "" {
				host = a.config.BindAddr
			} else {
				host = "127.0.0.1"
			}
		}
		p, err := strconv.Atoi(port)
		if err != nil {
			return host, 0
		}
		return host, p
	})

	return nil
}
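To make the fallback behavior above concrete, here is a small self-contained sketch (not part of the agent) that mirrors the same resolution logic with the bind address passed in explicitly; the function name and the sample labels are illustrative only.

package main

import (
	"fmt"
	"net"
	"strconv"
)

// resolveAddr mirrors the AddrFinder logic above: split the label, fall back
// to the bind address (or loopback) when the host part is empty, and return
// ("", 0) when no usable port can be parsed.
func resolveAddr(bindAddr, portLabel string) (string, int) {
	host, port, err := net.SplitHostPort(portLabel)
	if err != nil {
		p, err := strconv.Atoi(port)
		if err != nil {
			return "", 0
		}
		return "", p
	}
	if host == "" {
		if bindAddr != "" {
			host = bindAddr
		} else {
			host = "127.0.0.1"
		}
	}
	p, err := strconv.Atoi(port)
	if err != nil {
		return host, 0
	}
	return host, p
}

func main() {
	fmt.Println(resolveAddr("0.0.0.0", "10.0.0.5:4646")) // 10.0.0.5 4646
	fmt.Println(resolveAddr("0.0.0.0", ":4646"))         // 0.0.0.0 4646 (bind address fallback)
	fmt.Println(resolveAddr("0.0.0.0", "http"))          // empty host, 0 (no numeric port)
}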
Example #6
func TestClient_SaveRestoreState(t *testing.T) {
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.DevMode = false
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.NodeID = c1.Node().ID
	alloc1.Job = job
	alloc1.JobID = job.ID
	alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	task := alloc1.Job.TaskGroups[0].Tasks[0]
	task.Config["run_for"] = "10s"

	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertAllocs(102, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc1.ID]
		c1.allocLock.RUnlock()
		if ar == nil {
			return false, fmt.Errorf("nil alloc runner")
		}
		if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("client status: got %v; want %v", ar.Alloc().ClientStatus, structs.AllocClientStatusRunning)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Shutdown the client, saves state
	if err := c1.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new client
	shutdownCh := make(chan struct{})
	logger := log.New(c1.config.LogOutput, "", log.LstdFlags)
	consulSyncer, err := consul.NewSyncer(c1.config.ConsulConfig, shutdownCh, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	c2, err := NewClient(c1.config, consulSyncer, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer c2.Shutdown()

	// Ensure the allocation is running
	testutil.WaitForResult(func() (bool, error) {
		c2.allocLock.RLock()
		ar := c2.allocs[alloc1.ID]
		c2.allocLock.RUnlock()
		status := ar.Alloc().ClientStatus
		alive := status == structs.AllocClientStatusRunning ||
			status == structs.AllocClientStatusPending
		if !alive {
			return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Destroy all the allocations
	c2.allocLock.Lock()
	for _, ar := range c2.allocs {
		ar.Destroy()
		<-ar.WaitCh()
	}
	c2.allocLock.Unlock()
}
Example #7
func TestClient_SaveRestoreState(t *testing.T) {
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.DevMode = false
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Create mock allocations
	alloc1 := mock.Alloc()
	alloc1.NodeID = c1.Node().ID
	task := alloc1.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}

	state := s1.State()
	err := state.UpsertAllocs(100, []*structs.Allocation{alloc1})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc1.ID]
		c1.allocLock.RUnlock()
		return ar != nil && ar.Alloc().ClientStatus == structs.AllocClientStatusRunning, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Shutdown the client, saves state
	err = c1.Shutdown()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new client
	shutdownCh := make(chan struct{})
	consulSyncer, err := consul.NewSyncer(c1.config.ConsulConfig, shutdownCh, log.New(os.Stderr, "", log.LstdFlags))
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	c2, err := NewClient(c1.config, consulSyncer)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer c2.Shutdown()

	// Ensure the allocation is running
	testutil.WaitForResult(func() (bool, error) {
		c2.allocLock.RLock()
		ar := c2.allocs[alloc1.ID]
		c2.allocLock.RUnlock()
		status := ar.Alloc().ClientStatus
		alive := status == structs.AllocClientStatusRunning ||
			status == structs.AllocClientStatusPending
		if !alive {
			return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}