Ejemplo n.º 1
0
// TestConsulServiceRegisterServices exercises registering a mock task's
// services with Consul via SyncServices. The assertion body is currently
// commented out pending a rewrite, so the whole test is skipped up front.
func TestConsulServiceRegisterServices(t *testing.T) {
	t.Skip()

	shutdownCh := make(chan struct{})
	cs, err := NewSyncer(config.DefaultConsulConfig(), shutdownCh, logger)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	// Skip (rather than silently pass via a bare return) when no local
	// consul agent is available, matching the other tests in this package.
	if !cs.consulPresent() {
		t.Skip("skipping because consul isn't present")
	}
	task := mockTask()
	//cs.SetServiceRegPrefix(serviceRegPrefix)
	cs.SetAddrFinder(task.FindHostAndPortFor)
	if err := cs.SyncServices(); err != nil {
		t.Fatalf("err: %v", err)
	}
	defer cs.Shutdown()

	// service1 := &structs.Service{Name: task.Name}
	// service2 := &structs.Service{Name: task.Name}
	//services := []*structs.Service{service1, service2}
	//service1.ServiceID = fmt.Sprintf("%s-%s:%s/%s", cs.GenerateServiceID(serviceGroupName, service1), task.Name, allocID)
	//service2.ServiceID = fmt.Sprintf("%s-%s:%s/%s", cs.GenerateServiceID(serviceGroupName, service2), task.Name, allocID)

	//cs.SetServices(serviceGroupName, services)
	// if err := servicesPresent(t, services, cs); err != nil {
	// 	t.Fatalf("err : %v", err)
	// }
	// FIXME(sean@)
	// if err := checksPresent(t, []string{check1.Hash(service1ID)}, cs); err != nil {
	// 	t.Fatalf("err : %v", err)
	// }
}
Ejemplo n.º 2
0
// TestCheckRegistration verifies that createCheckReg resolves each check's
// TCP/HTTP target address from the mock task's port labels.
func TestCheckRegistration(t *testing.T) {
	cs, err := NewSyncer(config.DefaultConsulConfig(), make(chan struct{}), logger)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	task := mockTask()
	cs.SetAddrFinder(task.FindHostAndPortFor)

	srvReg, _ := cs.createService(&service1, "domain", "key")
	check1Reg, _ := cs.createCheckReg(&check1, srvReg)
	check2Reg, _ := cs.createCheckReg(&check2, srvReg)
	check3Reg, _ := cs.createCheckReg(&check3, srvReg)

	// Each registration must point at the address/port the task exposes.
	for _, tc := range []struct {
		expected string
		actual   string
	}{
		{"10.10.11.5:20002", check1Reg.TCP},
		{"10.10.11.5:20003", check2Reg.TCP},
		{"http://10.10.11.5:20004/health?p1=1&p2=2", check3Reg.HTTP},
	} {
		if tc.actual != tc.expected {
			t.Fatalf("expected: %v, actual: %v", tc.expected, tc.actual)
		}
	}
}
Ejemplo n.º 3
0
// DefaultConfig returns the default configuration
func DefaultConfig() *Config {
	return &Config{
		ConsulConfig:            config.DefaultConsulConfig(),
		LogOutput:               os.Stderr,
		Region:                  "global",
		StatsCollectionInterval: 1 * time.Second,
	}
}
Ejemplo n.º 4
0
// DefaultConfig returns the default configuration
func DefaultConfig() *Config {
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}

	c := &Config{
		Region:                 DefaultRegion,
		Datacenter:             DefaultDC,
		NodeName:               hostname,
		ProtocolVersion:        ProtocolVersionMax,
		RaftConfig:             raft.DefaultConfig(),
		RaftTimeout:            10 * time.Second,
		LogOutput:              os.Stderr,
		RPCAddr:                DefaultRPCAddr,
		SerfConfig:             serf.DefaultConfig(),
		NumSchedulers:          1,
		ReconcileInterval:      60 * time.Second,
		EvalGCInterval:         5 * time.Minute,
		EvalGCThreshold:        1 * time.Hour,
		JobGCInterval:          5 * time.Minute,
		JobGCThreshold:         4 * time.Hour,
		NodeGCInterval:         5 * time.Minute,
		NodeGCThreshold:        24 * time.Hour,
		EvalNackTimeout:        60 * time.Second,
		EvalDeliveryLimit:      3,
		MinHeartbeatTTL:        10 * time.Second,
		MaxHeartbeatsPerSecond: 50.0,
		HeartbeatGrace:         10 * time.Second,
		FailoverHeartbeatTTL:   300 * time.Second,
		ConsulConfig:           config.DefaultConsulConfig(),
		VaultConfig:            config.DefaultVaultConfig(),
		RPCHoldTimeout:         5 * time.Second,
		TLSConfig:              &config.TLSConfig{},
	}

	// Enable all known schedulers by default
	c.EnabledSchedulers = make([]string, 0, len(scheduler.BuiltinSchedulers))
	for name := range scheduler.BuiltinSchedulers {
		c.EnabledSchedulers = append(c.EnabledSchedulers, name)
	}
	c.EnabledSchedulers = append(c.EnabledSchedulers, structs.JobTypeCore)

	// Default the number of schedulers to match the coores
	c.NumSchedulers = runtime.NumCPU()

	// Increase our reap interval to 3 days instead of 24h.
	c.SerfConfig.ReconnectTimeout = 3 * 24 * time.Hour

	// Serf should use the WAN timing, since we are using it
	// to communicate between DC's
	c.SerfConfig.MemberlistConfig = memberlist.DefaultWANConfig()
	c.SerfConfig.MemberlistConfig.BindPort = DefaultSerfPort

	// Disable shutdown on removal
	c.RaftConfig.ShutdownOnRemove = false
	return c
}
Ejemplo n.º 5
0
// parseConsulConfig decodes an HCL 'consul' block into *result, starting
// from the default Consul configuration so any unspecified keys keep their
// default values. At most one 'consul' block is permitted.
func parseConsulConfig(result **config.ConsulConfig, list *ast.ObjectList) error {
	list = list.Elem()
	if len(list.Items) > 1 {
		return fmt.Errorf("only one 'consul' block allowed")
	}
	if len(list.Items) == 0 {
		// No 'consul' block present: leave *result untouched instead of
		// panicking on the list.Items[0] access below.
		return nil
	}

	// Get our Consul object
	listVal := list.Items[0].Val

	// Check for invalid keys
	valid := []string{
		"address",
		"auth",
		"auto_advertise",
		"ca_file",
		"cert_file",
		"checks_use_advertise",
		"client_auto_join",
		"client_service_name",
		"key_file",
		"server_auto_join",
		"server_service_name",
		"ssl",
		"timeout",
		"token",
		"verify_ssl",
	}

	if err := checkHCLKeys(listVal, valid); err != nil {
		return err
	}

	var m map[string]interface{}
	if err := hcl.DecodeObject(&m, listVal); err != nil {
		return err
	}

	// Decode the generic map on top of the defaults; duration strings like
	// "5s" are converted via the StringToTimeDuration hook.
	consulConfig := config.DefaultConsulConfig()
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
		WeaklyTypedInput: true,
		Result:           &consulConfig,
	})
	if err != nil {
		return err
	}
	if err := dec.Decode(m); err != nil {
		return err
	}

	*result = consulConfig
	return nil
}
Ejemplo n.º 6
0
// makeAgent builds a dev-mode test Agent rooted in a fresh temp directory
// and returns the directory (caller removes it) along with the Agent.
// The optional cb hook may customize the config before it is normalized.
func makeAgent(t testing.TB, cb func(*Config)) (string, *Agent) {
	dir := tmpDir(t)
	conf := DevConfig()

	// Attach a default embedded-server config that we can tune below.
	serverCfg := nomad.DefaultConfig()
	conf.NomadConfig = serverCfg

	// Root all agent and server state in the temp directory.
	conf.DataDir = dir
	conf.NomadConfig.DataDir = dir

	// Bind to loopback on freshly allocated ports.
	conf.BindAddr = "127.0.0.1"
	conf.Ports = &Ports{
		HTTP: getPort(),
		RPC:  getPort(),
		Serf: getPort(),
	}
	conf.NodeName = fmt.Sprintf("Node %d", conf.Ports.RPC)
	conf.Consul = sconfig.DefaultConsulConfig()
	conf.Vault.Enabled = new(bool)

	// Tighten the Serf/memberlist timing so tests converge quickly.
	ml := serverCfg.SerfConfig.MemberlistConfig
	ml.SuspicionMult = 2
	ml.RetransmitMult = 2
	ml.ProbeTimeout = 50 * time.Millisecond
	ml.ProbeInterval = 100 * time.Millisecond
	ml.GossipInterval = 100 * time.Millisecond

	// Tighten the Raft timing and start as leader immediately.
	rc := serverCfg.RaftConfig
	rc.LeaderLeaseTimeout = 20 * time.Millisecond
	rc.HeartbeatTimeout = 40 * time.Millisecond
	rc.ElectionTimeout = 40 * time.Millisecond
	rc.StartAsLeader = true
	serverCfg.RaftTimeout = 500 * time.Millisecond

	// Allow the caller to override anything before normalization.
	if cb != nil {
		cb(conf)
	}

	if err := conf.normalizeAddrs(); err != nil {
		t.Fatalf("error normalizing config: %v", err)
	}
	agent, err := NewAgent(conf, os.Stderr)
	if err != nil {
		// Clean up the temp dir before failing the test.
		os.RemoveAll(dir)
		t.Fatalf("err: %v", err)
	}
	return dir, agent
}
Ejemplo n.º 7
0
// TestConsulServiceUpdateService exercises updating an already-registered
// service's tags via a second SyncServices call. The assertion body is
// commented out pending a rewrite, so the whole test is skipped up front.
func TestConsulServiceUpdateService(t *testing.T) {
	t.Skip()

	shutdownCh := make(chan struct{})
	cs, err := NewSyncer(config.DefaultConsulConfig(), shutdownCh, logger)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	// Skip (rather than silently pass via a bare return) when no local
	// consul agent is available, matching the other tests in this package.
	if !cs.consulPresent() {
		t.Skip("skipping because consul isn't present")
	}

	task := mockTask()
	//cs.SetServiceRegPrefix(serviceRegPrefix)
	cs.SetAddrFinder(task.FindHostAndPortFor)
	if err := cs.SyncServices(); err != nil {
		t.Fatalf("err: %v", err)
	}
	defer cs.Shutdown()

	//Update Service defn 1
	newTags := []string{"tag3"}
	task.Services[0].Tags = newTags
	if err := cs.SyncServices(); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Make sure all the services and checks are still present
	// service1 := &structs.Service{Name: task.Name}
	// service2 := &structs.Service{Name: task.Name}
	//services := []*structs.Service{service1, service2}
	//service1.ServiceID = fmt.Sprintf("%s-%s:%s/%s", cs.GenerateServiceID(serviceGroupName, service1), task.Name, allocID)
	//service2.ServiceID = fmt.Sprintf("%s-%s:%s/%s", cs.GenerateServiceID(serviceGroupName, service2), task.Name, allocID)
	// if err := servicesPresent(t, services, cs); err != nil {
	// 	t.Fatalf("err : %v", err)
	// }
	// FIXME(sean@)
	// if err := checksPresent(t, []string{check1.Hash(service1ID)}, cs); err != nil {
	// 	t.Fatalf("err : %v", err)
	// }

	// check if service defn 1 has been updated
	// consulServices, err := cs.client.Agent().Services()
	// if err != nil {
	// 	t.Fatalf("errL: %v", err)
	// }
	// srv, _ := consulServices[service1.ServiceID]
	// if !reflect.DeepEqual(srv.Tags, newTags) {
	// 	t.Fatalf("expected tags: %v, actual: %v", newTags, srv.Tags)
	// }
}
Ejemplo n.º 8
0
// testConsul returns a Syncer configured with an embedded Consul server.
//
// Callers must defer Syncer.Shutdown() and TestServer.Stop()
//
func testConsul(t *testing.T) (*Syncer, *testutil.TestServer) {
	// Spin up an embedded Consul server, silencing its output unless the
	// test run is verbose (-v).
	server := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) {
		if !testing.Verbose() {
			c.Stdout = ioutil.Discard
			c.Stderr = ioutil.Discard
		}
	})

	// Point a fresh Syncer at the embedded server's HTTP address.
	conf := config.DefaultConsulConfig()
	conf.Addr = server.HTTPAddr

	syncer, err := NewSyncer(conf, nil, logger)
	if err != nil {
		t.Fatalf("Error creating Syncer: %v", err)
	}
	return syncer, server
}
Ejemplo n.º 9
0
// DefaultConfig is a the baseline configuration for Nomad
func DefaultConfig() *Config {
	// Client defaults: disabled, standard dynamic port range reserved.
	clientCfg := &ClientConfig{
		Enabled:        false,
		NetworkSpeed:   100,
		MaxKillTimeout: "30s",
		ClientMinPort:  14000,
		ClientMaxPort:  14512,
		Reserved:       &Resources{},
	}

	// Server defaults: disabled, retrying joins every 30s without limit.
	serverCfg := &ServerConfig{
		Enabled:          false,
		StartJoin:        []string{},
		RetryJoin:        []string{},
		RetryInterval:    "30s",
		RetryMaxAttempts: 0,
	}

	return &Config{
		LogLevel:   "INFO",
		Region:     "global",
		Datacenter: "dc1",
		BindAddr:   "127.0.0.1",
		Ports: &Ports{
			HTTP: 4646,
			RPC:  4647,
			Serf: 4648,
		},
		Addresses:      &Addresses{},
		AdvertiseAddrs: &AdvertiseAddrs{},
		Atlas:          &AtlasConfig{},
		Consul:         config.DefaultConsulConfig(),
		Vault:          config.DefaultVaultConfig(),
		Client:         clientCfg,
		Server:         serverCfg,
		SyslogFacility: "LOCAL0",
		Telemetry: &Telemetry{
			CollectionInterval: "1s",
			collectionInterval: time.Second,
		},
	}
}
Ejemplo n.º 10
0
// TestSyncerChaos hammers two Syncers sharing one embedded Consul server
// with concurrent SetServices / SyncServices / ReapUnmatched calls, then
// asserts that a final reap leaves Consul in a deterministic state.
func TestSyncerChaos(t *testing.T) {
	// Create an embedded Consul server
	testconsul := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) {
		// If -v wasn't specified squelch consul logging
		if !testing.Verbose() {
			c.Stdout = ioutil.Discard
			c.Stderr = ioutil.Discard
		}
	})
	defer testconsul.Stop()

	// Configure Syncer to talk to the test server
	cconf := config.DefaultConsulConfig()
	cconf.Addr = testconsul.HTTPAddr

	clientSyncer, err := NewSyncer(cconf, nil, logger)
	if err != nil {
		t.Fatalf("Error creating Syncer: %v", err)
	}
	defer clientSyncer.Shutdown()

	execSyncer, err := NewSyncer(cconf, nil, logger)
	if err != nil {
		t.Fatalf("Error creating Syncer: %v", err)
	}
	defer execSyncer.Shutdown()

	// Register a single "nomad-client" service on the first syncer.
	clientService := &structs.Service{Name: "nomad-client"}
	services := map[ServiceKey]*structs.Service{
		GenerateServiceKey(clientService): clientService,
	}
	if err := clientSyncer.SetServices("client", services); err != nil {
		t.Fatalf("error setting client service: %v", err)
	}

	// NOTE(review): reapern is declared but never referenced below.
	const execn = 100
	const reapern = 2
	// Buffered so goroutines can report their first error without blocking;
	// later errors are dropped via the non-blocking sends below.
	errors := make(chan error, 100)
	wg := sync.WaitGroup{}

	// Start goroutines to concurrently SetServices
	for i := 0; i < execn; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Each goroutine owns its own domain ("exec-<i>") and grows it
			// to 10 services, one per iteration.
			domain := ServiceDomain(fmt.Sprintf("exec-%d", i))
			services := map[ServiceKey]*structs.Service{}
			for ii := 0; ii < 10; ii++ {
				s := &structs.Service{Name: fmt.Sprintf("exec-%d-%d", i, ii)}
				services[GenerateServiceKey(s)] = s
				if err := execSyncer.SetServices(domain, services); err != nil {
					select {
					case errors <- err:
					default:
					}
					return
				}
				// NOTE(review): bare integer durations are nanoseconds, so
				// this is effectively just a yield — confirm milliseconds
				// were not intended.
				time.Sleep(1)
			}
		}(i)
	}

	// SyncServices runs a timer started by Syncer.Run which we don't use
	// in this test, so run SyncServices concurrently
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < execn; i++ {
			if err := execSyncer.SyncServices(); err != nil {
				select {
				case errors <- err:
				default:
				}
				return
			}
			// NOTE(review): 100ns, not 100ms — see the note above.
			time.Sleep(100)
		}
	}()

	// Concurrently reap everything outside the "nomad-client" domain on
	// the client syncer.
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := clientSyncer.ReapUnmatched([]ServiceDomain{"nomad-client"}); err != nil {
			select {
			case errors <- err:
			default:
			}
			return
		}
	}()

	// Reap all but exec-0-*
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < execn; i++ {
			if err := execSyncer.ReapUnmatched([]ServiceDomain{"exec-0", ServiceDomain(fmt.Sprintf("exec-%d", i))}); err != nil {
				select {
				case errors <- err:
				default:
				}
			}
			// NOTE(review): 100ns again — see the note above.
			time.Sleep(100)
		}
	}()

	// Close the error channel once every worker finishes so the range
	// below terminates.
	go func() {
		wg.Wait()
		close(errors)
	}()

	for err := range errors {
		if err != nil {
			t.Errorf("error setting service from executor goroutine: %v", err)
		}
	}

	// Do a final ReapUnmatched to get consul back into a deterministic state
	if err := execSyncer.ReapUnmatched([]ServiceDomain{"exec-0"}); err != nil {
		t.Fatalf("error doing final reap: %v", err)
	}

	// flattenedServices should be fully populated as ReapUnmatched doesn't
	// touch Syncer's internal state
	expected := map[string]struct{}{}
	for i := 0; i < execn; i++ {
		for ii := 0; ii < 10; ii++ {
			expected[fmt.Sprintf("exec-%d-%d", i, ii)] = struct{}{}
		}
	}

	for _, s := range execSyncer.flattenedServices() {
		_, ok := expected[s.Name]
		if !ok {
			t.Errorf("%s unexpected", s.Name)
		}
		delete(expected, s.Name)
	}
	if len(expected) > 0 {
		// Anything left in `expected` was never seen in internal state.
		left := []string{}
		for s := range expected {
			left = append(left, s)
		}
		sort.Strings(left)
		t.Errorf("Couldn't find %d names in flattened services:\n%s", len(expected), strings.Join(left, "\n"))
	}

	// All but exec-0 and possibly some of exec-99 should have been reaped
	{
		services, err := execSyncer.client.Agent().Services()
		if err != nil {
			t.Fatalf("Error getting services: %v", err)
		}
		expected := []int{}
		for k, service := range services {
			// The consul agent registers itself; skip that entry.
			if service.Service == "consul" {
				continue
			}
			i := -1
			ii := -1
			// Parse the "exec-<i>-<ii>" name back into its indices; -1
			// sentinels survive if the name doesn't match the pattern.
			fmt.Sscanf(service.Service, "exec-%d-%d", &i, &ii)
			switch {
			case i == -1 || ii == -1:
				t.Errorf("invalid service: %s -> %s", k, service.Service)
			case i != 0 || ii > 9:
				t.Errorf("unexpected service: %s -> %s", k, service.Service)
			default:
				expected = append(expected, ii)
			}
		}
		if len(expected) != 10 {
			t.Errorf("expected 0-9 but found: %#q", expected)
		}
	}
}
Ejemplo n.º 11
0
// TestCheckRegistration builds a task with one service and three checks,
// then verifies createCheckReg resolves each check's target address (and
// check1's initial status) from the task's dynamic port labels.
func TestCheckRegistration(t *testing.T) {
	cs, err := NewSyncer(config.DefaultConsulConfig(), make(chan struct{}), logger)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	// check1: TCP on the service's own port label, passing initially.
	check1 := structs.ServiceCheck{
		Name:          "check-foo-1",
		Type:          structs.ServiceCheckTCP,
		Interval:      30 * time.Second,
		Timeout:       5 * time.Second,
		InitialStatus: api.HealthPassing,
	}
	// check2: TCP on an explicit port label.
	check2 := structs.ServiceCheck{
		Name:      "check1",
		Type:      "tcp",
		PortLabel: "port2",
		Interval:  3 * time.Second,
		Timeout:   1 * time.Second,
	}
	// check3: HTTP with a path that includes a query string.
	check3 := structs.ServiceCheck{
		Name:      "check3",
		Type:      "http",
		PortLabel: "port3",
		Path:      "/health?p1=1&p2=2",
		Interval:  3 * time.Second,
		Timeout:   1 * time.Second,
	}
	service1 := structs.Service{
		Name:      "foo-1",
		Tags:      []string{"tag1", "tag2"},
		PortLabel: "port1",
		Checks:    []*structs.ServiceCheck{&check1, &check2},
	}
	// The task exposes three dynamic ports on a single network.
	task := structs.Task{
		Name:     "foo",
		Services: []*structs.Service{&service1},
		Resources: &structs.Resources{
			Networks: []*structs.NetworkResource{
				{
					IP: "10.10.11.5",
					DynamicPorts: []structs.Port{
						{Label: "port1", Value: 20002},
						{Label: "port2", Value: 20003},
						{Label: "port3", Value: 20004},
					},
				},
			},
		},
	}
	cs.SetAddrFinder(task.FindHostAndPortFor)

	srvReg, _ := cs.createService(&service1, "domain", "key")
	check1Reg, _ := cs.createCheckReg(&check1, srvReg)
	check2Reg, _ := cs.createCheckReg(&check2, srvReg)
	check3Reg, _ := cs.createCheckReg(&check3, srvReg)

	for _, tc := range []struct {
		expected string
		actual   string
	}{
		{"10.10.11.5:20002", check1Reg.TCP},
		{"10.10.11.5:20003", check2Reg.TCP},
		{"http://10.10.11.5:20004/health?p1=1&p2=2", check3Reg.HTTP},
		{api.HealthPassing, check1Reg.Status},
	} {
		if tc.actual != tc.expected {
			t.Fatalf("expected: %v, actual: %v", tc.expected, tc.actual)
		}
	}
}
Ejemplo n.º 12
0
// TestConsulServiceUpdateService registers two services, then mutates and
// extends the set, re-syncs, and asserts that the Syncer's internal state
// and the local Consul agent agree on tags, addresses, and ports.
// Requires a running local Consul agent; skipped otherwise.
func TestConsulServiceUpdateService(t *testing.T) {
	cs, err := NewSyncer(config.DefaultConsulConfig(), nil, logger)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	defer cs.Shutdown()
	// Skipping the test if consul isn't present
	if !cs.consulPresent() {
		t.Skip("skipping because consul isn't present")
	}
	// Resolve "host:port" strings produced by service port labels.
	cs.SetAddrFinder(func(h string) (string, int) {
		a, pstr, _ := net.SplitHostPort(h)
		p, _ := net.LookupPort("tcp", pstr)
		return a, p
	})

	service1 := &structs.Service{Name: "foo1", Tags: []string{"a", "b"}}
	service2 := &structs.Service{Name: "foo2"}
	services := map[ServiceKey]*structs.Service{
		GenerateServiceKey(service1): service1,
		GenerateServiceKey(service2): service2,
	}
	if err := cs.SetServices(serviceGroupName, services); err != nil {
		t.Fatalf("error setting services: %v", err)
	}
	if err := cs.SyncServices(); err != nil {
		t.Fatalf("error syncing services: %v", err)
	}

	// Now update both services
	service1 = &structs.Service{Name: "foo1", Tags: []string{"a", "z"}}
	service2 = &structs.Service{Name: "foo2", PortLabel: ":8899"}
	service3 := &structs.Service{Name: "foo3"}
	services = map[ServiceKey]*structs.Service{
		GenerateServiceKey(service1): service1,
		GenerateServiceKey(service2): service2,
		GenerateServiceKey(service3): service3,
	}
	if err := cs.SetServices(serviceGroupName, services); err != nil {
		t.Fatalf("error setting services: %v", err)
	}
	if err := cs.SyncServices(); err != nil {
		t.Fatalf("error syncing services: %v", err)
	}

	agentServices, err := cs.queryAgentServices()
	if err != nil {
		t.Fatalf("error querying consul services: %v", err)
	}
	if len(agentServices) != 3 {
		t.Fatalf("expected 3 services in consul but found %d:\n%#v", len(agentServices), agentServices)
	}
	// Index consul's view by service ID for the comparison loop below.
	consulServices := make(map[string]*api.AgentService, 3)
	for _, as := range agentServices {
		consulServices[as.ID] = as
	}

	found := 0
	for _, s := range cs.flattenedServices() {
		// Assert sure changes were applied to internal state
		switch s.Name {
		case "foo1":
			found++
			if !reflect.DeepEqual(service1.Tags, s.Tags) {
				t.Errorf("incorrect tags on foo1:\n  expected: %v\n  found: %v", service1.Tags, s.Tags)
			}
		case "foo2":
			found++
			if s.Address != "" {
				t.Errorf("expected empty host on foo2 but found %q", s.Address)
			}
			if s.Port != 8899 {
				t.Errorf("expected port 8899 on foo2 but found %d", s.Port)
			}
		case "foo3":
			found++
		default:
			t.Errorf("unexpected service: %s", s.Name)
		}

		// Assert internal state equals consul's state. Use a distinct name
		// here instead of shadowing the Syncer variable `cs` with an
		// *api.AgentService.
		agentSvc, ok := consulServices[s.ID]
		if !ok {
			t.Errorf("service not in consul: %s id: %s", s.Name, s.ID)
			continue
		}
		if !reflect.DeepEqual(s.Tags, agentSvc.Tags) {
			t.Errorf("mismatched tags in syncer state and consul for %s:\nsyncer: %v\nconsul: %v", s.Name, s.Tags, agentSvc.Tags)
		}
		if agentSvc.Port != s.Port {
			t.Errorf("mismatched port in syncer state and consul for %s\nsyncer: %v\nconsul: %v", s.Name, s.Port, agentSvc.Port)
		}
		if agentSvc.Address != s.Address {
			t.Errorf("mismatched address in syncer state and consul for %s\nsyncer: %v\nconsul: %v", s.Name, s.Address, agentSvc.Address)
		}
	}
	if found != 3 {
		t.Fatalf("expected 3 services locally but found %d", found)
	}
}
Ejemplo n.º 13
0
// TestConsulServiceRegisterServices registers two services (sharing a name
// but differing in tags) and asserts both the Syncer's internal state and
// the local Consul agent reflect them with no checks registered.
// Requires a running local Consul agent; skipped otherwise.
func TestConsulServiceRegisterServices(t *testing.T) {
	cs, err := NewSyncer(config.DefaultConsulConfig(), nil, logger)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	defer cs.Shutdown()
	// Skipping the test if consul isn't present
	if !cs.consulPresent() {
		t.Skip("skipping because consul isn't present")
	}

	service1 := &structs.Service{Name: "foo", Tags: []string{"a", "b"}}
	service2 := &structs.Service{Name: "foo"}
	services := map[ServiceKey]*structs.Service{
		GenerateServiceKey(service1): service1,
		GenerateServiceKey(service2): service2,
	}

	// Call SetServices to update services in consul
	if err := cs.SetServices(serviceGroupName, services); err != nil {
		t.Fatalf("error setting services: %v", err)
	}

	// Manually call SyncServers to cause a synchronous consul update
	if err := cs.SyncServices(); err != nil {
		t.Fatalf("error syncing services: %v", err)
	}

	numservices := len(cs.flattenedServices())
	if numservices != 2 {
		t.Fatalf("expected 2 services but found %d", numservices)
	}

	numchecks := len(cs.flattenedChecks())
	if numchecks != 0 {
		t.Fatalf("expected 0 checks but found %d", numchecks)
	}

	// Assert services are in consul: our two plus consul's own agent entry.
	agentServices, err := cs.client.Agent().Services()
	if err != nil {
		t.Fatalf("error querying consul services: %v", err)
	}
	found := 0
	for id, as := range agentServices {
		if id == "consul" {
			found++
			continue
		}
		if _, ok := services[ServiceKey(as.Service)]; ok {
			found++
			continue
		}
		t.Errorf("unexpected service in consul: %s", id)
	}
	if found != 3 {
		// Report the matched count (found), which is what the condition
		// tests, rather than the raw agent-service count.
		t.Fatalf("expected 3 services in consul but found %d:\nconsul: %#v", found, agentServices)
	}

	agentChecks, err := cs.queryChecks()
	if err != nil {
		t.Fatalf("error querying consul checks: %v", err)
	}
	if len(agentChecks) != numchecks {
		// Report numchecks — the value actually compared — not numservices.
		t.Fatalf("expected %d checks in consul but found %d:\n%#v", numchecks, len(agentChecks), agentChecks)
	}
}