Example 1
func StartHeartbeats(localIp string, ttl time.Duration, config *config.Config, storeAdapter storeadapter.StoreAdapter, logger *gosteno.Logger) (stopChan chan (chan bool)) {
	if len(config.EtcdUrls) == 0 {
		return // no etcd cluster configured; callers receive a nil stopChan
	}

	if storeAdapter == nil {
		panic("store adapter is nil")
	}

	logger.Debugf("Starting Health Status Updates to Store: /healthstatus/doppler/%s/%s/%d", config.Zone, config.JobName, config.Index)
	status, stopChan, err := storeAdapter.MaintainNode(storeadapter.StoreNode{
		Key:   fmt.Sprintf("/healthstatus/doppler/%s/%s/%d", config.Zone, config.JobName, config.Index),
		Value: []byte(localIp),
		TTL:   uint64(ttl.Seconds()),
	})

	if err != nil {
		panic(err)
	}

	go func() {
		for stat := range status {
			logger.Debugf("Health updates channel pushed %v at time %v", stat, time.Now())
		}
	}()

	return stopChan
}
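To stop the heartbeat, the caller sends a fresh chan bool into the returned stopChan and then waits on it, the same release handshake Example 21 uses with its lock channel. A minimal sketch, assuming localIP, conf, adapter, and log are in scope:

stopChan := StartHeartbeats(localIP, 10*time.Second, conf, adapter, log)
if stopChan != nil { // nil when config.EtcdUrls is empty
	done := make(chan bool)
	stopChan <- done // ask the maintainer to stop
	<-done           // wait until the node has been released
}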
Example 2
func Announce(localIP string, ttl time.Duration, config *config.Config, storeAdapter storeadapter.StoreAdapter, logger *gosteno.Logger) chan (chan bool) {
	dopplerMetaBytes, err := buildDopplerMeta(localIP, config)
	if err != nil {
		panic(err)
	}

	key := fmt.Sprintf("%s/%s/%s/%d", META_ROOT, config.Zone, config.JobName, config.Index)
	logger.Debugf("Starting Health Status Updates to Store: %s", key)

	node := storeadapter.StoreNode{
		Key:   key,
		Value: dopplerMetaBytes,
		TTL:   uint64(ttl.Seconds()),
	}
	// Create the node explicitly so it exists in the store before we return;
	// the Create error is ignored here (MaintainNode below will surface failures).
	storeAdapter.Create(node)
	status, stopChan, err := storeAdapter.MaintainNode(node)

	if err != nil {
		panic(err)
	}

	// The status channel needs to be drained to maintain the node within the etcd cluster
	go func() {
		for stat := range status {
			logger.Debugf("Health updates channel pushed %v at time %v", stat, time.Now())
		}
	}()

	return stopChan
}
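buildDopplerMeta is not shown in this set. A minimal sketch of what it might look like, assuming the meta value is a small JSON document; the struct fields and the IncomingUDPPort config field are illustrative assumptions, not the actual loggregator format:

// Hypothetical implementation: JSON-encode a version plus the endpoints at
// which this doppler can be reached (requires encoding/json and fmt).
func buildDopplerMeta(localIP string, conf *config.Config) ([]byte, error) {
	meta := struct {
		Version   int      `json:"version"`
		Endpoints []string `json:"endpoints"`
	}{
		Version:   1,
		Endpoints: []string{fmt.Sprintf("udp://%s:%d", localIP, conf.IncomingUDPPort)},
	}
	return json.Marshal(meta)
}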
Example 3
func connectToStoreAdapter(l logger.Logger, conf *config.Config) storeadapter.StoreAdapter {
	var adapter storeadapter.StoreAdapter
	workPool, err := workpool.NewWorkPool(conf.StoreMaxConcurrentRequests)
	if err != nil {
		l.Error("Failed to create workpool", err)
		os.Exit(1)
	}

	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: conf.StoreURLs,
	}
	adapter, err = etcdstoreadapter.New(options, workPool)
	if err != nil {
		l.Error("Failed to create the store adapter", err)
		os.Exit(1)
	}

	err = adapter.Connect()
	if err != nil {
		l.Error("Failed to connect to the store", err)
		os.Exit(1)
	}

	return adapter
}
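This variant uses the options-based construction path (workpool.NewWorkPool plus etcdstoreadapter.New with an ETCDOptions struct); Examples 6, 7, and 9 instead call the older etcdstoreadapter.NewETCDStoreAdapter(urls, pool) constructor. The snippets appear to come from different vintages of the storeadapter library, so the two construction styles should not be mixed within one codebase.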
Example 4
func Daemonize(
	component string,
	callback func() error,
	period time.Duration,
	timeout time.Duration,
	logger logger.Logger,
	adapter storeadapter.StoreAdapter,
) error {
	logger.Info("Acquiring lock for " + component)

	lostLockChannel, releaseLockChannel, err := adapter.GetAndMaintainLock(component, 10)
	if err != nil {
		logger.Info(fmt.Sprintf("Failed to acquire lock: %s", err))
		return err
	}

	go func() {
		<-lostLockChannel
		logger.Error("Lost the lock", errors.New("Lock the lock"))
		os.Exit(197)
	}()

	logger.Info("Acquired lock for " + component)

	logger.Info(fmt.Sprintf("Running Daemon every %d seconds with a timeout of %d", int(period.Seconds()), int(timeout.Seconds())))

	for {
		afterChan := time.After(period)
		timeoutChan := time.After(timeout)
		errorChan := make(chan error, 1)

		t := time.Now()

		go func() {
			errorChan <- callback()
		}()

		select {
		case err := <-errorChan:
			logger.Info("Daemonize Time", map[string]string{
				"Component": component,
				"Duration":  fmt.Sprintf("%.4f", time.Since(t).Seconds()),
			})
			if err != nil {
				logger.Error("Daemon returned an error. Continuining...", err)
			}
		case <-timeoutChan:
			releaseLockChannel <- true
			return errors.New("Daemon timed out. Aborting!")
		}

		<-afterChan
	}

	return nil
}
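A hypothetical call site for Daemonize; the component name, callback, and durations are illustrative, not taken from the source:

// Run doWork under the "analyzer" lock once per period; doWork, the
// durations, l, and adapter are all placeholders for this sketch.
err := Daemonize(
	"analyzer",
	func() error { return doWork() },
	10*time.Second, // period between callback runs
	30*time.Second, // per-run timeout; on expiry the lock is released and Daemonize returns
	l,
	adapter,
)
if err != nil {
	l.Error("daemon exited", err)
}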
Example 5
func AddETCDNode(etcdAdapter storeadapter.StoreAdapter, key string, value string) {
	node := storeadapter.StoreNode{
		Key:   key,
		Value: []byte(value),
		TTL:   20,
	}
	err := etcdAdapter.Create(node)
	Expect(err).NotTo(HaveOccurred())
	recvNode, err := etcdAdapter.Get(key)
	Expect(err).NotTo(HaveOccurred())
	Expect(string(recvNode.Value)).To(Equal(value))
}
Example 6
func connectToStoreAdapter(l logger.Logger, conf *config.Config) (storeadapter.StoreAdapter, metricsaccountant.UsageTracker) {
	var adapter storeadapter.StoreAdapter
	workerPool := workerpool.NewWorkerPool(conf.StoreMaxConcurrentRequests)
	adapter = etcdstoreadapter.NewETCDStoreAdapter(conf.StoreURLs, workerPool)
	err := adapter.Connect()
	if err != nil {
		l.Error("Failed to connect to the store", err)
		os.Exit(1)
	}

	return adapter, workerPool
}
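The returned workerPool doubles as the metricsaccountant.UsageTracker, so the pool type evidently satisfies that interface and callers get both the adapter and a usage-metrics handle from a single construction. Example 7 wires the same concern the other way around, passing the tracker into the pool as a workpool.AroundWork.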
Example 7
func connectToStoreAdapter(l logger.Logger, conf *config.Config, usage *usageTracker) storeadapter.StoreAdapter {
	var adapter storeadapter.StoreAdapter
	var around workpool.AroundWork = workpool.DefaultAround
	if usage != nil {
		around = usage
	}
	workPool := workpool.New(conf.StoreMaxConcurrentRequests, 0, around)
	adapter = etcdstoreadapter.NewETCDStoreAdapter(conf.StoreURLs, workPool)
	err := adapter.Connect()
	if err != nil {
		l.Error("Failed to connect to the store", err)
		os.Exit(1)
	}

	return adapter
}
Example 8
func NewElector(instanceName string, adapter storeadapter.StoreAdapter, updateInterval time.Duration, logger *gosteno.Logger) *Elector {
	for {
		err := adapter.Connect()

		if err == nil {
			break
		}

		logger.Errorf("Elector: Unable to connect to store: '%s'", err.Error())
		time.Sleep(updateInterval)
	}

	return &Elector{
		instanceName:   []byte(instanceName),
		adapter:        adapter,
		updateInterval: updateInterval,
		logger:         logger,
	}
}
Example 9
func connectToStoreAdapter(l logger.Logger, conf *config.Config) (storeadapter.StoreAdapter, metricsaccountant.UsageTracker) {
	var adapter storeadapter.StoreAdapter
	workerPool := workerpool.NewWorkerPool(conf.StoreMaxConcurrentRequests)
	if conf.StoreType == "etcd" {
		adapter = etcdstoreadapter.NewETCDStoreAdapter(conf.StoreURLs, workerPool)
	} else if conf.StoreType == "ZooKeeper" {
		adapter = zookeeperstoreadapter.NewZookeeperStoreAdapter(conf.StoreURLs, workerPool, buildTimeProvider(l), time.Second)
	} else {
		l.Error(fmt.Sprintf("Unknown store type %s.  Choose one of 'etcd' or 'ZooKeeper'", conf.StoreType), fmt.Errorf("Unkown store type"))
		os.Exit(1)
	}
	err := adapter.Connect()
	if err != nil {
		l.Error("Failed to connect to the store", err)
		os.Exit(1)
	}

	return adapter, workerPool
}
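Both branches return the common storeadapter.StoreAdapter interface, so everything downstream of this function is backend-agnostic; only construction differs, with the ZooKeeper adapter additionally taking what appear to be a time provider and a resolution argument.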
Example 10
func AnnounceLegacy(localIP string, ttl time.Duration, config *config.Config, storeAdapter storeadapter.StoreAdapter, logger *gosteno.Logger) chan (chan bool) {
	key := fmt.Sprintf("%s/%s/%s/%d", LEGACY_ROOT, config.Zone, config.JobName, config.Index)
	status, stopChan, err := storeAdapter.MaintainNode(storeadapter.StoreNode{
		Key:   key,
		Value: []byte(localIP),
		TTL:   uint64(ttl.Seconds()),
	})

	if err != nil {
		panic(err)
	}

	// The status channel needs to be drained to maintain the node within the etcd cluster
	go func() {
		for stat := range status {
			logger.Debugf("Health updates channel pushed %v at time %v", stat, time.Now())
		}
	}()

	return stopChan
}
Example 11
func getAllTasks(store storeadapter.StoreAdapter, state models.TaskState) ([]*models.Task, error) {
	node, err := store.ListRecursively(TaskSchemaRoot)
	if err == storeadapter.ErrorKeyNotFound {
		return []*models.Task{}, nil
	}

	if err != nil {
		return []*models.Task{}, err
	}

	tasks := []*models.Task{}
	for _, childNode := range node.ChildNodes {
		task, err := models.NewTaskFromJSON(childNode.Value)
		if err != nil {
			steno.NewLogger("bbs").Errorf("cannot parse task JSON for key %s: %s", childNode.Key, err.Error())
		} else if task.State == state {
			tasks = append(tasks, &task)
		}
	}

	return tasks, nil
}
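A hypothetical call site, assuming a models.TaskStatePending constant exists; note that a missing TaskSchemaRoot is treated as an empty task list rather than an error, and unparsable task nodes are logged and skipped:

// Sketch only: TaskStatePending and the use of the standard log package are
// assumptions for illustration.
pending, err := getAllTasks(store, models.TaskStatePending)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("found %d pending tasks\n", len(pending))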
Example 12
	. "github.com/cloudfoundry/hm9000/testhelpers/custommatchers"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/cloudfoundry/hm9000/config"
	"github.com/cloudfoundry/hm9000/testhelpers/appfixture"
	"github.com/cloudfoundry/hm9000/testhelpers/fakelogger"
	"github.com/cloudfoundry/storeadapter"
	"github.com/cloudfoundry/storeadapter/etcdstoreadapter"
)

var _ = Describe("Desired State", func() {
	var (
		store        Store
		storeAdapter storeadapter.StoreAdapter
		conf         *config.Config
		app1         appfixture.AppFixture
		app2         appfixture.AppFixture
		app3         appfixture.AppFixture
	)

	BeforeEach(func() {
		var err error
		conf, err = config.DefaultConfig()
		Expect(err).NotTo(HaveOccurred())
		wpool, err := workpool.NewWorkPool(conf.StoreMaxConcurrentRequests)
		Expect(err).NotTo(HaveOccurred())
		storeAdapter, err = etcdstoreadapter.New(
			&etcdstoreadapter.ETCDOptions{ClusterUrls: etcdRunner.NodeURLS()},
			wpool,
		)
		Expect(err).NotTo(HaveOccurred())
	})
})
Example 13
func TestIntegrationTest(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "End-to-end Integration Test Suite")
}

var (
	LocalIPAddress string

	etcdRunner  *etcdstorerunner.ETCDClusterRunner
	etcdAdapter storeadapter.StoreAdapter

	metronExecutablePath            string
	dopplerExecutablePath           string
	trafficControllerExecutablePath string

	metronSession  *gexec.Session
	dopplerSession *gexec.Session
	tcSession      *gexec.Session

	dopplerConfig string
	metronConfig  string
)

var _ = BeforeSuite(func() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	etcdRunner = etcdstorerunner.NewETCDClusterRunner(49623, 1, nil)
	etcdRunner.Start()
	etcdAdapter = etcdRunner.Adapter(nil)
	metronExecutablePath = buildComponent("metron")
Example 14
import (
	"github.com/cloudfoundry/gunk/workpool"
	. "github.com/cloudfoundry/hm9000/store"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/cloudfoundry/hm9000/config"
	"github.com/cloudfoundry/hm9000/testhelpers/fakelogger"
	"github.com/cloudfoundry/storeadapter"
	"github.com/cloudfoundry/storeadapter/etcdstoreadapter"
)

var _ = Describe("Compact", func() {
	var (
		store        Store
		storeAdapter storeadapter.StoreAdapter
		conf         *config.Config
	)

	BeforeEach(func() {
		var err error
		conf, err = config.DefaultConfig()
		conf.StoreSchemaVersion = 17
		Ω(err).ShouldNot(HaveOccurred())
		storeAdapter = etcdstoreadapter.NewETCDStoreAdapter(etcdRunner.NodeURLS(),
			workpool.NewWorkPool(conf.StoreMaxConcurrentRequests))
		err = storeAdapter.Connect()
		Ω(err).ShouldNot(HaveOccurred())
		store = NewStore(conf, storeAdapter, fakelogger.NewFakeLogger())
	})
Example 15
	"runtime"
	"time"
)

func TestIntegrationTest(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "End-to-end Integration Test Suite")
}

var (
	LocalIPAddress string

	etcdRunner  *etcdstorerunner.ETCDClusterRunner
	etcdAdapter storeadapter.StoreAdapter

	metronExecutablePath            string
	dopplerExecutablePath           string
	trafficControllerExecutablePath string

	metronSession  *gexec.Session
	dopplerSession *gexec.Session
	tcSession      *gexec.Session
)

var _ = BeforeSuite(func() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	etcdRunner = etcdstorerunner.NewETCDClusterRunner(49623, 1, nil)
	etcdRunner.Start()
	etcdAdapter = etcdRunner.Adapter(nil)
	metronExecutablePath = buildComponent("metron")
	dopplerExecutablePath = buildComponent("doppler")
	trafficControllerExecutablePath = buildComponent("trafficcontroller")
Example 16
const (
	username = "******"
	password = "******"
)

var natsPort int
var natsAddress string
var natsClient diegonats.NATSClient
var natsServerRunner *ginkgomon.Runner
var natsClientRunner diegonats.NATSClientRunner
var natsGroupProcess ifrit.Process

var etcdPort int
var etcdUrl string
var etcdRunner *etcdstorerunner.ETCDClusterRunner
var etcdAdapter storeadapter.StoreAdapter

var consulRunner *consulrunner.ClusterRunner
var consulSession *consuladapter.Session

var bbsArgs bbstestrunner.Args
var bbsBinPath string
var bbsURL *url.URL
var bbsRunner *ginkgomon.Runner
var bbsProcess ifrit.Process
var bbsClient bbs.Client

var legacyBBS *Bbs.BBS

var logger lager.Logger
Example 17
	. "github.com/cloudfoundry/hm9000/testhelpers/custommatchers"
	"github.com/cloudfoundry/hm9000/testhelpers/fakelogger"
	"github.com/cloudfoundry/storeadapter"
	"github.com/cloudfoundry/storeadapter/etcdstoreadapter"
	"github.com/cloudfoundry/storeadapter/workerpool"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Apps", func() {
	var (
		store        Store
		storeAdapter storeadapter.StoreAdapter
		conf         *config.Config

		dea        appfixture.DeaFixture
		app1       appfixture.AppFixture
		app2       appfixture.AppFixture
		app3       appfixture.AppFixture
		app4       appfixture.AppFixture
		crashCount []models.CrashCount
	)

	conf, _ = config.DefaultConfig()

	BeforeEach(func() {
		storeAdapter = etcdstoreadapter.NewETCDStoreAdapter(etcdRunner.NodeURLS(), workerpool.NewWorkerPool(conf.StoreMaxConcurrentRequests))
		err := storeAdapter.Connect()
		Ω(err).ShouldNot(HaveOccurred())

		store = NewStore(conf, storeAdapter, fakelogger.NewFakeLogger())
Example 18
	"github.com/cloudfoundry/storeadapter/workerpool"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/cloudfoundry/hm9000/config"
	"github.com/cloudfoundry/hm9000/models"
	"github.com/cloudfoundry/hm9000/testhelpers/fakelogger"
	"github.com/cloudfoundry/storeadapter"
	"github.com/cloudfoundry/storeadapter/etcdstoreadapter"
)

var _ = Describe("Crash Count", func() {
	var (
		store        Store
		storeAdapter storeadapter.StoreAdapter
		conf         *config.Config
		crashCount1  models.CrashCount
		crashCount2  models.CrashCount
		crashCount3  models.CrashCount
	)

	BeforeEach(func() {
		var err error
		conf, err = config.DefaultConfig()
		Ω(err).ShouldNot(HaveOccurred())
		storeAdapter = etcdstoreadapter.NewETCDStoreAdapter(etcdRunner.NodeURLS(), workerpool.NewWorkerPool(conf.StoreMaxConcurrentRequests))
		err = storeAdapter.Connect()
		Ω(err).ShouldNot(HaveOccurred())

		crashCount1 = models.CrashCount{AppGuid: models.Guid(), AppVersion: models.Guid(), InstanceIndex: 1, CrashCount: 17}
		crashCount2 = models.CrashCount{AppGuid: models.Guid(), AppVersion: models.Guid(), InstanceIndex: 4, CrashCount: 17}
		crashCount3 = models.CrashCount{AppGuid: models.Guid(), AppVersion: models.Guid(), InstanceIndex: 3, CrashCount: 17}
Example 19
package store_test

import (
	"github.com/cloudfoundry/storeadapter"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	. "loggregator/store"
	"path"

	"loggregator/domain"
	"time"
)

var _ = Describe("AppServiceStoreWatcher", func() {
	var listener *AppServiceStoreWatcher
	var adapter storeadapter.StoreAdapter
	var outAddChan <-chan domain.AppService
	var outRemoveChan <-chan domain.AppService

	var app1Service1 domain.AppService
	var app1Service2 domain.AppService
	var app2Service1 domain.AppService

	drainOutgoingChannel := func(c <-chan domain.AppService, count int) []domain.AppService {
		appServices := []domain.AppService{}
		for i := 0; i < count; i++ {
			appService, ok := <-c
			if !ok {
				break
			}
			appServices = append(appServices, appService)
Example 20
	. "github.com/onsi/gomega"
)

const (
	APP1_ID = "app-1"
	APP2_ID = "app-2"
	APP3_ID = "app-3"
)

var _ = Describe("AppServiceStoreWatcher", func() {
	var watcher *AppServiceStoreWatcher
	var watcherRunComplete sync.WaitGroup

	var runWatcher func()

	var adapter storeadapter.StoreAdapter
	var outAddChan <-chan appservice.AppService
	var outRemoveChan <-chan appservice.AppService

	var app1Service1 appservice.AppService
	var app1Service2 appservice.AppService
	var app2Service1 appservice.AppService

	BeforeEach(func() {
		app1Service1 = appservice.AppService{AppId: APP1_ID, Url: "syslog://example.com:12345"}
		app1Service2 = appservice.AppService{AppId: APP1_ID, Url: "syslog://example.com:12346"}
		app2Service1 = appservice.AppService{AppId: APP2_ID, Url: "syslog://example.com:12345"}

		workPool, err := workpool.NewWorkPool(10)
		Expect(err).NotTo(HaveOccurred())
Example 21
func Daemonize(
	component string,
	callback func() error,
	period time.Duration,
	timeout time.Duration,
	logger logger.Logger,
	adapter storeadapter.StoreAdapter,
) error {
	logger.Info("Acquiring lock for " + component)

	lock := storeadapter.StoreNode{
		Key: "/hm/locks/" + component,
		TTL: 10,
	}

	status, releaseLockChannel, err := adapter.MaintainNode(lock)
	if err != nil {
		logger.Info(fmt.Sprintf("Failed to acquire lock: %s", err))
		return err
	}

	lockAcquired := make(chan bool)

	go func() {
		for {
			if <-status {
				if lockAcquired != nil {
					close(lockAcquired)
					lockAcquired = nil
				}
			} else {
				logger.Error("Lost the lock", errors.New("Lock the lock"))
				os.Exit(197)
			}
		}
	}()

	<-lockAcquired

	logger.Info("Acquired lock for " + component)

	logger.Info(fmt.Sprintf("Running Daemon every %d seconds with a timeout of %d", int(period.Seconds()), int(timeout.Seconds())))

	for {
		afterChan := time.After(period)
		timeoutChan := time.After(timeout)
		errorChan := make(chan error, 1)

		t := time.Now()

		go func() {
			errorChan <- callback()
		}()

		select {
		case err := <-errorChan:
			logger.Info("Daemonize Time", map[string]string{
				"Component": component,
				"Duration":  fmt.Sprintf("%.4f", time.Since(t).Seconds()),
			})
			if err != nil {
				logger.Error("Daemon returned an error. Continuining...", err)
			}
		case <-timeoutChan:
			released := make(chan bool)
			releaseLockChannel <- released
			<-released

			return errors.New("Daemon timed out. Aborting!")
		}

		<-afterChan
	}

	return nil
}
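This is the second Daemonize variant in this collection. Example 4 acquires the lock through the adapter's GetAndMaintainLock helper, while this version builds the lock by hand as a TTL'd StoreNode under /hm/locks/ and watches the boolean status channel itself, blocking on lockAcquired until the first successful maintain. The timeout path also differs: here the release is confirmed through the released channel handshake before returning.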
Example 22
package store_test

import (
	"github.com/cloudfoundry/storeadapter"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	. "loggregator/store"
	"path"

	"loggregator/domain"
)

var _ = Describe("AppServiceStore", func() {
	var store *AppServiceStore
	var adapter storeadapter.StoreAdapter
	var incomingChan chan domain.AppServices

	var app1Service1 domain.AppService
	var app1Service2 domain.AppService
	var app2Service1 domain.AppService

	assertInStore := func(appServices ...domain.AppService) {
		for _, appService := range appServices {
			Eventually(func() error {
				_, err := adapter.Get(path.Join("/loggregator/services/", appService.AppId, appService.Id()))
				return err
			}).ShouldNot(HaveOccurred())
		}
	}

	assertNotInStore := func(appServices ...domain.AppService) {
Example 23
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/cloudfoundry/hm9000/config"
	"github.com/cloudfoundry/hm9000/models"
	"github.com/cloudfoundry/hm9000/testhelpers/appfixture"
	"github.com/cloudfoundry/hm9000/testhelpers/fakelogger"
	"github.com/cloudfoundry/storeadapter"
	"github.com/cloudfoundry/storeadapter/etcdstoreadapter"
)

var _ = Describe("Actual State", func() {
	var (
		store        Store
		storeAdapter storeadapter.StoreAdapter
		conf         *config.Config
		dea          appfixture.DeaFixture
		otherDea     appfixture.DeaFixture
	)

	BeforeEach(func() {
		var err error
		conf, err = config.DefaultConfig()
		Ω(err).ShouldNot(HaveOccurred())
		storeAdapter = etcdstoreadapter.NewETCDStoreAdapter(etcdRunner.NodeURLS(),
			workpool.NewWorkPool(conf.StoreMaxConcurrentRequests))
		err = storeAdapter.Connect()
		Ω(err).ShouldNot(HaveOccurred())
		conf.StoreHeartbeatCacheRefreshIntervalInMilliseconds = 100
		store = NewStore(conf, storeAdapter, fakelogger.NewFakeLogger())
Example 24
	"github.com/cloudfoundry/hm9000/models"
	. "github.com/cloudfoundry/hm9000/store"
	"github.com/cloudfoundry/hm9000/testhelpers/fakelogger"
	"github.com/cloudfoundry/storeadapter"
	"github.com/cloudfoundry/storeadapter/etcdstoreadapter"
	"github.com/cloudfoundry/storeadapter/workerpool"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"time"
)

var _ = Describe("Storing PendingStopMessages", func() {
	var (
		store        Store
		storeAdapter storeadapter.StoreAdapter
		conf         *config.Config
		message1     models.PendingStopMessage
		message2     models.PendingStopMessage
		message3     models.PendingStopMessage
	)

	BeforeEach(func() {
		var err error
		conf, err = config.DefaultConfig()
		Ω(err).ShouldNot(HaveOccurred())
		storeAdapter = etcdstoreadapter.NewETCDStoreAdapter(etcdRunner.NodeURLS(), workerpool.NewWorkerPool(conf.StoreMaxConcurrentRequests))
		err = storeAdapter.Connect()
		Ω(err).ShouldNot(HaveOccurred())

		message1 = models.NewPendingStopMessage(time.Unix(100, 0), 10, 4, "ABC", "123", "XYZ", models.PendingStopMessageReasonInvalid)
		message2 = models.NewPendingStopMessage(time.Unix(100, 0), 10, 4, "DEF", "456", "ALPHA", models.PendingStopMessageReasonInvalid)
		message3 = models.NewPendingStopMessage(time.Unix(100, 0), 10, 4, "GHI", "789", "BETA", models.PendingStopMessageReasonInvalid)
Example 25
	. "github.com/cloudfoundry/hm9000/store"
	"github.com/cloudfoundry/hm9000/testhelpers/fakelogger"
	"github.com/cloudfoundry/storeadapter"
	"github.com/cloudfoundry/storeadapter/etcdstoreadapter"
	"github.com/cloudfoundry/storeadapter/workerpool"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"encoding/json"
	"time"
)

var _ = Describe("Freshness", func() {
	var (
		store        Store
		storeAdapter storeadapter.StoreAdapter
		conf         *config.Config
	)

	conf, _ = config.DefaultConfig()

	BeforeEach(func() {
		storeAdapter = etcdstoreadapter.NewETCDStoreAdapter(etcdRunner.NodeURLS(), workerpool.NewWorkerPool(conf.StoreMaxConcurrentRequests))
		err := storeAdapter.Connect()
		Ω(err).ShouldNot(HaveOccurred())

		store = NewStore(conf, storeAdapter, fakelogger.NewFakeLogger())
	})

	Describe("Bumping freshness", func() {
		bumpingFreshness := func(key string, ttl uint64, bump func(store Store, timestamp time.Time) error) {
Example 26
			})
		})

		It("Stops", func() {
			finder.Stop()
			Eventually(stopChan).Should(BeClosed())
		})
	})

	Context("with a real etcd", func() {
		var (
			storeAdapter storeadapter.StoreAdapter
			node         storeadapter.StoreNode
			updateNode   storeadapter.StoreNode

			updateCallback func(all map[string]string, preferred map[string]string)
			callbackCount  *int32

			preferredCallback func(key string) bool
			preferredCount    *int32
		)

		BeforeEach(func() {
			workPool, err := workpool.NewWorkPool(10)
			Expect(err).NotTo(HaveOccurred())
			options := &etcdstoreadapter.ETCDOptions{
				ClusterUrls: etcdRunner.NodeURLS(),
			}
			storeAdapter, err = etcdstoreadapter.New(options, workPool)
			Expect(err).NotTo(HaveOccurred())
Example 27
import (
	"github.com/cloudfoundry/hm9000/config"
	. "github.com/cloudfoundry/hm9000/store"
	"github.com/cloudfoundry/hm9000/testhelpers/fakelogger"
	"github.com/cloudfoundry/storeadapter"
	"github.com/cloudfoundry/storeadapter/etcdstoreadapter"
	"github.com/cloudfoundry/storeadapter/workerpool"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Metrics", func() {
	var (
		store        Store
		storeAdapter storeadapter.StoreAdapter
		conf         *config.Config
	)

	conf, _ = config.DefaultConfig()

	BeforeEach(func() {
		storeAdapter = etcdstoreadapter.NewETCDStoreAdapter(etcdRunner.NodeURLS(), workerpool.NewWorkerPool(conf.StoreMaxConcurrentRequests))
		err := storeAdapter.Connect()
		Ω(err).ShouldNot(HaveOccurred())

		store = NewStore(conf, storeAdapter, fakelogger.NewFakeLogger())
	})

	Describe("Getting and setting a metric", func() {
		BeforeEach(func() {
Example 28
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"code.cloudfoundry.org/localip"
	"github.com/cloudfoundry/loggregatorlib/loggertesthelper"
	"github.com/cloudfoundry/storeadapter"
	"github.com/cloudfoundry/storeadapter/storerunner/etcdstorerunner"
	ginkgoConfig "github.com/onsi/ginkgo/config"
)

var _ = Describe("Announcer", func() {
	var (
		localIP     string
		conf        config.Config
		etcdRunner  *etcdstorerunner.ETCDClusterRunner
		etcdAdapter storeadapter.StoreAdapter
	)

	BeforeSuite(func() {
		localIP, _ = localip.LocalIP()

		etcdPort := 5500 + ginkgoConfig.GinkgoConfig.ParallelNode*10
		etcdRunner = etcdstorerunner.NewETCDClusterRunner(etcdPort, 1, nil)
		etcdRunner.Start()

		etcdAdapter = etcdRunner.Adapter(nil)

		conf = config.Config{
			JobName: "doppler_z1",
			Index:   "0",
Example 29
	"fmt"

	"github.com/cloudfoundry/storeadapter"
	"github.com/cloudfoundry/storeadapter/storerunner/etcdstorerunner"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"testing"
)

func TestDB(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "DB Suite")
}

var etcdClient storeadapter.StoreAdapter
var etcdPort int
var etcdUrl string
var etcdRunner *etcdstorerunner.ETCDClusterRunner
var routingAPIBinPath string

var _ = BeforeEach(func() {
	etcdPort = 4001 + GinkgoParallelNode()
	etcdUrl = fmt.Sprintf("http://127.0.0.1:%d", etcdPort)
	etcdRunner = etcdstorerunner.NewETCDClusterRunner(etcdPort, 1)
	etcdRunner.Start()

	etcdClient = etcdRunner.Adapter()
})

var _ = AfterEach(func() {