func Init(serviceName, registryLocation string) error {
	log.Print("Initializing registry connection.")
	nodeId := config.Service.NodeId
	ttl = time.Duration(config.ServiceRegistry.EntryTTL) * time.Second

	cfg := etcd.Config{
		Endpoints: []string{config.Service.RegistryLocation},
		Transport: etcd.DefaultTransport,
	}
	client, err := etcd.New(cfg)
	if err != nil {
		// Log and return instead of log.Fatal, which would exit before the return.
		log.Print(err)
		return err
	}
	kapi = etcd.NewKeysAPI(client)

	log.Print("Registering " + serviceName + " Service node at " + hostUrl())
	if _, err := kapi.Set(context.Background(), registryLocation+nodeId, hostUrl(), &etcd.SetOptions{TTL: ttl}); err != nil {
		log.Print(err)
		return err
	}

	fetchServiceLists()
	fetchPluginNodes()
	go sendHeartbeat(registryLocation)
	go updateServiceCache()
	return nil
}
func etcdFactory(conf map[string]string) (Client, error) {
	path, ok := conf["path"]
	if !ok {
		return nil, fmt.Errorf("missing 'path' configuration")
	}

	endpoints, ok := conf["endpoints"]
	if !ok || endpoints == "" {
		return nil, fmt.Errorf("missing 'endpoints' configuration")
	}

	config := etcdapi.Config{
		Endpoints: strings.Split(endpoints, " "),
	}

	if username, ok := conf["username"]; ok && username != "" {
		config.Username = username
	}
	if password, ok := conf["password"]; ok && password != "" {
		config.Password = password
	}

	client, err := etcdapi.New(config)
	if err != nil {
		return nil, err
	}

	return &EtcdClient{
		Client: client,
		Path:   path,
	}, nil
}
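// A minimal usage sketch for etcdFactory (not from the original source):
// the conf keys ("path", "endpoints", "username", "password") mirror the
// lookups above, and endpoints are space-separated per strings.Split.
// The function name and endpoint values here are hypothetical placeholders.
func exampleEtcdFactory() {
	c, err := etcdFactory(map[string]string{
		"path":      "/myapp/state",
		"endpoints": "http://127.0.0.1:2379 http://127.0.0.1:4001",
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = c // use the returned Client here
}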
func newClient(c *cli.Context) (client.Client, error) {
	eps, err := getEndpoints(c)
	if err != nil {
		return nil, err
	}

	tr, err := getTransport(c)
	if err != nil {
		return nil, err
	}

	cfg := client.Config{
		Transport:               tr,
		Endpoints:               eps,
		HeaderTimeoutPerRequest: c.GlobalDuration("timeout"),
	}

	uFlag := c.GlobalString("username")
	if uFlag != "" {
		username, password, err := getUsernamePasswordFromFlag(uFlag)
		if err != nil {
			return nil, err
		}
		cfg.Username = username
		cfg.Password = password
	}

	return client.New(cfg)
}
func syncToEtcd(ctx context.Context, cfg *config.Config) error {
	log.Log(ctx).Debug("Connecting to etcd")
	etcdCfg := client.Config{
		Endpoints: strings.Split(etcdEndpoints, ","),
	}
	c, err := client.New(etcdCfg)
	if err != nil {
		return err
	}

	root := "/quicklog/" + instanceName
	kapi := client.NewKeysAPI(c)

	input := cfg.Input
	inputConfig, err := json.Marshal(input.Config)
	if err != nil {
		log.Log(ctx).Error("Error converting input config to JSON data", "error", err)
		return err
	}

	output := cfg.Output
	outputConfig, err := json.Marshal(output.Config)
	if err != nil {
		log.Log(ctx).Error("Error converting output config to JSON data", "error", err)
		return err
	}

	filters := cfg.Filters

	kapi.Set(ctx, root+"/input/driver", input.Driver, nil)
	kapi.Set(ctx, root+"/input/parser", input.Parser, nil)
	kapi.Set(ctx, root+"/input/config", string(inputConfig), nil)
	kapi.Set(ctx, root+"/output/driver", output.Driver, nil)
	kapi.Set(ctx, root+"/output/config", string(outputConfig), nil)

	// clear all the filters before re-creating them
	kapi.Delete(ctx, root+"/filters", &client.DeleteOptions{Recursive: true, Dir: true})
	// filters must exist even if empty
	kapi.Set(ctx, root+"/filters", "", &client.SetOptions{Dir: true})

	for idx, filter := range filters {
		filterConfig, err := json.Marshal(filter.Config)
		if err != nil {
			log.Log(ctx).Error("Error converting filter config to JSON data", "error", err)
			return err
		}
		kapi.Set(ctx, root+"/filters/"+strconv.Itoa(idx)+"/driver", filter.Driver, nil)
		kapi.Set(ctx, root+"/filters/"+strconv.Itoa(idx)+"/config", string(filterConfig), nil)
	}

	kapi.Set(ctx, root+"/reload", "1", nil)

	return nil
}
// NewEtcdTestClientServer creates a new client and server for testing
func NewEtcdTestClientServer(t *testing.T) *EtcdTestServer {
	server := configureTestCluster(t, "foo", true)
	err := server.launch(t)
	if err != nil {
		t.Fatalf("Failed to start etcd server error=%v", err)
		return nil
	}

	cfg := etcd.Config{
		Endpoints: server.ClientURLs.StringSlice(),
		Transport: newHttpTransport(t, server.CertFile, server.KeyFile, server.CAFile),
	}
	server.Client, err = etcd.New(cfg)
	if err != nil {
		server.Terminate(t)
		t.Fatalf("Unexpected error in NewEtcdTestClientServer (%v)", err)
		return nil
	}
	if err := server.waitUntilUp(); err != nil {
		server.Terminate(t)
		t.Fatalf("Unexpected error in waitUntilUp (%v)", err)
		return nil
	}
	return server
}
func stress(mb int) error {
	time.Sleep(5 * time.Second)

	cfg := client.Config{
		Endpoints: []string{"http://localhost:12379", "http://localhost:22379", "http://localhost:32379"},
		Transport: client.DefaultTransport,
		// set timeout per request to fail fast when the target endpoint is unavailable
		HeaderTimeoutPerRequest: time.Second,
	}
	c, err := client.New(cfg)
	if err != nil {
		return err
	}
	kapi := client.NewKeysAPI(c)

	for i := 0; i < mb*2; i++ {
		fmt.Println("stressing", i)
		k := make([]byte, 100)
		binary.PutVarint(k, int64(rand.Intn(putSize)))
		_, err = kapi.Set(context.Background(), string(k), "", nil)
		if err != nil {
			if i < 2 {
				return err
			}
		}
		time.Sleep(500 * time.Millisecond)
	}

	return nil
}
func NewSource(opts ...config.SourceOption) config.Source {
	options := config.SourceOptions{
		Name: DefaultPath,
	}

	for _, o := range opts {
		o(&options)
	}

	var cAddrs []string
	for _, addr := range options.Hosts {
		if len(addr) > 0 {
			cAddrs = append(cAddrs, addr)
		}
	}
	if len(cAddrs) == 0 {
		cAddrs = []string{"http://127.0.0.1:2379"}
	}

	c, err := client.New(client.Config{
		Endpoints: cAddrs,
	})
	if err != nil {
		log.Fatal(err)
	}

	return &etcd{
		addrs:  cAddrs,
		opts:   options,
		client: c,
	}
}
// GetV2 gets the value for the key using the V2 API.
func (c *Cluster) GetV2(w io.Writer, key []byte) error {
	endpoints := []string{}
	for _, nd := range c.NameToNode {
		for v := range nd.Flags.ListenClientURLs {
			endpoints = append(endpoints, v)
		}
	}
	cfg := client.Config{
		Endpoints:               endpoints,
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
		// SelectionMode: client.EndpointSelectionPrioritizeLeader,
	}
	ct, err := client.New(cfg)
	if err != nil {
		return err
	}
	kapi := client.NewKeysAPI(ct)

	ts := time.Now()
	resp, err := kapi.Get(context.Background(), string(key), nil)
	if err != nil {
		return err
	}
	fmt.Fprintf(w, "[GetV2] Done! Took %v for %s/%s.\n", time.Since(ts), key, resp.Node.Value)
	return nil
}
// NOTE: this test depends on an etcd cluster available
// on 127.0.0.1:2379 (the default port)
// TODO: remove the dependence on etcd for testing
func TestLockManager(t *testing.T) {
	t.Parallel()
	cfg := client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"},
		Transport: client.DefaultTransport,
		// set timeout per request to fail fast when the target endpoint is unavailable
		HeaderTimeoutPerRequest: time.Second,
	}
	c, err := client.New(cfg)
	if err != nil {
		t.Fatal(err)
	}

	ecrpErrorChan := make(chan error)
	ecrp := retryproxy.NewEtcdClientRetryProxy(c, ecrpErrorChan, 1, 60)

	path := "locktest"
	hn, _ := os.Hostname()
	path += hn
	path += uuid.New()
	ttl := LockTTL

	lm := NewLockManager(ecrp, path, ttl)
	defer lm.Shutdown()

	closeit := time.After(10 * time.Second)
	for {
		select {
		case <-closeit:
			return
		case <-time.After(time.Second):
			fmt.Printf("Have Lock: %t\n", lm.HaveLock())
		}
	}
}
func NewNode(stop chan bool) *Node {
	n := new(Node)
	n.stop = stop
	n.stopBoard = make(chan bool, 2)
	n.stopThread = make(chan bool, 2)
	n.stopPost = make(chan bool, 2)
	n.stopFile = make(chan bool, 2)
	n.Stats = NewNodeStats()
	n.Config = parseFlags()
	n.Storage = fourchan.NewStorage(n.Config.CassKeyspace, n.Config.CassEndpoints...)

	cfg := etcd.Config{
		Endpoints:               n.Config.EtcdEndpoints,
		Transport:               etcd.DefaultTransport,
		HeaderTimeoutPerRequest: 3 * time.Second,
	}
	c, err := etcd.New(cfg)
	if err != nil {
		log.Fatal("Failed to connect to etcd: ", err)
	}
	n.Keys = etcd.NewKeysAPI(c)
	n.Closed = false

	// TODO these chan sizes are rather arbitrary...
	n.CBoard = make(chan *fourchan.Board, numBoardRoutines)
	n.CThread = make(chan *fourchan.Thread, numThreadRoutines)
	n.CPost = make(chan *fourchan.Post, numPostRoutines)
	n.CFile = make(chan *fourchan.File, numFileRoutines)
	n.Files = make(map[int]string)
	return n
}
func main() {
	myFlagSet.Parse(os.Args[1:])
	logger := log.New(os.Stderr, "wr ", log.LstdFlags)

	client, err := etcd.New(etcd.Config{Endpoints: []string{"http://localhost:2379"}})
	if err != nil {
		logger.Fatalf("Failed setting up etcd client: %v", err)
	}
	w, err := watcher.New(myFlagSet, etcd.NewKeysAPI(client), "/example/flagz", logger)
	if err != nil {
		logger.Fatalf("Failed setting up watcher: %v", err)
	}
	if err := w.Initialize(); err != nil {
		logger.Fatalf("Failed initializing watcher: %v", err)
	}
	w.Start()

	for {
		logger.Printf("staticint: %v dynint: %v dynstring: %v", *staticInt, dynInt.Get(), dynStr.Get())
		time.Sleep(1500 * time.Millisecond)
	}
}
func (e *EtcdAuth) Init(endpoint string, path string) {
	e.path = path

	var endpoints []string
	endpoints = append(endpoints, fmt.Sprintf("http://%s", endpoint))

	e.etcdConfig = client.Config{
		Endpoints:               endpoints,
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second * 5,
	}

	// Initialize etcd client
	c, err := client.New(e.etcdConfig)
	if err != nil {
		log.Error.Printf("Failed to initialize etcd client: %v", err)
		return
	}
	e.etcdClient = c

	// Create a keys api
	// Are we okay to use this instance for the lifetime
	// of the application? What happens if the etcd
	// instance we are connecting to dies?
	e.kapi = client.NewKeysAPI(e.etcdClient)
}
func connectEtcd(ctx context.Context) error {
	var err error
	cfg := client.Config{
		Endpoints:               strings.Split(*etcdAddr, ","),
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
	}

	if *clusterName != "" {
		clusterEtcdPrefix = fmt.Sprintf("%s/cluster/%s", *etcdPrefix, *clusterName)
	} else {
		clusterEtcdPrefix = fmt.Sprintf("%s/cluster", *etcdPrefix)
	}

	etcdClient, err = client.New(cfg)
	if err != nil {
		// Return the error rather than log.Fatal, since the signature declares one.
		return err
	}
	kapi = client.NewKeysAPI(etcdClient)

	// Run Sync every 10 seconds
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			err := etcdClient.AutoSync(ctx, 10*time.Second)
			if err == context.DeadlineExceeded || err == context.Canceled {
				break
			}
			if err != nil {
				log.Print(err)
			}
		}
	}()

	return nil
}
// NewEtcdMinion creates a new minion with etcd backend
func NewEtcdMinion(config *EtcdMinionConfig) (Minion, error) {
	c, err := etcdclient.New(config.EtcdConfig)
	if err != nil {
		return nil, err
	}

	cwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}

	gitRepo, err := utils.NewGitRepo(filepath.Join(cwd, "site"), config.SiteRepo)
	if err != nil {
		return nil, err
	}

	id := utils.GenerateUUID(config.Name)
	rootDir := filepath.Join(EtcdMinionSpace, id.String())

	m := &etcdMinion{
		name:          config.Name,
		rootDir:       rootDir,
		queueDir:      filepath.Join(rootDir, "queue"),
		classifierDir: filepath.Join(rootDir, "classifier"),
		logDir:        filepath.Join(rootDir, "log"),
		id:            id,
		kapi:          etcdclient.NewKeysAPI(c),
		taskQueue:     make(chan *task.Task),
		gitRepo:       gitRepo,
		done:          make(chan struct{}),
	}

	return m, nil
}
func (etc *etcd) Init() error {
	var err error
	if len(etc.Endpoints) == 0 {
		return e.New("no endpoints")
	}

	cfg := client.Config{
		Endpoints: etc.Endpoints,
		//Transport: http.DefaultTransport,
	}

	c, err := client.New(cfg)
	if err != nil {
		return e.Forward(err)
	}
	etc.kapi = client.NewKeysAPI(c)

	if etc.SecKeyRing == "" {
		return nil
	}

	kr, err := os.Open(etc.SecKeyRing)
	if err != nil {
		return e.Forward(err)
	}
	defer kr.Close()

	etc.cm, err = config.NewEtcdConfigManager(etc.Endpoints, kr)
	if err != nil {
		return e.Forward(err)
	}

	return nil
}
func NewKeysAPI(cfg etcd.Config) (etcd.KeysAPI, error) {
	eCli, err := etcd.New(cfg)
	if err != nil {
		return nil, err
	}
	return etcd.NewKeysAPI(eCli), nil
}
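// A minimal sketch showing how NewKeysAPI above might be called; the
// endpoint is a placeholder and exampleSetKey is a hypothetical helper.
func exampleSetKey() error {
	kapi, err := NewKeysAPI(etcd.Config{
		Endpoints: []string{"http://127.0.0.1:2379"},
	})
	if err != nil {
		return err
	}
	// Set a key through the v2 KeysAPI; nil options use the defaults.
	_, err = kapi.Set(context.Background(), "/example/key", "value", nil)
	return err
}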
func (s *Store) New(c config.Config) *Store {
	// Open an etcd client
	var endpoints []string
	endpoints = append(endpoints, fmt.Sprintf("http://%v:%v", c.Etcd.Hostname, c.Etcd.Port))
	ec := client.Config{
		Endpoints: endpoints,
		Transport: client.DefaultTransport,
	}
	e, err := client.New(ec)
	if err != nil {
		log.Fatalln("Could not connect to etcd:", err)
	}

	// Create a new KeysAPI for etcd
	k := client.NewKeysAPI(e)

	ns := &Store{
		e:        e,
		c:        c,
		k:        k,
		basePath: fmt.Sprint(c.Etcd.BasePath, "/vips/"),
	}
	return ns
}
func main() {
	logger := log.New(os.Stderr, "server", log.LstdFlags)
	if err := serverFlags.Parse(os.Args[1:]); err != nil {
		logger.Fatalf("%v", err)
	}

	client, err := etcd.New(etcd.Config{Endpoints: *etcdEndpoints})
	if err != nil {
		logger.Fatalf("Failed setting up etcd %v", err)
	}
	w, err := watcher.New(serverFlags, etcd.NewKeysAPI(client), *etcdFlagzPath, logger)
	if err != nil {
		logger.Fatalf("Failed setting up watcher %v", err)
	}
	if err := w.Initialize(); err != nil {
		logger.Fatalf("Failed initializing watcher %v", err)
	}
	w.Start()
	logger.Printf("etcd flag value watching initialized")

	flagzEndpoint := flagz.NewStatusEndpoint(serverFlags)
	http.HandleFunc("/debug/flagz", flagzEndpoint.ListFlags)
	http.HandleFunc("/", handleDefaultPage)

	addr := fmt.Sprintf("%s:%d", *listenHost, *listenPort)
	logger.Printf("Serving at: %v", addr)
	if err := http.ListenAndServe(addr, http.DefaultServeMux); err != nil {
		logger.Fatalf("Failed serving: %v", err)
	}
	logger.Printf("Done, bye.")
}
func getFleetRegistryClient(fleetEndpoints []string) (fleetClient.API, error) {
	var dial func(string, string) (net.Conn, error)

	tlsConfig, err := fleetPkg.ReadTLSConfigFiles("", "", "")
	if err != nil {
		return nil, err
	}

	trans := &http.Transport{
		Dial:            dial,
		TLSClientConfig: tlsConfig,
	}

	timeout := 3 * time.Second

	eCfg := etcd.Config{
		Endpoints: fleetEndpoints,
		Transport: trans,
	}
	eClient, err := etcd.New(eCfg)
	if err != nil {
		return nil, err
	}

	kAPI := etcd.NewKeysAPI(eClient)
	reg := registry.NewEtcdRegistry(kAPI, registry.DefaultKeyPrefix, timeout)

	return &fleetClient.RegistryClient{Registry: reg}, nil
}
// NewEtcdConfig creates a new service discovery backend for etcd
func NewEtcdConfig(config map[string]interface{}) Etcd {
	etcd := Etcd{
		Prefix: "/containerbuddy",
	}
	etcdConfig := client.Config{}
	switch endpoints := config["endpoints"].(type) {
	case string:
		etcdConfig.Endpoints = []string{endpoints}
	case []string:
		etcdConfig.Endpoints = endpoints
	default:
		log.Fatal("Must provide etcd endpoints")
	}
	prefix, ok := config["prefix"].(string)
	if ok {
		etcd.Prefix = prefix
	}
	etcdClient, err := client.New(etcdConfig)
	if err != nil {
		log.Fatal(err)
	}
	etcd.Client = etcdClient
	etcd.API = client.NewKeysAPI(etcdClient)
	return etcd
}
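// A usage sketch for NewEtcdConfig with hypothetical values: per the type
// switch above, "endpoints" may be a single string or a []string, and
// "prefix" overrides the default "/containerbuddy".
func exampleNewEtcdConfig() Etcd {
	return NewEtcdConfig(map[string]interface{}{
		"endpoints": []string{"http://127.0.0.1:2379"},
		"prefix":    "/myapp",
	})
}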
// MakeEtcdClient creates an etcd client based on the provided config.
func MakeEtcdClient(etcdClientInfo configapi.EtcdConnectionInfo) (etcdclient.Client, error) {
	tlsConfig, err := restclient.TLSConfigFor(&restclient.Config{
		TLSClientConfig: restclient.TLSClientConfig{
			CertFile: etcdClientInfo.ClientCert.CertFile,
			KeyFile:  etcdClientInfo.ClientCert.KeyFile,
			CAFile:   etcdClientInfo.CA,
		},
	})
	if err != nil {
		return nil, err
	}

	transport := knet.SetTransportDefaults(&http.Transport{
		TLSClientConfig: tlsConfig,
		Dial: (&net.Dialer{
			// default from http.DefaultTransport
			Timeout: 30 * time.Second,
			// Lower the keep alive for connections.
			KeepAlive: 1 * time.Second,
		}).Dial,
		// Because watches are very bursty, defends against long delays in watch reconnections.
		MaxIdleConnsPerHost: 500,
	})

	cfg := etcdclient.Config{
		Endpoints: etcdClientInfo.URLs,
		// TODO: Determine if transport needs optimization
		Transport: transport,
	}
	return etcdclient.New(cfg)
}
func Conn() {
	cfg := client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379/", "http://127.0.0.1:4001"},
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	log.Print("Setting '/foo' key with 'bar' value")
	resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
	if err != nil {
		log.Fatal(err)
	} else {
		log.Printf("Set is done. Metadata is %q", resp)
	}

	log.Print("Getting '/foo' key value")
	resp, err = kapi.Get(context.Background(), "/foo", nil)
	if err != nil {
		log.Fatal(err)
	} else {
		log.Printf("Get is done. Metadata is %q", resp)
		log.Printf("%q key has %q value", resp.Node.Key, resp.Node.Value)
	}
}
func NewEtcdBackend(address string) (Backend, error) {
	if address == "" {
		address = "http://127.0.0.1:2379/vault"
	}
	u, err := url.Parse(address)
	if err != nil {
		return nil, maskAny(err)
	}
	path := u.Path
	// Ensure path is prefixed.
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}
	u.Path = ""
	endpoint := u.String()

	c, err := client.New(client.Config{
		Endpoints: []string{endpoint},
	})
	if err != nil {
		return nil, err
	}
	kAPI := client.NewKeysAPI(c)

	return &etcdBackend{
		path: path,
		kAPI: kAPI,
	}, nil
}
func TestBasic(t *testing.T) {
	cfg := client.Config{
		Endpoints:               []string{"http://127.0.0.1:4001"},
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Panicln(err)
	}
	kapi := client.NewKeysAPI(c)

	// Named regClient to avoid shadowing the etcd client package.
	regClient := EtcdReigistryClient{
		EtcdRegistryConfig{
			ServiceName:  "test",
			InstanceName: "test1",
			BaseURL:      "127.0.0.1:8080",
		},
		kapi,
	}

	regClient.Register()
	response, _ := regClient.ServicesByName("test")
	if len(response) == 0 {
		t.Error("No service registered")
	}

	regClient.Unregister()
	response, _ = regClient.ServicesByName("test")
	if len(response) != 0 {
		t.Error("Service not unregistered")
	}
}
/*
etcdMon monitors changes on the etcd server. It is designed to run in a
separate goroutine.
*/
func etcdMon(etcdRootPath string, config client.Config, bus chan fileChangeEvent, startIndex uint64) {
	c, err := client.New(config)
	if err != nil {
		panic(err)
	}
	kapi := client.NewKeysAPI(c)

	var nextEvent uint64 = startIndex
	for {
		response, err := kapi.Watcher(etcdRootPath, &client.WatcherOptions{AfterIndex: nextEvent, Recursive: true}).Next(context.Background())
		if err != nil {
			log.Println(err)
			time.Sleep(time.Second)
			continue
		}
		nextEvent = response.Index
		if response.Action == "delete" {
			bus <- fileChangeEvent{Path: response.Node.Key, IsRemoved: true, IsDir: response.Node.Dir}
			continue
		}
		if response.Node.Dir {
			bus <- fileChangeEvent{Path: response.Node.Key, IsDir: response.Node.Dir}
			continue
		}
		bus <- fileChangeEvent{Path: response.Node.Key, Content: []byte(response.Node.Value)}
	}
}
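// A launch sketch for etcdMon, using hypothetical placeholder values: the
// function blocks in its watch loop, so it runs in its own goroutine while
// the caller drains the bus channel.
func exampleEtcdMon() {
	cfg := client.Config{Endpoints: []string{"http://127.0.0.1:2379"}}
	bus := make(chan fileChangeEvent, 16)
	go etcdMon("/config", cfg, bus, 0)
	for event := range bus {
		log.Printf("path=%s removed=%v dir=%v", event.Path, event.IsRemoved, event.IsDir)
	}
}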
func TestKeepAlive(t *testing.T) {
	cfg := client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Panicln(err)
	}
	kapi := client.NewKeysAPI(c)

	// Named regClient to avoid shadowing the etcd client package.
	regClient := EtcdReigistryClient{
		EtcdRegistryConfig{
			ServiceName:  "test",
			InstanceName: "test1",
			BaseURL:      "127.0.0.1:8080",
		},
		kapi,
	}
	regClient.Register()

	time.Sleep(50 * time.Second)

	response, _ := regClient.ServicesByName("test")
	log.Println(response) // Hi
}
func etcdConnect() (client.Client, error) {
	cfg := client.Config{
		Endpoints: []string{etcdPeer},
	}
	return client.New(cfg)
}
func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) {
	u, err := url.Parse(durl)
	if err != nil {
		return nil, err
	}
	token := u.Path
	u.Path = ""
	pf, err := newProxyFunc(dproxyurl)
	if err != nil {
		return nil, err
	}

	// TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early
	tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second)
	if err != nil {
		return nil, err
	}
	tr.Proxy = pf
	cfg := client.Config{
		Transport: tr,
		Endpoints: []string{u.String()},
	}
	c, err := client.New(cfg)
	if err != nil {
		return nil, err
	}
	dc := client.NewKeysAPIWithPrefix(c, "")
	return &discovery{
		cluster: token,
		c:       dc,
		id:      id,
		url:     u,
		clock:   clockwork.NewRealClock(),
	}, nil
}
// NewEtcdMinion creates a new etcd minion
func NewEtcdMinion(name string, cfg etcdclient.Config) Minion {
	c, err := etcdclient.New(cfg)
	if err != nil {
		log.Fatal(err)
	}

	kapi := etcdclient.NewKeysAPI(c)
	id := utils.GenerateUUID(name)
	rootDir := filepath.Join(EtcdMinionSpace, id.String())
	queueDir := filepath.Join(rootDir, "queue")
	classifierDir := filepath.Join(rootDir, "classifier")
	logDir := filepath.Join(rootDir, "log")
	taskQueue := make(chan *task.Task)
	done := make(chan struct{})

	m := &etcdMinion{
		name:          name,
		rootDir:       rootDir,
		queueDir:      queueDir,
		classifierDir: classifierDir,
		logDir:        logDir,
		id:            id,
		kapi:          kapi,
		taskQueue:     taskQueue,
		done:          done,
	}

	return m
}
func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) {
	u, err := url.Parse(durl)
	if err != nil {
		return nil, err
	}
	token := u.Path
	u.Path = ""
	pf, err := newProxyFunc(dproxyurl)
	if err != nil {
		return nil, err
	}
	cfg := client.Config{
		Transport: &http.Transport{Proxy: pf},
		Endpoints: []string{u.String()},
	}
	c, err := client.New(cfg)
	if err != nil {
		return nil, err
	}
	dc := client.NewKeysAPIWithPrefix(c, "")
	return &discovery{
		cluster: token,
		c:       dc,
		id:      id,
		url:     u,
		clock:   clockwork.NewRealClock(),
	}, nil
}