Example #1
// New is used to create a storage client based on our configuration.
func New(config Config) (StoreClient, error) {
	if config.Backend == "" {
		config.Backend = "etcd"
	}
	backendNodes := config.BackendNodes
	log.Info("Backend nodes set to " + strings.Join(backendNodes, ", "))
	switch config.Backend {
	case "consul":
		return consul.New(config.BackendNodes, config.Scheme,
			config.ClientCert, config.ClientKey,
			config.ClientCaKeys)
	case "etcd":
		// Create the etcd client upfront and use it for the life of the process.
		// The etcdClient is an http.Client and designed to be reused.
		return etcd.NewEtcdClient(backendNodes, config.ClientCert, config.ClientKey, config.ClientCaKeys)
	case "zookeeper":
		return zookeeper.NewZookeeperClient(backendNodes)
	case "redis":
		return redis.NewRedisClient(backendNodes)
	case "env":
		return env.NewEnvClient()
	case "dynamodb":
		table := config.Table
		log.Info("DynamoDB table set to " + table)
		return dynamodb.NewDynamoDBClient(table)
	}
	return nil, errors.New("Invalid backend")
}
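For orientation, a minimal sketch of how this constructor might be called from inside the same package. The Config field names are taken from the switch above; the wrapper function and the node URL are invented for the example.

func newDefaultStore() (StoreClient, error) {
	// Backend falls back to "etcd" inside New when left empty; the node
	// address below is only an example value.
	cfg := Config{
		Backend:      "etcd",
		BackendNodes: []string{"http://127.0.0.1:4001"},
	}
	return New(cfg)
}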
Example #2
// sync compares the staged and dest config files and attempts to sync them
// if they differ. sync will run a config check command if set before
// overwriting the target config file. Finally, sync will run a reload command
// if set to have the application or service pick up the changes.
// It returns an error if any.
func (t *TemplateResource) sync() error {
	staged := t.StageFile.Name()
	defer os.Remove(staged)
	log.Debug("Comparing candidate config to " + t.Dest)
	ok, err := sameConfig(staged, t.Dest)
	if err != nil {
		log.Error(err.Error())
	}
	if config.Noop() {
		log.Warning("Noop mode enabled " + t.Dest + " will not be modified")
		return nil
	}
	if !ok {
		log.Info("Target config " + t.Dest + " out of sync")
		if t.CheckCmd != "" {
			if err := t.check(); err != nil {
				return errors.New("Config check failed: " + err.Error())
			}
		}
		log.Debug("Overwriting target config " + t.Dest)
		if err := os.Rename(staged, t.Dest); err != nil {
			return err
		}
		if t.ReloadCmd != "" {
			if err := t.reload(); err != nil {
				return err
			}
		}
		log.Info("Target config " + t.Dest + " has been updated")
	} else {
		log.Info("Target config " + t.Dest + " in sync")
	}
	return nil
}
Example #3
// sync compares the staged and dest config files and attempts to sync them
// if they differ. sync will run a config check command if set before
// overwriting the target config file. Finally, sync will run a reload command
// if set to have the application or service pick up the changes.
// It returns an error if any.
func (t *TemplateResource) sync() error {
	staged := t.StageFile.Name()
	defer os.Remove(staged)
	ok, err := sameConfig(staged, t.Dest)
	if err != nil {
		log.Error(err.Error())
	}
	if !ok {
		log.Info(t.Dest + " not in sync")
		if t.CheckCmd != "" {
			if err := t.check(); err != nil {
				return errors.New("Config check failed: " + err.Error())
			}
		}
		if err := os.Rename(staged, t.Dest); err != nil {
			return err
		}
		if t.ReloadCmd != "" {
			if err := t.reload(); err != nil {
				return err
			}
		}
	} else {
		log.Info(t.Dest + " in sync")
	}
	return nil
}
Example #4
// sameConfig reports whether src and dest config files are equal.
// Two config files are equal when they have the same file contents and
// Unix permissions. The owner, group, and mode must match.
// It returns false otherwise.
func sameConfig(src, dest string) (bool, error) {
	if !isFileExist(dest) {
		return false, nil
	}
	d, err := fileStat(dest)
	if err != nil {
		return false, err
	}
	s, err := fileStat(src)
	if err != nil {
		return false, err
	}
	if d.Uid != s.Uid {
		log.Info(fmt.Sprintf("%s has UID %d should be %d", dest, d.Uid, s.Uid))
	}
	if d.Gid != s.Gid {
		log.Info(fmt.Sprintf("%s has GID %d should be %d", dest, d.Gid, s.Gid))
	}
	if d.Mode != s.Mode {
		log.Info(fmt.Sprintf("%s has mode %s should be %s", dest, os.FileMode(d.Mode), os.FileMode(s.Mode)))
	}
	if d.Md5 != s.Md5 {
		log.Info(fmt.Sprintf("%s has md5sum %s should be %s", dest, d.Md5, s.Md5))
	}
	if d.Uid != s.Uid || d.Gid != s.Gid || d.Mode != s.Mode || d.Md5 != s.Md5 {
		return false, nil
	}
	return true, nil
}
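A small sketch of how a caller might consume the (bool, error) result, mirroring the way sync() above treats an error as "out of sync"; the wrapper function itself is hypothetical.

func needsUpdate(staged, dest string) bool {
	same, err := sameConfig(staged, dest)
	if err != nil {
		// As in sync(): log the error and fall through, so an unreadable
		// target is treated as out of sync rather than aborting.
		log.Error(err.Error())
	}
	return !same
}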
Example #5
// sync compares the staged and dest config files and attempts to sync them
// if they differ. sync will run a config check command if set before
// overwriting the target config file. Finally, sync will run a reload command
// if set to have the application or service pick up the changes.
// It returns an error if any.
func (t *TemplateResource) sync() error {
	staged := t.StageFile.Name()
	if t.keepStageFile {
		log.Info("Keeping staged file: " + staged)
	} else {
		defer os.Remove(staged)
	}

	log.Debug("Comparing candidate config to " + t.Dest)
	ok, err := sameConfig(staged, t.Dest)
	if err != nil {
		log.Error(err.Error())
	}
	if t.noop {
		log.Warning("Noop mode enabled. " + t.Dest + " will not be modified")
		return nil
	}
	if !ok {
		log.Info("Target config " + t.Dest + " out of sync")
		if t.CheckCmd != "" {
			if err := t.check(); err != nil {
				return errors.New("Config check failed: " + err.Error())
			}
		}
		log.Debug("Overwriting target config " + t.Dest)
		err := os.Rename(staged, t.Dest)
		if err != nil {
			if strings.Contains(err.Error(), "device or resource busy") {
				log.Debug("Rename failed - target is likely a mount. Trying to write instead")
				// try to open the file and write to it
				var contents []byte
				var rerr error
				contents, rerr = ioutil.ReadFile(staged)
				if rerr != nil {
					return rerr
				}
				err := ioutil.WriteFile(t.Dest, contents, t.FileMode)
				// make sure owner and group match the temp file, in case the file was created with WriteFile
				os.Chown(t.Dest, t.Uid, t.Gid)
				if err != nil {
					return err
				}
			} else {
				return err
			}
		}
		if t.ReloadCmd != "" {
			if err := t.reload(); err != nil {
				return err
			}
		}
		log.Info("Target config " + t.Dest + " has been updated")
	} else {
		log.Debug("Target config " + t.Dest + " in sync")
	}
	return nil
}
Example #6
func main() {
	flag.Parse()
	if printVersion {
		fmt.Printf("confd %s\n", Version)
		os.Exit(0)
	}
	if err := initConfig(); err != nil {
		log.Fatal(err.Error())
	}

	log.Info("Starting confd")

	storeClient, err := backends.New(backendsConfig)
	if err != nil {
		log.Fatal(err.Error())
	}

	templateConfig.StoreClient = storeClient
	if onetime {
		if err := template.Process(templateConfig); err != nil {
			os.Exit(1)
		}
		os.Exit(0)
	}

	stopChan := make(chan bool)
	doneChan := make(chan bool)
	errChan := make(chan error, 10)

	var processor template.Processor
	switch {
	case config.Watch:
		processor = template.WatchProcessor(templateConfig, stopChan, doneChan, errChan)
	default:
		processor = template.IntervalProcessor(templateConfig, stopChan, doneChan, errChan, config.Interval)
	}

	go processor.Process()

	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
	for {
		select {
		case err := <-errChan:
			log.Error(err.Error())
		case s := <-signalChan:
			log.Info(fmt.Sprintf("Captured %v. Exiting...", s))
			close(doneChan)
		case <-doneChan:
			os.Exit(0)
		}
	}
}
Example #7
// NewEtcdClient returns an *etcd.Client with a connection to named machines.
// It returns an error if a connection to the cluster cannot be made.
func NewEtcdClient(machines []string, cert, key string, caCert string) (*Client, error) {
	var c *goetcd.Client
	var err error
	if cert != "" && key != "" {
		c, err = goetcd.NewTLSClient(machines, cert, key, caCert)
		if err != nil {
			return &Client{c}, err
		}
	} else {
		c = goetcd.NewClient(machines)
	}
	// Configure the DialTimeout, since 1 second is often too short
	c.SetDialTimeout(time.Duration(3) * time.Second)

	maxConnectAttempts := 10
	for attempt := 1; attempt <= maxConnectAttempts; attempt++ {
		success := c.SetCluster(machines)
		if success {
			return &Client{c}, nil
		}

		if attempt == maxConnectAttempts {
			return &Client{c}, errors.New("cannot connect to etcd cluster: " + strings.Join(machines, ","))
		}
		log.Info(fmt.Sprintf("[Attempt: %d] Attempting access to etcd after 5 second sleep", attempt))
		time.Sleep(5 * time.Second)
	}

	return &Client{c}, nil
}
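A sketch of a plain, non-TLS call against a local node, assuming the signature above; the machine list is an example value and the wrapper function is hypothetical.

func connectLocalEtcd() (*Client, error) {
	// Empty cert and key skip the TLS branch; the constructor still
	// retries SetCluster up to maxConnectAttempts times before giving up.
	machines := []string{"http://127.0.0.1:4001"}
	return NewEtcdClient(machines, "", "", "")
}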
Example #8
func getTemplateResources(config Config) ([]*TemplateResource, error) {
	var lastError error
	templates := make([]*TemplateResource, 0)
	log.Debug("Loading template resources from confdir " + config.ConfDir)
	if !isFileExist(config.ConfDir) {
		log.Warning(fmt.Sprintf("Cannot load template resources: confdir '%s' does not exist", config.ConfDir))
		return nil, nil
	}
	paths, err := recursiveFindFiles(config.ConfigDir, "*toml")
	if err != nil {
		return nil, err
	}
	for _, p := range paths {
		t, err := NewTemplateResource(p, config)
		if err != nil {
			lastError = err
			continue
		}
		templates = append(templates, t)
	}

	// get dynamic TemplateResources
	log.Info("parse dynamic keys")
	result, err := config.StoreClient.GetValues([]string{"_confd"})
	if err == nil {
		for k, v := range result {
			log.Info("dynamic key: " + k + " / " + v)
			t, err := NewTemplateResource(config.ConfigDir+"/"+v, config)
			if err != nil {
				lastError = err
				continue
			}

			split := strings.Split(k, "/")
			key := "/" + split[len(split)-1]
			t.Dest = strings.Replace(t.Dest, "{{.token}}", key, 1)
			t.ReloadCmd = strings.Replace(t.ReloadCmd, "{{.token}}", key, 1)
			t.Prefix = key
			t.prefix = key
			templates = append(templates, t)
		}
	}

	return templates, lastError
}
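The dynamic-key handling above reduces to splitting the store key on "/" and substituting the trailing segment for the literal "{{.token}}" placeholder. A standalone illustration using only the standard library, with invented key and destination values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	k := "/_confd/myapp"                // example store key
	dest := "/etc/confd{{.token}}.conf" // example template destination

	split := strings.Split(k, "/")
	key := "/" + split[len(split)-1] // "/myapp"
	fmt.Println(strings.Replace(dest, "{{.token}}", key, 1))
	// Output: /etc/confd/myapp.conf
}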
Example #9
// New is used to create a storage client based on our configuration.
func New(config Config) (StoreClient, error) {
	if config.Backend == "" {
		config.Backend = "etcd"
	}
	backendNodes := config.BackendNodes
	log.Info("Backend nodes set to " + strings.Join(backendNodes, ", "))
	switch config.Backend {
	case "consul":
		return consul.New(config.BackendNodes, config.Scheme,
			config.ClientCert, config.ClientKey,
			config.ClientCaKeys)
	case "etcd":
		// Create the etcd client upfront and use it for the life of the process.
		// The etcdClient is an http.Client and designed to be reused.
		return etcd.NewEtcdClient(backendNodes, config.ClientCert, config.ClientKey, config.ClientCaKeys, config.BasicAuth, config.Username, config.Password)
	case "zookeeper":
		return zookeeper.NewZookeeperClient(backendNodes)
	case "rancher":
		return rancher.NewRancherClient(backendNodes)
	case "redis":
		return redis.NewRedisClient(backendNodes, config.ClientKey)
	case "env":
		return env.NewEnvClient()
	case "vault":
		vaultConfig := map[string]string{
			"app-id":   config.AppID,
			"user-id":  config.UserID,
			"username": config.Username,
			"password": config.Password,
			"token":    config.AuthToken,
			"cert":     config.ClientCert,
			"key":      config.ClientKey,
			"caCert":   config.ClientCaKeys,
		}
		return vault.New(backendNodes[0], config.AuthType, vaultConfig)
	case "dynamodb":
		table := config.Table
		log.Info("DynamoDB table set to " + table)
		return dynamodb.NewDynamoDBClient(table)
	case "stackengine":
		return stackengine.NewStackEngineClient(backendNodes, config.Scheme, config.ClientCert, config.ClientKey, config.ClientCaKeys, config.AuthToken)
	}
	return nil, errors.New("Invalid backend")
}
Example #10
func main() {
	// Most flags are defined in the confd/config package which allows us to
	// override configuration settings from the command line. Parse the flags now
	// to make them active.
	flag.Parse()
	if printVersion {
		fmt.Printf("confd %s\n", Version)
		os.Exit(0)
	}
	if configFile == "" {
		if IsFileExist(defaultConfigFile) {
			configFile = defaultConfigFile
		}
	}
	// Initialize the global configuration.
	log.Debug("Loading confd configuration")
	if err := config.LoadConfig(configFile); err != nil {
		log.Fatal(err.Error())
	}
	// Configure logging. Debug and verbose logging can be enabled, but if quiet
	// is set to true then debug and verbose messages will not be printed.
	log.SetQuiet(config.Quiet())
	log.SetVerbose(config.Verbose())
	log.SetDebug(config.Debug())
	log.Notice("Starting confd")

	// Create the storage client
	log.Notice("Backend set to " + config.Backend())
	store, err := backends.New(config.Backend())
	if err != nil {
		log.Fatal(err.Error())
	}

	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
	for {
		runErrors := template.ProcessTemplateResources(store)
		// If the -onetime flag is passed on the command line we immediately exit
		// after processing the template config files.
		if onetime {
			if len(runErrors) > 0 {
				os.Exit(1)
			}
			os.Exit(0)
		}
		select {
		case c := <-signalChan:
			log.Info(fmt.Sprintf("captured %v exiting...", c))
			os.Exit(0)
		case <-time.After(time.Duration(config.Interval()) * time.Second):
			// Continue processing templates.
		}
	}
}
Example #11
// New is used to create a storage client based on our configuration.
func New(config Config) (StoreClient, error) {
	if config.Backend == "" {
		config.Backend = "etcd"
	}
	backendNodes := config.BackendNodes
	log.Info("Backend nodes set to " + strings.Join(backendNodes, ", "))
	switch config.Backend {
	case "consul":
		return consul.New(config.BackendNodes, config.Scheme,
			config.ClientCert, config.ClientKey,
			config.ClientCaKeys)
	case "etcd":
		// Create the etcd client upfront and use it for the life of the process.
		// The etcdClient is an http.Client and designed to be reused.
		return etcd.NewEtcdClient(backendNodes, config.ClientCert, config.ClientKey, config.ClientCaKeys)
	case "zookeeper":
		return zookeeper.NewZookeeperClient(backendNodes)
	case "redis":
		return redis.NewRedisClient(backendNodes)
	case "env":
		return env.NewEnvClient()
	case "dynamodb":
		table := config.Table
		log.Info("DynamoDB table set to " + table)
		return dynamodb.NewDynamoDBClient(table)
	case "stackengine":
		return stackengine.NewStackEngineClient(backendNodes, config.Scheme, config.ClientCert, config.ClientKey, config.ClientCaKeys, config.AuthToken)
	case "autoscaling":
		asg := config.Asg
		region := config.AWSRegion
		log.Info("Auto Scaling Group set to " + asg)
		log.Debug("AWS Region set to " + region)
		return autoscaling.NewAsgClient(asg, &region)
	}
	return nil, errors.New("Invalid backend")
}
Example #12
// setEtcdHosts sets config.Confd.EtcdNodes, either from the SRV record of the
// configured domain or by normalizing each configured node to a scheme://host:port URL.
func setEtcdHosts() error {
	scheme := config.Confd.EtcdScheme
	hosts := make([]string, 0)
	// If a domain name is given then lookup the etcd SRV record, and override
	// all other etcd node settings.
	if config.Confd.SRVDomain != "" {
		log.Info("SRV domain set to " + config.Confd.SRVDomain)
		etcdHosts, err := getEtcdHostsFromSRV(config.Confd.SRVDomain)
		if err != nil {
			return errors.New("Cannot get etcd hosts from SRV records " + err.Error())
		}
		for _, h := range etcdHosts {
			uri := formatEtcdHostURL(scheme, h.Hostname, strconv.FormatUint(uint64(h.Port), 10))
			hosts = append(hosts, uri)
		}
		config.Confd.EtcdNodes = hosts
		return nil
	}
	// No domain name was given, so just process the etcd node list.
	// An etcdNode can be a URL, http://etcd.example.com:4001, or a host, etcd.example.com:4001.
	for _, node := range config.Confd.EtcdNodes {
		etcdURL, err := url.Parse(node)
		if err != nil {
			log.Error(err.Error())
			return err
		}
		if etcdURL.Scheme != "" && etcdURL.Host != "" {
			if !isValidateEtcdScheme(etcdURL.Scheme) {
				return errors.New("The etcd node list contains an invalid URL: " + node)
			}
			host, port, err := net.SplitHostPort(etcdURL.Host)
			if err != nil {
				return err
			}
			hosts = append(hosts, formatEtcdHostURL(etcdURL.Scheme, host, port))
			continue
		}
		// At this point node is not an etcd URL, i.e. http://etcd.example.com:4001,
		// but a host:port string, i.e. etcd.example.com:4001
		host, port, err := net.SplitHostPort(node)
		if err != nil {
			return err
		}
		hosts = append(hosts, formatEtcdHostURL(scheme, host, port))
	}
	config.Confd.EtcdNodes = hosts
	return nil
}
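For reference, the two node forms accepted by the loop above behave like this when run through the standard library; the host names are examples only.

package main

import (
	"fmt"
	"net"
	"net/url"
)

func main() {
	// Full URL: scheme and host are taken from the URL itself.
	u, _ := url.Parse("http://etcd.example.com:4001")
	host, port, _ := net.SplitHostPort(u.Host)
	fmt.Println(u.Scheme, host, port) // http etcd.example.com 4001

	// Bare host:port: the configured scheme is applied instead.
	host, port, _ = net.SplitHostPort("etcd.example.com:4001")
	fmt.Println(host, port) // etcd.example.com 4001
}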
Example #13
func NewRancherClient(backendNodes []string) (*Client, error) {
	url := MetaDataURL

	if len(backendNodes) > 0 {
		url = "http://" + backendNodes[0]
	}

	log.Info("Using Rancher Metadata URL: " + url)
	client := &Client{
		url:        url,
		httpClient: &http.Client{},
	}

	err := client.testConnection()
	return client, err

}
Example #14
// New returns a *vault.Client with a connection to named machines.
// It returns an error if a connection to the cluster cannot be made.
func New(address, authType string, params map[string]string) (*Client, error) {
	if authType == "" {
		return nil, errors.New("you have to set the auth type when using the vault backend")
	}
	log.Info(fmt.Sprintf("Vault authentication backend set to %s", authType))
	conf, err := getConfig(address, params["cert"], params["key"], params["caCert"])

	if err != nil {
		return nil, err
	}

	c, err := vaultapi.NewClient(conf)
	if err != nil {
		return nil, err
	}

	if err := authenticate(c, authType, params); err != nil {
		return nil, err
	}
	return &Client{c}, nil
}
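A sketch of a possible call from the same package; the wrapper function is hypothetical. The parameter names mirror the vaultConfig map built in the backends example above, while the address, token value, and the set of accepted authType strings are assumptions (the authenticate helper that validates them is not shown here).

func newVaultClient(authType string) (*Client, error) {
	params := map[string]string{
		"token":  "example-token", // placeholder credential
		"cert":   "",
		"key":    "",
		"caCert": "",
	}
	// getConfig only consumes cert, key, and caCert; the remaining
	// parameters are passed through to authenticate.
	return New("https://vault.example.com:8200", authType, params)
}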
Example #15
func main() {
	log.Info("Starting confd")
	// All flags are defined in the confd/config package, which allows us to
	// override configuration settings from the command line. Parse the flags
	// now to make them active.
	flag.Parse()
	if err := InitConfig(); err != nil {
		log.Fatal(err.Error())
	}
	for {
		if err := ProcessTemplateResources(); err != nil {
			log.Error(err.Error())
		}
		// If the -onetime flag is passed on the command line we immediately exit
		// after processing the template config files.
		if Onetime() {
			break
		}
		// By default we poll etcd every 30 seconds
		time.Sleep(time.Duration(Interval()) * time.Second)
	}
}
Example #16
// GetValues retrieves the private and public ips and DNS names
// of instances with HealthStatus == "Healthy" and
// LifecycleState == "InService" for the Auto Scaling Group in c.asg
func (c *Client) GetValues(keys []string) (map[string]string, error) {
	vars := make(map[string]string)

	asgResponse, err := c.asgClient.DescribeAutoScalingGroups(
		&autoscaling.DescribeAutoScalingGroupsInput{
			AutoScalingGroupNames: []*string{&c.asg},
		},
	)
	if err != nil {
		return nil, err
	}
	if len(asgResponse.AutoScalingGroups) == 0 {
		log.Info("Can't find Auto Scaling Group with name '" + c.asg + "'")
		return nil, errors.New("Can't find Auto Scaling Group with name '" + c.asg + "'")
	}
	asg := asgResponse.AutoScalingGroups[0]

	instance_ids := []*string{}
	for _, instance := range asg.Instances {
		if *instance.HealthStatus == "Healthy" && *instance.LifecycleState == "InService" {
			instance_ids = append(instance_ids, instance.InstanceId)
		}
	}
	if len(instance_ids) == 0 {
		log.Info("Can't find any instances in Auto Scaling Group with name '" + c.asg + "'")
		return nil, errors.New("Can't find any instances in Auto Scaling Group with name '" + c.asg + "'")
	}

	ec2Response, err := c.ec2Client.DescribeInstances(
		&ec2.DescribeInstancesInput{
			InstanceIds: instance_ids,
		},
	)
	if err != nil {
		log.Error("Failed describing instances")
		return nil, err
	}

	instances := map[string]map[string]string{}
	for _, reservation := range ec2Response.Reservations {
		for _, instance := range reservation.Instances {
			if instance.InstanceId != nil {
				// Do not include instance if it doesn't have a PrivateIpAddress
				if instance.PrivateIpAddress != nil {
					instances[*instance.InstanceId] = map[string]string{
						"PrivateIpAddress": *instance.PrivateIpAddress,
					}
					if instance.PrivateDnsName != nil {
						instances[*instance.InstanceId]["PrivateDnsName"] = *instance.PrivateDnsName
					}
					if instance.PublicIpAddress != nil {
						instances[*instance.InstanceId]["PublicIpAddress"] = *instance.PublicIpAddress
					}
					if instance.PublicDnsName != nil {
						instances[*instance.InstanceId]["PublicDnsName"] = *instance.PublicDnsName
					}
				}
			}
		}
	}
	var instancesKeys []string
	for k := range instances {
		instancesKeys = append(instancesKeys, k)
	}
	sort.Strings(instancesKeys)

	i := 0
	for _, k := range instancesKeys {
		iStr := strconv.Itoa(i)
		vars["privateIps/"+iStr] = instances[k]["PrivateIpAddress"]
		vars["privateDnsNames/"+iStr] = instances[k]["PrivateDnsName"]
		if _, present := instances[k]["PublicIpAddress"]; present {
			vars["publicIps/"+iStr] = instances[k]["PublicIpAddress"]
		}
		if _, present := instances[k]["PublicDnsName"]; present {
			vars["publicDnsNames/"+iStr] = instances[k]["PublicDnsName"]
		}
		i++
	}
	return vars, nil
}
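For illustration, the map returned for two healthy, in-service instances would have roughly this shape; the index order follows the sorted instance IDs, and the addresses below are invented.

func exampleAutoscalingValues() map[string]string {
	return map[string]string{
		"privateIps/0":      "10.0.0.11",
		"privateDnsNames/0": "ip-10-0-0-11.ec2.internal",
		"privateIps/1":      "10.0.0.12",
		"privateDnsNames/1": "ip-10-0-0-12.ec2.internal",
		// publicIps/<n> and publicDnsNames/<n> are added only when the
		// instance actually has a public address.
	}
}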
Example #17
func (this *BucketListener) Connected(bucketName string) {
	log.Info("Connected! " + bucketName)
}
Example #18
func (this *BucketListener) Disconnected(bucketName string, err error) {
	log.Info("Disconnected! " + bucketName)
	this.watchResp <- &watchResponse{waitIndex: this.currentIndex, err: err}
}
Example #19
// initConfig initializes the confd configuration by first setting defaults,
// then overriding settings from the confd config file, then overriding
// settings from environment variables, and finally overriding
// settings from flags set on the command line.
// It returns an error if any.
func initConfig() error {
	if configFile == "" {
		if _, err := os.Stat(defaultConfigFile); !os.IsNotExist(err) {
			configFile = defaultConfigFile
		}
	}
	// Set defaults.
	config = Config{
		Backend:  "etcd",
		ConfDir:  "/etc/confd",
		Interval: 600,
		Prefix:   "/",
		Scheme:   "http",
	}
	// Update config from the TOML configuration file.
	if configFile == "" {
		log.Debug("Skipping confd config file.")
	} else {
		log.Debug("Loading " + configFile)
		configBytes, err := ioutil.ReadFile(configFile)
		if err != nil {
			return err
		}
		_, err = toml.Decode(string(configBytes), &config)
		if err != nil {
			return err
		}
	}

	// Update config from environment variables.
	processEnv()

	// Update config from commandline flags.
	processFlags()

	if config.LogLevel != "" {
		log.SetLevel(config.LogLevel)
	}

	// Update BackendNodes from SRV records.
	if config.Backend != "env" && config.SRVDomain != "" {
		log.Info("SRV domain set to " + config.SRVDomain)
		srvNodes, err := getBackendNodesFromSRV(config.Backend, config.SRVDomain, config.Scheme)
		if err != nil {
			return errors.New("Cannot get nodes from SRV records " + err.Error())
		}
		config.BackendNodes = srvNodes
	}
	if len(config.BackendNodes) == 0 {
		switch config.Backend {
		case "consul":
			config.BackendNodes = []string{"127.0.0.1:8500"}
		case "etcd":
			peerstr := os.Getenv("ETCDCTL_PEERS")
			if len(peerstr) > 0 {
				config.BackendNodes = strings.Split(peerstr, ",")
			} else {
				config.BackendNodes = []string{"http://127.0.0.1:4001"}
			}
		case "redis":
			config.BackendNodes = []string{"127.0.0.1:6379"}
		case "zookeeper":
			config.BackendNodes = []string{"127.0.0.1:2181"}
		}
	}
	// Initialize the storage client
	log.Info("Backend set to " + config.Backend)

	if config.Watch {
		unsupportedBackends := map[string]bool{
			"zookeeper": true,
			"redis":     true,
			"dynamodb":  true,
			"rancher":   true,
		}

		if unsupportedBackends[config.Backend] {
			log.Info(fmt.Sprintf("Watch is not supported for backend %s. Exiting...", config.Backend))
			os.Exit(1)
		}
	}

	if config.Backend == "dynamodb" && config.Table == "" {
		return errors.New("No DynamoDB table configured")
	}

	backendsConfig = backends.Config{
		AuthToken:    config.AuthToken,
		Backend:      config.Backend,
		ClientCaKeys: config.ClientCaKeys,
		ClientCert:   config.ClientCert,
		ClientKey:    config.ClientKey,
		BackendNodes: config.BackendNodes,
		Scheme:       config.Scheme,
		Table:        config.Table,
	}
	// Template configuration.
	templateConfig = template.Config{
		ConfDir:       config.ConfDir,
		ConfigDir:     filepath.Join(config.ConfDir, "conf.d"),
		KeepStageFile: keepStageFile,
		Noop:          config.Noop,
		Prefix:        config.Prefix,
		TemplateDir:   filepath.Join(config.ConfDir, "templates"),
	}
	return nil
}
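The toml.Decode step above can be exercised in isolation. A standalone sketch, assuming the toml package here is BurntSushi's (its Decode signature matches the call above) and a struct without toml tags, in which case keys should match field names case-insensitively; the struct, keys, and values are illustrative only.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type exampleConfig struct {
	Backend  string
	ConfDir  string
	Interval int
}

func main() {
	// The keys below are example settings, not a complete confd config.
	data := `
backend  = "etcd"
confdir  = "/etc/confd"
interval = 60
`
	var cfg exampleConfig
	if _, err := toml.Decode(data, &cfg); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", cfg) // {Backend:etcd ConfDir:/etc/confd Interval:60}
}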
Example #20
func (this *BucketListener) Deleted(bucketName string) {
	log.Info("deleted " + bucketName)
	this.watchResp <- &watchResponse{waitIndex: 0, err: errors.New(bucketName + " was deleted")}
}