Example #1
func Store(api etcd.KeysAPI, path string, ttl time.Duration) frameworkid.Storage {
	// TODO(jdef) validate Config
	return &storage{
		LookupFunc: func(ctx context.Context) (string, error) {
			if response, err := api.Get(ctx, path, nil); err != nil {
				if !etcdutil.IsEtcdNotFound(err) {
					return "", fmt.Errorf("unexpected failure attempting to load framework ID from etcd: %v", err)
				}
			} else {
				return response.Node.Value, nil
			}
			return "", nil
		},
		RemoveFunc: func(ctx context.Context) (err error) {
			if _, err = api.Delete(ctx, path, &etcd.DeleteOptions{Recursive: true}); err != nil {
				if !etcdutil.IsEtcdNotFound(err) {
					return fmt.Errorf("failed to delete framework ID from etcd: %v", err)
				}
			}
			return
		},
		StoreFunc: func(ctx context.Context, id string) (err error) {
			_, err = api.Set(ctx, path, id, &etcd.SetOptions{TTL: ttl})
			return
		},
	}
}
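Every example on this page takes an already constructed client.KeysAPI (or etcd.KeysAPI) value. As a reference point, here is a minimal sketch, assuming the etcd v2 client (github.com/coreos/etcd/client); the endpoint, timeout, and key are placeholder values and are not taken from any example above.

// Sketch (not from the examples above): constructing a client.KeysAPI
// with the etcd v2 client. Endpoint, timeout, and key are placeholders.
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	cfg := client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	resp, err := kapi.Get(context.Background(), "/some/key", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s = %s", resp.Node.Key, resp.Node.Value)
}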
Example #2
func mustGetServices(ctx context.Context, client etcd.KeysAPI, basepath *string) services {
	resp, err := client.Get(ctx, *basepath, &etcd.GetOptions{Recursive: true})
	if err != nil {
		log.WithFields(log.Fields{
			"error":    err,
			"basepath": *basepath,
		}).Fatal("unable to get service definitions from etcd")
	}

	var svcs services
	for _, node := range resp.Node.Nodes {
		s, err := newService(node.Key, []byte(node.Value))
		if err != nil {
			log.WithFields(log.Fields{
				"error":    err,
				"basepath": *basepath,
				"key":      node.Key,
			}).Warn("invalid service definition. skipping.")
		} else {
			svcs = append(svcs, s)
		}
	}

	return svcs
}
Example #3
File: etcd.go Project: helgi/pkg
// keysToLocal copies SSH host keys from etcd to the local file system.
//
// This only fails if the main key, sshHostKey, cannot be stored or retrieved.
func keysToLocal(c cookoo.Context, k client.KeysAPI, ciphers []string, etcdPath string) error {
	lpath := "/etc/ssh/ssh_host_%s_key"
	privkey := "%s/sshHost%sKey"
	for _, cipher := range ciphers {
		path := fmt.Sprintf(lpath, cipher)
		key := fmt.Sprintf(privkey, etcdPath, cipher)
		res, err := k.Get(dctx(), key, &client.GetOptions{})
		if err != nil || res.Node == nil {
			continue
		}

		content := res.Node.Value
		if err := ioutil.WriteFile(path, []byte(content), 0600); err != nil {
			log.Errf(c, "Error writing ssh host key file: %s", err)
		}
	}

	// Now get generic key.
	res, err := k.Get(dctx(), "sshHostKey", &client.GetOptions{})
	if err != nil || res.Node == nil {
		return fmt.Errorf("Failed to get sshHostKey from etcd. %v", err)
	}

	content := res.Node.Value
	if err := ioutil.WriteFile("/etc/ssh/ssh_host_key", []byte(content), 0600); err != nil {
		log.Errf(c, "Error writing ssh host key file: %s", err)
		return err
	}
	return nil
}
Example #4
func cleanBackends(kapi client.KeysAPI) {

	resp, err := kapi.Get(context.Background(), "/vulcand/backends/", &client.GetOptions{Recursive: true})
	if err != nil {
		if e, _ := err.(client.Error); e.Code == etcderr.EcodeKeyNotFound {
			return
		}
		panic(err)
	}
	if !resp.Node.Dir {
		log.Println("/vulcand/backends is not a directory.")
		return
	}
	for _, be := range resp.Node.Nodes {
		beHasContent := false
		if be.Dir {
			for _, child := range be.Nodes {
				// anything apart from an empty "servers" dir means this is needed.
				if filepath.Base(child.Key) != "servers" || len(child.Nodes) > 0 {
					beHasContent = true
					break
				}
			}
		}
		if !beHasContent {
			_, err := kapi.Delete(context.Background(), be.Key, &client.DeleteOptions{Recursive: true})
			if err != nil {
				log.Printf("failed to remove unwanted backend %v\n", be.Key)
			}
		}
	}

}
Example #5
func NewPubSubTopicByKey(ctx context.Context, keyid string, ttl time.Duration, kapi etcdc.KeysAPI) (*EtcdPubSubTopic, error) {
	_, err := kapi.Get(ctx, keyid, nil)
	if IsKeyNotFound(err) {
		opt := &etcdc.SetOptions{PrevExist: etcdc.PrevNoExist, TTL: ttl, Dir: true}
		_, err = kapi.Set(ctx, keyid, "", opt)
		if err != nil && !IsCompareAndSwapFailure(err) && !IsNodeExists(err) {
			return nil, err
		}
	} else if err != nil {
		return nil, err
	}

	keepalive, err := NewNodeKeepAlive(keyid, ttl, kapi)
	if err != nil {
		return nil, err
	}

	dlog("signal: new signal(pub/sub) for %v, ttl:%v", keyid, ttl)

	return &EtcdPubSubTopic{
		ctx:       ctx,
		keyid:     keyid,
		kapi:      kapi,
		ttl:       ttl,
		keepalive: keepalive,
		stop:      make(chan bool),
	}, nil
}
Example #6
func generateConfig(kapi client.KeysAPI) (*client.Response, error) {
	resp, err := kapi.Get(context.Background(), *etcdPrefix, &client.GetOptions{Recursive: true})
	if err != nil {
		return resp, err
	}
	traverseConfigDirectory(resp.Node)
	return resp, nil
}
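generateConfig hands the entire recursive response to traverseConfigDirectory, whose body is not shown in this example. A hypothetical sketch of such a traversal over the client.Node tree could look like the following; walkNode and visit are illustrative names, not part of the project.

// Hypothetical sketch of a recursive walk over an etcd v2 node tree,
// in the spirit of traverseConfigDirectory (real body not shown above).
func walkNode(n *client.Node, visit func(key, value string)) {
	if n == nil {
		return
	}
	if !n.Dir {
		// Leaf node: report its key and value.
		visit(n.Key, n.Value)
		return
	}
	// Directory node: descend into each child.
	for _, child := range n.Nodes {
		walkNode(child, visit)
	}
}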
Example #7
// ClientGet gets data from etcd sending in an url and receiving a etcd.Response object
func ClientGet(kapi client.KeysAPI, url string) *client.Response {
	resp, err := kapi.Get(context.Background(), url, &clientGetOpts)
	if err != nil {
		logr.LogLine(logr.Lfatal, ltagsrc, err.Error())
		os.Exit(2)
	}
	return resp
}
Example #8
func doDumpQueue(kapi client.KeysAPI) {
	var key = queuebase + *dumpqueuename

	resp, err := kapi.Get(context.TODO(), key, &client.GetOptions{Sort: true})

	if err != nil {
		log.Fatal(err)
	}
	for _, v := range resp.Node.Nodes {
		fmt.Println(v.Key + " set to " + v.Value)
	}
}
Example #9
func dirExists(dir string, c *cli.Context, ki client.KeysAPI) (bool, error) {
	ctx, cancel := contextWithCommandTimeout(c)
	_, err := ki.Get(ctx, dir, &client.GetOptions{})
	cancel()
	if err != nil {
		// Error code 100 is "key not found": the directory simply does not exist yet.
		if cerr, ok := err.(client.Error); ok && cerr.Code == 100 {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
Example #10
func isDir(dir string, c *cli.Context, ki client.KeysAPI) (bool, error) {
	ctx, cancel := contextWithCommandTimeout(c)
	resp, err := ki.Get(ctx, dir, &client.GetOptions{})
	cancel()
	if err != nil {
		return false, err
	}
	// Report whether the node at this key is a directory.
	return resp.Node.Dir, nil
}
Example #11
func readServices(kapi client.KeysAPI) []Service {
	resp, err := kapi.Get(context.Background(), "/ft/services/", &client.GetOptions{Recursive: true})
	if err != nil {
		log.Println("error reading etcd keys")
		if e, _ := err.(client.Error); e.Code == etcderr.EcodeKeyNotFound {
			log.Println("core key not found")
			return []Service{}
		}
		log.Panicf("failed to read from etcd: %v\n", err.Error())
	}
	if !resp.Node.Dir {
		log.Panicf("%v is not a directory", resp.Node.Key)
	}

	var services []Service
	for _, node := range resp.Node.Nodes {
		if !node.Dir {
			log.Printf("skipping non-directory %v\n", node.Key)
			continue
		}
		service := Service{
			Name:         filepath.Base(node.Key),
			Addresses:    make(map[string]string),
			PathPrefixes: make(map[string]string),
			PathHosts:    make(map[string]string),
		}
		for _, child := range node.Nodes {
			switch filepath.Base(child.Key) {
			case "healthcheck":
				service.HasHealthCheck = child.Value == "true"
			case "servers":
				for _, server := range child.Nodes {
					service.Addresses[filepath.Base(server.Key)] = server.Value
				}
			case "path-regex":
				for _, path := range child.Nodes {
					service.PathPrefixes[filepath.Base(path.Key)] = path.Value
				}
			case "path-host":
				for _, path := range child.Nodes {
					service.PathHosts[filepath.Base(path.Key)] = path.Value
				}
			case "failover-predicate":
				service.FailoverPredicate = child.Value
			default:
				fmt.Printf("skipped key %v for node %v\n", child.Key, child)
			}
		}
		services = append(services, service)
	}
	return services
}
Example #12
func loadConfig(client etcd.KeysAPI, config *server.Config) error {
	// Override what isn't set yet from the command line.
	configPath := "/" + msg.PathPrefix + "/config"
	resp, err := client.Get(ctx, configPath, nil)
	if err != nil {
		log.Printf("skydns: falling back to default configuration, could not read from etcd: %s", err)
		return nil
	}
	if err := json.Unmarshal([]byte(resp.Node.Value), config); err != nil {
		return fmt.Errorf("failed to unmarshal config: %s", err.Error())
	}
	return nil
}
Example #13
func readAllKeysFromEtcd(kapi client.KeysAPI, root string) (map[string]string, error) {
	m := make(map[string]string)

	resp, err := kapi.Get(context.Background(), root, &client.GetOptions{Recursive: true})
	if err != nil {
		if e, _ := err.(client.Error); e.Code == etcderr.EcodeKeyNotFound {
			return m, nil
		}
		panic(err)
	}
	addAllValuesToMap(m, resp.Node)
	return m, nil
}
Example #14
func get(kapi client.KeysAPI, key string, fn callback) {
	resp, err := kapi.Get(context.TODO(), key, nil)

	if err != nil {
		log.Printf("!! ERR: %v\n", err)
		return
	}

	if err := fn(resp.Node); err != nil {
		log.Printf("!! ERR: %v\n", err)
		log.Printf("!! Calling get again")
		get(kapi, key, fn)
	}
}
Example #15
// fetchConfigSection fetches a single config section from etcd into the given struct.
func fetchConfigSection(configStruct interface{}, location string, kapi etcd.KeysAPI) {
	cfg := reflect.ValueOf(configStruct).Elem()
	for i := 0; i < cfg.NumField(); i++ {
		key := cfg.Type().Field(i).Name
		resp, getErr := kapi.Get(context.Background(), location+key, nil)
		if getErr != nil {
			log.Printf("Error getting key %v: %v\n", key, getErr)
			continue
		}
		valErr := setConfigVal(resp.Node.Value, cfg.Field(i))
		if valErr != nil {
			log.Printf("Error setting config field %v: %v\n", key, valErr)
		}
	}
}
Example #16
// LoadConfig gets the JSON from ETCD and unmarshals it to the config object
func LoadConfig(kapi client.KeysAPI) (*Config, error) {
	// Read from remote config the first time
	resp, err := kapi.Get(context.Background(), etcdConfigPath, nil)
	if err != nil {
		return nil, err
	}

	// Unmarshal the config JSON into the cnf object
	newCnf := new(Config)
	if err := json.Unmarshal([]byte(resp.Node.Value), newCnf); err != nil {
		return nil, err
	}

	return newCnf, nil
}
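A hedged usage sketch for LoadConfig: kapi would be constructed as in the sketch after Example #1, and nothing is assumed here about the fields of the project's Config type.

// Hypothetical caller of LoadConfig; error handling only.
cnf, err := LoadConfig(kapi)
if err != nil {
	log.Fatalf("could not load config from etcd: %v", err)
}
log.Printf("loaded config: %+v", cnf)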
Example #17
// lsCommandFunc executes the "ls" command.
func lsCommandFunc(c *cli.Context, ki client.KeysAPI) {
	key := "/"
	if len(c.Args()) != 0 {
		key = c.Args()[0]
	}

	sort := c.Bool("sort")
	recursive := c.Bool("recursive")

	resp, err := ki.Get(context.TODO(), key, &client.GetOptions{Sort: sort, Recursive: recursive})
	if err != nil {
		handleError(ExitServerError, err)
	}

	printLs(c, resp)
}
Example #18
// exportCommandFunc exports data as either JSON, YAML or TOML.
func exportFunc(dir string, sort bool, file string, f iodatafmt.DataFmt, c *cli.Context, ki client.KeysAPI) {
	ctx, cancel := contextWithCommandTimeout(c)
	resp, err := ki.Get(ctx, dir, &client.GetOptions{Sort: sort, Recursive: true})
	cancel()
	if err != nil {
		fatal(err.Error())
	}

	// Export and write output.
	m := etcdmap.Map(resp.Node)
	if file != "" {
		iodatafmt.Write(file, m, f)
	} else {
		iodatafmt.Print(m, f)
	}
}
Example #19
// get retrieves all the jobs in an etcd server.
// It only gets the Jobs that have not been done.
//
//   curl http://$INFRA_PUBLIC_IP_0:2379/v2/keys/queue
//
func get(kapi client.KeysAPI, queueName string) (map[string]*Job, error) {
	resp, err := kapi.Get(context.Background(), queueName, nil)
	if err != nil {
		if err == context.Canceled {
			return nil, fmt.Errorf("ctx is canceled by another routine")
		} else if err == context.DeadlineExceeded {
			return nil, fmt.Errorf("ctx is attached with a deadline and it exceeded")
		} else if cerr, ok := err.(*client.ClusterError); ok {
			return nil, fmt.Errorf("*client.ClusterError %v", cerr.Errors())
		} else {
			return nil, fmt.Errorf("bad cluster endpoints, which are not etcd servers: %+v", err)
		}
	}
	if resp == nil {
		log.Printf("Empty resp: %+v\n", resp)
		return nil, nil
	}
	if resp.Node == nil {
		log.Printf("Empty Queue: %+v\n", resp)
		return nil, nil
	}
	if resp.Node.Nodes.Len() == 0 {
		log.Printf("Empty Queue: %+v\n", resp)
		return nil, nil
	}
	queueMap := make(map[string]*Job)
	for _, elem := range resp.Node.Nodes {
		if _, ok := queueMap[elem.Key]; !ok {
			job := Job{}
			if err := json.NewDecoder(strings.NewReader(elem.Value)).Decode(&job); err != nil {
				log.Println("json.NewDecoder error:", elem.Value, err)
				continue
			}
			job.ETCDKey = elem.Key
			ids := strings.Replace(elem.Key, "/"+queueName+"/", "", -1)
			idv, err := strconv.Atoi(ids)
			if err != nil {
				log.Println("strconv.Atoi error:", ids, err)
				continue
			}
			job.ETCDIndex = idv
			queueMap[elem.Key] = &job
		}
	}
	return queueMap, nil
}
Example #20
func waitForEtcd(api client.KeysAPI, c *cli.Context) error {
	_, err := api.Get(context.Background(), c.String("key-watch"), nil)
	if err != nil {
		// Key is not present yet: error code 100 (key not found) appears in the
		// error string, so watch the key until it shows up.
		if m, _ := regexp.MatchString("100", err.Error()); m {
			w := api.Watcher(c.String("key-watch"), &client.WatcherOptions{AfterIndex: 0, Recursive: false})
			_, err := w.Next(context.Background())
			if err != nil {
				return err
			}
			return nil
		} else {
			return err
		}
	}
	// key is already present
	return nil
}
Example #21
// lsCommandFunc executes the "ls" command.
func lsCommandFunc(c *cli.Context, ki client.KeysAPI) {
	key := "/"
	if len(c.Args()) != 0 {
		key = c.Args()[0]
	}

	sort := c.Bool("sort")
	recursive := c.Bool("recursive")
	quorum := c.Bool("quorum")

	ctx, cancel := contextWithTotalTimeout(c)
	resp, err := ki.Get(ctx, key, &client.GetOptions{Sort: sort, Recursive: recursive, Quorum: quorum})
	cancel()
	if err != nil {
		handleError(ExitServerError, err)
	}

	printLs(c, resp)
}
Example #22
func doServer(kapi client.KeysAPI) {
	var key = configbase + *servername

	settings := make(map[string]string)

	resp, err := kapi.Get(context.TODO(), key, &client.GetOptions{Recursive: true})
	if err != nil {
		log.Fatal(err)
	}

	for _, node := range resp.Node.Nodes {
		_, setting := path.Split(node.Key)
		settings[setting] = node.Value
	}

	fmt.Println(settings)

	watcher := kapi.Watcher(key, &client.WatcherOptions{Recursive: true})

	for {
		resp, err := watcher.Next(context.TODO())

		if err != nil {
			if _, ok := err.(*client.ClusterError); ok {
				continue
			}
			log.Fatal(err)
		}

		switch resp.Action {
		case "set":
			_, setting := path.Split(resp.Node.Key)
			settings[setting] = resp.Node.Value
		case "delete", "expire":
			_, setting := path.Split(resp.Node.Key)
			delete(settings, setting)
		}

		fmt.Println(settings)
	}

}
Example #23
// etcdGetInitialWatchState turns an etcd Get request into a watch equivalent
func etcdGetInitialWatchState(ctx context.Context, client etcd.KeysAPI, key string, recursive bool, incoming chan<- *etcd.Response) (resourceVersion uint64, err error) {
	opts := etcd.GetOptions{
		Recursive: recursive,
		Sort:      false,
	}
	resp, err := client.Get(ctx, key, &opts)
	if err != nil {
		if !etcdutil.IsEtcdNotFound(err) {
			glog.Errorf("watch was unable to retrieve the current index for the provided key (%q): %v", key, err)
			return resourceVersion, err
		}
		if etcdError, ok := err.(etcd.Error); ok {
			resourceVersion = etcdError.Index
		}
		return resourceVersion, nil
	}
	resourceVersion = resp.Index
	convertRecursiveResponse(resp.Node, resp, incoming)
	return
}
Example #24
// getCommandFunc executes the "get" command.
func getCommandFunc(c *cli.Context, ki client.KeysAPI) {
	if len(c.Args()) == 0 {
		handleError(ExitBadArgs, errors.New("key required"))
	}

	key := c.Args()[0]
	sorted := c.Bool("sort")

	resp, err := ki.Get(context.TODO(), key, &client.GetOptions{Sort: sorted})
	if err != nil {
		handleError(ExitServerError, err)
	}

	if resp.Node.Dir {
		fmt.Fprintln(os.Stderr, fmt.Sprintf("%s: is a directory", resp.Node.Key))
		os.Exit(1)
	}

	printResponseKey(resp, c.GlobalString("output"))
}
Example #25
func findLimit(kAPI client.KeysAPI, dir string) uint64 {
	now := time.Now().UnixNano()
	sevenDays := int64(7 * day)

	var (
		resp *client.Response
		err  error
		ct   uint64
		pt   uint64
		pi   uint64
	)

	for i := 0; ; i++ {
		resp, err = kAPI.Get(context.TODO(), dir, &client.GetOptions{Sort: true})
		if err == nil {
			break
		}
		if i >= 10 {
			fmt.Fprintln(os.Stderr, "failed after 10 retries:", err.Error())
			os.Exit(1)
		}
	}

	pt = math.MaxUint64
	// now we traverse all timestamps
	for _, n := range resp.Node.Nodes {
		ct, err = strconv.ParseUint(path.Base(n.Key), 16, 64)
		if err != nil {
			fmt.Printf("skipped bad timestamp %s\n", n.Key)
			continue
		}
		if ct > uint64(now-sevenDays) && pt < uint64(now-sevenDays) {
			return pi
		}
		pt = ct
		pi = n.CreatedIndex
	}

	return 0
}
Example #26
func compareAndSwapUntil(ctx context.Context, tries int, keyid string, kapi etcdc.KeysAPI,
	evaluator func(res *etcdc.Response, setOpts *etcdc.SetOptions) (val string, err error),
) error {
	// id is only populated when traced debug logging is enabled.
	id := int64(0)
	if Usedtracedlogging {
		id = dice().Int63()
	}
	for i := 0; i < tries; i++ {
		resp, err := kapi.Get(ctx, keyid, &etcdc.GetOptions{Quorum: true})
		if err != nil {
			dlog("%v kapi get error %v", keyid, err)
			return err
		}

		opt := &etcdc.SetOptions{}
		nv, err := evaluator(resp, opt)
		if err != nil {
			dlog("%v eval error %v", keyid, err)
			return err
		}

		dtrace("before: %v \tnewval:%v try:%v idx:%v key:%v", id, nv, i, resp.Index, keyid)
		_, err = kapi.Set(ctx, keyid, nv, opt)
		if err == nil {
			dlog("%v update successful %v", keyid, err)
			return nil
		} else if !IsCompareAndSwapFailure(err) {
			dlog("unexpected error %v", err)
			return err
		}

		dtrace("after : %v \tnewval:%v try:%v key:%v error: %v", id, nv, i, keyid, err)

		backoff(i)
	}

	return CASErrorOutOfRetries
}
Example #27
// getCommandFunc executes the "get" command.
func getCommandFunc(c *cli.Context, ki client.KeysAPI) {
	if len(c.Args()) == 0 {
		handleError(c, ExitBadArgs, errors.New("key required"))
	}

	key := c.Args()[0]
	sorted := c.Bool("sort")
	quorum := c.Bool("quorum")

	ctx, cancel := contextWithTotalTimeout(c)
	resp, err := ki.Get(ctx, key, &client.GetOptions{Sort: sorted, Quorum: quorum})
	cancel()
	if err != nil {
		handleError(c, ExitServerError, err)
	}

	if resp.Node.Dir {
		fmt.Fprintln(os.Stderr, fmt.Sprintf("%s: is a directory", resp.Node.Key))
		os.Exit(1)
	}

	printResponseKey(resp, c.GlobalString("output"))
}
Example #28
func (s *SchedulerServer) fetchFrameworkID(client etcd.KeysAPI) (*mesos.FrameworkID, error) {
	if s.failoverTimeout > 0 {
		if response, err := client.Get(context.TODO(), meta.FrameworkIDKey, nil); err != nil {
			if !etcdutil.IsEtcdNotFound(err) {
				return nil, fmt.Errorf("unexpected failure attempting to load framework ID from etcd: %v", err)
			}
			log.V(1).Infof("did not find framework ID in etcd")
		} else if response.Node.Value != "" {
			log.Infof("configuring FrameworkInfo with Id found in etcd: '%s'", response.Node.Value)
			return mutil.NewFrameworkID(response.Node.Value), nil
		}
	} else {
		//TODO(jdef) this seems like a totally hackish way to clean up the framework ID
		if _, err := client.Delete(context.TODO(), meta.FrameworkIDKey, &etcd.DeleteOptions{Recursive: true}); err != nil {
			if !etcdutil.IsEtcdNotFound(err) {
				return nil, fmt.Errorf("failed to delete framework ID from etcd: %v", err)
			}
			log.V(1).Infof("nothing to delete: did not find framework ID in etcd")
		}
	}
	return nil, nil
}
Example #29
func purge(kAPI client.KeysAPI, dir string, limit uint64, dry, oldv2 bool) {
	resp, err := kAPI.Get(context.TODO(), dir, &client.GetOptions{})
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	total := 0
	all := 0

	// now we traverse all {UUID}
	for _, n := range resp.Node.Nodes {
		all++
		// do not overload discovery service
		time.Sleep(10 * time.Millisecond)

		var (
			gresp *client.Response
			err   error
		)

		for i := 0; ; i++ {
			gresp, err = kAPI.Get(context.TODO(), n.Key, &client.GetOptions{})
			if err == nil {
				break
			}
			if i >= 10 {
				fmt.Fprintln(os.Stderr, "failed after 10 retries:", err.Error())
				os.Exit(1)
			}
		}

		if gresp.Node.CreatedIndex > limit {
			fmt.Printf("skipped young uuid directory %s with index %d\n", n.Key, gresp.Node.CreatedIndex)
			continue
		}
		// _config is a hidden dir, so we only purge "empty" directories.
		if len(gresp.Node.Nodes) != 0 {
			if !oldv2 {
				fmt.Printf("skipped non-empty uuid directory %s\n", n.Key)
				continue
			}
			for _, n := range gresp.Node.Nodes {
				if n.TTL != 0 {
					fmt.Println("skipped 0.4 uuid directory %s\n", n.Key)
					continue
				}
			}
		}
		fmt.Printf("found empty/old uuid directory %s and start to purge...\n", n.Key)

		for i := 0; ; i++ {
			if dry {
				break
			}

			_, err = kAPI.Delete(context.TODO(), n.Key, &client.DeleteOptions{Recursive: true})
			if err == nil {
				break
			}

			if eerr, ok := err.(client.Error); ok {
				if eerr.Code == client.ErrorCodeKeyNotFound {
					break
				}
			}

			if i >= 10 {
				fmt.Fprintln(os.Stderr, "failed after 10 retries:", err.Error())
				os.Exit(1)
			}
		}

		fmt.Printf("successfully purged uuid directory %s\n", n.Key)

		total++
		if total%10 == 0 {
			fmt.Printf("already purged %d directories\n", total)
			fmt.Printf("empty ratio = %f\n", float64(total)/float64(all))
		}
	}
}
Example #30
func getJobs(ctx context.Context, kapi client.KeysAPI) (map[string]*job, error) {
	rmapChan := make(chan map[string]*job)
	errChan := make(chan error)
	go func() {
		opts := &client.GetOptions{}
		opts.Recursive = false
		opts.Sort = false
		resp, err := kapi.Get(ctx, dirName, opts)
		if err != nil {
			errChan <- err
			return
		}
		if resp == nil {
			errChan <- fmt.Errorf("Empty Response: %+v", resp)
			return
		}
		if resp.Node == nil {
			fmt.Printf("Empty Queue: %+v\n", resp)
			rmapChan <- nil
			return
		}
		if resp.Node.Nodes.Len() == 0 {
			fmt.Printf("Empty Queue: %+v\n", resp)
			rmapChan <- nil
			return
		}
		queueMap := make(map[string]*job)
		for _, elem := range resp.Node.Nodes {
			if _, ok := queueMap[elem.Key]; !ok {
				j := job{}
				if err := json.NewDecoder(strings.NewReader(elem.Value)).Decode(&j); err != nil {

					log.WithFields(log.Fields{
						"event_type": "error",
						"value":      elem.Value,
						"error":      err,
					}).Errorln("getJobs json Decode error")

					j.Done = true
					j.Status = err.Error()
					if err := setJob(ctx, kapi, &j); err != nil {
						errChan <- err
						return
					}
					continue
				}
				j.ETCDKey = elem.Key
				id := strings.Replace(elem.Key, "/"+dirName+"/", "", -1)
				iv, err := strconv.Atoi(id)
				if err != nil {
					log.WithFields(log.Fields{
						"event_type": "error",
						"error":      err,
					}).Errorln("getJobs strconv.Atoi error")
					j.Done = true
					j.Status = err.Error()
					if err := setJob(ctx, kapi, &j); err != nil {
						errChan <- err
						return
					}
					continue
				}
				j.ETCDIndex = iv
				queueMap[elem.Key] = &j
			}
		}
		// Send the fully populated map once every queue entry has been processed.
		rmapChan <- queueMap
	}()
	select {
	case v := <-rmapChan:
		return v, nil

	case v := <-errChan:
		return nil, v

	case <-ctx.Done():
		return nil, ctx.Err()
	}
}