Example #1
func replenishWorker(durableFile string, c *Config, i int) (replenished bool, addr circuit.Addr, err error) {

	// Check if worker already running
	anchor := path.Join(c.Anchor, strconv.Itoa(i))
	dir, e := anchorfs.OpenDir(anchor)
	if e != nil {
		return false, nil, e
	}
	_, files, err := dir.Files()
	if err != nil {
		return false, nil, e
	}
	if len(files) > 0 {
		return false, nil, nil
	}

	// If not, start a new worker
	retrn, addr, err := circuit.Spawn(c.Workers[i].Host, []string{anchor}, start{}, durableFile, c.Workers[i].Port, c.ReadOnly)
	if err != nil {
		return false, nil, err
	}
	// The spawned start function's second return value carries its error, if any
	if retrn[1] != nil {
		err = retrn[1].(error)
		return false, addr, err
	}

	return true, addr, nil
}
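
A minimal sketch of how replenishWorker might be driven, assuming a supervision loop that tries each configured worker slot once. The maintainWorkers name and the log line are made up for illustration; only replenishWorker, Config, and c.Workers come from the example above.

// maintainWorkers is a hypothetical driver: it walks every configured worker
// slot and replenishes the ones that have no live anchor file.
func maintainWorkers(durableFile string, c *Config) error {
	for i := range c.Workers {
		replenished, addr, err := replenishWorker(durableFile, c, i)
		if err != nil {
			return err
		}
		if replenished {
			log.Printf("replenished worker %d at %v", i, addr)
		}
	}
	return nil
}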
Example #2
func top(query string, recurse bool) {
	dir, err := anchorfs.OpenDir(query)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Problem opening (%s)", err)
		os.Exit(1)
	}

	// Read files
	_, files, err := dir.Files()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Problem listing files (%s)", err)
		os.Exit(1)
	}

	// Print files
	for id, f := range files {
		topFile(path.Join(query, id.String()), id, f.Owner())
	}

	// Print sub-directories
	if recurse {
		dirs, err := dir.Dirs()
		if err != nil {
			fmt.Fprintf(os.Stderr, "Problem listing directories (%s)", err)
			os.Exit(1)
		}
		sort.Strings(dirs)

		for _, d := range dirs {
			top(path.Join(query, d), recurse)
		}
	}
}
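
The top example relies on a topFile helper that is not shown. A hedged sketch, assuming it prints one line per anchor file with the anchor path, the worker ID, and the owning worker's address; the actual output format may differ.

// topFile is a hypothetical sketch: one output line per anchor file, showing
// the full anchor path, the worker ID, and the address of the owning worker.
func topFile(anchor string, id fmt.Stringer, owner circuit.Addr) {
	fmt.Printf("%s\t%v\t%v\n", anchor, id, owner)
}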
Example #3
func ls(query string, recurse, short bool) {
	dir, err := anchorfs.OpenDir(query)
	if err != nil {
		log.Printf("Problem opening (%s)", err)
		os.Exit(1)
	}

	// Read dirs
	dirs, err := dir.Dirs()
	if err != nil {
		log.Printf("Problem listing directories (%s)", err)
		os.Exit(1)
	}
	sort.Strings(dirs)

	// Read files
	_, filesMap, err := dir.Files()
	if err != nil {
		log.Printf("Problem listing files (%s)", err)
		os.Exit(1)
	}
	files := fileMapToSlice(filesMap)
	sort.Strings(files)

	// Print sub-directories
	for _, d := range dirs {
		if !short {
			fmt.Println(path.Join(query, d))
		} else {
			fmt.Printf("/%s\n", d)
		}
		if recurse {
			ls(path.Join(query, d), recurse, short)
		}
	}
	// Print files
	for _, f := range files {
		if !short {
			fmt.Println(path.Join(query, f))
		} else {
			fmt.Printf("%s\n", f)
		}
	}
}
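
The ls example relies on a fileMapToSlice helper that is not shown. A minimal generic sketch, assuming only that the map returned by dir.Files() is keyed by IDs exposing a String() method (as the id.String() call in the top example suggests):

// fileMapToSlice flattens the file map returned by dir.Files() into a slice
// of name strings so the caller can sort and print them.
func fileMapToSlice[K interface {
	comparable
	fmt.Stringer
}, V any](files map[K]V) []string {
	names := make([]string, 0, len(files))
	for id := range files {
		names = append(names, id.String())
	}
	return names
}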
Example #4
func (cli *Client) addServer(shardKey xor.Key) {
	cli.lk.Lock()
	defer cli.lk.Unlock()
	anchor := cli.config.ShardAnchor(shardKey)
	dir, err := anchorfs.OpenDir(anchor)
	if err != nil {
		panic(err)
	}
	_, workers, err := dir.Files()
	if err != nil {
		panic(err)
	}
	// Pick an arbitrary worker from the map, dial it, and register it as this shard's server
	for _, file := range workers {
		x := circuit.Dial(file.Owner(), "vena")
		cli.metric.Add(&shard{shardKey, x})
		return
	}
	panic("found no shard workers")
}
Example #5
// StartAggregator.Start is a worker function that starts an infinite loop,
// which polls all reducers for their local top ten posts, computes the global
// top ten posts, and prints them out.
func (StartAggregator) Start(reducerAnchor string) {
	circuit.Daemonize(func() {
		for {
			time.Sleep(2 * time.Second)

			// Read anchor directory containing all live reducers
			d, err := anchorfs.OpenDir(reducerAnchor)
			if err != nil {
				println("opendir:", err.Error())
				continue
			}
			// List all anchor files; they correspond to circuit workers hosting Reducer objects
			_, files, err := d.Files()
			if err != nil {
				println("files:", err.Error())
				continue
			}
			// Fetch top ten posts from each reducer, in parallel
			var (
				l   limiter.Limiter
				lk  sync.Mutex
				top SortablePosts
			)
			println("Starting parallel aggregation")
			l.Init(10) // At most 10 concurrent reducer requests at a time
			for _, f_ := range files {
				println("f=", f_.Owner().String())
				f := f_ // Copy the loop variable so the closure below captures its own value
				l.Go(func() { getReducerTop(f.Owner(), &lk, &top) })
			}
			l.Wait()
			sort.Sort(top) // Order the aggregated posts before truncating to the top ten
			top = top[:min(10, len(top))]
			println("Completed aggregation of", len(top), "best posts")
			// Print the global top ten
			fmt.Printf("Top ten, %s:\n", time.Now().Format(time.UnixDate))
			for i, p := range top {
				fmt.Printf("#% 2d: % 30s id=%d\n", i, p.Name, p.ID)
			}
		}
	})
}
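
Both aggregator examples truncate the result with a min helper, which is not a builtin for ints before Go 1.21; a sketch of the assumed definition:

// min returns the smaller of two ints; assumed helper for the truncation above.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}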
Example #6
func replenish(c *vena.Config, w *WorkerConfig, anchor string) (re bool, addr circuit.Addr, err error) {

	// Check if worker already running
	dir, e := anchorfs.OpenDir(anchor)
	if e != nil {
		return false, nil, e
	}
	_, files, err := dir.Files()
	if err != nil {
		return false, nil, e
	}
	if len(files) > 0 {
		return false, nil, nil
	}

	// If not, start a new worker
	if _, addr, err = circuit.Spawn(w.Host, []string{anchor}, start{}, c, w.HTTPPort, w.TSDBPort); err != nil {
		return false, nil, err
	}

	return true, addr, nil
}
Example #7
func killdir(dir string, recurse bool) error {
	d, err := anchorfs.OpenDir(dir)
	if err != nil {
		log.Printf("Problem opening directory (%s)", err)
		return err
	}

	// Recurse
	if recurse {
		dirs, err := d.Dirs()
		if err != nil {
			log.Printf("Problem listing directories in %s (%s)", dir, err)
			return err
		}
		for _, dd := range dirs {
			if err = killdir(path.Join(dir, dd), recurse); err != nil {
				return err
			}
		}
	}

	// Kill files
	_, files, err := d.Files()
	if err != nil {
		log.Printf("Problem listing files in %s (%s)", dir, err)
		return err
	}
	for _, f := range files {
		if err = worker.Kill(f.Owner()); err != nil {
			log.Printf("Problem killing %s (%s)", f.Owner(), err)
			return err
		}
		log.Printf("Killed %s", f.Owner())
	}

	return nil
}
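
A short usage sketch of killdir; the "/myapp" anchor path and the killAll wrapper are hypothetical, only killdir and its recurse flag come from the example above.

func killAll() {
	// Tear down every worker under a hypothetical "/myapp" anchor, recursively.
	if err := killdir("/myapp", true); err != nil {
		os.Exit(1)
	}
}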
Example #8
func (StartAggregator) Start(reducerAnchor string) {
	circuit.Daemonize(func() {
		for {
			time.Sleep(2 * time.Second)

			d, err := anchorfs.OpenDir(reducerAnchor)
			if err != nil {
				println("opendir:", err.Error())
				continue
			}
			_, files, err := d.Files()
			if err != nil {
				println("files:", err.Error())
				continue
			}
			var (
				l   limiter.Limiter
				lk  sync.Mutex
				top SortablePosts
			)
			println("Starting parallel aggregation")
			l.Init(10)
			for _, f_ := range files {
				println("f=", f_.Owner().String())
				f := f_ // Copy the loop variable so the closure below captures its own value
				l.Go(func() { getReducerTop(f.Owner(), &lk, &top) })
			}
			l.Wait()
			sort.Sort(top)
			top = top[:min(10, len(top))]
			println("Completed aggregation of", len(top), "best posts")
			fmt.Printf("Top ten, %s:\n", time.Now().Format(time.UnixDate))
			for i, p := range top {
				fmt.Printf("#% 2d: % 30s id=%d Score=%d\n", i, p.Name, p.ID, p.Score)
			}
		}
	})
}
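
sort.Sort(top) requires SortablePosts to implement sort.Interface, but neither it nor the post type is shown. A plausible sketch, with the Name, ID, and Score fields taken from the Printf calls above; descending Score order is an assumption so that the best posts come first:

// Post carries the fields printed by the aggregator; the concrete field types
// are assumptions.
type Post struct {
	ID    int64
	Name  string
	Score int
}

// SortablePosts orders posts by descending Score, so index 0 is the best post.
type SortablePosts []*Post

func (s SortablePosts) Len() int           { return len(s) }
func (s SortablePosts) Less(i, j int) bool { return s[i].Score > s[j].Score }
func (s SortablePosts) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }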