Example #1
0
// Start creates the vena front-end service on the given HTTP and TSDB ports
// and returns a permanent cross-runtime reference to it. The worker is
// daemonized so it stays alive indefinitely after Start returns.
func (start) Start(c *vena.Config, httpPort, tsdbPort int) circuit.XPerm {
	srv := New(c, httpPort, tsdbPort)
	// Receiving from a nil channel blocks forever, pinning the worker alive.
	circuit.Daemonize(func() { <-(chan int)(nil) })
	return circuit.PermRef(srv)
}
// StartMapper.Start is a worker function that launches a mapper worker.
// firehoseConfig specifies the credentials for connecting to the Tumblr Firehose.
// reducer is an array of circuit cross-runtime pointers, listing all available reducers.
func (StartMapper) Start(firehoseConfig *firehose.Request, reducer []circuit.X) {
	circuit.Daemonize(func() {
		// Guard against an empty reducer set: the modulus below would
		// otherwise panic with a division by zero on the first event.
		if len(reducer) == 0 {
			println("no reducers available; mapper worker exiting")
			return
		}
		f := firehose.Redial(firehoseConfig)
		var n int64
		// Repeat forever: Read an event from the Firehose and pass it on to an appropriate reducer
		for {
			event := f.Read()
			p := &Post{}
			if event.Post != nil {
				p.ID = event.Post.ID
				p.Name = event.Post.BlogName
			}
			if event.Like != nil {
				p.ID = event.Like.RootPostID
			}
			p.Score = 1
			// Pick a reducer by ID. Go's % can yield a negative remainder for
			// a negative ID, which would panic as a slice index — normalize it.
			i := int(p.ID % int64(len(reducer)))
			if i < 0 {
				i += len(reducer)
			}
			// XXX panic-protect: a failed cross-runtime Call (dead reducer) still panics here.
			reducer[i].Call("Add", p)
			n++
			if n%100 == 0 {
				println("Consumed", n, "events from the firehose")
			}
		}
	})
}
Example #3
0
// Main is App's only public method.
// The name of this method and its signature (arguments and their types and
// return values and their types) are up to you.
// It schedules a delayed greeting on a daemonized goroutine and immediately
// returns the current time to the caller.
func (App) Main(suffix string) time.Time {
	greet := func() {
		fmt.Printf("Waiting ...\n")
		time.Sleep(30 * time.Second)
		fmt.Printf("Hello %s\n", suffix)
	}
	circuit.Daemonize(greet)
	return time.Now()
}
Example #4
0
// Start initializes the durable service and returns a permanent
// cross-runtime reference to it, or an error if initialization fails.
func (start) Start(durableFile string, port int, readOnly bool) (circuit.XPerm, error) {
	svc, err := New(durableFile, port, readOnly)
	if err != nil {
		return nil, err
	}
	// Daemonize this worker forever, i.e. worker should never die:
	// a receive on a nil channel never completes.
	circuit.Daemonize(func() {
		<-(chan int)(nil)
	})
	return circuit.PermRef(svc), nil
}
Example #5
0
// Open exports a file server for the local file at filepath and returns a
// cross-runtime reference to it. On open failure it returns nil; the error
// itself is discarded (NOTE(review): callers cannot distinguish failure
// causes — consider surfacing the error).
func (App) Open(filepath string) circuit.X {
	f, err := os.Open(filepath)
	if err != nil {
		return nil
	}
	// Keep this worker alive briefly so the caller can use the reference.
	keepAlive := func() { time.Sleep(5 * time.Second) }
	circuit.Daemonize(keepAlive)
	return circuit.Ref(file.NewFileServer(f))
}
Example #6
0
// Start launches a mapper worker: it (re)dials the Firehose and polls it
// forever, handing each batch of events to the given reducers.
func (StartMapper) Start(firehoseConfig *firehose.Request, reducer []circuit.X) {
	circuit.Daemonize(func() {
		conn := firehose.Redial(firehoseConfig)
		// Loop indefinitely; poll dispatches events to the reducers.
		for {
			poll(reducer, conn)
		}
	})
}
Example #7
0
// Main starts a sumr shard server.
// diskpath is a directory path on the local file system, where the function is executed,
// where the shard will persist its data. forgetafter bounds how long entries
// are retained. It returns a permanent cross-runtime reference to the shard.
func (main) Main(diskpath string, forgetafter time.Duration) (circuit.XPerm, error) {
	shard, err := New(diskpath, forgetafter)
	if err != nil {
		return nil, err
	}
	// Never let this worker exit: block the daemon goroutine forever.
	circuit.Daemonize(func() {
		<-(chan int)(nil)
	})
	return circuit.PermRef(shard), nil
}
Example #8
0
// Start creates the storage backend, registers it as the "vena" service on
// this worker, pins the worker alive forever, and returns a permanent
// cross-runtime reference to the backend.
func (start) Start(dbDir string, cacheSize int) circuit.XPerm {
	backend, err := New(dbDir, cacheSize)
	if err != nil {
		// Worker functions surface fatal initialization errors by panicking.
		panic(err)
	}
	circuit.Listen("vena", backend)
	circuit.Daemonize(func() {
		<-(chan int)(nil)
	})
	return circuit.PermRef(backend)
}
// StartReducer.Start is a worker function that initializes a new Reducer and
// returns a cross-runtime pointer to it.
func (StartReducer) Start() circuit.X {
	// Build the reducer with its post map (indexed by post ID) in one literal.
	red := &Reducer{
		m: make(map[int64]*Post),
	}
	// Register Reducer as a public service that can be accessed on this worker.
	circuit.Listen("reducer-service", red)
	// Background goroutine that maintains the top ten posts on this reducer.
	circuit.Daemonize(red.maintainTop)
	// Make the pointer to the Reducer object exportable and return it.
	return circuit.Ref(red)
}
Example #10
0
// Start builds a Reducer backed by a sliding-window post map and an LLRB
// ranking tree, publishes it as "reducer-service" on this worker, and
// returns an exportable cross-runtime reference to it.
func (StartReducer) Start() circuit.X {
	red := &Reducer{
		m:    make(map[int64]*SlidingPost),
		rank: llrb.New(SlidingPostLess),
	}
	circuit.Listen("reducer-service", red)
	// Keep the local top list maintained in the background.
	circuit.Daemonize(red.maintainTop)
	return circuit.Ref(red)
}
Example #11
0
// StartAggregator.Start is a worker function that starts an infinite loop,
// which polls all reducers for their local top ten posts, computes the global
// top ten posts, and prints them out.
// NOTE: requires the standard library "sort" package to be imported.
func (StartAggregator) Start(reducerAnchor string) {
	circuit.Daemonize(func() {
		for {
			time.Sleep(2 * time.Second)

			// Read anchor directory containing all live reducers
			d, err := anchorfs.OpenDir(reducerAnchor)
			if err != nil {
				println("opendir:", err.Error())
				continue
			}
			// List all anchor files; they correspond to circuit workers hosting Reducer objects
			_, files, err := d.Files()
			if err != nil {
				println("files:", err.Error())
				continue
			}
			// Fetch top ten posts from each reducer, in parallel
			var (
				l   limiter.Limiter
				lk  sync.Mutex
				top SortablePosts
			)
			println("Starting parallel aggregation")
			l.Init(10) // At most 10 concurrent reducer requests at a time
			for _, f_ := range files {
				println("f=", f_.Owner().String())
				// Capture a per-iteration copy: the closure below must not
				// share the loop variable across iterations (pre-Go 1.22).
				f := f_
				l.Go(func() { getReducerTop(f.Owner(), &lk, &top) })
			}
			l.Wait()
			// Rank the merged results before truncating. Without this sort the
			// slice below keeps the first ten responses in arrival order, not
			// the ten best posts.
			sort.Sort(top)
			top = top[:min(10, len(top))]
			println("Completed aggregation of", len(top), "best posts")
			// Print the global top ten
			fmt.Printf("Top ten, %s:\n", time.Now().Format(time.UnixDate))
			for i, p := range top {
				fmt.Printf("#% 2d: % 30s id=%d\n", i, p.Name, p.ID)
			}
		}
	})
}
Example #12
0
// Start runs an endless aggregation loop: every two seconds it discovers the
// live reducers through the anchor file system, gathers each one's local top
// posts in parallel (at most ten requests in flight), sorts the union, and
// prints the global top ten with scores.
func (StartAggregator) Start(reducerAnchor string) {
	circuit.Daemonize(func() {
		for {
			time.Sleep(2 * time.Second)

			// Enumerate live reducers via their anchor directory.
			dir, err := anchorfs.OpenDir(reducerAnchor)
			if err != nil {
				println("opendir:", err.Error())
				continue
			}
			_, anchors, err := dir.Files()
			if err != nil {
				println("files:", err.Error())
				continue
			}
			// Shared accumulator for the parallel fetches below.
			var (
				lim limiter.Limiter
				mu  sync.Mutex
				top SortablePosts
			)
			println("Starting parallel aggregation")
			lim.Init(10)
			for _, a := range anchors {
				println("f=", a.Owner().String())
				// Snapshot the owner per iteration so the closure does not
				// share the loop variable across iterations (pre-Go 1.22).
				owner := a.Owner()
				lim.Go(func() { getReducerTop(owner, &mu, &top) })
			}
			lim.Wait()
			// Rank the merged results, then keep at most the ten best.
			sort.Sort(top)
			top = top[:min(10, len(top))]
			println("Completed aggregation of", len(top), "best posts")
			fmt.Printf("Top ten, %s:\n", time.Now().Format(time.UnixDate))
			for rank, p := range top {
				fmt.Printf("#% 2d: % 30s id=%d Score=%d\n", rank, p.Name, p.ID, p.Score)
			}
		}
	})
}