Example 1
func Spawn(config *vena.Config) {
	lmtr := limiter.New(20) // at most 20 concurrent spawns
	for _, sh_ := range config.Shard {
		sh := sh_ // copy the loop variable for the closure below
		lmtr.Go(func() { spawn(sh, config.ShardAnchor(sh.Key)) })
	}
	lmtr.Wait()
}
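All of these examples lean on the same small concurrency-limiting API: New(n) caps the number of tasks in flight, Go runs a task once a slot frees up, Wait blocks until everything started so far has finished, and Open/Close (used in later examples) claim and release a slot manually. The package internals are not shown on this page; the following is only a minimal sketch of how such a limiter could be built on a buffered-channel semaphore, not the package's actual implementation.

// Sketch only: one plausible limiter with the interface used in these examples.
package limiter

import "sync"

// Limiter caps how many tasks may run concurrently.
type Limiter struct {
	sem chan struct{}  // one slot per allowed concurrent task
	wg  sync.WaitGroup // tracks outstanding tasks for Wait
}

// New returns a Limiter that admits at most n concurrent tasks.
func New(n int) *Limiter {
	return &Limiter{sem: make(chan struct{}, n)}
}

// Open blocks until a slot is free, then claims it.
func (l *Limiter) Open() {
	l.sem <- struct{}{}
	l.wg.Add(1)
}

// Close releases a slot previously claimed with Open.
func (l *Limiter) Close() {
	<-l.sem
	l.wg.Done()
}

// Go claims a slot and runs f in its own goroutine, releasing the slot when f returns.
func (l *Limiter) Go(f func()) {
	l.Open()
	go func() {
		defer l.Close()
		f()
	}()
}

// Wait blocks until every task started so far has completed.
func (l *Limiter) Wait() {
	l.wg.Wait()
}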
Example 2
func (t *Listener) loop() {
	lmtr := limiter.New(MaxParallelHandshakes)
	for {
		c, err := t.listener.AcceptTCP()
		if err != nil {
			panic(err) // Best not to be quiet about it
		}
		lmtr.Go(func() { t.accept(c) })
	}
}
Example 3
func Install(i *config.InstallConfig, b *config.BuildConfig, hosts []string) {
	l := limiter.New(limitParallelTasks)
	for _, host_ := range hosts {
		host := host_
		l.Go(func() {
			fmt.Printf("Installing on %s\n", host)
			if err := installHost(i, b, host); err != nil {
				fmt.Fprintf(os.Stderr, "Issue on %s: %s\n", host, err)
			}
		})
	}
	l.Wait()
}
Example 4
// SumBatch sends a batch of SUM requests to the sumr database
func (cli *Client) SumBatch(batch []SumRequest) []float64 {
	var lk sync.Mutex // guards writes to r
	r := make([]float64, len(batch))

	blmtr := limiter.New(10)
	for i_, a_ := range batch {
		i, a := i_, a_
		blmtr.Go(func() {
			q := cli.Sum(a.Key)
			lk.Lock()
			r[i] = q
			lk.Unlock()
		})
	}
	blmtr.Wait()
	return r
}
Example 5
// AddBatch sends a batch of ADD requests to the sumr database
func (cli *Client) AddBatch(batch []AddRequest) []float64 {
	var lk sync.Mutex
	r := make([]float64, len(batch))

	blmtr := limiter.New(10)
	for i_, a_ := range batch {
		i, a := i_, a_
		blmtr.Go(func() {
			q := cli.Add(a.UpdateTime, a.Key, a.Value)
			lk.Lock()
			r[i] = q
			lk.Unlock()
		})
	}
	blmtr.Wait()
	return r
}
Example 6
func (t *worker) schedule() {
	println("Scheduling")
	lmtr := limiter.New(MaxOutstandingRequests)
	for {
		var job *request
		select {
		case job = <-t.fwdch:
		case job = <-t.apich:
		}

		lmtr.Open() // blocks while MaxOutstandingRequests jobs are in flight
		go func(job *request) {
			defer lmtr.Close()
			job.ReturnResponse(t.fwd.Forward(job.Query, job.Source == "fwd"))
		}(job)
	}
}
Example 7
func (t *worker) schedule() {
	println("Scheduling")

	var (
		lk   sync.Mutex
		nxqb int64 // requests begun
		nxqe int64 // requests completed
	)

	lmtr := limiter.New(MaxOutstandingRequests)
	for {
		var job interface{}
		select {
		case job = <-t.fCreate:
		case job = <-t.hCreate:
			//println("+ Processing HTTP-originated XCreatePost")
		case job = <-t.xCreate:
		case job = <-t.xQuery:
			//println("+ Processing X-originated XTimelineQuery")
		case job = <-t.hQuery:
			//println("+ Processing HTTP-originated XTimelineQuery")
		}
		lk.Lock()
		nxqb++
		lk.Unlock()

		lmtr.Open()
		go func(job interface{}) {
			defer lmtr.Close()
			switch q := job.(type) {
			case *createRequest:
				q.ReturnResponse(t.fwd.Forward(q.Post, q.Forwarded))
			case *queryRequest:
				q.ReturnResponse(t.srv.Query(q.Query))
			default:
				panic("naah")
			}
			lk.Lock()
			nxqe++
			if nxqb%1000 == 0 {
				println("+ Finished", nxqe, "/", nxqb)
			}
			lk.Unlock()
		}(job)
	}
}
Example 8
func listenTSDB(addr string, reply Replier) {
	l, err := net.Listen("tcp", addr)
	if err != nil {
		panic(err)
	}
	// Accept incoming requests
	go func() {
		lmtr := limiter.New(100) // At most 100 concurrent connections
		for {
			lmtr.Open() // wait for a free connection slot before accepting
			conn, err := l.Accept()
			if err != nil {
				panic(err)
			}
			// Serve individual connection
			go func() {
				defer lmtr.Close()
				defer conn.Close()
				// Recover from panics in reply logic; recover only stops a panic when called inside a deferred function
				defer func() { recover() }()
				// Read request, send reply
				r := bufio.NewReader(conn)
				for {
					line, err := r.ReadString('\n')
					if err != nil {
						println("read line", err.Error())
						break
					}
					cmd, err := parse(line)
					if err != nil {
						println("parse", err.Error())
						break
					}
					if cmd == nil {
						continue
					}
					switch p := cmd.(type) {
					case diediedie:
						reply.DieDieDie()
					case *put:
						reply.Put(p.Time, p.Metric, p.Tags, p.Value)
					}
				}
			}()
		}
	}()
}
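parse, put, and diediedie are defined elsewhere in this codebase. Purely for illustration, here is a hypothetical sketch of what they might look like, assuming an OpenTSDB-style text protocol in which a line is either "die" or "put metric timestamp value tag=value ..."; the real wire format and field types may differ.

// Hypothetical sketch only; the line format and field types are assumptions.
package tsdb

import (
	"errors"
	"strconv"
	"strings"
	"time"
)

type diediedie struct{} // shutdown command

type put struct {
	Time   time.Time
	Metric string
	Tags   map[string]string
	Value  float64
}

// parse turns one text line into a command, or nil if the line is empty or unrecognized.
func parse(line string) (interface{}, error) {
	f := strings.Fields(line)
	if len(f) == 0 {
		return nil, nil
	}
	switch f[0] {
	case "die":
		return diediedie{}, nil
	case "put":
		if len(f) < 4 {
			return nil, errors.New("short put line")
		}
		sec, err := strconv.ParseInt(f[2], 10, 64)
		if err != nil {
			return nil, err
		}
		val, err := strconv.ParseFloat(f[3], 64)
		if err != nil {
			return nil, err
		}
		tags := make(map[string]string)
		for _, kv := range f[4:] {
			if i := strings.Index(kv, "="); i > 0 {
				tags[kv[:i]] = kv[i+1:]
			}
		}
		return &put{Time: time.Unix(sec, 0), Metric: f[1], Tags: tags, Value: val}, nil
	}
	return nil, nil
}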
Example 9
// New creates a new server instance backed by a local directory in diskPath.
// Keys not updated for forgetAfter duration are evicted from the in-memory replica of the shard's data.
// Keys not in memory are not reflected in read operations.
func New(diskPath string, forgetAfter time.Duration) (*Server, error) {
	s := &Server{}

	if err := os.MkdirAll(diskPath, 0700); err != nil {
		return nil, err
	}

	// Mount disk
	disk, err := diskfs.Mount(diskPath, false)
	if err != nil {
		return nil, err
	}
	// Make db block
	if s.block, err = block.NewBlock(disk, forgetAfter); err != nil {
		return nil, err
	}
	// Prepare incoming call rate-limiter
	s.lmtr = limiter.New(10)
	return s, nil
}
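The limiter stored in s.lmtr presumably bounds concurrent incoming calls to the server. Under that assumption, a hypothetical wrapper (handle is not part of the original source) would gate each call like this:

// Hypothetical illustration of gating each incoming call on the server's limiter.
func (s *Server) handle(do func()) {
	s.lmtr.Open() // blocks while 10 calls are already in flight
	defer s.lmtr.Close()
	do()
}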
Example 10
func (srv *DashboardServer) Query(xq *proto.XDashboardQuery) ([]*proto.Post, error) {
	if xq.Limit < 1 {
		return nil, errors.New("zero query limit")
	}
	// Retrieve followed timelines from network service
	var err error
	follows := xq.Follows
	if follows == nil {
		if follows, err = srv.Follows(xq.DashboardID); err != nil {
			return nil, err
		}
	}
	// Concurrently fetch the most recent limit+1 posts from each followed timeline.
	// Put them in the results slice.
	var rlk sync.Mutex
	var err0 error
	results := make([]*proto.Post, 0, xq.Limit*len(follows))
	l := limiter.New(MaxConcurrentTimelineQueries)
	for _, followedTimelineID := range follows {
		followedID := followedTimelineID
		l.Go(func() {
			posts, err := srv.queryTimeline(followedID, xq.BeforePostID, xq.Limit)
			rlk.Lock()
			defer rlk.Unlock()
			if err != nil {
				if err0 == nil {
					err0 = err
				}
				return
			}
			results = append(results, posts...)
		})
	}
	l.Wait()
	// Sort all results from most recent to least (descending PostID)
	sort.Sort(proto.ChronoPosts(results))
	return results[:min(xq.Limit, len(results))], err0
}
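The final slice expression relies on an integer min. On Go 1.21 or later the built-in min covers this (assuming xq.Limit is an int); on older toolchains the source presumably carries a small helper along these lines:

// min returns the smaller of two ints.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}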