Beispiel #1
0
// scan polls the store once per r.Interval. On each tick it tries to
// lock one partition; if the lock is taken it skips this tick. Buckets
// whose resolution window is still open are forwarded to r.Inbox,
// otherwise they are put back for a later pass. The partition lock is
// released at the end of every tick.
func (r *BucketReader) scan() {
	// Idiomatic bare range: the tick value itself is unused.
	for range time.Tick(r.Interval) {
		//TODO(ryandotsmith): It is a shame that we have to lock
		//for each interval. It would be great if we could get a lock
		//and work for like 1,000 intervals and then relock.
		p, err := utils.LockPartition(r.Partition, r.Store.MaxPartitions(), r.Ttl)
		if err != nil {
			// Another process holds the partition; try again next tick.
			continue
		}
		partition := fmt.Sprintf("outlet.%d", p)
		for bucket := range r.Store.Scan(partition) {
			// A bucket is considered deliverable while its id's
			// timestamp is still inside the resolution window.
			valid := time.Now().Add(bucket.Id.Resolution)
			//TODO(ryandotsmith): This seems ripe for a lua script.
			//The goal would to be receive data from scan that is sure
			//to be valid.
			if bucket.Id.Time.Before(valid) {
				r.Inbox <- bucket
			} else {
				// Not deliverable yet: return it to the store so a
				// future scan can pick it up.
				if err := r.Store.Putback(partition, bucket.Id); err != nil {
					fmt.Printf("error=%s\n", err)
				}
			}
		}
		utils.UnlockPartition(fmt.Sprintf("%s.%d", r.Partition, p))
	}
}
Beispiel #2
0
// fetch locks a librato_outlet partition, then drains every bucket
// found in that partition's mailbox into the inbox channel. The
// process exits if no partition lock can be acquired.
func fetch(t time.Time, inbox chan<- *store.Bucket) {
	partition, err := utils.LockPartition("librato_outlet", numPartitions, lockTTL)
	if err != nil {
		log.Fatal("Unable to lock partition.")
	}
	fmt.Printf("at=start_fetch minute=%d\n", t.Minute())
	mailbox := fmt.Sprintf("librato_outlet.%d", partition)
	for b := range store.ScanBuckets(mailbox) {
		inbox <- b
	}
}
Beispiel #3
0
// fetch acquires a librato_outlet partition lock and forwards every
// bucket scanned from the matching outlet mailbox into inbox. Failure
// to obtain a lock is fatal for the process.
func fetch(t time.Time, inbox chan<- *bucket.Bucket) {
	partition, err := utils.LockPartition("librato_outlet", numPartitions, lockTTL)
	if err != nil {
		log.Fatal("Unable to lock partition.")
	}
	fmt.Printf("at=start_fetch minute=%d\n", t.Minute())
	//TODO(ryandotsmith): Ensure consistent keys are being written.
	mailbox := fmt.Sprintf("outlet.%d", partition)
	for b := range rs.Scan(mailbox) {
		inbox <- b
	}
}
Beispiel #4
0
// Start runs forever, scanning the store once per r.Interval. Each
// tick it attempts to lock a partition (skipping the tick if the lock
// is held elsewhere), hydrates each scanned bucket via the store, and
// sends it downstream on out.
func (r *BucketReader) Start(out chan<- *bucket.Bucket) {
	// Idiomatic bare range: the tick value itself is unused.
	for range time.Tick(r.Interval) {
		p, err := utils.LockPartition(r.Partition, r.Store.MaxPartitions(), r.Ttl)
		if err != nil {
			// Could not acquire the partition lock; retry next tick.
			continue
		}
		partition := fmt.Sprintf("outlet.%d", p)
		for bucket := range r.Store.Scan(partition) {
			// NOTE(review): Get presumably fills the bucket with its
			// stored values; any error it might return is ignored
			// here — confirm against the Store implementation.
			r.Store.Get(bucket)
			out <- bucket
		}
	}
}
Beispiel #5
0
// fetch measures its own runtime, locks a postgres_outlet partition,
// and pushes every bucket scanned from that partition's mailbox onto
// outbox. An unobtainable lock terminates the process.
func fetch(t time.Time, outbox chan<- *store.Bucket) {
	fmt.Printf("at=start_fetch minute=%d\n", t.Minute())
	defer utils.MeasureT("postgres_outlet.fetch", time.Now())

	partition, err := utils.LockPartition("postgres_outlet", numPartitions, lockTTL)
	if err != nil {
		log.Fatal("Unable to lock partition.")
	}

	mailbox := fmt.Sprintf("postgres_outlet.%d", partition)
	for b := range store.ScanBuckets(mailbox) {
		outbox <- b
	}
}
Beispiel #6
0
// main wires up the postgres outlet: it claims a partition, starts the
// fetcher and a pool of bucket handlers, then blocks forever reporting
// channel metrics.
func main() {
	var err error
	partitionId, err = utils.LockPartition(pg, "postgres_outlet", numPartitions)
	if err != nil {
		log.Fatal("Unable to lock partition.")
	}

	// Buffered so the fetcher is not blocked by slow handlers.
	outbox := make(chan *store.Bucket, 1000)
	go scheduleFetch(outbox)

	// A pool of workers consumes buckets concurrently.
	for w := 0; w < workers; w++ {
		go handleBuckets(outbox)
	}

	// Print channel metrics & live forever.
	report(outbox)
}
Beispiel #7
0
// main assembles the librato outlet pipeline:
// fetch -> convert -> batch -> post, then reports channel metrics forever.
func main() {
	var err error
	partitionId, err = utils.LockPartition(pg, "librato_outlet", numPartitions)
	if err != nil {
		log.Fatal("Unable to lock partition.")
	}

	// Empty buckets waiting to be processed. Buffered so the fetch
	// routine is not slowed down by downstream consumers.
	inbox := make(chan *store.Bucket, 1000)

	// The converter fills each bucket with its values and turns it
	// into a librato metric, which lands here.
	lms := make(chan *LM, 1000)

	// Batched librato metrics ready for HTTP submission. The batch
	// routine guarantees each batch is homogeneous with respect to its
	// token, so metrics are routed to the correct librato account.
	outbox := make(chan []*LM, 1000)

	// Read ids from the database and feed them into the inbox.
	go scheduleFetch(inbox)

	// Convert empty buckets into librato metrics, in parallel.
	for w := 0; w < workers; w++ {
		go scheduleConvert(inbox, lms)
	}

	// Serializing metric collections is cheap; one goroutine suffices.
	go batch(lms, outbox)

	// Posting involves database reads and HTTP requests, so run
	// several posters in parallel.
	for w := 0; w < workers; w++ {
		go post(outbox)
	}

	// Print channel metrics & live forever.
	report(inbox, lms, outbox)
}