Example 1
// BuildTiers sets up tiers so they're ready to dispatch metrics
func BuildTiers(tiers *[]Tier) {
	// Initialise the error counts
	errorCounts.Add("buildtiers.dial", 0)

	for i, tier := range *tiers {
		// The consistent hashing function used to map sample hosts to targets
		(*tiers)[i].Hash = consistent.New()
		// Shadow names for targets, used to improve hash distribution
		(*tiers)[i].Shadows = make(map[string]string)
		// map that tracks all the UDP connections
		(*tiers)[i].Connections = make(map[string]net.Conn)
		// map that tracks all target -> host -> metric -> last dispatched relationships
		(*tiers)[i].Mappings = make(map[string]map[string]map[string]int64)
		// Set the virtual replica number from magical pre-computed values
		(*tiers)[i].SetMagicVirtualReplicaNumber(len(tier.Targets))

		// Populate ratio counters per tier
		distCounts.Set(tier.Name, new(expvar.Map).Init())

		for it, t := range tier.Targets {
			conn, err := net.Dial("udp", t)
			if err != nil {
				log.Printf("[warning] BuildTiers: Couldn't establish connection to '%s': %s", t, err)
				log.Printf("[warning] BuildTiers: Adding %s to hash anyway, so it's consistent.", t)
				errorCounts.Add("buildtiers.dial", 1)
			} else {
				// Warn when a target resolves to a local address, since that can loop metrics back to Coco
				re := regexp.MustCompile(`^(127\.|localhost)`)
				if re.FindStringIndex(conn.RemoteAddr().String()) != nil {
					log.Printf("[warning] BuildTiers: %s is local. You may be looping metrics back to Coco!", conn.RemoteAddr())
					log.Printf("[warning] BuildTiers: Dutifully adding %s to hash anyway, but beware of loops.", conn.RemoteAddr())
				}
			}
			(*tiers)[i].Connections[t] = conn
			(*tiers)[i].Mappings[t] = make(map[string]map[string]int64)
			// Setup a shadow mapping so we get a more even hash distribution
			shadow_t := strconv.Itoa(it)
			(*tiers)[i].Shadows[shadow_t] = t
			(*tiers)[i].Hash.Add(shadow_t)
			metricCounts.Set(t, &expvar.Int{})
			hostCounts.Set(t, &expvar.Int{})
		}
	}

	// Log how the hashes are set up
	for _, tier := range *tiers {
		hash := tier.Hash
		var targets []string
		for _, shadow_t := range hash.Members() {
			targets = append(targets, tier.Shadows[shadow_t])
		}
		log.Printf("[info] BuildTiers: tier '%s' hash ring has %d members: %s", tier.Name, len(hash.Members()), targets)
	}

	for _, tier := range *tiers {
		if len(tier.Connections) == 0 {
			log.Fatalf("[fatal] BuildTiers: no targets available in tier '%s'", tier.Name)
		}
	}
}
Example 2
// retransmitUsingConsistentHashing broadcasts the stats message to the single
// backend chosen by consistent hashing.
func retransmitUsingConsistentHashing(message string) {
	log.Printf("Retransmitting %s to the appropriate backend", message)

	cons := consistent.New()

	for _, server := range Backends {
		log.Printf("Adding %s to consistent hash ring", server)
		cons.Add(server)
	}

	// Carbon line format: "$key $value $timestamp"
	message_bits := strings.Fields(message)
	if len(message_bits) == 0 {
		log.Print("WARNING: Dropping empty message")
		return
	}

	// Get just the $key.
	stat_name := message_bits[0]
	log.Print("Determining backend for message based on ", stat_name)

	// Get which hashed server to use based on the stat name.
	hashed_server, err := cons.Get(stat_name)
	if err != nil {
		log.Print("WARNING: Couldn't pick a backend: ", err)
		return
	}
	log.Print("Chosen server: ", hashed_server)

	conn, err := net.Dial("tcp", hashed_server)
	if err != nil {
		log.Print("WARNING: Problem with TCP connection: ", err)
		return
	}

	defer conn.Close()

	// Send the message to the backend host.
	fmt.Fprint(conn, message)
}
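Note that the ring is rebuilt from Backends on every call. If the backend list is static, the ring could be built once at startup instead; a minimal sketch (ring and initRing are hypothetical names, not part of the original):

var ring *consistent.Consistent

// initRing builds the ring once, assuming Backends is already populated and
// does not change afterwards.
func initRing() {
	ring = consistent.New()
	for _, server := range Backends {
		ring.Add(server)
	}
}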
Example 3
// ConsistentHashFilter returns an operation filter which uses a consistent hash
// to determine whether an operation will be accepted for processing. It can be
// used to distribute work across workers.
// name:		the name of the worker creating this filter. e.g. "Harry"
// workers:		a slice of strings representing the available worker names
func ConsistentHashFilter(name string, workers []interface{}) (gtm.OpFilter, error) {
	if len(workers) == 0 {
		return nil, EmptyWorkers
	}
	found := false
	consist := consistent.New()
	for _, worker := range workers {
		next, ok := worker.(string)
		if !ok {
			return nil, InvalidWorkers
		}
		if next == name {
			found = true
		}
		consist.Add(next)
	}
	if !found {
		return nil, WorkerMissing
	}
	return func(op *gtm.Op) bool {
		var idStr string
		switch id := op.Id.(type) {
		case bson.ObjectId:
			idStr = id.Hex()
		default:
			idStr = fmt.Sprintf("%v", id)
		}
		who, err := consist.Get(idStr)
		if err != nil {
			return false
		}
		return name == who
	}, nil
}
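A brief usage sketch (the worker names are invented for illustration): every process builds the same filter with its own name, so exactly one worker accepts any given operation.

filter, err := ConsistentHashFilter("Harry", []interface{}{"Harry", "Sally"})
if err != nil {
	log.Fatal(err)
}
// Pass filter as the gtm.OpFilter when configuring the oplog tail.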
Example 4
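// NewExecutor returns an Executor with an empty consistent hash ring and no
// registered workers.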
func NewExecutor() *Executor {
	e := &Executor{
		c:       consistent.New(),
		workers: make(map[string]*Worker),
	}

	return e
}
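The constructor only allocates the ring and the worker map. A minimal sketch of how the two would typically be kept in sync (AddWorker is hypothetical, not part of the original Executor):

// AddWorker registers a worker and adds its name to the ring so that hashed
// lookups can route to it.
func (e *Executor) AddWorker(name string, w *Worker) {
	e.workers[name] = w
	e.c.Add(name)
}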
Example 5
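// NewHasher seeds a hasher with the given Loggregator servers and panics if
// none are provided.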
func NewHasher(loggregatorServers []string) (h *hasher) {
	if len(loggregatorServers) == 0 {
		panic("Hasher must be seeded with one or more Loggregator Servers")
	}

	c := consistent.New()
	c.Set(loggregatorServers)

	return &hasher{c: c}
}
Example 6
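// Init builds an admin RPC client for every configured node and registers each
// node name on the consistent hash ring.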
func (r *AdminRpcs) Init() (err error) {
	r.Config = Conf.Server.RpcAdmin
	r.CHash = consistent.New()
	r.adminRpc = make(map[string]*AdminRpc)
	for _, node := range r.Config {
		adminRpc := NewAdminRpc()
		if err = adminRpc.Init(node); err != nil {
			return
		}
		r.adminRpc[node.Name] = adminRpc
		r.CHash.Add(node.Name)
	}
	if len(r.adminRpc) == 0 {
		return ErrAdminRpc
	}
	return nil
}
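A hypothetical lookup helper (not in the original) showing how the ring and the adminRpc map built by Init work together:

// Pick routes a key to the admin RPC client whose node owns it on the ring.
func (r *AdminRpcs) Pick(key string) (*AdminRpc, error) {
	name, err := r.CHash.Get(key)
	if err != nil {
		return nil, err
	}
	return r.adminRpc[name], nil
}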
Example 7
func TestVariancePermutations(t *testing.T) {
	lines, err := ioutil.ReadFile("hosts.txt")
	if err != nil {
		t.Fatalf("Couldn't read test data: %s", err)
	}
	hosts := strings.Split(strings.TrimSpace(string(lines)), "\n")

	maxSites := 100
	maxReplicas := 100

	for s := 2; s <= maxSites; s++ {
		for i := 1; i <= maxReplicas; i++ {
			// Initialize the mappings and consistent hasher
			mapping := make(map[string][]string, len(hosts))
			con := consistent.New()
			con.NumberOfReplicas = i

			// Add members to the circle
			for n := 0; n < s; n++ {
				con.Add(strconv.Itoa(n))
			}

			// Build the mapping of hosts onto targets
			buildMapping(mapping, hosts, con)

			var data []int
			for _, objects := range mapping {
				data = append(data, len(objects))
			}
			sort.Ints(data)
			max := float64(data[len(data)-1])
			min := float64(data[0])
			variance := max / min

			// Print results
			t.Logf("{\"sites\":%d,\"replicas\":%d,\"variance\":%.4f}\n", s, con.NumberOfReplicas, variance)
		}
	}
}
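The test depends on a buildMapping helper that is not shown. A minimal sketch, assuming it groups each host under the target the ring assigns to it:

func buildMapping(mapping map[string][]string, hosts []string, con *consistent.Consistent) {
	for _, host := range hosts {
		// Ask the ring which target owns this host and record the grouping.
		target, err := con.Get(host)
		if err != nil {
			continue
		}
		mapping[target] = append(mapping[target], host)
	}
}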
Example 8
func main() {
	runtime.GOMAXPROCS(4)
	debug.SetGCPercent(90)
	//	go freemem()

	rand.Seed(time.Now().Unix())
	// parse config
	flag.Parse()
	if flag.NFlag() != 4 {
		fmt.Printf("usage: cacher -config config_file -log log_file -port listen_port -deltaPort delta_port\n")
		flag.PrintDefaults()
		os.Exit(1)
	}

	logger := startLogger(*logFile)

	var config mylib.Config
	if _, err := os.Stat(*confFile); os.IsNotExist(err) {
		logger.Printf("no such file: %s, loading default\n", *confFile)
		config = mylib.Load("")
	} else {
		config = mylib.Load(*confFile)
		logger.Printf("using %s as config file\n", *confFile)
	}

	// set hash ring object
	r := consistent.New()
	// set up monitoring
	mon := new(mylib.Mmon)
	// spawn db writers and fill hash ring object
	workers := startWorkers(config, r, mon)

	var boss mylib.Boss
	deltaChan := make(chan string, 5000000)
	// create Boss var (used to hide tons of vars in functions stack)
	boss.Senders = workers
	boss.Rf = config.Rf
	boss.Ring = r
	boss.Single = 0
	boss.Port = *listenPort
	boss.DeltaChan = deltaChan
	// if we have a single host, then we can ignore the hash ring mess
	// and do simple rr rotation of senders
	if len(boss.Ring.Members()) == 1 {
		boss.Single = 1
	}
	// start delta manager
	if config.EnableDelta > 0 {
		logger.Printf("Delta enabled on %s", *deltaPort)
		go delta.DeltaManager(deltaChan, workers, *deltaPort, boss, logger)

		// FIXIT
		// deltaPort is legacy option and now used for debugging purposes
		go func() {
			http.ListenAndServe(":"+*deltaPort, nil)
		}()
		// FIXIT
	} else {
		go delta.BogusDelta(deltaChan)
	}

	go monitor(mon, boss)

	// start listener
	ln, err := net.Listen("tcp", ":"+*listenPort)
	if err != nil {
		logger.Fatalf("Unable to start listener, %v\n", err)
	}
	logger.Printf("Started on %s port\n", *listenPort)
	logger.Printf("worker chanLimit %d\n", config.ChanLimit)

	// main loop
	for {
		conn, err := ln.Accept()
		if err != nil {
			logger.Printf("Failed to accept connection, %v\n", err)
			continue
		}
		// received new connection
		go process_connection(conn, boss, mon)
		atomic.AddInt32(&mon.Conn, 1)
		atomic.AddInt64(&con_alive, 1)
	}
}
Example 9
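// init seeds the global drives ring with every drive from the configuration.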
func init() {
	drives = consistent.New()
	for _, drive := range GetConfig().Drives {
		drives.Add(drive)
	}
}
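A matching lookup sketch (driveFor is hypothetical): Get returns the ring member responsible for a key.

// driveFor returns the drive that owns the given object key.
func driveFor(key string) (string, error) {
	return drives.Get(key)
}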