Example #1
// Open starts the Graphite input and begins processing incoming data.
func (s *Service) Open() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.logger.Printf("Starting graphite service, batch size %d, batch timeout %s", s.batchSize, s.batchTimeout)

	// Configure expvar monitoring. It's OK to do this even if the service fails to open and
	// should be done before any data could arrive for the service.
	tags := map[string]string{"proto": s.protocol, "bind": s.bindAddress}
	s.statMap = influxdb.NewStatistics(s.diagsKey, "graphite", tags)

	// Register diagnostics if a Monitor service is available.
	if s.Monitor != nil {
		s.Monitor.RegisterDiagnosticsClient(s.diagsKey, s)
	}

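	// Ensure the target database and retention policy exist before accepting writes.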
	if db := s.MetaClient.Database(s.database); db != nil {
		if rp, _ := s.MetaClient.RetentionPolicy(s.database, s.retentionPolicy); rp == nil {
			rpi := meta.NewRetentionPolicyInfo(s.retentionPolicy)
			if _, err := s.MetaClient.CreateRetentionPolicy(s.database, rpi); err != nil {
				s.logger.Printf("Failed to ensure target retention policy %s exists: %s", s.database, err.Error())
			}
		}
	} else {
		rpi := meta.NewRetentionPolicyInfo(s.retentionPolicy)
		if _, err := s.MetaClient.CreateDatabaseWithRetentionPolicy(s.database, rpi); err != nil {
			s.logger.Printf("Failed to ensure target database %s exists: %s", s.database, err.Error())
			return err
		}
	}

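	// Create and start the batcher that groups incoming points into batches.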
	s.batcher = tsdb.NewPointBatcher(s.batchSize, s.batchPending, s.batchTimeout)
	s.batcher.Start()

	// Start processing batches.
	s.wg.Add(1)
	go s.processBatches(s.batcher)

	var err error
	if strings.ToLower(s.protocol) == "tcp" {
		s.addr, err = s.openTCPServer()
	} else if strings.ToLower(s.protocol) == "udp" {
		s.addr, err = s.openUDPServer()
	} else {
		return fmt.Errorf("unrecognized Graphite input protocol %s", s.protocol)
	}
	if err != nil {
		return err
	}

	s.logger.Printf("Listening on %s: %s", strings.ToUpper(s.protocol), s.addr.String())
	return nil
}
Example #2
// Open starts the Graphite input and begins processing incoming data.
func (s *Service) Open() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.logger.Printf("Starting graphite service, batch size %d, batch timeout %s", s.batchSize, s.batchTimeout)

	// Register diagnostics if a Monitor service is available.
	if s.Monitor != nil {
		s.Monitor.RegisterDiagnosticsClient(s.diagsKey, s)
	}

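	// Ensure the target database and retention policy exist before accepting writes.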
	if db := s.MetaClient.Database(s.database); db != nil {
		if rp, _ := s.MetaClient.RetentionPolicy(s.database, s.retentionPolicy); rp == nil {
			rpi := meta.NewRetentionPolicyInfo(s.retentionPolicy)
			if _, err := s.MetaClient.CreateRetentionPolicy(s.database, rpi); err != nil {
				s.logger.Printf("Failed to ensure target retention policy %s exists: %s", s.database, err.Error())
			}
		}
	} else {
		rpi := meta.NewRetentionPolicyInfo(s.retentionPolicy)
		if _, err := s.MetaClient.CreateDatabaseWithRetentionPolicy(s.database, rpi); err != nil {
			s.logger.Printf("Failed to ensure target database %s exists: %s", s.database, err.Error())
			return err
		}
	}

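	// Create and start the batcher that groups incoming points into batches.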
	s.batcher = tsdb.NewPointBatcher(s.batchSize, s.batchPending, s.batchTimeout)
	s.batcher.Start()

	// Start processing batches.
	s.wg.Add(1)
	go s.processBatches(s.batcher)

	var err error
	if strings.ToLower(s.protocol) == "tcp" {
		s.addr, err = s.openTCPServer()
	} else if strings.ToLower(s.protocol) == "udp" {
		s.addr, err = s.openUDPServer()
	} else {
		return fmt.Errorf("unrecognized Graphite input protocol %s", s.protocol)
	}
	if err != nil {
		return err
	}

	s.logger.Printf("Listening on %s: %s", strings.ToUpper(s.protocol), s.addr.String())
	return nil
}
Example #3
// createInternalStorage ensures the internal storage has been created.
func (m *Monitor) createInternalStorage() {
	if m.storeCreated {
		return
	}

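	// Create the database that will hold internal monitoring data.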
	if _, err := m.MetaClient.CreateDatabase(m.storeDatabase); err != nil {
		m.Logger.Printf("failed to create database '%s', failed to create storage: %s",
			m.storeDatabase, err.Error())
		return
	}

	rpi := meta.NewRetentionPolicyInfo(MonitorRetentionPolicy)
	rpi.Duration = MonitorRetentionPolicyDuration
	rpi.ReplicaN = 1
	if _, err := m.MetaClient.CreateRetentionPolicy(m.storeDatabase, rpi); err != nil {
		m.Logger.Printf("failed to create retention policy '%s', failed to create internal storage: %s",
			rpi.Name, err.Error())
		return
	}

	if err := m.MetaClient.SetDefaultRetentionPolicy(m.storeDatabase, rpi.Name); err != nil {
		m.Logger.Printf("failed to set default retention policy on '%s', failed to create internal storage: %s",
			m.storeDatabase, err.Error())
		return
	}

	err := m.MetaClient.DropRetentionPolicy(m.storeDatabase, "default")
	if err != nil && err.Error() != influxdb.ErrRetentionPolicyNotFound("default").Error() {
		m.Logger.Printf("failed to delete retention policy 'default', failed to created internal storage: %s", err.Error())
		return
	}

	// Mark storage creation complete.
	m.storeCreated = true
}
Example #4
func (e *QueryExecutor) executeCreateDatabaseStatement(stmt *influxql.CreateDatabaseStatement) error {
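	// Without an explicit retention policy in the statement, create just the database.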
	if !stmt.RetentionPolicyCreate {
		_, err := e.MetaClient.CreateDatabase(stmt.Name)
		return err
	}

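	// Build the retention policy from the statement and create the database with it in one call.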
	rpi := meta.NewRetentionPolicyInfo(stmt.RetentionPolicyName)
	rpi.Duration = stmt.RetentionPolicyDuration
	rpi.ReplicaN = stmt.RetentionPolicyReplication
	_, err := e.MetaClient.CreateDatabaseWithRetentionPolicy(stmt.Name, rpi)
	return err
}
Example #5
func (e *QueryExecutor) executeCreateRetentionPolicyStatement(stmt *influxql.CreateRetentionPolicyStatement) error {
	rpi := meta.NewRetentionPolicyInfo(stmt.Name)
	rpi.Duration = stmt.Duration
	rpi.ReplicaN = stmt.Replication

	// Create new retention policy.
	if _, err := e.MetaClient.CreateRetentionPolicy(stmt.Database, rpi); err != nil {
		return err
	}

	// If requested, set new policy as the default.
	if stmt.Default {
		if err := e.MetaClient.SetDefaultRetentionPolicy(stmt.Database, stmt.Name); err != nil {
			return err
		}
	}
	return nil
}
Example #6
// createInternalStorage ensures the internal storage has been created.
func (m *Monitor) createInternalStorage() {
	if m.storeCreated {
		return
	}

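	// If the database doesn't exist, create it along with its retention policy in a single call.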
	if di := m.MetaClient.Database(m.storeDatabase); di == nil {
		rpi := meta.NewRetentionPolicyInfo(MonitorRetentionPolicy)
		rpi.Duration = MonitorRetentionPolicyDuration
		rpi.ReplicaN = 1

		if _, err := m.MetaClient.CreateDatabaseWithRetentionPolicy(m.storeDatabase, rpi); err != nil {
			m.Logger.Printf("failed to create database '%s', failed to create storage: %s",
				m.storeDatabase, err.Error())
			return
		}
	}

	// Mark storage creation complete.
	m.storeCreated = true
}
Example #7
func TestMetaService_DropDataNode_Reassign(t *testing.T) {
	t.Parallel()

	d, s, c := newServiceAndClient()
	defer os.RemoveAll(d)
	defer s.Close()
	defer c.Close()

	// Create a couple of nodes.
	n1, err := c.CreateDataNode("foo:8180", "bar:8181")
	if err != nil {
		t.Fatal(err)
	}

	n2, err := c.CreateDataNode("foo:8280", "bar:8281")
	if err != nil {
		t.Fatal(err)
	}

	// Create a retention policy with a replica factor of 1.
	rp := meta.NewRetentionPolicyInfo("rp0")
	rp.ReplicaN = 1

	// Create a database using rp0.
	if _, err := c.CreateDatabaseWithRetentionPolicy("foo", rp); err != nil {
		t.Fatal(err)
	}

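	// Create a shard group; with two data nodes and a replication factor of 1,
	// it gets one shard per node.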
	sg, err := c.CreateShardGroup("foo", "rp0", time.Now())
	if err != nil {
		t.Fatal(err)
	}

	// Dropping the first data server should result in the shard being
	// reassigned to the other node.
	if err := c.DeleteDataNode(n1.ID); err != nil {
		t.Fatal(err)
	}

	// Retrieve updated shard group data from the Meta Store.
	rp, _ = c.RetentionPolicy("foo", "rp0")
	sg = &rp.ShardGroups[0]

	// There should still be two shards.
	if got, exp := len(sg.Shards), 2; got != exp {
		t.Errorf("there are %d shards, but should be %d", got, exp)
	}

	// The second data node should be the owner of both shards.
	for _, s := range sg.Shards {
		if !reflect.DeepEqual(s.Owners, []meta.ShardOwner{{n2.ID}}) {
			t.Errorf("owners for shard are %v, expected %v", s.Owners, []meta.ShardOwner{{2}})
		}
	}

	// The shard group should not be marked as deleted because both
	// shards have an owner.
	if sg.Deleted() {
		t.Error("shard group marked as deleted, but shouldn't be")
	}
}