// Ensure the shard writer returns an error when we can't get a connection.
func TestShardWriter_Write_PoolMax(t *testing.T) {
	ts := newTestWriteService(writeShardSlow)
	s := cluster.NewService(cluster.Config{
		ShardWriterTimeout: toml.Duration(100 * time.Millisecond),
	})
	s.Listener = ts.muxln
	s.TSDBStore = &ts.TSDBStore
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	defer ts.Close()

	w := cluster.NewShardWriter(100*time.Millisecond, 1)
	w.MetaClient = &metaClient{host: ts.ln.Addr().String()}
	now := time.Now()

	shardID := uint64(1)
	ownerID := uint64(2)
	var points []models.Point
	points = append(points, models.MustNewPoint(
		"cpu", models.Tags{"host": "server01"},
		map[string]interface{}{"value": int64(100)},
		now,
	))

	go w.WriteShard(shardID, ownerID, points)
	time.Sleep(time.Millisecond)
	if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "timed out waiting for free connection" {
		t.Fatalf("unexpected error: %v", err)
	}
}
// Ensure the shard writer returns an error when dialing times out.
func TestShardWriter_Write_ErrDialTimeout(t *testing.T) {
	ts := newTestWriteService(nil)
	ts.TSDBStore.WriteToShardFn = ts.writeShardSuccess
	s := cluster.NewService(cluster.Config{})
	s.Listener = ts.muxln
	s.TSDBStore = &ts.TSDBStore
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	defer ts.Close()

	// Use a zero timeout so dialing times out consistently on all platforms.
	w := cluster.NewShardWriter(0, 1)
	w.MetaClient = &metaClient{host: ts.ln.Addr().String()}
	now := time.Now()

	shardID := uint64(1)
	ownerID := uint64(2)
	var points []models.Point
	points = append(points, models.MustNewPoint(
		"cpu", models.Tags{"host": "server01"},
		map[string]interface{}{"value": int64(100)},
		now,
	))

	if err, exp := w.WriteShard(shardID, ownerID, points), "i/o timeout"; err == nil || !strings.Contains(err.Error(), exp) {
		t.Fatalf("expected error %v to contain %q", err, exp)
	}
}
// Ensure the shard writer returns an error when the server fails to accept the write.
func TestShardWriter_WriteShard_Error(t *testing.T) {
	ts := newTestWriteService(writeShardFail)
	s := cluster.NewService(cluster.Config{})
	s.Listener = ts.muxln
	s.TSDBStore = &ts.TSDBStore
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	defer ts.Close()

	w := cluster.NewShardWriter(time.Minute, 1)
	w.MetaClient = &metaClient{host: ts.ln.Addr().String()}
	now := time.Now()

	shardID := uint64(1)
	ownerID := uint64(2)
	var points []models.Point
	points = append(points, models.MustNewPoint(
		"cpu", models.Tags{"host": "server01"},
		map[string]interface{}{"value": int64(100)},
		now,
	))

	if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "error code 1: write shard 1: failed to write" {
		t.Fatalf("unexpected error: %v", err)
	}
}
// Ensure the shard writer can successfully write multiple requests.
func TestShardWriter_WriteShard_Multiple(t *testing.T) {
	ts := newTestWriteService(nil)
	ts.TSDBStore.WriteToShardFn = ts.writeShardSuccess
	s := cluster.NewService(cluster.Config{})
	s.Listener = ts.muxln
	s.TSDBStore = &ts.TSDBStore
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	defer ts.Close()

	w := cluster.NewShardWriter(time.Minute, 1)
	w.MetaClient = &metaClient{host: ts.ln.Addr().String()}

	// Build a single point.
	now := time.Now()
	var points []models.Point
	points = append(points, models.MustNewPoint("cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now))

	// Write to shard twice and close.
	if err := w.WriteShard(1, 2, points); err != nil {
		t.Fatal(err)
	} else if err := w.WriteShard(1, 2, points); err != nil {
		t.Fatal(err)
	} else if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// Validate response.
	responses, err := ts.ResponseN(1)
	if err != nil {
		t.Fatal(err)
	} else if responses[0].shardID != 1 {
		t.Fatalf("unexpected shard id: %d", responses[0].shardID)
	}

	// Validate point.
	if p := responses[0].points[0]; p.Name() != "cpu" {
		t.Fatalf("unexpected name: %s", p.Name())
	} else if p.Fields()["value"] != int64(100) {
		t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"])
	} else if p.Tags()["host"] != "server01" {
		t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"])
	} else if p.Time().UnixNano() != now.UnixNano() {
		t.Fatalf("unexpected time: %s", p.Time())
	}
}
// Ensure the shard writer returns an error when reading times out.
func TestShardWriter_Write_ErrReadTimeout(t *testing.T) {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}

	w := cluster.NewShardWriter(time.Millisecond, 1)
	w.MetaClient = &metaClient{host: ln.Addr().String()}
	now := time.Now()

	shardID := uint64(1)
	ownerID := uint64(2)
	var points []models.Point
	points = append(points, models.MustNewPoint(
		"cpu", models.Tags{"host": "server01"},
		map[string]interface{}{"value": int64(100)},
		now,
	))

	if err := w.WriteShard(shardID, ownerID, points); err == nil || !strings.Contains(err.Error(), "i/o timeout") {
		t.Fatalf("unexpected error: %s", err)
	}
}
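// The tests above rely on a metaClient test double defined elsewhere in this
// package. As a minimal sketch (assuming ShardWriter.MetaClient only needs to
// resolve an owner node ID to the TCP address the test listener is bound to,
// which is all these tests exercise), it could look like this:
type metaClient struct {
	host string
}

// DataNode maps any node ID to the test listener's address.
func (m *metaClient) DataNode(nodeID uint64) (*meta.NodeInfo, error) {
	return &meta.NodeInfo{ID: nodeID, TCPHost: m.host}, nil
}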
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// We need to ensure that a meta directory always exists even if
	// we don't start the meta store. node.json is always stored under
	// the meta directory.
	if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {
		return nil, fmt.Errorf("mkdir all: %s", err)
	}

	// 0.10-rc1 and prior would sometimes put the node.json at the root
	// dir which breaks backup/restore and restarting nodes. This moves
	// the file from the root so it's always under the meta dir.
	oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json")
	newPath := filepath.Join(c.Meta.Dir, "node.json")

	if _, err := os.Stat(oldPath); err == nil {
		if err := os.Rename(oldPath, newPath); err != nil {
			return nil, err
		}
	}

	nodeAddr, err := meta.DefaultHost(DefaultHostname, c.Meta.HTTPBindAddress)
	if err != nil {
		return nil, err
	}

	// Load the node information.
	metaAddresses := []string{nodeAddr}
	if !c.Meta.Enabled {
		metaAddresses = c.Meta.JoinPeers
	}

	node, err := influxdb.LoadNode(c.Meta.Dir, metaAddresses)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		node = influxdb.NewNode(c.Meta.Dir, metaAddresses)
	}

	// In 0.10.0 bind-address got moved to the top level. Check
	// the old location to keep things backwards compatible.
	bind := c.BindAddress
	if c.Meta.BindAddress != "" {
		bind = c.Meta.BindAddress
	}

	if !c.Data.Enabled && !c.Meta.Enabled {
		return nil, fmt.Errorf("must run as either meta node or data node or both")
	}

	httpBindAddress, err := meta.DefaultHost(DefaultHostname, c.HTTPD.BindAddress)
	if err != nil {
		return nil, err
	}
	tcpBindAddress, err := meta.DefaultHost(DefaultHostname, bind)
	if err != nil {
		return nil, err
	}

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		Node: node,

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
		joinPeers:         c.Meta.JoinPeers,
		metaUseTLS:        c.Meta.HTTPSEnabled,

		httpAPIAddr: httpBindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     tcpBindAddress,

		config: c,
	}

	if c.Meta.Enabled {
		s.MetaService = meta.NewService(c.Meta)
	}

	if c.Data.Enabled {
		s.TSDBStore = tsdb.NewStore(c.Data.Dir)
		s.TSDBStore.EngineOptions.Config = c.Data

		// Copy TSDB configuration.
		s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
		s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
		s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
		s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

		// Initialize query executor.
		s.QueryExecutor = tsdb.NewQueryExecutor()
		s.QueryExecutor.Store = s.TSDBStore
		s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
		s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

		// Set the shard writer.
		s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout), c.Cluster.MaxRemoteWriteConnections)

		// Create the hinted handoff service.
		s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaClient)
		s.HintedHandoff.Monitor = s.Monitor

		// Create the Subscriber service.
		s.Subscriber = subscriber.NewService(c.Subscriber)

		// Initialize points writer.
		s.PointsWriter = cluster.NewPointsWriter()
		s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
		s.PointsWriter.TSDBStore = s.TSDBStore
		s.PointsWriter.ShardWriter = s.ShardWriter
		s.PointsWriter.HintedHandoff = s.HintedHandoff
		s.PointsWriter.Subscriber = s.Subscriber
		s.PointsWriter.Node = s.Node

		// Needed for executing INTO queries.
		s.QueryExecutor.IntoWriter = s.PointsWriter

		// Initialize the monitor.
		s.Monitor.Version = s.buildInfo.Version
		s.Monitor.Commit = s.buildInfo.Commit
		s.Monitor.Branch = s.buildInfo.Branch
		s.Monitor.BuildTime = s.buildInfo.Time
		s.Monitor.PointsWriter = s.PointsWriter
	}

	return s, nil
}
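// A minimal usage sketch for NewServer. This is hypothetical wiring (the real
// entry point lives in cmd/influxd); NewDemoConfig and the BuildInfo values
// below are assumptions for illustration, not part of the code above.
func ExampleNewServer() {
	c, err := NewDemoConfig() // assumed helper that fills in default directories
	if err != nil {
		log.Fatal(err)
	}

	s, err := NewServer(c, &BuildInfo{Version: "0.10.0", Commit: "unknown", Branch: "unknown"})
	if err != nil {
		log.Fatal(err)
	}
	if err := s.Open(); err != nil {
		log.Fatal(err)
	}
	defer s.Close()
}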