func (self *FilteringSuite) TestReturnAllColumnsIfAskedForWildcard(c *C) { queryStr := "select * from t where column_one == 100 and column_two != 6;" query, err := parser.ParseQuery(queryStr) c.Assert(err, IsNil) series, err := common.StringToSeriesArray(` [ { "points": [ {"values": [{"int64_value": 100},{"int64_value": 5 }], "timestamp": 1381346631, "sequence_number": 1}, {"values": [{"int64_value": 100},{"int64_value": 6 }], "timestamp": 1381346631, "sequence_number": 1}, {"values": [{"int64_value": 90 },{"int64_value": 15}], "timestamp": 1381346632, "sequence_number": 1} ], "name": "t", "fields": ["column_one", "column_two"] } ] `) c.Assert(err, IsNil) result, err := Filter(query, series[0]) c.Assert(err, IsNil) c.Assert(result, NotNil) c.Assert(result.Points, HasLen, 1) c.Assert(result.Fields, HasLen, 2) c.Assert(result.Points[0].Values, HasLen, 2) }
func (self *DatastoreSuite) TestPropagateErrorsProperly(c *C) { cleanup(nil) db := newDatastore(c) defer cleanup(db) mock := ` { "points": [ { "values": [ { "int64_value": 3 } ], "sequence_number": 1 } ], "name": "foo", "fields": ["value"] }` pointTime := time.Now().Unix() series := stringToSeries(mock, pointTime, c) err := db.WriteSeriesData("test", series) c.Assert(err, IsNil) q, err := parser.ParseQuery("select value from foo;") c.Assert(err, IsNil) yield := func(series *protocol.Series) error { return fmt.Errorf("Whatever") } user := &MockUser{} err = db.ExecuteQuery(user, "test", q, yield) c.Assert(err, ErrorMatches, "Whatever") }
func (self *FilteringSuite) TestInequalityFiltering(c *C) { queryStr := "select * from t where column_one >= 100 and column_two > 6 and time > now() - 1d;" query, err := parser.ParseQuery(queryStr) c.Assert(err, IsNil) series, err := common.StringToSeriesArray(` [ { "points": [ {"values": [{"int64_value": 100},{"int64_value": 7 }], "timestamp": 1381346631, "sequence_number": 1}, {"values": [{"int64_value": 100},{"int64_value": 6 }], "timestamp": 1381346631, "sequence_number": 1}, {"values": [{"int64_value": 90 },{"int64_value": 15}], "timestamp": 1381346632, "sequence_number": 1} ], "name": "t", "fields": ["column_one", "column_two"] } ] `) c.Assert(err, IsNil) result, err := Filter(query, series[0]) c.Assert(err, IsNil) c.Assert(result, NotNil) c.Assert(result.Points, HasLen, 1) c.Assert(*result.Points[0].Values[0].Int64Value, Equals, int64(100)) c.Assert(*result.Points[0].Values[1].Int64Value, Equals, int64(7)) }
func (self *QueryEngine) RunQuery(user common.User, database string, query string, yield func(*protocol.Series) error) (err error) { // don't let a panic pass beyond RunQuery defer func() { if err := recover(); err != nil { fmt.Fprintf(os.Stderr, "********************************BUG********************************\n") buf := make([]byte, 1024) n := runtime.Stack(buf, false) fmt.Fprintf(os.Stderr, "Database: %s\n", database) fmt.Fprintf(os.Stderr, "Query: [%s]\n", query) fmt.Fprintf(os.Stderr, "Error: %s. Stacktrace: %s\n", err, string(buf[:n])) err = common.NewQueryError(common.InternalError, "Internal Error") } }() q, err := parser.ParseQuery(query) if err != nil { return err } if isAggregateQuery(q) { return self.executeCountQueryWithGroupBy(user, database, q, yield) } else { return self.distributeQuery(user, database, q, yield) } return nil }
func (self *CoordinatorSuite) TestShouldQuerySequentially(c *C) { end := time.Now().Truncate(24 * time.Hour) start := end.Add(-7 * 24 * time.Hour) shard := cluster.NewShard(1, start, end, cluster.SHORT_TERM, false, nil) shards := []*cluster.ShardData{shard} coordinator := NewCoordinatorImpl(&configuration.Configuration{ ClusterMaxResponseBufferSize: 1000, }, nil, nil) queries := map[string]bool{ "list series": false, "select count(foo) from /.*bar.*/ group by time(1d)": true, "select count(foo) from bar": true, "select foo from bar": true, "select count(foo) from bar group by baz": true, "select count(foo) from bar group by time(1d)": false, "select count(foo) from bar group by time(3d)": true, } for query, result := range queries { fmt.Printf("Testing %s\n", query) parsedQuery, err := parser.ParseQuery(query) c.Assert(err, IsNil) c.Assert(parsedQuery, HasLen, 1) querySpec := parser.NewQuerySpec(nil, "", parsedQuery[0]) c.Assert(coordinator.shouldQuerySequentially(shards, querySpec), Equals, result) } }
func (self *QueryEngine) RunQuery(user common.User, database string, queryString string, yield func(*protocol.Series) error) (err error) { // don't let a panic pass beyond RunQuery defer recoverFunc(database, queryString) q, err := parser.ParseQuery(queryString) if err != nil { return err } for _, query := range q { if query.DeleteQuery != nil { if err := self.coordinator.DeleteSeriesData(user, database, query.DeleteQuery); err != nil { return err } continue } selectQuery := query.SelectQuery if isAggregateQuery(selectQuery) { return self.executeCountQueryWithGroupBy(user, database, selectQuery, yield) } else if containsArithmeticOperators(selectQuery) { return self.executeArithmeticQuery(user, database, selectQuery, yield) } else { return self.distributeQuery(user, database, selectQuery, yield) } } return nil }
// TestCanWriteAndRetrievePoints writes two points to series "foo" and reads
// them back, asserting reverse-sequence ordering, timestamps in microseconds,
// the values, and a trailing empty "end of series" marker.
func (self *DatastoreSuite) TestCanWriteAndRetrievePoints(c *C) {
	cleanup(nil)
	db := newDatastore(c)
	defer cleanup(db)
	mock := ` { "points": [ { "values": [ { "int64_value": 3 } ], "sequence_number": 1 }, { "values": [ { "int64_value": 2 } ], "sequence_number": 2 } ], "name": "foo", "fields": ["value"] }`
	pointTime := time.Now().Unix()
	series := stringToSeries(mock, pointTime, c)
	err := db.WriteSeriesData("test", series)
	c.Assert(err, IsNil)
	q, errQ := parser.ParseQuery("select value from foo;")
	c.Assert(errQ, IsNil)
	// Collect every yielded series, including the empty end-of-series marker.
	resultSeries := []*protocol.Series{}
	yield := func(series *protocol.Series) error {
		resultSeries = append(resultSeries, series)
		return nil
	}
	user := &MockUser{}
	err = db.ExecuteQuery(user, "test", q, yield)
	c.Assert(err, IsNil)
	// we should get the actual data and the end of series data
	// indicator , i.e. a series with no points
	c.Assert(resultSeries, HasLen, 2)
	c.Assert(resultSeries[0].Points, HasLen, 2)
	c.Assert(resultSeries[0].Fields, HasLen, 1)
	// Points come back newest-sequence first (2, then 1), both at pointTime.
	c.Assert(*resultSeries[0].Points[0].SequenceNumber, Equals, uint32(2))
	c.Assert(*resultSeries[0].Points[1].SequenceNumber, Equals, uint32(1))
	c.Assert(*resultSeries[0].Points[0].GetTimestampInMicroseconds(), Equals, pointTime*1000000)
	c.Assert(*resultSeries[0].Points[1].GetTimestampInMicroseconds(), Equals, pointTime*1000000)
	c.Assert(*resultSeries[0].Points[0].Values[0].Int64Value, Equals, int64(2))
	c.Assert(*resultSeries[0].Points[1].Values[0].Int64Value, Equals, int64(3))
	c.Assert(resultSeries[1].Points, HasLen, 0)
	c.Assert(resultSeries[1].Fields, HasLen, 1)
	c.Assert(resultSeries, Not(DeepEquals), series)
}
// TestCanSelectFromARegex writes three series (user_things, response_times,
// other_things), verifies each can be read back individually, then selects
// with the regex /.*things/ and expects exactly the two "*things" series.
func (self *DatastoreSuite) TestCanSelectFromARegex(c *C) {
	cleanup(nil)
	db := newDatastore(c)
	defer cleanup(db)
	mock := `{ "points":[ {"values":[{"int64_value":3},{"string_value":"paul"}],"sequence_number":2}, {"values":[{"int64_value":1},{"string_value":"todd"}],"sequence_number":1}], "name":"user_things", "fields":["count", "name"] }`
	series := stringToSeries(mock, time.Now().Unix(), c)
	err := db.WriteSeriesData("foobar", series)
	c.Assert(err, IsNil)
	user := &MockUser{}
	results := executeQuery(user, "foobar", "select count, name from user_things;", db, c)
	c.Assert(results[0], DeepEquals, series)
	mock = `{ "points":[{"values":[{"double_value":10.1}],"sequence_number":23}], "name":"response_times", "fields":["ms"] }`
	responseSeries := stringToSeries(mock, time.Now().Unix(), c)
	err = db.WriteSeriesData("foobar", responseSeries)
	c.Assert(err, IsNil)
	results = executeQuery(user, "foobar", "select ms from response_times;", db, c)
	c.Assert(results[0], DeepEquals, responseSeries)
	mock = `{ "points":[{"values":[{"string_value":"NY"}],"sequence_number":23}, {"values":[{"string_value":"CO"}],"sequence_number":20}], "name":"other_things", "fields":["state"] }`
	otherSeries := stringToSeries(mock, time.Now().Unix(), c)
	err = db.WriteSeriesData("foobar", otherSeries)
	c.Assert(err, IsNil)
	results = executeQuery(user, "foobar", "select state from other_things;", db, c)
	c.Assert(results[0], DeepEquals, otherSeries)
	// Regex select: should match user_things and other_things, but not
	// response_times.
	q, errQ := parser.ParseQuery("select * from /.*things/;")
	c.Assert(errQ, IsNil)
	resultSeries := make([]*protocol.Series, 0)
	yield := func(series *protocol.Series) error {
		// skip the empty end-of-series indicators
		if len(series.Points) > 0 {
			resultSeries = append(resultSeries, series)
		}
		return nil
	}
	err = db.ExecuteQuery(user, "foobar", q, yield)
	c.Assert(err, IsNil)
	c.Assert(resultSeries, HasLen, 2)
	// Series appear in lexical order: other_things before user_things.
	c.Assert(resultSeries[0], DeepEquals, otherSeries)
	c.Assert(resultSeries[1], DeepEquals, series)
}
func (self *DatastoreSuite) TestCanDeleteRangeOfDataFromRegex(c *C) { cleanup(nil) db := newDatastore(c) defer cleanup(db) mock := `{ "points":[ {"values":[{"int64_value":3},{"string_value":"paul"}],"sequence_number":2}, {"values":[{"int64_value":1},{"string_value":"todd"}],"sequence_number":1} ], "name":"events", "fields":["count","name"] }` series := stringToSeries(mock, time.Now().Unix(), c) err := db.WriteSeriesData("foobar", series) c.Assert(err, IsNil) user := &MockUser{} results := executeQuery(user, "foobar", "select count, name from events;", db, c) c.Assert(results[0], DeepEquals, series) mock = `{ "points":[{"values":[{"double_value":10.1}],"sequence_number":23}], "name":"response_times", "fields":["ms"] }` responseSeries := stringToSeries(mock, time.Now().Unix(), c) err = db.WriteSeriesData("foobar", responseSeries) c.Assert(err, IsNil) results = executeQuery(user, "foobar", "select ms from response_times;", db, c) c.Assert(results[0], DeepEquals, responseSeries) mock = `{ "points":[{"values":[{"double_value":232.1}],"sequence_number":23}, {"values":[{"double_value":10.1}],"sequence_number":20}], "name":"queue_time", "fields":["processed_time"] }` otherSeries := stringToSeries(mock, time.Now().Unix(), c) err = db.WriteSeriesData("foobar", otherSeries) c.Assert(err, IsNil) results = executeQuery(user, "foobar", "select processed_time from queue_time;", db, c) c.Assert(results[0], DeepEquals, otherSeries) queries, _ := parser.ParseQuery("delete from /.*time.*/ where time > now() - 1h") db.DeleteSeriesData("foobar", queries[0].DeleteQuery) results = executeQuery(user, "foobar", "select * from events;", db, c) c.Assert(results[0], DeepEquals, series) results = executeQuery(user, "foobar", "select * from response_times;", db, c) c.Assert(results, HasLen, 0) results = executeQuery(user, "foobar", "select * from queue_time;", db, c) c.Assert(results, HasLen, 0) }
func (self *CoordinatorImpl) handleReplayRequest(r *protocol.Request, replicationFactor *uint8, owningServerId *uint32) { err := self.datastore.LogRequestAndAssignSequenceNumber(r, replicationFactor, owningServerId) if err != nil { log.Error("Error writing waiting requests after replay: %s", err) } if *r.Type == protocol.Request_PROXY_WRITE || *r.Type == protocol.Request_REPLICATION_WRITE { log.Debug("Replaying write request") self.datastore.WriteSeriesData(*r.Database, r.Series) } else if *r.Type == protocol.Request_PROXY_DELETE || *r.Type == protocol.Request_REPLICATION_DELETE { query, _ := parser.ParseQuery(*r.Query) err = self.datastore.DeleteSeriesData(*r.Database, query[0].DeleteQuery) } }
// TestCanWriteAndRetrievePointsWithAlias self-joins "foo" under two aliases
// (f1, f2) and checks both aliases yield the same two data points plus an
// empty end-of-series marker each.
func (self *DatastoreSuite) TestCanWriteAndRetrievePointsWithAlias(c *C) {
	cleanup(nil)
	db := newDatastore(c)
	defer cleanup(db)
	mock := ` { "points": [ { "values": [ { "int64_value": 3 } ], "sequence_number": 1 }, { "values": [ { "int64_value": 2 } ], "sequence_number": 2 } ], "name": "foo", "fields": ["value"] }`
	pointTime := time.Now().Unix()
	series := stringToSeries(mock, pointTime, c)
	err := db.WriteSeriesData("test", series)
	c.Assert(err, IsNil)
	q, errQ := parser.ParseQuery("select * from foo as f1 inner join foo as f2;")
	c.Assert(errQ, IsNil)
	// Bucket the yielded series by alias name (f1 / f2).
	resultSeries := map[string][]*protocol.Series{}
	yield := func(series *protocol.Series) error {
		resultSeries[*series.Name] = append(resultSeries[*series.Name], series)
		return nil
	}
	user := &MockUser{}
	err = db.ExecuteQuery(user, "test", q, yield)
	c.Assert(err, IsNil)
	// we should get the actual data and the end of series data
	// indicator , i.e. a series with no points
	c.Assert(resultSeries, HasLen, 2)
	c.Assert(resultSeries["f1"], HasLen, 2)
	c.Assert(resultSeries["f1"][0].Points, HasLen, 2)
	c.Assert(resultSeries["f1"][1].Points, HasLen, 0)
	c.Assert(resultSeries["f2"], HasLen, 2)
	c.Assert(resultSeries["f2"][0].Points, HasLen, 2)
	c.Assert(resultSeries["f2"][1].Points, HasLen, 0)
}
func executeQuery(user common.User, database, query string, db Datastore, c *C) []*protocol.Series { q, errQ := parser.ParseQuery(query) c.Assert(errQ, IsNil) resultSeries := []*protocol.Series{} yield := func(series *protocol.Series) error { // ignore time series which have no data, this includes // end of series indicator if len(series.Points) > 0 { resultSeries = append(resultSeries, series) } return nil } err := db.ExecuteQuery(user, database, q, yield) c.Assert(err, IsNil) return resultSeries }
// TestCheckWriteAccess verifies that a regex range-delete by a user lacking
// write access to one matched series fails, deletes only what the user may
// write (user_things), and leaves the protected series (other_things) intact.
func (self *DatastoreSuite) TestCheckWriteAccess(c *C) {
	cleanup(nil)
	db := newDatastore(c)
	defer cleanup(db)
	mock := `{ "points":[ {"values":[{"int64_value":3},{"string_value":"paul"}],"sequence_number":2}, {"values":[{"int64_value":1},{"string_value":"todd"}],"sequence_number":1}], "name":"user_things", "fields":["count", "name"] }`
	series := stringToSeries(mock, time.Now().Unix(), c)
	err := db.WriteSeriesData("foobar", series)
	c.Assert(err, IsNil)
	mock = `{ "points":[{"values":[{"string_value":"NY"}],"sequence_number":23}, {"values":[{"string_value":"CO"}],"sequence_number":20}], "name":"other_things", "fields":["state"] }`
	otherSeries := stringToSeries(mock, time.Now().Unix(), c)
	err = db.WriteSeriesData("foobar", otherSeries)
	c.Assert(err, IsNil)
	// This user may not write to other_things, so the regex delete below must
	// partially fail.
	user := &MockUser{
		dbCannotWrite: map[string]bool{"other_things": true},
	}
	regex, _ := regexp.Compile(".*")
	err = db.DeleteRangeOfRegex(user, "foobar", regex, time.Now().Add(-time.Hour), time.Now())
	c.Assert(err, ErrorMatches, ".*one or more.*")
	q, errQ := parser.ParseQuery("select * from /.*things/;")
	c.Assert(errQ, IsNil)
	resultSeries := make([]*protocol.Series, 0)
	yield := func(series *protocol.Series) error {
		// skip empty end-of-series indicators
		if len(series.Points) > 0 {
			resultSeries = append(resultSeries, series)
		}
		return nil
	}
	err = db.ExecuteQuery(user, "foobar", q, yield)
	c.Assert(err, IsNil)
	// Only the protected series should still have data.
	c.Assert(resultSeries, HasLen, 1)
	c.Assert(resultSeries[0], DeepEquals, otherSeries)
}
func (self *ProtobufRequestHandler) handleQuery(request *protocol.Request, conn net.Conn) { // the query should always parse correctly since it was parsed at the originating server. queries, err := parser.ParseQuery(*request.Query) if err != nil || len(queries) < 1 { log.Error("Error parsing query: ", err) errorMsg := fmt.Sprintf("Cannot find user %s", *request.UserName) response := &protocol.Response{Type: &endStreamResponse, ErrorMessage: &errorMsg, RequestId: request.Id} self.WriteResponse(conn, response) return } query := queries[0] var user common.User if *request.IsDbUser { user = self.clusterConfig.GetDbUser(*request.Database, *request.UserName) } else { user = self.clusterConfig.GetClusterAdmin(*request.UserName) } if user == nil { errorMsg := fmt.Sprintf("Cannot find user %s", *request.UserName) response := &protocol.Response{Type: &accessDeniedResponse, ErrorMessage: &errorMsg, RequestId: request.Id} self.WriteResponse(conn, response) return } shard := self.clusterConfig.GetLocalShardById(*request.ShardId) querySpec := parser.NewQuerySpec(user, *request.Database, query) responseChan := make(chan *protocol.Response) if querySpec.IsDestructiveQuery() { go shard.HandleDestructiveQuery(querySpec, request, responseChan, true) } else { go shard.Query(querySpec, responseChan) } for { response := <-responseChan response.RequestId = request.Id self.WriteResponse(conn, response) if response.GetType() == protocol.Response_END_STREAM || response.GetType() == protocol.Response_ACCESS_DENIED { return } } }
// TestBreaksLargeResultsIntoMultipleBatches writes 100k points (50k writes of
// a 2-point series with freshly bumped sequence numbers) and checks that a
// full select streams them back split into several batches, without losing
// any point.
func (self *DatastoreSuite) TestBreaksLargeResultsIntoMultipleBatches(c *C) {
	cleanup(nil)
	db := newDatastore(c)
	defer cleanup(db)
	mock := `{ "points":[ {"values":[{"double_value":23.1},{"string_value":"paul"}],"sequence_number":2}, {"values":[{"double_value":56.8},{"string_value":"todd"}],"sequence_number":1}], "name":"user_things", "fields":["response_time","name"] }`
	series := stringToSeries(mock, time.Now().Unix(), c)
	sequence := 0
	writtenPoints := 0
	for i := 0; i < 50000; i++ {
		// Mutate the shared fixture in place: give each point a fresh,
		// unique sequence number before re-writing the same series.
		for _, p := range series.Points {
			sequence += 1
			s := uint32(sequence)
			p.SequenceNumber = &s
		}
		writtenPoints += 2
		err := db.WriteSeriesData("foobar", series)
		c.Assert(err, IsNil)
	}
	q, errQ := parser.ParseQuery("select * from user_things;")
	c.Assert(errQ, IsNil)
	resultSeries := make([]*protocol.Series, 0)
	yield := func(series *protocol.Series) error {
		resultSeries = append(resultSeries, series)
		return nil
	}
	user := &MockUser{}
	err := db.ExecuteQuery(user, "foobar", q, yield)
	c.Assert(err, IsNil)
	// The result must be batched (more than one series) but not degenerate
	// (bounded above), and the total point count must match what was written.
	c.Assert(len(resultSeries), InRange, 2, 20)
	pointCount := 0
	for _, s := range resultSeries {
		pointCount += len(s.Points)
	}
	c.Assert(pointCount, Equals, writtenPoints)
}
func (self *FilteringSuite) TestFilteringNonExistentColumn(c *C) { queryStr := "select * from t where column_one == 100 and column_two != 6" query, err := parser.ParseQuery(queryStr) c.Assert(err, IsNil) series, err := common.StringToSeriesArray(` [ { "points": [ {"values": [{"int64_value": 100}], "timestamp": 1381346631, "sequence_number": 1}, {"values": [{"int64_value": 90 }], "timestamp": 1381346632, "sequence_number": 1} ], "name": "t", "fields": ["column_one"] } ] `) c.Assert(err, IsNil) _, err = Filter(query, series[0]) c.Assert(err, NotNil) }
func (self *DatastoreSuite) TestCanDeleteARangeOfData(c *C) { cleanup(nil) db := newDatastore(c) defer cleanup(db) minutesAgo := time.Now().Add(-5 * time.Minute).Unix() mock := `{ "points":[ {"values":[{"int64_value":3},{"string_value":"paul"}],"sequence_number":2}, {"values":[{"int64_value":1},{"string_value":"todd"}],"sequence_number":1}], "name":"user_things", "fields":["count","name"] }` series := stringToSeries(mock, minutesAgo, c) err := db.WriteSeriesData("foobar", series) c.Assert(err, IsNil) user := &MockUser{} results := executeQuery(user, "foobar", "select count, name from user_things;", db, c) c.Assert(results[0], DeepEquals, series) mock = `{ "points":[ {"values":[{"int64_value":3},{"string_value":"john"}],"sequence_number":1}], "name":"user_things", "fields":["count","name"] }` series = stringToSeries(mock, time.Now().Unix(), c) err = db.WriteSeriesData("foobar", series) c.Assert(err, IsNil) results = executeQuery(user, "foobar", "select count, name from user_things;", db, c) c.Assert(results[0].Points, HasLen, 3) queries, _ := parser.ParseQuery("delete from user_things where time > now() - 1h and time < now() - 1m") err = db.DeleteSeriesData("foobar", queries[0].DeleteQuery) c.Assert(err, IsNil) results = executeQuery(user, "foobar", "select count, name from user_things;", db, c) c.Assert(results[0].Points, HasLen, 1) c.Assert(results[0], DeepEquals, series) }
func (self *FilteringSuite) TestNotRegexFiltering(c *C) { queryStr := "select * from t where column_one !~ /.*foo.*/ and time > now() - 1d;" query, err := parser.ParseQuery(queryStr) c.Assert(err, IsNil) series, err := common.StringToSeriesArray(` [ { "points": [ {"values": [{"string_value": "100"}], "timestamp": 1381346631, "sequence_number": 1}, {"values": [{"string_value": "foobar"}], "timestamp": 1381346631, "sequence_number": 1} ], "name": "t", "fields": ["column_one"] } ] `) c.Assert(err, IsNil) result, err := Filter(query, series[0]) c.Assert(err, IsNil) c.Assert(result, NotNil) c.Assert(result.Points, HasLen, 1) c.Assert(*result.Points[0].Values[0].StringValue, Equals, "100") }
func (self *DatastoreSuite) TestDeletingData(c *C) { cleanup(nil) db := newDatastore(c) defer cleanup(db) mock := ` { "points": [ { "values": [ { "int64_value": 3 } ], "sequence_number": 1 } ], "name": "foo", "fields": ["value"] }` pointTime := time.Now().Unix() series := stringToSeries(mock, pointTime, c) err := db.WriteSeriesData("test", series) c.Assert(err, IsNil) q, err := parser.ParseQuery("select value from foo;") c.Assert(err, IsNil) yield := func(series *protocol.Series) error { if len(series.Points) > 0 { panic("Series contains points") } return nil } c.Assert(db.DropDatabase("test"), IsNil) user := &MockUser{} err = db.ExecuteQuery(user, "test", q, yield) c.Assert(err, ErrorMatches, ".*Field value doesn't exist.*") }
func (self *FilteringSuite) TestFilteringWithJoin(c *C) { queryStr := "select * from t as bar inner join t as foo where bar.column_one == 100 and foo.column_two != 6;" query, err := parser.ParseQuery(queryStr) c.Assert(err, IsNil) series, err := common.StringToSeriesArray(` [ { "points": [ {"values": [{"int64_value": 100},{"int64_value": 5 }], "timestamp": 1381346631, "sequence_number": 1}, {"values": [{"int64_value": 100},{"int64_value": 6 }], "timestamp": 1381346631, "sequence_number": 1}, {"values": [{"int64_value": 90 },{"int64_value": 15}], "timestamp": 1381346632, "sequence_number": 1} ], "name": "foo_join_bar", "fields": ["bar.column_one", "foo.column_two"] } ] `) c.Assert(err, IsNil) result, err := Filter(query, series[0]) c.Assert(err, IsNil) c.Assert(result, NotNil) // no filtering should happen for join queries c.Assert(result.Points, HasLen, 1) }
// HandleRequest dispatches an incoming protobuf request by type.
//
// PROXY_* requests originate from a client-facing server: they are logged,
// applied locally, acknowledged over conn, and then replicated to peers.
// REPLICATION_* requests come from peers: they are logged and applied, and a
// missing-sequence gap triggers an asynchronous replay instead of an error.
// QUERY / LIST_SERIES are handled on their own goroutines; REPLICATION_REPLAY
// and SEQUENCE_NUMBER are handled inline. Unknown types are an error.
func (self *ProtobufRequestHandler) HandleRequest(request *protocol.Request, conn net.Conn) error {
	if *request.Type == protocol.Request_PROXY_WRITE {
		response := &protocol.Response{RequestId: request.Id, Type: &self.writeOk}
		request.OriginatingServerId = &self.clusterConfig.localServerId
		// TODO: make request logging and datastore write atomic
		replicationFactor := self.clusterConfig.GetReplicationFactor(request.Database)
		err := self.db.LogRequestAndAssignSequenceNumber(request, &replicationFactor, request.OwnerServerId)
		if err != nil {
			return err
		}
		err = self.db.WriteSeriesData(*request.Database, request.Series)
		if err != nil {
			return err
		}
		err = self.WriteResponse(conn, response)
		// TODO: add quorum writes?
		self.coordinator.ReplicateWrite(request)
		return err
	} else if *request.Type == protocol.Request_PROXY_DROP_SERIES {
		response := &protocol.Response{RequestId: request.Id, Type: &self.writeOk}
		request.OriginatingServerId = &self.clusterConfig.localServerId
		replicationFactor := uint8(*request.ReplicationFactor)
		// TODO: make request logging and datastore write atomic
		err := self.db.LogRequestAndAssignSequenceNumber(request, &replicationFactor, request.OwnerServerId)
		if err != nil {
			return err
		}
		err = self.db.DropSeries(*request.Database, *request.Series.Name)
		if err != nil {
			return err
		}
		err = self.WriteResponse(conn, response)
		self.coordinator.ReplicateWrite(request)
		return err
	} else if *request.Type == protocol.Request_REPLICATION_DROP_SERIES {
		replicationFactor := uint8(*request.ReplicationFactor)
		// TODO: make request logging and datastore write atomic
		err := self.db.LogRequestAndAssignSequenceNumber(request, &replicationFactor, request.OwnerServerId)
		if err != nil {
			switch err := err.(type) {
			case datastore.SequenceMissingRequestsError:
				// A gap in the replication log: replay missing requests
				// asynchronously and report success for this one.
				log.Warn("Missing sequence number error: Request SN: %v Last Known SN: %v", request.GetSequenceNumber(), err.LastKnownRequestSequence)
				go self.coordinator.ReplayReplication(request, &replicationFactor, request.OwnerServerId, &err.LastKnownRequestSequence)
				return nil
			default:
				return err
			}
		}
		return self.db.DropSeries(*request.Database, *request.Series.Name)
	} else if *request.Type == protocol.Request_PROXY_DROP_DATABASE {
		response := &protocol.Response{RequestId: request.Id, Type: &self.writeOk}
		request.OriginatingServerId = &self.clusterConfig.localServerId
		replicationFactor := uint8(*request.ReplicationFactor)
		// TODO: make request logging and datastore write atomic
		err := self.db.LogRequestAndAssignSequenceNumber(request, &replicationFactor, request.OwnerServerId)
		if err != nil {
			return err
		}
		err = self.db.DropDatabase(*request.Database)
		if err != nil {
			return err
		}
		err = self.WriteResponse(conn, response)
		self.coordinator.ReplicateWrite(request)
		return err
	} else if *request.Type == protocol.Request_REPLICATION_DROP_DATABASE {
		replicationFactor := uint8(*request.ReplicationFactor)
		// TODO: make request logging and datastore write atomic
		err := self.db.LogRequestAndAssignSequenceNumber(request, &replicationFactor, request.OwnerServerId)
		if err != nil {
			switch err := err.(type) {
			case datastore.SequenceMissingRequestsError:
				log.Warn("Missing sequence number error: Request SN: %v Last Known SN: %v", request.GetSequenceNumber(), err.LastKnownRequestSequence)
				go self.coordinator.ReplayReplication(request, &replicationFactor, request.OwnerServerId, &err.LastKnownRequestSequence)
				return nil
			default:
				return err
			}
		}
		return self.db.DropDatabase(*request.Database)
	} else if *request.Type == protocol.Request_PROXY_DELETE {
		response := &protocol.Response{RequestId: request.Id, Type: &self.writeOk}
		request.OriginatingServerId = &self.clusterConfig.localServerId
		// TODO: make request logging and datastore write atomic
		replicationFactor := self.clusterConfig.GetReplicationFactor(request.Database)
		err := self.db.LogRequestAndAssignSequenceNumber(request, &replicationFactor, request.OwnerServerId)
		if err != nil {
			return err
		}
		query, err := parser.ParseQuery(*request.Query)
		if err != nil {
			return err
		}
		err = self.db.DeleteSeriesData(*request.Database, query[0].DeleteQuery)
		if err != nil {
			return err
		}
		err = self.WriteResponse(conn, response)
		// TODO: add quorum writes?
		self.coordinator.ReplicateDelete(request)
		return err
	} else if *request.Type == protocol.Request_REPLICATION_WRITE {
		replicationFactor := self.clusterConfig.GetReplicationFactor(request.Database)
		// TODO: make request logging and datastore write atomic
		err := self.db.LogRequestAndAssignSequenceNumber(request, &replicationFactor, request.OwnerServerId)
		if err != nil {
			switch err := err.(type) {
			case datastore.SequenceMissingRequestsError:
				log.Warn("Missing sequence number error: Request SN: %v Last Known SN: %v", request.GetSequenceNumber(), err.LastKnownRequestSequence)
				go self.coordinator.ReplayReplication(request, &replicationFactor, request.OwnerServerId, &err.LastKnownRequestSequence)
				return nil
			default:
				return err
			}
		}
		// NOTE(review): the write error is ignored here — presumably a
		// deliberate best-effort choice for replication writes; confirm.
		self.db.WriteSeriesData(*request.Database, request.Series)
		return nil
	} else if *request.Type == protocol.Request_REPLICATION_DELETE {
		replicationFactor := self.clusterConfig.GetReplicationFactor(request.Database)
		// TODO: make request logging and datastore write atomic
		err := self.db.LogRequestAndAssignSequenceNumber(request, &replicationFactor, request.OwnerServerId)
		if err != nil {
			switch err := err.(type) {
			case datastore.SequenceMissingRequestsError:
				go self.coordinator.ReplayReplication(request, &replicationFactor, request.OwnerServerId, &err.LastKnownRequestSequence)
				return nil
			default:
				return err
			}
		}
		query, err := parser.ParseQuery(*request.Query)
		if err != nil {
			return err
		}
		return self.db.DeleteSeriesData(*request.Database, query[0].DeleteQuery)
	} else if *request.Type == protocol.Request_QUERY {
		go self.handleQuery(request, conn)
	} else if *request.Type == protocol.Request_LIST_SERIES {
		go self.handleListSeries(request, conn)
	} else if *request.Type == protocol.Request_REPLICATION_REPLAY {
		self.handleReplay(request, conn)
	} else if *request.Type == protocol.Request_SEQUENCE_NUMBER {
		self.handleSequenceNumberRequest(request, conn)
	} else {
		log.Error("unknown request type: %v", request)
		return errors.New("Unknown request type")
	}
	return nil
}
// RunQuery parses queryString and executes each resulting query: deletes and
// continuous-query drops go to the coordinator; list queries stream their
// results through yield; drop-series is forwarded; continuous selects are
// registered; other selects return after dispatch to the aggregate,
// arithmetic, or distributed path. localOnly restricts execution to this node.
func (self *QueryEngine) RunQuery(user common.User, database string, queryString string, localOnly bool, yield func(*protocol.Series) error) (err error) {
	// don't let a panic pass beyond RunQuery
	defer recoverFunc(database, queryString)
	q, err := parser.ParseQuery(queryString)
	if err != nil {
		return err
	}
	for _, query := range q {
		if query.DeleteQuery != nil {
			if err := self.coordinator.DeleteSeriesData(user, database, query.DeleteQuery, localOnly); err != nil {
				return err
			}
			continue
		}
		if query.DropQuery != nil {
			if err := self.coordinator.DeleteContinuousQuery(user, database, uint32(query.DropQuery.Id)); err != nil {
				return err
			}
			continue
		}
		if query.IsListQuery() {
			if query.IsListSeriesQuery() {
				series, err := self.coordinator.ListSeries(user, database)
				if err != nil {
					return err
				}
				for _, s := range series {
					if err := yield(s); err != nil {
						return err
					}
				}
			} else if query.IsListContinuousQueriesQuery() {
				queries, err := self.coordinator.ListContinuousQueries(user, database)
				if err != nil {
					return err
				}
				for _, q := range queries {
					if err := yield(q); err != nil {
						return err
					}
				}
			}
			continue
		}
		if query.DropSeriesQuery != nil {
			err := self.coordinator.DropSeries(user, database, query.DropSeriesQuery.GetTableName())
			if err != nil {
				return err
			}
			continue
		}
		// Anything left is a select query.
		selectQuery := query.SelectQuery
		if selectQuery.IsContinuousQuery() {
			return self.coordinator.CreateContinuousQuery(user, database, queryString)
		}
		// Note: the first non-continuous select ends the loop.
		if isAggregateQuery(selectQuery) {
			return self.executeCountQueryWithGroupBy(user, database, selectQuery, localOnly, yield)
		} else if containsArithmeticOperators(selectQuery) {
			return self.executeArithmeticQuery(user, database, selectQuery, localOnly, yield)
		} else {
			return self.distributeQuery(user, database, selectQuery, localOnly, yield)
		}
	}
	return nil
}
// HandleRequest dispatches an incoming protobuf request by type. Proxy writes
// and deletes are logged, applied locally, acknowledged, then replicated;
// replication writes/deletes are logged and applied, with a missing-sequence
// gap triggering an asynchronous replay. Queries and replay requests are
// delegated to their own handlers. Unknown request types are an error.
func (self *ProtobufRequestHandler) HandleRequest(request *protocol.Request, conn net.Conn) error {
	if *request.Type == protocol.Request_PROXY_WRITE {
		response := &protocol.Response{RequestId: request.Id, Type: &self.writeOk}
		// The owning server is determined by the ring location of the first
		// point of the series being written.
		location := common.RingLocation(request.Database, request.Series.Name, request.Series.Points[0].Timestamp)
		ownerId := self.clusterConfig.GetOwnerIdByLocation(&location)
		request.OriginatingServerId = &self.clusterConfig.localServerId
		// TODO: make request logging and datastore write atomic
		replicationFactor := self.clusterConfig.GetReplicationFactor(request.Database)
		err := self.db.LogRequestAndAssignSequenceNumber(request, &replicationFactor, ownerId)
		if err != nil {
			return err
		}
		err = self.db.WriteSeriesData(*request.Database, request.Series)
		if err != nil {
			return err
		}
		err = self.WriteResponse(conn, response)
		// TODO: add quorum writes?
		self.coordinator.ReplicateWrite(request)
		return err
	} else if *request.Type == protocol.Request_PROXY_DELETE {
		response := &protocol.Response{RequestId: request.Id, Type: &self.writeOk}
		request.OriginatingServerId = &self.clusterConfig.localServerId
		// TODO: make request logging and datastore write atomic
		replicationFactor := self.clusterConfig.GetReplicationFactor(request.Database)
		err := self.db.LogRequestAndAssignSequenceNumber(request, &replicationFactor, request.OwnerServerId)
		if err != nil {
			return err
		}
		// NOTE(review): the parse error is discarded here — presumably safe
		// because the query parsed at the originating server; confirm.
		query, _ := parser.ParseQuery(*request.Query)
		err = self.db.DeleteSeriesData(*request.Database, query[0].DeleteQuery)
		if err != nil {
			return err
		}
		err = self.WriteResponse(conn, response)
		// TODO: add quorum writes?
		self.coordinator.ReplicateDelete(request)
		return err
	} else if *request.Type == protocol.Request_REPLICATION_WRITE {
		replicationFactor := self.clusterConfig.GetReplicationFactor(request.Database)
		// TODO: make request logging and datastore write atomic
		err := self.db.LogRequestAndAssignSequenceNumber(request, &replicationFactor, request.OwnerServerId)
		if err != nil {
			switch err := err.(type) {
			case datastore.SequenceMissingRequestsError:
				// A gap in the replication log: replay missing requests
				// asynchronously and report success for this one.
				go self.coordinator.ReplayReplication(request, &replicationFactor, request.OwnerServerId, &err.LastKnownRequestSequence)
				return nil
			default:
				return err
			}
		}
		self.db.WriteSeriesData(*request.Database, request.Series)
		return nil
	} else if *request.Type == protocol.Request_REPLICATION_DELETE {
		replicationFactor := self.clusterConfig.GetReplicationFactor(request.Database)
		// TODO: make request logging and datastore write atomic
		err := self.db.LogRequestAndAssignSequenceNumber(request, &replicationFactor, request.OwnerServerId)
		if err != nil {
			switch err := err.(type) {
			case datastore.SequenceMissingRequestsError:
				go self.coordinator.ReplayReplication(request, &replicationFactor, request.OwnerServerId, &err.LastKnownRequestSequence)
				return nil
			default:
				return err
			}
		}
		query, _ := parser.ParseQuery(*request.Query)
		return self.db.DeleteSeriesData(*request.Database, query[0].DeleteQuery)
	} else if *request.Type == protocol.Request_QUERY {
		go self.handleQuery(request, conn)
	} else if *request.Type == protocol.Request_REPLICATION_REPLAY {
		self.handleReplay(request, conn)
	} else {
		log.Error("unknown request type: %v", request)
		return errors.New("Unknown request type")
	}
	return nil
}
// RunQuery parses queryString and executes each resulting statement against
// the given database on behalf of user, streaming result series to
// seriesWriter.
//
// Supported statement kinds (checked in this order): delete, drop continuous
// query, list series / list continuous queries, drop series, and select.
// A select query returns immediately via runQuery; a continuous select is
// registered via CreateContinuousQuery instead of being executed.
//
// NOTE(review): because the select path returns from inside the loop, any
// statements after the first select in a multi-statement request are skipped,
// and seriesWriter.Close() below is not reached on that path — presumably
// runQuery closes the writer itself; confirm before relying on it.
func (self *CoordinatorImpl) RunQuery(user common.User, database string, queryString string, seriesWriter SeriesWriter) (err error) {
	log.Debug("COORD: RunQuery: ", queryString)
	// don't let a panic pass beyond RunQuery
	defer recoverFunc(database, queryString)
	q, err := parser.ParseQuery(queryString)
	if err != nil {
		return err
	}
	for _, query := range q {
		querySpec := parser.NewQuerySpec(user, database, query)
		if query.DeleteQuery != nil {
			if err := self.runDeleteQuery(querySpec, seriesWriter); err != nil {
				return err
			}
			continue
		}
		if query.DropQuery != nil {
			// DropQuery here removes a continuous query by id.
			if err := self.DeleteContinuousQuery(user, database, uint32(query.DropQuery.Id)); err != nil {
				return err
			}
			continue
		}
		if query.IsListQuery() {
			if query.IsListSeriesQuery() {
				self.runListSeriesQuery(querySpec, seriesWriter)
			} else if query.IsListContinuousQueriesQuery() {
				queries, err := self.ListContinuousQueries(user, database)
				if err != nil {
					return err
				}
				for _, q := range queries {
					if err := seriesWriter.Write(q); err != nil {
						return err
					}
				}
			}
			continue
		}
		if query.DropSeriesQuery != nil {
			err := self.runDropSeriesQuery(querySpec, seriesWriter)
			if err != nil {
				return err
			}
			continue
		}
		// Anything left is treated as a select query.
		// NOTE(review): SelectQuery is assumed non-nil once the other kinds are
		// ruled out — confirm the parser guarantees this for all query types.
		selectQuery := query.SelectQuery
		if selectQuery.IsContinuousQuery() {
			return self.CreateContinuousQuery(user, database, queryString)
		}
		return self.runQuery(query, user, database, seriesWriter)
	}
	seriesWriter.Close()
	return nil
}
func (self *CoordinatorImpl) RunQuery(user common.User, database string, queryString string, seriesWriter SeriesWriter) (err error) { log.Info("Query: db: %s, u: %s, q: %s", database, user.GetName(), queryString) // don't let a panic pass beyond RunQuery defer common.RecoverFunc(database, queryString, nil) q, err := parser.ParseQuery(queryString) if err != nil { return err } for _, query := range q { querySpec := parser.NewQuerySpec(user, database, query) if query.DeleteQuery != nil { if err := self.clusterConfiguration.CreateCheckpoint(); err != nil { return err } if err := self.runDeleteQuery(querySpec, seriesWriter); err != nil { return err } continue } if query.DropQuery != nil { if err := self.DeleteContinuousQuery(user, database, uint32(query.DropQuery.Id)); err != nil { return err } continue } if query.IsListQuery() { if query.IsListSeriesQuery() { self.runListSeriesQuery(querySpec, seriesWriter) } else if query.IsListContinuousQueriesQuery() { queries, err := self.ListContinuousQueries(user, database) if err != nil { return err } for _, q := range queries { if err := seriesWriter.Write(q); err != nil { return err } } } continue } if query.DropSeriesQuery != nil { err := self.runDropSeriesQuery(querySpec, seriesWriter) if err != nil { return err } continue } selectQuery := query.SelectQuery if selectQuery.IsContinuousQuery() { return self.CreateContinuousQuery(user, database, queryString) } if err := self.checkPermission(user, querySpec); err != nil { return err } return self.runQuery(querySpec, seriesWriter) } seriesWriter.Close() return nil }
func (self *CoordinatorImpl) ReplayReplication(request *protocol.Request, replicationFactor *uint8, owningServerId *uint32, lastSeenSequenceNumber *uint64) { key := fmt.Sprintf("%d_%d_%d_%d", *replicationFactor, *request.ClusterVersion, *request.OriginatingServerId, *owningServerId) self.runningReplaysLock.Lock() requestsWaitingToWrite := self.runningReplays[key] if requestsWaitingToWrite != nil { self.runningReplays[key] = append(requestsWaitingToWrite, request) self.runningReplaysLock.Unlock() return } self.runningReplays[key] = []*protocol.Request{request} self.runningReplaysLock.Unlock() id := atomic.AddUint32(&self.requestId, uint32(1)) replicationFactor32 := uint32(*replicationFactor) database := "" replayRequest := &protocol.Request{ Id: &id, Type: &replayReplication, Database: &database, ReplicationFactor: &replicationFactor32, OriginatingServerId: request.OriginatingServerId, OwnerServerId: owningServerId, ClusterVersion: request.ClusterVersion, LastKnownSequenceNumber: lastSeenSequenceNumber} replayedRequests := make(chan *protocol.Response, 100) server := self.clusterConfiguration.GetServerById(request.OriginatingServerId) err := server.protobufClient.MakeRequest(replayRequest, replayedRequests) if err != nil { log.Error(err) return } for { response := <-replayedRequests if response == nil || *response.Type == protocol.Response_REPLICATION_REPLAY_END { self.runningReplaysLock.Lock() defer self.runningReplaysLock.Unlock() for _, r := range self.runningReplays[key] { err := self.datastore.LogRequestAndAssignSequenceNumber(r, replicationFactor, owningServerId) if err != nil { log.Error("Error writing waiting requests after replay: %s", err) } if *r.Type == protocol.Request_PROXY_WRITE { self.datastore.WriteSeriesData(*r.Database, r.Series) } else if *r.Type == protocol.Request_PROXY_DELETE || *r.Type == protocol.Request_REPLICATION_DELETE { query, _ := parser.ParseQuery(*r.Query) err = self.datastore.DeleteSeriesData(*r.Database, query[0].DeleteQuery) } } 
delete(self.runningReplays, key) log.Info("Replay done for originating server %d and owner server %d", *request.OriginatingServerId, *owningServerId) return } request := response.Request // TODO: make request logging and datastore write atomic err := self.datastore.LogRequestAndAssignSequenceNumber(request, replicationFactor, owningServerId) if err != nil { log.Error("ERROR writing replay: ", err) } else { self.datastore.WriteSeriesData(*request.Database, request.Series) } } }