func (self *Coordinator) runSingleQuery(user common.User, db string, q *parser.Query, p engine.Processor) error {
	querySpec := parser.NewQuerySpec(user, db, q)
	if ok, err := self.permissions.CheckQueryPermissions(user, db, querySpec); !ok {
		return err
	}

	switch qt := q.Type(); qt {
	// administrative
	case parser.DropContinuousQuery:
		return self.runDropContinuousQuery(user, db, uint32(q.DropQuery.Id))
	case parser.ListContinuousQueries:
		return self.runListContinuousQueries(user, db, p)
	case parser.Continuous:
		return self.runContinuousQuery(user, db, q.GetQueryString())
	case parser.ListSeries:
		return self.runListSeriesQuery(querySpec, p)
	// Data queries
	case parser.Delete:
		return self.runDeleteQuery(querySpec, p)
	case parser.DropSeries:
		return self.runDropSeriesQuery(querySpec)
	case parser.Select:
		return self.runQuerySpec(querySpec, p)
	default:
		return fmt.Errorf("Can't handle query %s", qt)
	}
}
func (self *CoordinatorSuite) TestShouldQuerySequentially(c *C) {
	end := time.Now().Truncate(24 * time.Hour)
	start := end.Add(-7 * 24 * time.Hour)
	shard := cluster.NewShard(1, start, end, "", "", nil)
	shards := []*cluster.ShardData{shard}
	coordinator := NewCoordinatorImpl(&configuration.Configuration{
		ClusterMaxResponseBufferSize: 1000,
	}, nil, nil, nil)

	queries := map[string]bool{
		"list series":                                        false,
		"select count(foo) from /.*bar.*/ group by time(1d)": true,
		"select count(foo) from bar":                         true,
		"select foo from bar":                                true,
		"select count(foo) from bar group by baz":            true,
		"select count(foo) from bar group by time(1d)":       false,
		"select count(foo) from bar group by time(3d)":       true,
	}

	for query, result := range queries {
		fmt.Printf("Testing %s\n", query)
		parsedQuery, err := parser.ParseQuery(query)
		c.Assert(err, IsNil)
		c.Assert(parsedQuery, HasLen, 1)
		querySpec := parser.NewQuerySpec(nil, "", parsedQuery[0])
		c.Assert(coordinator.shouldQuerySequentially(shards, querySpec), Equals, result)
	}
}
func (self *ProtobufRequestHandler) handleQuery(request *protocol.Request, conn net.Conn) {
	// the query should always parse correctly since it was parsed at the originating server.
	queries, err := parser.ParseQuery(*request.Query)
	if err != nil || len(queries) < 1 {
		log.Error("Error parsing query: ", err)
		errorMsg := fmt.Sprintf("Error parsing query %s: %v", *request.Query, err)
		response := &protocol.Response{
			Type:         protocol.Response_ERROR.Enum(),
			ErrorMessage: &errorMsg,
			RequestId:    request.Id,
		}
		self.WriteResponse(conn, response)
		return
	}
	query := queries[0]

	var user common.User
	if *request.IsDbUser {
		user = self.clusterConfig.GetDbUser(*request.Database, *request.UserName)
	} else {
		user = self.clusterConfig.GetClusterAdmin(*request.UserName)
	}

	if user == nil {
		errorMsg := fmt.Sprintf("Cannot find user %s", *request.UserName)
		response := &protocol.Response{
			Type:         protocol.Response_ERROR.Enum(),
			ErrorMessage: &errorMsg,
			RequestId:    request.Id,
		}
		self.WriteResponse(conn, response)
		return
	}

	shard := self.clusterConfig.GetLocalShardById(*request.ShardId)
	querySpec := parser.NewQuerySpec(user, *request.Database, query)

	responseChan := make(chan *protocol.Response)
	if querySpec.IsDestructiveQuery() {
		go shard.HandleDestructiveQuery(querySpec, request, responseChan, true)
	} else {
		go shard.Query(querySpec, responseChan)
	}

	// stream responses back to the requesting server until the shard signals
	// the end of the stream or an error.
	for {
		response := <-responseChan
		response.RequestId = request.Id
		self.WriteResponse(conn, response)
		switch rt := response.GetType(); rt {
		case protocol.Response_END_STREAM, protocol.Response_ERROR:
			return
		case protocol.Response_QUERY:
			continue
		default:
			panic(fmt.Errorf("Unexpected response type: %s", rt))
		}
	}
}
func (dm *DataMigrator) migrateDatabaseInShard(database string, shard *LevelDbShard) error {
	log.Info("Migrating database %s in shard", database)
	seriesNames := shard.GetSeriesForDatabase(database)
	log.Info("Migrating %d series", len(seriesNames))

	admin := dm.clusterConfig.GetClusterAdmin(dm.clusterConfig.GetClusterAdmins()[0])
	pointCount := 0
	for _, series := range seriesNames {
		q, err := parser.ParseQuery(fmt.Sprintf("select * from \"%s\"", series))
		if err != nil {
			log.Error("Problem migrating series %s", series)
			continue
		}
		query := q[0]
		seriesChan := make(chan *protocol.Response)
		queryEngine := engine.NewPassthroughEngine(seriesChan, 2000)
		querySpec := parser.NewQuerySpec(admin, database, query)

		// run the query against the local shard and signal the end of the
		// stream once it finishes
		go func() {
			err := shard.Query(querySpec, queryEngine)
			if err != nil {
				log.Error("Error migrating %s", err.Error())
			}
			queryEngine.Close()
			seriesChan <- &protocol.Response{Type: &endStreamResponse}
		}()

		for {
			response := <-seriesChan
			if *response.Type == endStreamResponse {
				break
			}
			err := dm.coord.WriteSeriesData(admin, database, []*protocol.Series{response.Series})
			if err != nil {
				log.Error("Writing Series data: %s", err.Error())
			}
			pointCount += len(response.Series.Points)
			// pause periodically so the migration doesn't overwhelm the cluster
			if pointCount > POINT_COUNT_TO_PAUSE {
				pointCount = 0
				time.Sleep(dm.pauseTime)
			}
		}
	}
	log.Info("Done migrating database %s in shard", database)
	return nil
}
func (self *ProtobufRequestHandler) handleQuery(request *protocol.Request, conn net.Conn) {
	// the query should always parse correctly since it was parsed at the originating server.
	queries, err := parser.ParseQuery(*request.Query)
	if err != nil || len(queries) < 1 {
		log.Error("Error parsing query: ", err)
		errorMsg := fmt.Sprintf("Error parsing query %s: %v", *request.Query, err)
		response := &protocol.Response{Type: &endStreamResponse, ErrorMessage: &errorMsg, RequestId: request.Id}
		self.WriteResponse(conn, response)
		return
	}
	query := queries[0]

	var user common.User
	if *request.IsDbUser {
		user = self.clusterConfig.GetDbUser(*request.Database, *request.UserName)
	} else {
		user = self.clusterConfig.GetClusterAdmin(*request.UserName)
	}

	if user == nil {
		errorMsg := fmt.Sprintf("Cannot find user %s", *request.UserName)
		response := &protocol.Response{Type: &accessDeniedResponse, ErrorMessage: &errorMsg, RequestId: request.Id}
		self.WriteResponse(conn, response)
		return
	}

	shard := self.clusterConfig.GetLocalShardById(*request.ShardId)
	querySpec := parser.NewQuerySpec(user, *request.Database, query)

	responseChan := make(chan *protocol.Response)
	if querySpec.IsDestructiveQuery() {
		go shard.HandleDestructiveQuery(querySpec, request, responseChan, true)
	} else {
		go shard.Query(querySpec, responseChan)
	}

	// stream responses back to the requesting server until the stream ends or
	// access is denied.
	for {
		response := <-responseChan
		response.RequestId = request.Id
		self.WriteResponse(conn, response)
		if response.GetType() == protocol.Response_END_STREAM || response.GetType() == protocol.Response_ACCESS_DENIED {
			return
		}
	}
}
func (self *CoordinatorImpl) RunQuery(user common.User, database string, queryString string, seriesWriter SeriesWriter) (err error) {
	log.Info("Start Query: db: %s, u: %s, q: %s", database, user.GetName(), queryString)
	defer func(t time.Time) {
		log.Debug("End Query: db: %s, u: %s, q: %s, t: %s", database, user.GetName(), queryString, time.Now().Sub(t))
	}(time.Now())

	// don't let a panic pass beyond RunQuery
	defer common.RecoverFunc(database, queryString, nil)

	q, err := parser.ParseQuery(queryString)
	if err != nil {
		return err
	}

	for _, query := range q {
		querySpec := parser.NewQuerySpec(user, database, query)

		if query.DeleteQuery != nil {
			if err := self.clusterConfiguration.CreateCheckpoint(); err != nil {
				return err
			}
			if err := self.runDeleteQuery(querySpec, seriesWriter); err != nil {
				return err
			}
			continue
		}

		if query.DropQuery != nil {
			if err := self.DeleteContinuousQuery(user, database, uint32(query.DropQuery.Id)); err != nil {
				return err
			}
			continue
		}

		if query.IsListQuery() {
			if query.IsListSeriesQuery() {
				self.runListSeriesQuery(querySpec, seriesWriter)
			} else if query.IsListContinuousQueriesQuery() {
				queries, err := self.ListContinuousQueries(user, database)
				if err != nil {
					return err
				}
				for _, q := range queries {
					if err := seriesWriter.Write(q); err != nil {
						return err
					}
				}
			}
			continue
		}

		if query.DropSeriesQuery != nil {
			err := self.runDropSeriesQuery(querySpec, seriesWriter)
			if err != nil {
				return err
			}
			continue
		}

		selectQuery := query.SelectQuery

		if selectQuery.IsContinuousQuery() {
			return self.CreateContinuousQuery(user, database, queryString)
		}
		if err := self.checkPermission(user, querySpec); err != nil {
			return err
		}
		return self.runQuery(querySpec, seriesWriter)
	}
	seriesWriter.Close()
	return nil
}
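// The sketch below is illustrative only and not part of this repository: the
// SeriesWriter interface is not shown in this section, so its shape (a Write
// method taking a *protocol.Series and a Close method) is assumed from how
// RunQuery uses seriesWriter above, and the buffer-in-memory behavior is a
// hypothetical stand-in for a real writer.
type bufferingSeriesWriter struct {
	series []*protocol.Series
}

// Write appends each incoming series to an in-memory buffer.
func (w *bufferingSeriesWriter) Write(s *protocol.Series) error {
	w.series = append(w.series, s)
	return nil
}

// Close is a no-op here; a real writer would flush results or release
// resources when the query finishes.
func (w *bufferingSeriesWriter) Close() {}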