func (self *CoordinatorImpl) write(db string, series []*protocol.Series, shard cluster.Shard, sync bool) error {
	request := &protocol.Request{Type: &write, Database: &db, MultiSeries: series}
	if sync {
		return shard.SyncWrite(request)
	}
	return shard.Write(request)
}
func (self *CoordinatorImpl) write(db string, series []*protocol.Series, shard cluster.Shard, sync bool) error {
	request := &protocol.Request{Type: &write, Database: &db, MultiSeries: series}
	// break the request up if it's too big
	if request.Size() >= MAX_REQUEST_SIZE {
		if l := len(series); l > 1 {
			// create two requests with half the series each
			if err := self.write(db, series[:l/2], shard, sync); err != nil {
				return err
			}
			return self.write(db, series[l/2:], shard, sync)
		}

		// otherwise, split the points of the only series
		s := series[0]
		l := len(s.Points)
		s1 := &protocol.Series{Name: s.Name, Fields: s.Fields, Points: s.Points[:l/2]}
		if err := self.write(db, []*protocol.Series{s1}, shard, sync); err != nil {
			return err
		}
		s2 := &protocol.Series{Name: s.Name, Fields: s.Fields, Points: s.Points[l/2:]}
		return self.write(db, []*protocol.Series{s2}, shard, sync)
	}

	if sync {
		return shard.SyncWrite(request)
	}
	return shard.Write(request)
}
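The size guard in the version above recursively halves an oversized request: first by splitting the slice of series, then, once only one series is left, by splitting its points. A minimal, self-contained sketch of that halving pattern follows; maxBatchBytes, sendBatch, and writeSplitting are hypothetical stand-ins for MAX_REQUEST_SIZE and the shard write call, not part of the code above.

package main

import "fmt"

// maxBatchBytes is a made-up size cap standing in for MAX_REQUEST_SIZE.
const maxBatchBytes = 64

// sendBatch stands in for the shard write; here it just reports what it got.
func sendBatch(points []string) error {
	fmt.Printf("sending %d points\n", len(points))
	return nil
}

// writeSplitting recursively halves the batch until each piece fits under the
// size cap, mirroring the halving strategy used in the write function above.
func writeSplitting(points []string) error {
	size := 0
	for _, p := range points {
		size += len(p)
	}
	if size >= maxBatchBytes && len(points) > 1 {
		half := len(points) / 2
		if err := writeSplitting(points[:half]); err != nil {
			return err
		}
		return writeSplitting(points[half:])
	}
	return sendBatch(points)
}

func main() {
	batch := make([]string, 0, 32)
	for i := 0; i < 32; i++ {
		batch = append(batch, fmt.Sprintf("point-%02d", i))
	}
	if err := writeSplitting(batch); err != nil {
		fmt.Println("write failed:", err)
	}
}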
func (self *CoordinatorImpl) CommitSeriesData(db string, series *protocol.Series) error {
	lastPointIndex := 0
	now := common.CurrentTime()
	var shardToWrite cluster.Shard

	// assign the current time to any point that arrived without a timestamp
	for _, point := range series.Points {
		if point.Timestamp == nil {
			point.Timestamp = &now
		}
	}

	lastTime := int64(math.MinInt64)
	if len(series.Points) > 0 && *series.Points[0].Timestamp == lastTime {
		// just a hack to make sure lastTime will never equal the first
		// point's timestamp
		lastTime = 0
	}

	// sort the points by timestamp (descending)
	series.SortPointsTimeDescending()

	for i, point := range series.Points {
		if *point.Timestamp != lastTime {
			shard, err := self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, *series.Name, *point.Timestamp)
			if err != nil {
				return err
			}
			if shardToWrite == nil {
				shardToWrite = shard
			} else if shardToWrite.Id() != shard.Id() {
				// the target shard changed: flush the points accumulated so
				// far to the previous shard before starting a new run
				newIndex := i
				newSeries := &protocol.Series{Name: series.Name, Fields: series.Fields, Points: series.Points[lastPointIndex:newIndex]}
				if err := self.write(db, newSeries, shardToWrite); err != nil {
					return err
				}
				lastPointIndex = newIndex
				shardToWrite = shard
			}
			lastTime = *point.Timestamp
		}
	}

	// write the remaining points to the current target shard
	series.Points = series.Points[lastPointIndex:]
	if len(series.Points) > 0 {
		if shardToWrite == nil {
			shardToWrite, _ = self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, *series.Name, *series.Points[0].Timestamp)
		}
		err := self.write(db, series, shardToWrite)
		if err != nil {
			log.Error("COORD error writing: ", err)
			return err
		}
		return err
	}
	return nil
}
func (self *CoordinatorImpl) CommitSeriesData(db string, series *protocol.Series) error {
	lastTime := int64(0)
	lastPointIndex := 0
	now := common.CurrentTime()
	var shardToWrite cluster.Shard

	for i, point := range series.Points {
		if point.Timestamp == nil {
			point.Timestamp = &now
		}
		if *point.Timestamp != lastTime {
			shard, err := self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, *series.Name, *point.Timestamp)
			if err != nil {
				return err
			}
			if shardToWrite == nil {
				shardToWrite = shard
			} else if shardToWrite.Id() != shard.Id() {
				newIndex := i + 1
				newSeries := &protocol.Series{Name: series.Name, Fields: series.Fields, Points: series.Points[lastPointIndex:newIndex]}
				// NOTE: the error from this intermediate write is not checked
				self.write(db, newSeries, shardToWrite)
				lastPointIndex = newIndex
				shardToWrite = shard
			}
			lastTime = *point.Timestamp
		}
	}

	series.Points = series.Points[lastPointIndex:]
	if len(series.Points) > 0 {
		if shardToWrite == nil {
			shardToWrite, _ = self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, *series.Name, *series.Points[0].Timestamp)
		}
		err := self.write(db, series, shardToWrite)
		if err != nil {
			log.Error("COORD error writing: ", err)
		}
		return err
	}
	return nil
}
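Both versions of CommitSeriesData above follow the same shape: walk the points in timestamp order, look up the shard responsible for each new timestamp, and flush the contiguous run collected so far whenever the target shard changes. The sketch below isolates that grouping logic under simplified assumptions; point, shardFor, and flush are hypothetical stand-ins for the protocol and cluster types used above.

package main

import "fmt"

// point is a simplified stand-in for protocol.Point: just a timestamp in seconds.
type point struct {
	timestamp int64
}

// shardFor is a made-up stand-in for GetShardToWriteToBySeriesAndTime:
// it buckets timestamps into 10-second shards.
func shardFor(timestamp int64) int64 {
	return timestamp / 10
}

// flush stands in for the per-shard write call.
func flush(shard int64, points []point) {
	fmt.Printf("shard %d gets %d points\n", shard, len(points))
}

// commit walks the points in order, looks up the shard for each point, and
// flushes the accumulated run whenever the target shard changes.
func commit(points []point) {
	if len(points) == 0 {
		return
	}
	runStart := 0
	currentShard := shardFor(points[0].timestamp)
	for i, p := range points {
		shard := shardFor(p.timestamp)
		if shard != currentShard {
			// the target shard changed: write out the run collected so far
			flush(currentShard, points[runStart:i])
			runStart = i
			currentShard = shard
		}
	}
	// write whatever is left to the last shard
	flush(currentShard, points[runStart:])
}

func main() {
	commit([]point{{1}, {2}, {9}, {11}, {12}, {25}})
}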
func (self *CoordinatorImpl) writeWithoutAssigningId(db string, series []*protocol.Series, shard cluster.Shard, sync bool) error {
	request := &protocol.Request{Type: &write, Database: &db, MultiSeries: series}
	// break the request up if it's too big
	if request.Size() >= MAX_REQUEST_SIZE {
		if l := len(series); l > 1 {
			// create two requests with half the series each
			if err := self.writeWithoutAssigningId(db, series[:l/2], shard, sync); err != nil {
				return err
			}
			return self.writeWithoutAssigningId(db, series[l/2:], shard, sync)
		}

		// otherwise, split the points of the only series
		s := series[0]
		l := len(s.Points)
		s1 := &protocol.Series{Name: s.Name, FieldIds: s.FieldIds, Points: s.Points[:l/2]}
		if err := self.writeWithoutAssigningId(db, []*protocol.Series{s1}, shard, sync); err != nil {
			return err
		}
		s2 := &protocol.Series{Name: s.Name, FieldIds: s.FieldIds, Points: s.Points[l/2:]}
		return self.writeWithoutAssigningId(db, []*protocol.Series{s2}, shard, sync)
	}

	// if we received a synchronous write, then this is coming from the
	// continuous queries which have the sequence numbers assigned
	if sync {
		return shard.SyncWrite(request, false)
	}

	// If the shard isn't replicated, do a synchronous write
	if shard.ReplicationFactor() <= 1 {
		// assign sequenceNumber and write synchronously
		return shard.SyncWrite(request, true)
	}
	return shard.Write(request)
}
func (self *CoordinatorImpl) write(db string, series *protocol.Series, shard cluster.Shard) error {
	request := &protocol.Request{Type: &write, Database: &db, Series: series}
	return shard.Write(request)
}