func createResponse(nextPointMap map[string]*NextPoint, series *protocol.Series, id *uint32) *protocol.Response {
	pointCount := len(series.Points)
	if pointCount < 1 {
		// no new points; flush any point we were holding back for this series
		if nextPoint := nextPointMap[*series.Name]; nextPoint != nil {
			series.Points = append(series.Points, nextPoint.point)
			series.Fields = nextPoint.fields
		}
		return &protocol.Response{Type: &queryResponse, Series: series, RequestId: id}
	}

	// hold back the last point so the next batch can be stitched on seamlessly
	oldNextPoint := nextPointMap[*series.Name]
	nextPoint := series.Points[pointCount-1]
	series.Points[pointCount-1] = nil

	if oldNextPoint != nil {
		// shift everything right by one and prepend the point held back
		// from the previous batch
		copy(series.Points[1:], series.Points[0:])
		series.Points[0] = oldNextPoint.point
	} else {
		series.Points = series.Points[:len(series.Points)-1]
	}

	response := &protocol.Response{Series: series, Type: &queryResponse, RequestId: id}
	if nextPoint != nil {
		response.NextPointTime = nextPoint.Timestamp
		nextPointMap[*series.Name] = &NextPoint{series.Fields, nextPoint}
	}
	return response
}
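// A minimal, self-contained model of the hold-back-one-point batching used
// by createResponse above. The point type and the carry function are made-up
// names for illustration, not part of the protocol package.
package main

import "fmt"

type point struct{ ts int64 }

// carry holds the last point of each batch back so it can be prepended to
// the next one, keeping batch boundaries seamless.
func carry(prev *point, batch []point) (*point, []point) {
	if len(batch) == 0 {
		return prev, batch
	}
	next := batch[len(batch)-1]
	batch = batch[:len(batch)-1]
	if prev != nil {
		batch = append([]point{*prev}, batch...)
	}
	return &next, batch
}

func main() {
	var held *point
	for _, b := range [][]point{{{1}, {2}, {3}}, {{4}, {5}}} {
		var out []point
		held, out = carry(held, b)
		fmt.Println(out, *held) // [{1} {2}] {3}, then [{3} {4}] {5}
	}
}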
func (self *CoordinatorImpl) InterpolateValuesAndCommit(query string, db string, series *protocol.Series, targetName string, assignSequenceNumbers bool) error {
	defer common.RecoverFunc(db, query, nil)

	targetName = strings.Replace(targetName, ":series_name", *series.Name, -1)
	type sequenceKey struct {
		seriesName string
		timestamp  int64
	}
	sequenceMap := make(map[sequenceKey]int)
	r := regexp.MustCompile(`\[.*?\]`)

	if r.MatchString(targetName) {
		// the target name contains [field] placeholders; each point may map
		// to a different target series depending on its field values
		serieses := map[string]*protocol.Series{}
		for _, point := range series.Points {
			targetNameWithValues := r.ReplaceAllStringFunc(targetName, func(match string) string {
				fieldName := match[1 : len(match)-1]
				fieldIndex := series.GetFieldIndex(fieldName)
				return point.GetFieldValueAsString(fieldIndex)
			})

			if assignSequenceNumbers {
				key := sequenceKey{targetNameWithValues, *point.Timestamp}
				sequenceMap[key] += 1
				sequenceNumber := uint64(sequenceMap[key])
				point.SequenceNumber = &sequenceNumber
			}

			newSeries := serieses[targetNameWithValues]
			if newSeries == nil {
				newSeries = &protocol.Series{Name: &targetNameWithValues, Fields: series.Fields, Points: []*protocol.Point{point}}
				serieses[targetNameWithValues] = newSeries
				continue
			}
			newSeries.Points = append(newSeries.Points, point)
		}

		seriesSlice := make([]*protocol.Series, 0, len(serieses))
		for _, s := range serieses {
			seriesSlice = append(seriesSlice, s)
		}
		if e := self.CommitSeriesData(db, seriesSlice, true); e != nil {
			log.Error("Couldn't write data for continuous query: ", e)
		}
	} else {
		newSeries := &protocol.Series{Name: &targetName, Fields: series.Fields, Points: series.Points}

		if assignSequenceNumbers {
			for _, point := range newSeries.Points {
				key := sequenceKey{targetName, *point.Timestamp}
				sequenceMap[key] += 1
				sequenceNumber := uint64(sequenceMap[key])
				point.SequenceNumber = &sequenceNumber
			}
		}

		if e := self.CommitSeriesData(db, []*protocol.Series{newSeries}, true); e != nil {
			log.Error("Couldn't write data for continuous query: ", e)
		}
	}

	return nil
}
func (self *CoordinatorImpl) CommitSeriesData(db string, series *protocol.Series) error {
	lastPointIndex := 0
	now := common.CurrentTime()
	var shardToWrite cluster.Shard

	for _, point := range series.Points {
		if point.Timestamp == nil {
			point.Timestamp = &now
		}
	}

	lastTime := int64(math.MinInt64)
	if len(series.Points) > 0 && *series.Points[0].Timestamp == lastTime {
		// just a hack to make sure lastTime will never equal the first
		// point's timestamp
		lastTime = 0
	}

	// sort the points by timestamp, newest first
	series.SortPointsTimeDescending()

	for i, point := range series.Points {
		if *point.Timestamp != lastTime {
			shard, err := self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, *series.Name, *point.Timestamp)
			if err != nil {
				return err
			}
			if shardToWrite == nil {
				shardToWrite = shard
			} else if shardToWrite.Id() != shard.Id() {
				// the current point falls into a different shard; flush
				// everything accumulated so far to the previous shard
				newIndex := i
				newSeries := &protocol.Series{Name: series.Name, Fields: series.Fields, Points: series.Points[lastPointIndex:newIndex]}
				if err := self.write(db, newSeries, shardToWrite); err != nil {
					return err
				}
				lastPointIndex = newIndex
				shardToWrite = shard
			}
			lastTime = *point.Timestamp
		}
	}

	series.Points = series.Points[lastPointIndex:]

	if len(series.Points) > 0 {
		if shardToWrite == nil {
			shardToWrite, _ = self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, *series.Name, *series.Points[0].Timestamp)
		}
		if err := self.write(db, series, shardToWrite); err != nil {
			log.Error("COORD error writing: ", err)
			return err
		}
	}

	return nil
}
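// A simplified, standalone model of the shard-splitting loop in
// CommitSeriesData. Points are assumed already sorted by time; shardFor is a
// hypothetical stand-in for GetShardToWriteToBySeriesAndTime (real shards
// come from the cluster configuration, not a fixed bucket width).
package main

import "fmt"

// shardFor buckets timestamps into hypothetical 100-unit shards.
func shardFor(ts int64) int64 { return ts / 100 }

// splitByShard returns contiguous sub-slices of timestamps that all map to
// the same shard, mirroring the lastPointIndex bookkeeping above.
func splitByShard(ts []int64) [][]int64 {
	var runs [][]int64
	last := 0
	for i := 1; i < len(ts); i++ {
		if shardFor(ts[i]) != shardFor(ts[last]) {
			runs = append(runs, ts[last:i])
			last = i
		}
	}
	if last < len(ts) {
		runs = append(runs, ts[last:])
	}
	return runs
}

func main() {
	// [[10 20] [110 120] [250]]
	fmt.Println(splitByShard([]int64{10, 20, 110, 120, 250}))
}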
func (self *CoordinatorImpl) InterpolateValuesAndCommit(db string, series *protocol.Series, targetName string, assignSequenceNumbers bool) error {
	targetName = strings.Replace(targetName, ":series_name", *series.Name, -1)
	type sequenceKey struct {
		seriesName string
		timestamp  int64
	}
	sequenceMap := make(map[sequenceKey]int)
	r := regexp.MustCompile(`\[.*?\]`)

	// map series-name characters to a safe subset: keep alphanumerics and
	// "_-.", turn spaces into underscores and slashes into dots, drop the rest
	replaceInvalidCharacters := func(r rune) rune {
		switch {
		case (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9'):
			return r
		case r == '_' || r == '-' || r == '.':
			return r
		case r == ' ':
			return '_'
		case r == '/':
			return '.'
		}
		return -1
	}

	if r.MatchString(targetName) {
		for _, point := range series.Points {
			targetNameWithValues := r.ReplaceAllStringFunc(targetName, func(match string) string {
				fieldName := match[1 : len(match)-1]
				fieldIndex := series.GetFieldIndex(fieldName)
				return point.GetFieldValueAsString(fieldIndex)
			})
			cleanedTargetName := strings.Map(replaceInvalidCharacters, targetNameWithValues)

			if assignSequenceNumbers {
				key := sequenceKey{cleanedTargetName, *point.Timestamp}
				sequenceMap[key] += 1
				sequenceNumber := uint64(sequenceMap[key])
				point.SequenceNumber = &sequenceNumber
			}

			newSeries := &protocol.Series{Name: &cleanedTargetName, Fields: series.Fields, Points: []*protocol.Point{point}}
			if e := self.CommitSeriesData(db, newSeries); e != nil {
				log.Error("Couldn't write data for continuous query: ", e)
			}
		}
	} else {
		newSeries := &protocol.Series{Name: &targetName, Fields: series.Fields, Points: series.Points}

		if assignSequenceNumbers {
			for _, point := range newSeries.Points {
				key := sequenceKey{targetName, *point.Timestamp}
				sequenceMap[key] += 1
				sequenceNumber := uint64(sequenceMap[key])
				point.SequenceNumber = &sequenceNumber
			}
		}

		if e := self.CommitSeriesData(db, newSeries); e != nil {
			log.Error("Couldn't write data for continuous query: ", e)
		}
	}

	return nil
}
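// The rune mapper above can be exercised on its own. This standalone snippet
// shows how strings.Map drops or rewrites characters in a generated series
// name; the sample input is hypothetical.
package main

import (
	"fmt"
	"strings"
)

func replaceInvalidCharacters(r rune) rune {
	switch {
	case (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9'):
		return r
	case r == '_' || r == '-' || r == '.':
		return r
	case r == ' ':
		return '_'
	case r == '/':
		return '.'
	}
	return -1 // any other rune is removed entirely
}

func main() {
	// prints "cpu_load.host_a": spaces become underscores, the slash
	// becomes a dot, and the "!" is dropped
	fmt.Println(strings.Map(replaceInvalidCharacters, "cpu load/host a!"))
}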
func (self *QueryEngine) YieldSeries(seriesIncoming *protocol.Series) (shouldContinue bool) {
	if self.explain {
		self.pointsRead += int64(len(seriesIncoming.Points))
	}
	seriesName := seriesIncoming.GetName()
	self.seriesToPoints[seriesName] = &protocol.Series{Name: &seriesName, Fields: seriesIncoming.Fields}
	return self.yieldSeriesData(seriesIncoming) && !self.limiter.hitLimit(seriesIncoming.GetName())
}
// MergeSeries merges two time series, making sure that the resulting series
// has the union of the two series' columns and that the values are set
// properly. It panics if the two series don't have the same name.
func MergeSeries(s1, s2 *protocol.Series) *protocol.Series {
	if s1.GetName() != s2.GetName() {
		panic("the two series don't have the same name")
	}

	// if the two series have the same columns in the same order, just
	// append the points and return
	if reflect.DeepEqual(s1.Fields, s2.Fields) {
		s1.Points = append(s1.Points, s2.Points...)
		return s1
	}

	// otherwise, merge the columns: build the union of the two field sets
	columns := map[string]struct{}{}
	for _, cs := range [][]string{s1.Fields, s2.Fields} {
		for _, c := range cs {
			columns[c] = struct{}{}
		}
	}

	points := append(pointMaps(s1), pointMaps(s2)...)

	fieldsSlice := make([]string, 0, len(columns))
	for c := range columns {
		fieldsSlice = append(fieldsSlice, c)
	}

	resultPoints := make([]*protocol.Point, 0, len(points))
	for idx, point := range points {
		resultPoint := &protocol.Point{}
		for _, field := range fieldsSlice {
			value := point[field]
			if value == nil {
				// the point comes from the series that lacks this column
				value = &protocol.FieldValue{IsNull: &TRUE}
			}
			resultPoint.Values = append(resultPoint.Values, value)
		}
		if idx < len(s1.Points) {
			resultPoint.Timestamp = s1.Points[idx].Timestamp
			resultPoint.SequenceNumber = s1.Points[idx].SequenceNumber
		} else {
			resultPoint.Timestamp = s2.Points[idx-len(s1.Points)].Timestamp
			resultPoint.SequenceNumber = s2.Points[idx-len(s1.Points)].SequenceNumber
		}
		resultPoints = append(resultPoints, resultPoint)
	}

	return &protocol.Series{
		Name:   s1.Name,
		Fields: fieldsSlice,
		Points: resultPoints,
	}
}
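// A reduced sketch of the column-union merge performed by MergeSeries, with
// points modeled as map[string]any instead of protocol.Point. As in the
// original, the resulting column order is not guaranteed.
package main

import "fmt"

func mergeColumns(a, b []map[string]any) (fields []string, rows [][]any) {
	// union of all column names across both point sets
	seen := map[string]bool{}
	for _, rowset := range [][]map[string]any{a, b} {
		for _, row := range rowset {
			for k := range row {
				if !seen[k] {
					seen[k] = true
					fields = append(fields, k)
				}
			}
		}
	}
	// project every point onto the unified column list; missing columns
	// come out as nil, playing the role of the IsNull field value
	for _, row := range append(append([]map[string]any{}, a...), b...) {
		out := make([]any, len(fields))
		for i, f := range fields {
			out[i] = row[f]
		}
		rows = append(rows, out)
	}
	return fields, rows
}

func main() {
	a := []map[string]any{{"value": 1}}
	b := []map[string]any{{"value": 2, "host": "a"}}
	fields, rows := mergeColumns(a, b)
	fmt.Println(fields, rows) // e.g. [value host] [[1 <nil>] [2 a]]
}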
func Filter(query *parser.SelectQuery, series *protocol.Series) (*protocol.Series, error) {
	if query.GetWhereCondition() == nil {
		return series, nil
	}

	columns := map[string]struct{}{}
	if query.GetFromClause().Type == parser.FromClauseInnerJoin {
	outer:
		for t, cs := range query.GetResultColumns() {
			for _, c := range cs {
				// if this is a wildcard select, then drop all columns and
				// just use '*'
				if c == "*" {
					columns = make(map[string]struct{}, 1)
					columns[c] = struct{}{}
					break outer
				}
				columns[t.Name+"."+c] = struct{}{}
			}
		}
	} else {
		for _, cs := range query.GetResultColumns() {
			for _, c := range cs {
				columns[c] = struct{}{}
			}
		}
	}

	points := series.Points
	series.Points = nil
	for _, point := range points {
		ok, err := matches(query.GetWhereCondition(), series.Fields, point)
		if err != nil {
			return nil, err
		}
		if ok {
			filterColumns(columns, series.Fields, point)
			series.Points = append(series.Points, point)
		}
	}

	if _, ok := columns["*"]; !ok {
		newFields := []string{}
		for _, f := range series.Fields {
			if _, ok := columns[f]; !ok {
				continue
			}
			newFields = append(newFields, f)
		}
		series.Fields = newFields
	}
	return series, nil
}
func (self *AllPointsWriter) yield(series *protocol.Series) error {
	oldSeries := self.memSeries[series.GetName()]
	if oldSeries == nil {
		self.memSeries[series.GetName()] = series
		return nil
	}

	self.memSeries[series.GetName()] = MergeSeries(oldSeries, series)
	return nil
}
func (self *Limiter) calculateLimitAndSlicePoints(series *protocol.Series) {
	if !self.shouldLimit {
		return
	}

	limit := self.limitForSeries(*series.Name)
	defer func() { self.limits[*series.Name] = limit }()

	// if the limit is 0, stop returning any points
	if limit == 0 {
		series.Points = nil
		return
	}
	limit -= len(series.Points)
	if limit <= 0 {
		sliceTo := len(series.Points) + limit
		series.Points = series.Points[0:sliceTo]
		limit = 0
	}
}
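// The slicing arithmetic from calculateLimitAndSlicePoints, isolated as a
// pure function: limit is how many points may still be returned for a
// series, incoming is the size of the arriving batch. applyLimit is a
// hypothetical name for this sketch.
package main

import "fmt"

// applyLimit returns how many incoming points to keep and the remaining
// budget after this batch.
func applyLimit(limit, incoming int) (keep, remaining int) {
	remaining = limit - incoming
	if remaining <= 0 {
		// keep only enough points to exactly reach the limit
		return incoming + remaining, 0
	}
	return incoming, remaining
}

func main() {
	fmt.Println(applyLimit(10, 4)) // 4 6: the whole batch fits
	fmt.Println(applyLimit(3, 5))  // 3 0: batch is truncated at the limit
}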
func (self *PassthroughEngine) YieldSeries(seriesIncoming *protocol.Series) bool {
	log.Debug("PassthroughEngine YieldSeries %d", len(seriesIncoming.Points))
	if *seriesIncoming.Name == "explain query" {
		self.responseType = &explainQueryResponse
		log.Debug("Response Changed!")
	} else {
		self.responseType = &queryResponse
	}

	self.limiter.calculateLimitAndSlicePoints(seriesIncoming)
	if len(seriesIncoming.Points) == 0 {
		log.Error("YieldSeries: dropping series with no points")
		return false
	}

	if self.response == nil {
		self.response = &protocol.Response{
			Type:   self.responseType,
			Series: seriesIncoming,
		}
	} else if self.response.Series.GetName() != seriesIncoming.GetName() ||
		len(self.response.Series.Points) > self.maxPointsInResponse {
		// flush the buffered response when the series changes or the buffer
		// grows too large, then start a new one
		self.responseChan <- self.response
		self.response = &protocol.Response{
			Type:   self.responseType,
			Series: seriesIncoming,
		}
	} else {
		self.response.Series.Points = append(self.response.Series.Points, seriesIncoming.Points...)
	}

	return !self.limiter.hitLimit(seriesIncoming.GetName())
}
func Filter(query *parser.SelectQuery, series *protocol.Series) (*protocol.Series, error) {
	if query.GetWhereCondition() == nil {
		return series, nil
	}

	columns := map[string]bool{}
	getColumns(query.GetColumnNames(), columns)
	getColumns(query.GetGroupByClause().Elems, columns)

	points := series.Points
	series.Points = nil
	for _, point := range points {
		ok, err := matches(query.GetWhereCondition(), series.Fields, point)
		if err != nil {
			return nil, err
		}
		if ok {
			filterColumns(columns, series.Fields, point)
			series.Points = append(series.Points, point)
		}
	}

	if !columns["*"] {
		newFields := []string{}
		for _, f := range series.Fields {
			if !columns[f] {
				continue
			}
			newFields = append(newFields, f)
		}
		series.Fields = newFields
	}
	return series, nil
}
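// A self-contained sketch of the field-pruning step at the end of Filter:
// fields absent from the selected column set are dropped unless the query
// selected "*". pruneFields is a hypothetical name for this sketch.
package main

import "fmt"

func pruneFields(fields []string, columns map[string]bool) []string {
	if columns["*"] {
		return fields
	}
	kept := make([]string, 0, len(fields))
	for _, f := range fields {
		if columns[f] {
			kept = append(kept, f)
		}
	}
	return kept
}

func main() {
	cols := map[string]bool{"value": true}
	// prints [value]: "time" and "host" were not selected
	fmt.Println(pruneFields([]string{"time", "value", "host"}, cols))
}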
func (self *CoordinatorImpl) ProcessContinuousQueries(db string, series *protocol.Series) {
	if self.clusterConfiguration.parsedContinuousQueries == nil {
		return
	}

	incomingSeriesName := *series.Name
	for _, query := range self.clusterConfiguration.parsedContinuousQueries[db] {
		groupByClause := query.GetGroupByClause()
		// skip group-by continuous queries here
		if groupByClause.Elems != nil {
			continue
		}

		fromClause := query.GetFromClause()
		intoClause := query.GetIntoClause()
		targetName := intoClause.Target.Name
		interpolatedTargetName := strings.Replace(targetName, ":series_name", incomingSeriesName, -1)

		for _, table := range fromClause.Names {
			tableValue := table.Name
			matched := false
			if regex, ok := tableValue.GetCompiledRegex(); ok {
				matched = regex.MatchString(incomingSeriesName)
			} else {
				matched = tableValue.Name == incomingSeriesName
			}
			if !matched {
				continue
			}
			series.Name = &interpolatedTargetName
			if e := self.CommitSeriesData(db, series); e != nil {
				log.Error("Couldn't write data for continuous query: ", e)
			}
		}
	}
}
func (self *CoordinatorImpl) normalizePointAndAppend(fieldNames map[string]int, result *protocol.Series, fields []string, point *protocol.Point) {
	oldValues := point.Values
	point.Values = make([]*protocol.FieldValue, len(fieldNames))
	for index, field := range fields {
		indexForField, ok := fieldNames[field]
		// drop this point on the floor if the unexpected happens
		if !ok {
			log.Error("Couldn't lookup field: ", field, fields, fieldNames)
			return
		}
		point.Values[indexForField] = oldValues[index]
	}
	result.Points = append(result.Points, point)
}
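// Reordering a point's values into a global field order, as done by
// normalizePointAndAppend. The types are simplified stand-ins; a nil return
// plays the role of dropping the point and logging.
package main

import "fmt"

func normalize(fieldIndexes map[string]int, fields []string, values []any) []any {
	out := make([]any, len(fieldIndexes))
	for i, f := range fields {
		idx, ok := fieldIndexes[f]
		if !ok {
			return nil // unknown field: drop the point, as above
		}
		out[idx] = values[i]
	}
	return out
}

func main() {
	global := map[string]int{"value": 0, "host": 1}
	// the local order (host, value) is rewritten to the global order
	fmt.Println(normalize(global, []string{"host", "value"}, []any{"a", 1})) // [1 a]
}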
func (self *CoordinatorImpl) CommitSeriesData(db string, series *protocol.Series) error {
	lastTime := int64(0)
	lastPointIndex := 0
	now := common.CurrentTime()
	var shardToWrite cluster.Shard

	for i, point := range series.Points {
		if point.Timestamp == nil {
			point.Timestamp = &now
		}
		if *point.Timestamp != lastTime {
			shard, err := self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, *series.Name, *point.Timestamp)
			if err != nil {
				return err
			}
			if shardToWrite == nil {
				shardToWrite = shard
			} else if shardToWrite.Id() != shard.Id() {
				// the current point belongs to a different shard; flush the
				// points accumulated so far to the previous shard
				newIndex := i
				newSeries := &protocol.Series{Name: series.Name, Fields: series.Fields, Points: series.Points[lastPointIndex:newIndex]}
				if err := self.write(db, newSeries, shardToWrite); err != nil {
					return err
				}
				lastPointIndex = newIndex
				shardToWrite = shard
			}
			lastTime = *point.Timestamp
		}
	}

	series.Points = series.Points[lastPointIndex:]

	if len(series.Points) > 0 {
		if shardToWrite == nil {
			shardToWrite, _ = self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, *series.Name, *series.Points[0].Timestamp)
		}
		if err := self.write(db, series, shardToWrite); err != nil {
			log.Error("COORD error writing: ", err)
			return err
		}
	}

	return nil
}
// We have three types of queries:
//   1. time() without fill
//   2. time() with fill
//   3. no time()
//
// For (1) we flush as soon as a new bucket starts; the prefix tree keeps
// track of the other group by columns without the time bucket. We reset the
// trie once the series is yielded. For (2), we keep track of all group by
// columns with time being the last level in the prefix tree. At the end of
// the query we step through [start time, end time] in self.duration steps
// and get the state from the prefix tree, using default values for groups
// without state in the prefix tree. For the last case we keep the groups in
// the prefix tree and on close() we loop through the groups and flush their
// values with a timestamp equal to now().
func (self *QueryEngine) aggregateValuesForSeries(series *protocol.Series) error {
	for _, aggregator := range self.aggregators {
		if err := aggregator.InitializeFieldsMetadata(series); err != nil {
			return err
		}
	}

	seriesState := self.getSeriesState(series.GetName())
	currentRange := seriesState.pointsRange

	includeTimestampInGroup := self.duration != nil && self.fillWithZero
	var group []*protocol.FieldValue
	if !includeTimestampInGroup {
		group = make([]*protocol.FieldValue, len(self.elems))
	} else {
		group = make([]*protocol.FieldValue, len(self.elems)+1)
	}

	for _, point := range series.Points {
		currentRange.UpdateRange(point)

		// this is a groupby with time() and no fill, flush as soon as we
		// start a new bucket
		if self.duration != nil && !self.fillWithZero {
			timestamp := self.getTimestampFromPoint(point)
			// this is the timestamp aggregator
			if seriesState.started && seriesState.lastTimestamp != timestamp {
				self.runAggregatesForTable(series.GetName())
			}
			seriesState.lastTimestamp = timestamp
			seriesState.started = true
		}

		// get the group this point belongs to
		for idx, elem := range self.elems {
			// TODO: create an index from fieldname to index
			value, err := GetValue(elem, series.Fields, point)
			if err != nil {
				return err
			}
			group[idx] = value
		}

		// if this is a fill() query, add the timestamp at the end
		if includeTimestampInGroup {
			timestamp := self.getTimestampFromPoint(point)
			group[len(self.elems)] = &protocol.FieldValue{Int64Value: protocol.Int64(timestamp)}
		}

		// update the state of the given group
		node := seriesState.trie.GetNode(group)
		var err error
		for idx, aggregator := range self.aggregators {
			node.states[idx], err = aggregator.AggregatePoint(node.states[idx], point)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
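// A standalone sketch of the group key built per point above: the group-by
// column values, plus the time bucket as a trailing element when fill()
// applies. The bucket arithmetic is roughly what getTimestampFromPoint does,
// assuming duration and timestamps share a time unit; the names here are
// hypothetical.
package main

import "fmt"

// bucket rounds a timestamp down to the start of its duration window.
func bucket(ts, duration int64) int64 { return ts - ts%duration }

func groupKey(columnValues []string, ts, duration int64, fillWithZero bool) []string {
	key := append([]string{}, columnValues...)
	if fillWithZero {
		key = append(key, fmt.Sprint(bucket(ts, duration)))
	}
	return key
}

func main() {
	// prints [host-a 1020]: 1057 rounds down to the 60-unit bucket at 1020
	fmt.Println(groupKey([]string{"host-a"}, 1057, 60, true))
}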
// TODO: refactor this for clarity. This got super ugly...
//
// yieldResultsForSeries yields all results that are safe to yield while
// preserving order. It returns the results that must wait for more data from
// the servers.
func (self *CoordinatorImpl) yieldResultsForSeries(isAscending bool, leftover *protocol.Series, responses []*protocol.Response, yield func(*protocol.Series) error) *protocol.Series {
	// results can come from different servers, some of which won't know about
	// fields that other servers may know about. We need to normalize all this
	// so that every field is represented and the missing field values are null.

	// give each unique field name an index. We'll use this map later to
	// construct the results and make sure that the response objects have
	// their fields in the result.
	fieldIndexes := make(map[string]int)
	for _, response := range responses {
		for _, name := range response.Series.Fields {
			if _, hasField := fieldIndexes[name]; !hasField {
				fieldIndexes[name] = len(fieldIndexes)
			}
		}
	}
	fields := make([]string, len(fieldIndexes))
	for name, index := range fieldIndexes {
		fields[index] = name
	}
	fieldCount := len(fields)

	result := &protocol.Series{Name: responses[0].Series.Name, Fields: fields, Points: make([]*protocol.Point, 0)}
	if leftover == nil {
		leftover = &protocol.Series{Name: responses[0].Series.Name, Fields: fields, Points: make([]*protocol.Point, 0)}
	}

	barrierTime := BARRIER_TIME_MIN
	if isAscending {
		barrierTime = BARRIER_TIME_MAX
	}
	var shouldYieldComparator func(rawTime *int64) bool
	if isAscending {
		shouldYieldComparator = func(rawTime *int64) bool {
			return rawTime != nil && *rawTime < barrierTime
		}
	} else {
		shouldYieldComparator = func(rawTime *int64) bool {
			return rawTime != nil && *rawTime > barrierTime
		}
	}

	// find the barrier time
	for _, response := range responses {
		if shouldYieldComparator(response.NextPointTime) {
			barrierTime = *response.NextPointTime
		}
	}

	// yield the points from leftover that are safe
	for _, point := range leftover.Points {
		if !shouldYieldComparator(point.Timestamp) {
			break
		}
		result.Points = append(result.Points, point)
	}
	// if they all got added, clear out the leftover
	if len(leftover.Points) == len(result.Points) {
		leftover.Points = make([]*protocol.Point, 0)
	}

	if barrierTime == BARRIER_TIME_MIN || barrierTime == BARRIER_TIME_MAX {
		// all the nextPointTimes were nil so we're safe to send everything
		for _, response := range responses {
			// if this is the case we know that all responses contained the
			// same fields, so just append the points
			if len(response.Series.Fields) == fieldCount {
				result.Points = append(result.Points, response.Series.Points...)
			} else {
				log.Debug("Responses from servers had different numbers of fields.")
				for _, p := range response.Series.Points {
					self.normalizePointAndAppend(fieldIndexes, result, response.Series.Fields, p)
				}
			}
		}
		if len(leftover.Fields) == fieldCount {
			result.Points = append(result.Points, leftover.Points...)
			leftover.Points = []*protocol.Point{}
		} else {
			log.Debug("Responses from servers had different numbers of fields.")
			for _, p := range leftover.Points {
				self.normalizePointAndAppend(fieldIndexes, result, leftover.Fields, p)
			}
		}
	} else {
		for _, response := range responses {
			if shouldYieldComparator(response.NextPointTime) {
				// all points are safe to yield
				if fieldCount == len(response.Series.Fields) {
					result.Points = append(result.Points, response.Series.Points...)
				} else {
					log.Debug("Responses from servers had different numbers of fields.")
					for _, p := range response.Series.Points {
						self.normalizePointAndAppend(fieldIndexes, result, response.Series.Fields, p)
					}
				}
				continue
			}
			if fieldCount == len(response.Series.Fields) {
				for i, point := range response.Series.Points {
					if shouldYieldComparator(point.Timestamp) {
						result.Points = append(result.Points, point)
					} else {
						// since they're returned in order, we can just append
						// the rest to the leftover and break out
						leftover.Points = append(leftover.Points, response.Series.Points[i:]...)
						break
					}
				}
			} else {
				for i, point := range response.Series.Points {
					if shouldYieldComparator(point.Timestamp) {
						self.normalizePointAndAppend(fieldIndexes, result, response.Series.Fields, point)
					} else {
						// since they're returned in order, we can just append
						// the rest to the leftover and break out
						for _, point := range response.Series.Points[i:] {
							self.normalizePointAndAppend(fieldIndexes, leftover, response.Series.Fields, point)
						}
						break
					}
				}
			}
		}
	}

	if isAscending {
		result.SortPointsTimeAscending()
		leftover.SortPointsTimeAscending()
	} else {
		result.SortPointsTimeDescending()
		leftover.SortPointsTimeDescending()
	}

	// don't yield an empty points array, the engine will think it's the end
	// of the stream. streamResultsFromChannels will send the empty ones after
	// all channels have returned.
	if len(result.Points) > 0 {
		yield(result)
	}
	if len(leftover.Points) > 0 {
		return leftover
	}
	return nil
}
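// Barrier-time selection, isolated from the function above: for ascending
// queries the barrier starts at the maximum and is pulled down to the
// smallest known next-point time; points strictly before it are safe to
// yield. math.MaxInt64/MinInt64 stand in for BARRIER_TIME_MAX/MIN here.
package main

import (
	"fmt"
	"math"
)

func barrier(nextPointTimes []*int64, ascending bool) int64 {
	b := int64(math.MinInt64)
	cmp := func(t int64) bool { return t > b }
	if ascending {
		b = int64(math.MaxInt64)
		cmp = func(t int64) bool { return t < b }
	}
	for _, t := range nextPointTimes {
		// nil means that server has no held-back point and imposes no bound
		if t != nil && cmp(*t) {
			b = *t
		}
	}
	return b
}

func main() {
	a, c := int64(10), int64(7)
	// prints 7: the smallest next-point time bounds what can be yielded
	fmt.Println(barrier([]*int64{&a, nil, &c}, true))
}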
func (self *CoordinatorImpl) InterpolateValuesAndCommit(query string, db string, series *protocol.Series, targetName string, assignSequenceNumbers bool) error {
	defer common.RecoverFunc(db, query, nil)

	targetName = strings.Replace(targetName, ":series_name", *series.Name, -1)
	type sequenceKey struct {
		seriesName string
		timestamp  int64
	}
	sequenceMap := make(map[sequenceKey]int)
	r := regexp.MustCompile(`\[.*?\]`)

	// get the fields that are used in the target name
	fieldsInTargetName := r.FindAllString(targetName, -1)
	fieldIndices := make([]int, 0, len(fieldsInTargetName))
	for i, f := range fieldsInTargetName {
		f = f[1 : len(f)-1]
		fieldIndices = append(fieldIndices, series.GetFieldIndex(f))
		fieldsInTargetName[i] = f
	}

	// remove the fields used in the target name from the series fields
	fields := make([]string, 0, len(series.Fields)-len(fieldIndices))
nextfield:
	for i, f := range series.Fields {
		for _, fi := range fieldIndices {
			if fi == i {
				continue nextfield
			}
		}
		fields = append(fields, f)
	}

	if r.MatchString(targetName) {
		serieses := map[string]*protocol.Series{}
		for _, point := range series.Points {
			fieldIndex := 0
			targetNameWithValues := r.ReplaceAllStringFunc(targetName, func(_ string) string {
				value := point.GetFieldValueAsString(fieldIndices[fieldIndex])
				fieldIndex++
				return value
			})

			p := &protocol.Point{
				Values:         make([]*protocol.FieldValue, 0, len(point.Values)-len(fieldIndices)),
				Timestamp:      point.Timestamp,
				SequenceNumber: point.SequenceNumber,
			}
			// remove the values of the fields used in the target name
		nextvalue:
			for i, v := range point.Values {
				for _, fi := range fieldIndices {
					if fi == i {
						continue nextvalue
					}
				}
				p.Values = append(p.Values, v)
			}

			if assignSequenceNumbers {
				key := sequenceKey{targetNameWithValues, *p.Timestamp}
				sequenceMap[key] += 1
				sequenceNumber := uint64(sequenceMap[key])
				p.SequenceNumber = &sequenceNumber
			}

			newSeries := serieses[targetNameWithValues]
			if newSeries == nil {
				newSeries = &protocol.Series{Name: &targetNameWithValues, Fields: fields, Points: []*protocol.Point{p}}
				serieses[targetNameWithValues] = newSeries
				continue
			}
			newSeries.Points = append(newSeries.Points, p)
		}

		seriesSlice := make([]*protocol.Series, 0, len(serieses))
		for _, s := range serieses {
			seriesSlice = append(seriesSlice, s)
		}
		if e := self.CommitSeriesData(db, seriesSlice, true); e != nil {
			log.Error("Couldn't write data for continuous query: ", e)
		}
	} else {
		newSeries := &protocol.Series{Name: &targetName, Fields: fields, Points: series.Points}

		if assignSequenceNumbers {
			for _, point := range newSeries.Points {
				key := sequenceKey{targetName, *point.Timestamp}
				sequenceMap[key] += 1
				sequenceNumber := uint64(sequenceMap[key])
				point.SequenceNumber = &sequenceNumber
			}
		}

		if e := self.CommitSeriesData(db, []*protocol.Series{newSeries}, true); e != nil {
			log.Error("Couldn't write data for continuous query: ", e)
		}
	}

	return nil
}
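// How the :series_name and [field] placeholders in a continuous-query target
// name expand, modeled standalone with the same regexp. The target template,
// series name, and field values are hypothetical sample data.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// ":series_name" is substituted first, then each [field] placeholder is
	// replaced by that point's value for the named field
	target := strings.Replace("cq.:series_name.[host]", ":series_name", "cpu", -1)
	r := regexp.MustCompile(`\[.*?\]`)
	values := map[string]string{"host": "server1"}
	expanded := r.ReplaceAllStringFunc(target, func(match string) string {
		return values[match[1:len(match)-1]]
	})
	fmt.Println(expanded) // cq.cpu.server1
}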