Example #1
File: bs2.go Project: bradclawsie/code
func main() {
	v := make([]T, 100)
	for i := 0; i < L; i++ {
		a := i * 10
		b := a + 9
		v[i] = T{L: a, H: b}
	}

	// mix them up
	for i := range v {
		j := rand.Intn(i + 1)
		v[i], v[j] = v[j], v[i]
	}

	fmt.Printf("\n%v\n", v)

	// sort them
	sort.Sort(TS(v))

	fmt.Printf("\n%v\n", v)

	// search
	k := sort.Search(len(v), func(i int) bool { return v[i].H >= 321 })

	fmt.Printf("%d: %v\n", k, v[k])

	k = sort.Search(len(v), func(i int) bool { return v[i].H >= 118 })

	fmt.Printf("%d: %v\n", k, v[k])

	// Make sure the value you search for isn't greater than the last element:
	// if nothing satisfies the predicate, Search returns len(v) and indexing
	// v[k] panics. (If it's smaller than the first element, k is 0 but v[k]
	// won't actually match.)
}
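A minimal sketch (assuming the same T struct and v slice as in the example above) of the bounds check that closing comment refers to: sort.Search returns len(v) when no element satisfies the predicate, so guard the index before using it.

	target := 2000 // hypothetical value larger than any H in v
	idx := sort.Search(len(v), func(i int) bool { return v[i].H >= target })
	if idx < len(v) {
		fmt.Printf("%d: %v\n", idx, v[idx])
	} else {
		fmt.Println("no element with H >=", target)
	}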
Example #2
func (pisearch *Pisearch) idxsearch(start int, searchkey []byte) (found bool, position int) {
	i := sort.Search(pisearch.numDigits, func(i int) bool {
		return pisearch.compare(pisearch.idxAt(i), searchkey) >= 0
	})
	j := i + sort.Search(pisearch.numDigits-i, func(j int) bool {
		return pisearch.compare(pisearch.idxAt(j+i), searchkey) != 0
	})
	//fmt.Println("Compare got i: ", i, "j", j)
	//fmt.Println("Digits there: ", pisearch.GetDigits(pisearch.idxAt(i), len(searchkey)))

	nMatches := (j - i)
	var positions []int
	for ; i < j; i++ {
		positions = append(positions, pisearch.idxAt(i))
	}
	if nMatches > 1 {
		sort.Ints(positions)
	}

	for i := 0; i < nMatches; i++ {
		if positions[i] >= start {
			return true, positions[i]
		}
	}
	return false, 0
}
Example #3
func (self *ClusterConfiguration) getShardRange(querySpec QuerySpec, shards []*ShardData) []*ShardData {
	if querySpec.AllShardsQuery() {
		return shards
	}

	startTime := common.TimeToMicroseconds(querySpec.GetStartTime())
	endTime := common.TimeToMicroseconds(querySpec.GetEndTime())

	// The shards are always in descending order. If we have the following shards
	// [t + 20, t + 30], [t + 10, t + 20], [t, t + 10]
	// and we are querying [t + 5, t + 15], we have to find the first shard whose
	// startMicro is less than the end time of the query,
	// which is the second shard [t + 10, t + 20]. Starting from this shard we
	// then search for the first shard whose endMicro is at or before the start
	// time of the query; here there is no such entry (sort.Search returns the
	// length of the slice in that case), so we return [t + 10, t + 20], [t, t + 10]
	// as expected.

	startIndex := sort.Search(len(shards), func(n int) bool {
		return shards[n].startMicro < endTime
	})

	if startIndex == len(shards) {
		return nil
	}

	endIndex := sort.Search(len(shards)-startIndex, func(n int) bool {
		return shards[n+startIndex].endMicro <= startTime
	})

	return shards[startIndex : endIndex+startIndex]
}
Example #4
// preloadChunksForRange loads chunks for the given range from the persistence.
// The caller must have locked the fingerprint of the series.
func (s *memorySeries) preloadChunksForRange(
	fp model.Fingerprint,
	from model.Time, through model.Time,
	mss *MemorySeriesStorage,
) (SeriesIterator, error) {
	firstChunkDescTime := model.Latest
	if len(s.chunkDescs) > 0 {
		firstChunkDescTime = s.chunkDescs[0].FirstTime()
	}
	if s.chunkDescsOffset != 0 && from.Before(firstChunkDescTime) {
		cds, err := mss.loadChunkDescs(fp, s.persistWatermark)
		if err != nil {
			return nopIter, err
		}
		s.chunkDescs = append(cds, s.chunkDescs...)
		s.chunkDescsOffset = 0
		s.persistWatermark += len(cds)
		firstChunkDescTime = s.chunkDescs[0].FirstTime()
	}

	if len(s.chunkDescs) == 0 || through.Before(firstChunkDescTime) {
		return nopIter, nil
	}

	// Find first chunk with start time after "from".
	fromIdx := sort.Search(len(s.chunkDescs), func(i int) bool {
		return s.chunkDescs[i].FirstTime().After(from)
	})
	// Find first chunk with start time after "through".
	throughIdx := sort.Search(len(s.chunkDescs), func(i int) bool {
		return s.chunkDescs[i].FirstTime().After(through)
	})
	if fromIdx == len(s.chunkDescs) {
		// Even the last chunk starts before "from". Find out if the
		// series ends before "from" and we don't need to do anything.
		lt, err := s.chunkDescs[len(s.chunkDescs)-1].LastTime()
		if err != nil {
			return nopIter, err
		}
		if lt.Before(from) {
			return nopIter, nil
		}
	}
	if fromIdx > 0 {
		fromIdx--
	}
	if throughIdx == len(s.chunkDescs) {
		throughIdx--
	}
	if fromIdx > throughIdx {
		// Guard against nonsensical result. The caller will quarantine the series with a meaningful log entry.
		return nopIter, fmt.Errorf("fromIdx=%d is greater than throughIdx=%d, likely caused by data corruption", fromIdx, throughIdx)
	}

	pinIndexes := make([]int, 0, throughIdx-fromIdx+1)
	for i := fromIdx; i <= throughIdx; i++ {
		pinIndexes = append(pinIndexes, i)
	}
	return s.preloadChunks(pinIndexes, fp, mss)
}
Example #5
func (g *getValuesAlongRangeOp) ExtractSamples(in metric.Values) (out metric.Values) {
	if len(in) == 0 {
		return
	}
	// Find the first sample where time >= g.current.
	firstIdx := sort.Search(len(in), func(i int) bool {
		return !in[i].Timestamp.Before(g.current)
	})
	if firstIdx == len(in) {
		// No samples at or after operator start time. This can only
		// happen if we try applying the operator to a time after the
		// last recorded sample. In this case, we're finished.
		g.current = g.through.Add(clientmodel.MinimumTick)
		return
	}

	// Find the first sample where time > g.through.
	lastIdx := sort.Search(len(in), func(i int) bool {
		return in[i].Timestamp.After(g.through)
	})
	if lastIdx == firstIdx {
		g.current = g.through.Add(clientmodel.MinimumTick)
		return
	}

	lastSampleTime := in[lastIdx-1].Timestamp
	// Sample times are stored with a maximum time resolution of one second,
	// so we have to add exactly that to target the next chunk on the next
	// op iteration.
	g.current = lastSampleTime.Add(time.Second)
	return in[firstIdx:lastIdx]
}
Example #6
File: sort.go Project: rniggebrugge/go
func main() {
	files := []string{"Remco", "clara", "Willem", "pieter"}
	target := "Clara"
	fmt.Printf("Looking for %s in %q\n", target, files)
	sort.Strings(files)

	compareSimple := func(i int) bool { return files[i] >= target }

	i := sort.Search(len(files), compareSimple)
	if i < len(files) && files[i] == target {
		fmt.Printf("Found \"%s\" at files[%d]\n", files[i], i)
	} else {
		fmt.Printf("Did not find \"%s\".\n", target)
	}

	SortFoldedStrings(files)
	fmt.Printf("Looking for %s in %q\n", target, files)
	betterCompare := func(i int) bool {
		return strings.ToLower(files[i]) >= strings.ToLower(target)
	}
	i = sort.Search(len(files), betterCompare)
	if i < len(files) && strings.EqualFold(files[i], target) {
		fmt.Printf("Found \"%s\" at files[%d]\n", files[i], i)
	} else {
		fmt.Printf("Did not find \"%s\".\n", target)
	}
}
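SortFoldedStrings is not defined in this snippet. A minimal sketch of what it might look like (an assumption, not the original project's code), ordering the slice case-insensitively so the ToLower-based predicate above searches a consistently sorted slice:

func SortFoldedStrings(s []string) {
	// Hypothetical implementation: sort case-insensitively.
	sort.Slice(s, func(i, j int) bool {
		return strings.ToLower(s[i]) < strings.ToLower(s[j])
	})
}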
Example #7
File: coals.go Project: mingzhi/hgt_old
func Split(a Assembly, begin, end int) (b, c Assembly) {
	sort.Sort(a)
	// find the first fragment whose End is at or after begin, and the first
	// fragment whose Begin is after end
	idxL := sort.Search(len(a), func(i int) bool { return a[i].End >= begin })
	idxR := sort.Search(len(a), func(i int) bool { return a[i].Begin > end })
	for i := range a {
		if i >= idxL && i < idxR {
			if begin <= a[i].Begin {
				begin = a[i].Begin
			} else {
				b = append(b, Fragment{Begin: a[i].Begin, End: begin - 1})
			}
			if end >= a[i].End {
				c = append(c, Fragment{Begin: begin, End: a[i].End})
			} else {
				c = append(c, Fragment{Begin: begin, End: end})
				b = append(b, Fragment{Begin: end + 1, End: a[i].End})
			}
		} else {
			b = append(b, a[i])
		}
	}

	return
}
Example #8
// Returns a cursor to |key| in |ms|, plus the leaf + index that |key| is in. |t| is the type of the ordered values.
func findLeafInOrderedSequence(ms metaSequence, t Type, key Value, getValues getLeafOrderedValuesFn, vr ValueReader) (cursor *sequenceCursor, leaf Value, idx int) {
	cursor, leaf = newMetaSequenceCursor(ms, vr)

	if isSequenceOrderedByIndexedType(t) {
		orderedKey := key.(OrderedValue)

		cursor.seekBinary(func(mt sequenceItem) bool {
			return !mt.(metaTuple).value.(OrderedValue).Less(orderedKey)
		})
	} else {
		cursor.seekBinary(func(mt sequenceItem) bool {
			return !mt.(metaTuple).value.(Ref).TargetRef().Less(key.Ref())
		})
	}

	if current := cursor.current().(metaTuple); current.ChildRef().TargetRef() != valueFromType(leaf, leaf.Type()).Ref() {
		leaf = readMetaTupleValue(current, vr)
	}

	if leafData := getValues(leaf); isSequenceOrderedByIndexedType(t) {
		orderedKey := key.(OrderedValue)

		idx = sort.Search(len(leafData), func(i int) bool {
			return !leafData[i].(OrderedValue).Less(orderedKey)
		})
	} else {
		idx = sort.Search(len(leafData), func(i int) bool {
			return !leafData[i].Ref().Less(key.Ref())
		})
	}

	return
}
Example #9
// splitRangeByPrefixes returns a list of key ranges with
// corresponding configs. The split is done using matching prefix
// config entries. For example, consider the following set of configs
// and prefixes:
//
//   /:    config1
//   /db1: config2
//
// A range containing keys from /0 - /db3 will map to
// the following split ranges and corresponding configs:
//
//   /0   - /db1: config1
//   /db1 - /db2: config2
//   /db2 - /db3: config1
//
// After calling prefixConfigMap.build(), our prefixes will look
// like:
//
//   /:    config1
//   /db1: config2
//   /db2: config1
//
// The algorithm is straightforward for splitting a range by existing
// prefixes. Lookup start key; that is first config. Lookup end key:
// that is last config. We then step through the intervening
// prefixConfig records and create a rangeResult for each.
func (p *prefixConfigMap) splitRangeByPrefixes(start, end Key) ([]*rangeResult, error) {
	if bytes.Compare(start, end) >= 0 {
		return nil, util.Errorf("start key %q not less than end key %q", start, end)
	}
	startIdx := sort.Search(len(p.configs), func(i int) bool {
		return bytes.Compare(start, p.configs[i].prefix) < 0
	})
	endIdx := sort.Search(len(p.configs), func(i int) bool {
		return bytes.Compare(end, p.configs[i].prefix) < 0
	})

	if startIdx >= len(p.configs) || endIdx > len(p.configs) {
		return nil, util.Errorf("start and/or end keys (%q, %q) fall outside prefix range; "+
			"was default prefix not added?", start, end)
	}

	// Create the first range result which goes from start -> end and
	// uses the config specified for the start key.
	var results []*rangeResult
	result := &rangeResult{start: start, end: end, config: p.configs[startIdx-1].config}
	results = append(results, result)

	// Now, cycle through from startIdx to endIdx, adding a new
	// rangeResult at each step.
	for i := startIdx; i < endIdx; i++ {
		result.end = p.configs[i].prefix
		if bytes.Compare(result.end, end) == 0 {
			break
		}
		result = &rangeResult{start: result.end, end: end, config: p.configs[i].config}
		results = append(results, result)
	}

	return results, nil
}
Example #10
File: list.go Project: dgraph-io/dgraph
// Length iterates over the mutation layer and counts number of elements.
func (l *List) Length(afterUid uint64) int {
	l.RLock()
	defer l.RUnlock()

	pidx, midx := 0, 0
	pl := l.getPostingList(0)

	if afterUid > 0 {
		pidx = sort.Search(len(pl.Postings), func(idx int) bool {
			p := pl.Postings[idx]
			return afterUid < p.Uid
		})
		midx = sort.Search(len(l.mlayer), func(idx int) bool {
			mp := l.mlayer[idx]
			return afterUid < mp.Uid
		})
	}

	count := len(pl.Postings) - pidx
	for _, p := range l.mlayer[midx:] {
		if p.Op == Add {
			count++
		} else if p.Op == Del {
			count--
		}
	}
	return count
}
Example #11
// lookup returns the smallest index of an entry with an exact match
// for name, or an inexact match starting with name/. If there is no
// such entry, the result is -1, false.
func (z zipList) lookup(name string) (index int, exact bool) {
	// look for exact match first (name comes before name/ in z)
	i := sort.Search(len(z), func(i int) bool {
		return name <= z[i].Name
	})
	if i >= len(z) {
		return -1, false
	}
	// 0 <= i < len(z)
	if z[i].Name == name {
		return i, true
	}

	// look for inexact match (must be in z[i:], if present)
	z = z[i:]
	name += "/"
	j := sort.Search(len(z), func(i int) bool {
		return name <= z[i].Name
	})
	if j >= len(z) {
		return -1, false
	}
	// 0 <= j < len(z)
	if strings.HasPrefix(z[j].Name, name) {
		return i + j, false
	}

	return -1, false
}
Example #12
func (w *MockWriter) DeleteColumns(cf string, key []byte, columns [][]byte) Writer {
	rows := w.pool.Rows(cf)

	t := now()

	i := sort.Search(len(rows), func(i int) bool { return bytes.Compare(rows[i].Key, key) >= 0 })
	if i < len(rows) && bytes.Equal(rows[i].Key, key) {
		// Row exists, delete the columns
		e := rows[i]
		cols := e.Columns
		for _, c := range columns {
			j := sort.Search(len(cols), func(j int) bool { return bytes.Compare(cols[j].Name, c) >= 0 })
			if j < len(cols) && bytes.Equal(cols[j].Name, c) {
				if t >= cols[j].Timestamp {
					// TODO store tombstone?
					copy(cols[j:], cols[j+1:])
					cols[len(cols)-1] = nil
					cols = cols[:len(cols)-1]
				}
			}
		}
		e.Columns = cols
	}

	return w
}
Example #13
// Add inserts an element into the buffer in timestamp order, evicting entries
// that are too old or over the maximum count if necessary.
func (self *TimedStore) Add(timestamp time.Time, item interface{}) {
	data := timedStoreData{
		timestamp: timestamp,
		data:      item,
	}
	// Common case: data is added in order.
	if len(self.buffer) == 0 || !timestamp.Before(self.buffer[len(self.buffer)-1].timestamp) {
		self.buffer = append(self.buffer, data)
	} else {
		// Data is out of order; insert it in the correct position.
		index := sort.Search(len(self.buffer), func(index int) bool {
			return self.buffer[index].timestamp.After(timestamp)
		})
		self.buffer = append(self.buffer, timedStoreData{}) // Make room to shift the elements
		copy(self.buffer[index+1:], self.buffer[index:])    // Shift the elements over
		self.buffer[index] = data
	}

	// Remove any elements before eviction time.
	// TODO(rjnagal): This is assuming that the added entry has timestamp close to now.
	evictTime := timestamp.Add(-self.age)
	index := sort.Search(len(self.buffer), func(index int) bool {
		return self.buffer[index].timestamp.After(evictTime)
	})
	if index < len(self.buffer) {
		self.buffer = self.buffer[index:]
	}

	// Remove any elements if over our max size.
	if self.maxItems >= 0 && len(self.buffer) > self.maxItems {
		startIndex := len(self.buffer) - self.maxItems
		self.buffer = self.buffer[startIndex:]
	}
}
Example #14
File: stock.go Project: jhurwich/Trendy
func (s *Stock) ActualRange(startDate time.Time, endDate time.Time, overrideUrl string) (Span, error) {
	// Check if data is memoized in s.Span, if so return that subslice.
	if s.Span.Covers(startDate) && s.Span.Covers(endDate) {
		// Find the first entry after startDate in Span. The smallest range that
		// includes startDate begins at the index just before that entry.
		start := sort.Search(len(s.Span), func(i int) bool { return s.Span[i].Time.After(startDate) }) - 1
		end := sort.Search(len(s.Span), func(i int) bool { return s.Span[i].Time.After(endDate) })
		return s.Span[start:end], nil
	}

	// all or part of the data is missing from what is memoized, check the database
	dbSpan, err := DB.GetRange(s, startDate, endDate)
	if err != nil {
		return nil, err
	}

	if len(dbSpan) > 0 {
		// information was stored in the database, return it
		return dbSpan, nil
	} else {
		// data wasn't in database, populate it
		newSpan, err := s.ActualPopulate(startDate, endDate, overrideUrl)
		if err != nil {
			return nil, err
		}
		return newSpan, nil
	}
}
Example #15
// Returns the smallest index of an entry with an exact match for "name",
// or an inexact match starting with "name/". If there is no such entry,
// returns (-1, false).
func (zl zipList) Lookup(name string) (idx int, exact bool) {
	// Look for exact match.
	// "name" comes before "name/" in zl.
	i := sort.Search(len(zl), func(i int) bool {
		return name <= zl[i].f.Name
	})

	if i >= len(zl) {
		return -1, false
	}

	if zl[i].f.Name == name {
		return i, true
	}

	// Look for inexact match in zl[i:].
	zl = zl[i:]
	name += "/"
	j := sort.Search(len(zl), func(i int) bool {
		return name <= zl[i].f.Name
	})

	if j >= len(zl) {
		return -1, false
	}

	// 0 <= j < len(zl)
	if strings.HasPrefix(zl[j].f.Name, name) {
		return i + j, false
	}

	return -1, false
}
Example #16
func (g *getValuesAlongRangeOp) ExtractSamples(in []model.SamplePair) (out []model.SamplePair) {
	if len(in) == 0 {
		return
	}
	// Find the first sample where time >= g.from.
	firstIdx := sort.Search(len(in), func(i int) bool {
		return !in[i].Timestamp.Before(g.from)
	})
	if firstIdx == len(in) {
		// No samples at or after operator start time. This can only happen if we
		// try applying the operator to a time after the last recorded sample. In
		// this case, we're finished.
		g.from = g.through.Add(1)
		return
	}

	// Find the first sample where time > g.through.
	lastIdx := sort.Search(len(in), func(i int) bool {
		return in[i].Timestamp.After(g.through)
	})
	if lastIdx == firstIdx {
		g.from = g.through.Add(1)
		return
	}

	lastSampleTime := in[lastIdx-1].Timestamp
	// Sample times are stored with a maximum time resolution of one second, so
	// we have to add exactly that to target the next chunk on the next op
	// iteration.
	g.from = lastSampleTime.Add(time.Second)
	return in[firstIdx:lastIdx]
}
Example #17
// BlameHunks returns BlamedHunk structs corresponding to hunks, using the
// commit data in commits. Hunks are only included if their range overlaps with
// the character range specified by charStart..charEnd.
//
// Precondition: hunks should be sorted.
func BlameHunks(hunks []Hunk, commits map[string]Commit, charStart, charEnd int) ([]BlamedHunk, error) {
	startHunkIdx := sort.Search(len(hunks), func(i int) bool {
		return charStart >= 0 && charStart < hunks[i].CharEnd
	})
	endHunkIdx := sort.Search(len(hunks), func(i int) bool {
		return charEnd >= 0 && charEnd <= hunks[i].CharEnd
	})

	if startHunkIdx == len(hunks) {
		return nil, fmt.Errorf("Could not find start hunk including index %d", charStart)
	}
	if endHunkIdx == len(hunks) {
		return nil, fmt.Errorf("Could not find end hunk including index %d", charEnd)
	}

	var blamedHunks []BlamedHunk
	for i := startHunkIdx; i <= endHunkIdx; i++ {
		commit, in := commits[hunks[i].CommitID]
		if !in {
			return nil, fmt.Errorf("Commit %s not found", hunks[i].CommitID)
		}

		blamedHunks = append(blamedHunks, BlamedHunk{&hunks[i], &commit})
	}
	return blamedHunks, nil
}
Example #18
// Returns up to maxResults elements in the specified time period (inclusive).
// Results are from first to last. maxResults of -1 means no limit. When both
// start and end are specified, maxResults is ignored.
func (self *TimedStore) InTimeRange(start, end time.Time, maxResults int) []interface{} {
	// No stats, return empty.
	if len(self.buffer) == 0 {
		return []interface{}{}
	}

	// Return all results in a time range if specified.
	if !start.IsZero() && !end.IsZero() {
		maxResults = -1
	}

	var startIndex int
	if start.IsZero() {
		// None specified, start at the beginning.
		startIndex = len(self.buffer) - 1
	} else {
		// startIndex is the index just before the elements older than start. We
		// find it by locating the first element with a timestamp before start
		// and taking the index before that element.
		startIndex = sort.Search(len(self.buffer), func(index int) bool {
			// buffer[index] < start
			return self.getData(index).timestamp.Before(start)
		}) - 1
		// Check if start is after all the data we have.
		if startIndex < 0 {
			return []interface{}{}
		}
	}

	var endIndex int
	if end.IsZero() {
		// None specified, end with the latest stats.
		endIndex = 0
	} else {
		// End is the first index smaller than or equal to it (so, not larger).
		endIndex = sort.Search(len(self.buffer), func(index int) bool {
			// buffer[index] <= t -> !(buffer[index] > t)
			return !self.getData(index).timestamp.After(end)
		})
		// Check if end is before all the data we have.
		if endIndex == len(self.buffer) {
			return []interface{}{}
		}
	}

	// Trim to maxResults size.
	numResults := startIndex - endIndex + 1
	if maxResults != -1 && numResults > maxResults {
		startIndex -= numResults - maxResults
		numResults = maxResults
	}

	// Return in sorted timestamp order, i.e. from the "back" to the "front".
	result := make([]interface{}, numResults)
	for i := 0; i < numResults; i++ {
		result[i] = self.Get(startIndex - i)
	}
	return result
}
Example #19
// lookupAll returns a slice into the matching region of the index.
// The runtime is O(log(N)*len(s)).
func (x *Index) lookupAll(s []byte) []int {
	// find matching suffix index range [i:j]
	// find the first index where s would be the prefix
	i := sort.Search(len(x.sa), func(i int) bool { return bytes.Compare(x.at(i), s) >= 0 })
	// starting at i, find the first index at which s is not a prefix
	j := i + sort.Search(len(x.sa)-i, func(j int) bool { return !bytes.HasPrefix(x.at(j+i), s) })
	return x.sa[i:j]
}
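The same two-call idiom works for any sorted slice: the first Search finds the start of the matching run, the second (offset by i) finds one past its end. A self-contained sketch with hypothetical values:

package main

import (
	"fmt"
	"sort"
)

func main() {
	a := []int{1, 3, 3, 3, 5, 8} // sorted input
	target := 3
	i := sort.Search(len(a), func(i int) bool { return a[i] >= target })
	j := i + sort.Search(len(a)-i, func(j int) bool { return a[i+j] != target })
	fmt.Println(a[i:j]) // prints [3 3 3]; i == j would mean no match
}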
Example #20
func (w *MockWriter) InsertTtl(cf string, row *Row, ttl int) Writer {
	rows := w.pool.Rows(cf)

	t := thrift.Int64Ptr(now())
	for _, c := range row.Columns {
		if c.Timestamp == nil {
			c.Timestamp = t
		}
		if ttl > 0 {
			c.Ttl = thrift.Int32Ptr(int32(ttl))
		}
		if c.Ttl != nil {
			// reset to the actual time to expire
			c.Ttl = thrift.Int32Ptr(int32(now()/1e6) + *c.Ttl)
		}
	}

	i := sort.Search(len(rows), func(i int) bool { return bytes.Compare(rows[i].Key, row.Key) >= 0 })
	if i < len(rows) && bytes.Equal(rows[i].Key, row.Key) {
		// Row already exists, merge the columns
		e := rows[i]
		checkExpired(e)
		cols := e.Columns
		for _, c := range row.Columns {
			j := sort.Search(len(cols), func(j int) bool { return bytes.Compare(cols[j].Name, c.Name) >= 0 })
			if j < len(cols) && bytes.Equal(cols[j].Name, c.Name) {
				// Column already exists, pick the one with the greater timestamp
				ec := cols[j]
				et := *t
				if ec != nil {
					et = *ec.Timestamp
				}
				if *c.Timestamp >= et {
					ec.Value = c.Value
					ec.Ttl = c.Ttl
					ec.Timestamp = c.Timestamp
				}
			} else {
				// New column, insert sorted
				cols = append(cols, c)
				copy(cols[j+1:], cols[j:])
				cols[j] = c
			}
		}
		e.Columns = cols
	} else {
		// New row, insert sorted
		sort.Sort(Columns(row.Columns))
		rows = append(rows, row)
		copy(rows[i+1:], rows[i:])
		rows[i] = row

		w.pool.Data[cf] = rows
	}

	return w
}
Example #21
func (p *Pisearch) idxrange(searchkey []byte) (start, end int) {
	start = sort.Search(p.numDigits, func(i int) bool {
		return p.compare(p.idxAt(i), searchkey) >= 0
	})
	end = start + sort.Search(p.numDigits-start, func(j int) bool {
		return p.compare(p.idxAt(j+start), searchkey) != 0
	})
	return
}
Example #22
func (p *page) search(key string, stack *[]pagestack, bt *btree) (bool, uint64, int, error) {

	if p.pgtype == tleaf {
		if p.count == 0 {
			*stack = append(*stack, pagestack{pageid: p.curid, index: 0})
			return false, 0, 0, nil
		}

		// search through the elements
		elements := p.getElements()
		c := func(i int) bool {
			// ee,_:=strconv.Atoi(elements[i].key())
			//kk,_:=strconv.Atoi(key)
			//return ee<=kk//elements[i].key() <= key
			return elements[i].key() <= key
		}
		idx := sort.Search(int(p.count), c)
		if idx < int(p.count) {
			if elements[idx].key() == key {
				//fmt.Printf("found : %v %v\n",key,elements[idx].value)
				*stack = append(*stack, pagestack{pageid: p.curid, index: idx})
				return true, elements[idx].value, idx, nil
			}
			*stack = append(*stack, pagestack{pageid: p.curid, index: idx})
			return false, elements[idx].value, idx, nil
		}

		*stack = append(*stack, pagestack{pageid: p.curid, index: 0})
		return false, 0, 0, nil //errors.New("found error")
	} else if p.pgtype == tinterior {
		if p.count == 0 {
			*stack = append(*stack, pagestack{pageid: p.curid, index: 0})
			return false, 0, -1, errors.New("ERROR")
		}

		// search through the elements
		elements := p.getElements()
		c := func(i int) bool {
			//ee,_:=strconv.Atoi(elements[i].key())
			//kk,_:=strconv.Atoi(key)
			return elements[i].key() <= key
		}
		idx := sort.Search(int(p.count), c)
		if idx < int(p.count) {
			*stack = append(*stack, pagestack{pageid: p.curid, index: idx})
			sub := bt.getpage(uint32(elements[idx].value))
			return sub.search(key, stack, bt)
		}

		// not found; it needs to be added
		*stack = append(*stack, pagestack{pageid: p.curid, index: -1})
		return false, 0, -1, errors.New("found error")
	}
	fmt.Printf("[ERROR]==>SEARCH :: b+tree error \n")
	return false, 0, -1, errors.New("ERROR")

}
Example #23
// Returns up to maxResults elements in the specified time period (inclusive).
// Results are from first to last. maxResults of -1 means no limit.
func (self *StatsBuffer) InTimeRange(start, end time.Time, maxResults int) []*info.ContainerStats {
	// No stats, return empty.
	if self.size == 0 {
		return []*info.ContainerStats{}
	}

	// NOTE: Since we store the elements in descending timestamp order, "start"
	// will be a higher index than "end".

	var startIndex int
	if start.IsZero() {
		// None specified, start at the beginning.
		startIndex = self.size - 1
	} else {
		// startIndex is the index just before the elements older than start. We
		// find it by locating the first element with a timestamp before start
		// and taking the index before that element.
		startIndex = sort.Search(self.size, func(index int) bool {
			// buffer[index] < start
			return self.Get(index).Timestamp.Before(start)
		}) - 1
		// Check if start is after all the data we have.
		if startIndex < 0 {
			return []*info.ContainerStats{}
		}
	}

	var endIndex int
	if end.IsZero() {
		// None specified, end with the latest stats.
		endIndex = 0
	} else {
		// End is the first index smaller than or equal to it (so, not larger).
		endIndex = sort.Search(self.size, func(index int) bool {
			// buffer[index] <= t -> !(buffer[index] > t)
			return !self.Get(index).Timestamp.After(end)
		})
		// Check if end is before all the data we have.
		if endIndex == self.size {
			return []*info.ContainerStats{}
		}
	}

	// Trim to maxResults size.
	numResults := startIndex - endIndex + 1
	if maxResults != -1 && numResults > maxResults {
		startIndex -= numResults - maxResults
		numResults = maxResults
	}

	// Return in sorted timestamp order, i.e. from the "back" to the "front".
	result := make([]*info.ContainerStats, numResults)
	for i := 0; i < numResults; i++ {
		result[i] = self.Get(startIndex - i)
	}
	return result
}
Example #24
// preloadChunksForRange loads chunks for the given range from the persistence.
// The caller must have locked the fingerprint of the series.
func (s *memorySeries) preloadChunksForRange(
	fp model.Fingerprint,
	from model.Time, through model.Time,
	mss *MemorySeriesStorage,
) (SeriesIterator, error) {
	firstChunkDescTime := model.Latest
	if len(s.chunkDescs) > 0 {
		firstChunkDescTime = s.chunkDescs[0].FirstTime()
	}
	if s.chunkDescsOffset != 0 && from.Before(firstChunkDescTime) {
		cds, err := mss.loadChunkDescs(fp, s.persistWatermark)
		if err != nil {
			return nopIter, err
		}
		s.chunkDescs = append(cds, s.chunkDescs...)
		s.chunkDescsOffset = 0
		s.persistWatermark += len(cds)
		firstChunkDescTime = s.chunkDescs[0].FirstTime()
	}

	if len(s.chunkDescs) == 0 || through.Before(firstChunkDescTime) {
		return nopIter, nil
	}

	// Find first chunk with start time after "from".
	fromIdx := sort.Search(len(s.chunkDescs), func(i int) bool {
		return s.chunkDescs[i].FirstTime().After(from)
	})
	// Find first chunk with start time after "through".
	throughIdx := sort.Search(len(s.chunkDescs), func(i int) bool {
		return s.chunkDescs[i].FirstTime().After(through)
	})
	if fromIdx == len(s.chunkDescs) {
		// Even the last chunk starts before "from". Find out if the
		// series ends before "from" and we don't need to do anything.
		lt, err := s.chunkDescs[len(s.chunkDescs)-1].LastTime()
		if err != nil {
			return nopIter, err
		}
		if lt.Before(from) {
			return nopIter, nil
		}
	}
	if fromIdx > 0 {
		fromIdx--
	}
	if throughIdx == len(s.chunkDescs) {
		throughIdx--
	}

	pinIndexes := make([]int, 0, throughIdx-fromIdx+1)
	for i := fromIdx; i <= throughIdx; i++ {
		pinIndexes = append(pinIndexes, i)
	}
	return s.preloadChunks(pinIndexes, fp, mss)
}
Example #25
// SeekTo positions the cursor at the timestamp specified by seek and returns the
// timestamp and value.
func (c *devCursor) SeekTo(seek int64) (int64, interface{}) {
	// Seek to position in cache.
	c.cacheKeyBuf, c.cacheValueBuf = func() (int64, interface{}) {
		// Seek to position in cache index.
		c.cachePos = sort.Search(len(c.cache), func(i int) bool {
			return c.cache[i].Time().UnixNano() >= seek
		})

		if c.cachePos < len(c.cache) {
			v := c.cache[c.cachePos]
			if v.UnixNano() == seek || c.ascending {
				// Exact seek found or, if ascending, next one is good.
				return v.UnixNano(), v.Value()
			}
			// Nothing available if descending.
			return tsdb.EOF, nil
		}

		// Ascending cursor, no match in the cache.
		if c.ascending {
			return tsdb.EOF, nil
		}

		// Descending cursor, go to previous value in cache, and return if it exists.
		c.cachePos--
		if c.cachePos < 0 {
			return tsdb.EOF, nil
		}
		return c.cache[c.cachePos].UnixNano(), c.cache[c.cachePos].Value()
	}()

	// Seek to position to tsm block.
	if c.ascending {
		c.tsmValues, _ = c.tsmKeyCursor.SeekTo(time.Unix(0, seek-1), c.ascending)
	} else {
		c.tsmValues, _ = c.tsmKeyCursor.SeekTo(time.Unix(0, seek+1), c.ascending)
	}

	c.tsmPos = sort.Search(len(c.tsmValues), func(i int) bool {
		return c.tsmValues[i].Time().UnixNano() >= seek
	})

	if !c.ascending {
		c.tsmPos--
	}

	if c.tsmPos >= 0 && c.tsmPos < len(c.tsmValues) {
		c.tsmKeyBuf = c.tsmValues[c.tsmPos].Time().UnixNano()
		c.tsmValueBuf = c.tsmValues[c.tsmPos].Value()
	} else {
		c.tsmKeyBuf = tsdb.EOF
		c.tsmKeyCursor.Close()
	}

	return c.read()
}
Example #26
func (events EventList) Range(begin, end time.Time) EventList {
	start_idx := sort.Search(len(events), func(i int) bool {
		return begin.Before(events[i].End)
	})
	end_idx := sort.Search(len(events), func(i int) bool {
		return end.Before(events[i].End)
	})

	return events[start_idx:end_idx]
}
Example #27
File: list.go Project: dgraph-io/dgraph
func (l *List) iterate(afterUid uint64, f func(obj *types.Posting) bool) {
	l.AssertRLock()
	pidx, midx := 0, 0
	pl := l.getPostingList(0)

	if afterUid > 0 {
		pidx = sort.Search(len(pl.Postings), func(idx int) bool {
			p := pl.Postings[idx]
			return afterUid < p.Uid
		})
		midx = sort.Search(len(l.mlayer), func(idx int) bool {
			mp := l.mlayer[idx]
			return afterUid < mp.Uid
		})
	}

	var mp, pp *types.Posting
	cont := true
	for cont {
		if pidx < len(pl.Postings) {
			pp = pl.Postings[pidx]
		} else {
			pp = emptyPosting
		}
		if midx < len(l.mlayer) {
			mp = l.mlayer[midx]
		} else {
			mp = emptyPosting
		}

		switch {
		case pp.Uid == 0 && mp.Uid == 0:
			cont = false
		case mp.Uid == 0 || (pp.Uid > 0 && pp.Uid < mp.Uid):
			cont = f(pp)
			pidx++
		case pp.Uid == 0 || (mp.Uid > 0 && mp.Uid < pp.Uid):
			if mp.Op != Del {
				cont = f(mp)
			}
			midx++
		case pp.Uid == mp.Uid:
			if mp.Op != Del {
				cont = f(mp)
			}
			pidx++
			midx++
		default:
			log.Fatalf("Unhandled case during iteration of posting list.")
		}
	}
}
Example #28
// FirstInForceBefore(): looking at the ties for the nearest timestamp s < tm, return
// the earliest (first in the presented sequence order) of these ties at s. Nearest means
// that there is no other timestamp r such that s < r < tm.
func (s *Series) FirstInForceBefore(tm time.Time) (*Frame, SearchStatus, int) {

	m := len(s.Frames)
	utm := TimeToPrimTm(tm)

	// Search returns the smallest index i in [0, m) at which f(i) is true.
	// If i == m, this means no such index had f(i) true.
	i := sort.Search(m, func(i int) bool {
		return s.Frames[i].Tm() >= utm
	})
	if i == m {
		// all frames Tm < utm
		rtm := s.Frames[m-1].Tm()

		// Handling repeated timestamps:
		// Need to search back to the first Frame at rtm.
		// For worst case efficiency of O(log(n)), rather
		// than O(n), use Search() again to
		// find the smallest index such that Tm >= rtm.
		k := sort.Search(m, func(i int) bool {
			return s.Frames[i].Tm() >= rtm
		})
		// k == m is impossible, rtm came from a Frame in s.Frames
		return s.Frames[k], InFuture, k
	}
	// INVAR: at least one entry had Tm >= utm

	if i == 0 {
		return nil, InPast, -1
	}

	// i is the smallest Frame such that itm >= utm.
	// Since we want to go strictly before that i,
	// start at j = i - 1; then find the first of any ties
	// at the Frames[j].Tm() timestamp. If we don't
	// find any, just return s.Frames[j].
	j := i - 1
	jtm := s.Frames[j].Tm()

	// Handling repeated timestamps:
	// Search back to the first Frame at jtm.
	// For worst case efficiency of O(log(n)), rather
	// than O(n), use Search() again to
	// find the smallest index such that Tm >= jtm.
	k := sort.Search(m, func(i int) bool {
		return s.Frames[i].Tm() >= jtm
	})

	// k == m is impossible since jtm comes from the j Frame
	return s.Frames[k], Avail, k
}
Example #29
// Add inserts a single interval into the index.
// If it overlaps with existing intervals, its data will
// take priority over other intervals.
func (ivl *IntervalIndex) Add(n Interval) {
	Min, Max := n.Range()
	if Max < Min {
		panic("Min > Max!")
	}

	// Initial case: Add as single element.
	if ivl.r == nil {
		ivl.r = []Interval{n}
		ivl.Max = Max
		return
	}

	// Find the lowest fitting interval:
	minIdx := sort.Search(len(ivl.r), func(i int) bool {
		_, iMax := ivl.r[i].Range()
		return Min <= iMax
	})

	// Find the highest fitting interval:
	maxIdx := sort.Search(len(ivl.r), func(i int) bool {
		iMin, _ := ivl.r[i].Range()
		return Max <= iMin
	})

	// Remember biggest offset:
	if Max > ivl.Max {
		ivl.Max = Max
	}

	// New interval is bigger than all others:
	if minIdx >= len(ivl.r) {
		ivl.r = append(ivl.r, n)
		return
	}

	// New range fits nicely in; just insert it in between:
	if minIdx == maxIdx {
		ivl.r = insert(ivl.r, minIdx, n)
		return
	}

	// Something in between. Merge to continuous interval:
	for i := minIdx; i < maxIdx; i++ {
		n.Merge(ivl.r[i])
	}

	// Delete old unmerged intervals and substitute with merged:
	ivl.r[minIdx] = n
	ivl.r = cut(ivl.r, minIdx+1, maxIdx)
}
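The insert and cut helpers are not shown in this snippet. A minimal sketch of what they might look like (assumptions, not the original project's code): insert places an element at position i and shifts the rest right, cut removes the half-open range [i, j).

// Hypothetical helpers assumed by Add above.
func insert(r []Interval, i int, n Interval) []Interval {
	r = append(r, n)     // grow the slice by one
	copy(r[i+1:], r[i:]) // shift elements right
	r[i] = n
	return r
}

func cut(r []Interval, i, j int) []Interval {
	return append(r[:i], r[j:]...) // drop r[i:j]
}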
Example #30
File: markov.go Project: scvalex/markov
func main() {
	rand.Seed(time.Now().UnixNano())

	// Read text and split it into space delimited words.
	t := text{}
	for i, done := 0, false; !done; i++ {
		var s string
		n, err := fmt.Scan(&s)
		if n == 0 || err != nil {
			done = true
		}
		t.text = append(t.text, s)
		t.words = append(t.words, i)
	}

	// Pre-process words.
	sort.Sort(t)

	// Print priming words.
	/* Find a fullstop. */
	var aux int
	for done := false; !done; {
		aux = rand.Int() % t.Len()
		for ; aux < t.Len() && !strings.HasSuffix(t.Word(aux), "."); aux++ {
		}
		done = aux < t.Len() && strings.HasSuffix(t.Word(aux), ".")
	}
	// words[aux] is now something ending in a fullstop, so don't
	// print the first word.
	for i := 1; i < ngram_n && t.words[aux]+i < t.Len(); i++ {
		fmt.Printf("%s ", t.text[t.words[aux]+i])
	}
	fmt.Println()

	for i := 0; i < 3; i++ {
		// Binary search for the selected phrase.
		start := sort.Search(t.Len(), func(i int) bool {
			return (t.wordscmp(t.words[i], t.words[aux]) == 0)
		})
		end := sort.Search(t.Len(), func(i int) bool {
			return t.wordscmp(t.words[aux], t.words[i]) < 0
		})
		fmt.Printf("aux; %d: '%s'\n", aux, t.Word(aux))
		fmt.Printf("%d..%d\n", start, end)
		if start < t.Len() && end < t.Len() {
			fmt.Printf("%d: '%s'\n", start, t.Word(start))
			fmt.Printf("%d: '%s'\n", end, t.Word(end))
		}
	}
}