Example #1
func TestDeleteWithSecondaryIndex(t *testing.T) {
	cassandra.PoolConstructor = cassandra.NewMockConnectionPool
	defer func() {
		cassandra.PoolConstructor = cassandra.DefaultPoolConstructor
	}()

	testDate := time.Date(2014, 1, 1, 0, 30, 0, 0, time.UTC)
	// Hourly granularity sticks this in at midnight
	testBucketedDate := time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC)

	testTs := &timeseries.TimeSeries{
		Ks:             "testTsKs",
		Cf:             "testTsCf",
		RowGranularity: time.Hour,
		Marshaler: func(i interface{}) (string, time.Time) {
			return "testUid", testDate
		},
		IndexCf: "testTsIndexCf",
		SecondaryIndexer: func(i interface{}) string {
			return "testTsSecondaryIndex"
		},
	}

	pool, err := cassandra.ConnectionPool(testTs.Ks)
	assert.Nil(t, err, "Error getting C* connection pool")

	writer := pool.Writer().(*cassandra.MockWriter)
	columnNames := [][]byte{
		[]byte(fmt.Sprintf("%d-testUid", testDate.Unix())),
	}
	rowName := []byte(fmt.Sprintf("testTsSecondaryIndex%d", testBucketedDate.Unix()))
	writer.On("DeleteColumns", testTs.Cf, rowName, columnNames).Return(nil).Once()
	testTs.Delete(writer, true)
}
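If cassandra.MockWriter embeds testify's mock.Mock (an assumption; the mock's definition isn't shown above), the expectation registered with Once() would normally also be verified before the test returns, e.g.:

	writer.AssertExpectations(t) // fails the test if the expected DeleteColumns call never happened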
Example #2
File: index.go Project: armada-io/h2
func (i *index) load() error {
	pool, err := cassandra.ConnectionPool(i.ts.Ks)
	if err != nil {
		return fmt.Errorf("Failed to get C* connection pool: %v", err)
	}

	finished := false
	lastCol := []byte{}
	i.values = make(map[time.Time]bool)

	for !finished {
		finished = true // we'll set back to false if we find any results
		row, err := pool.Reader().Cf(i.ts.IndexCf).Slice(&gossie.Slice{
			Start:    lastCol,
			End:      []byte{},
			Count:    indexBatch,
			Reversed: false,
		}).Get(i.rowKey())
		if err != nil {
			return fmt.Errorf("Error reading from C*: %v", err)
		}
		if row != nil && len(row.Columns) > 0 {
			// we only got back the single column that was the last one from the previous call, so we are finished
			if len(row.Columns) == 1 && string(row.Columns[0].Name) == string(lastCol) {
				break
			}

			// we found more columns, so we're not finished yet
			finished = false

			// add any returned columns to our buffer
			for _, c := range row.Columns {
				// set the lastCol for the next fetch
				lastCol = c.Name
				t, err := i.fromColName(c.Name)
				if err != nil {
					return fmt.Errorf("Conversion fail reading index: %v", err)
				}
				// fill index values
				i.values[t] = true
				// set start, if not set
				if i.start.IsZero() {
					i.start = t
				}
				// set end, always
				i.end = t
			}
		}
	}

	// extend end by one granularity period (column times are truncated to the start of their bucket)
	i.end = i.end.Add(i.granularity())

	return nil
}
Example #3
func loadInData(t *testing.T, ts *TimeSeries, period int) {
	pool, err := cassandra.ConnectionPool(ts.Ks)
	if err != nil {
		t.Fatalf("C* connection pool error: %v", err)
	}

	currTime := 1387065600 // mid-December 2013 (15 Dec 2013 00:00:00 UTC)
	for i := 0; i < 10000; i++ {
		thing := &TestThing{
			Id:   fmt.Sprintf("%v", i+1),
			Time: time.Unix(int64(currTime), 0),
		}
		currTime += period

		w := pool.Writer()
		ts.Map(w, thing, nil)
		if err := w.Run(); err != nil {
			t.Fatalf("Error writing to C*: %v", err)
		}
	}
}
Example #4
File: globallock.go Project: armada-io/h2
// Unlock releases this global lock
func (gl *globalLock) Unlock() {
	if gl.unlocked {
		return
	}
	// close the exit channel, which causes our refresher loop (if any) to break; the unlocked flag ensures we only do this once
	close(gl.exit)
	gl.unlocked = true

	// delete from C*
	pool, err := cassandra.ConnectionPool(keyspace)
	if err != nil {
		log.Warnf("[Sync:GlobalLock] Release error due to C*: %v", err)
		return
	}
	writer := pool.Writer()
	writer.ConsistencyLevel(gossie.CONSISTENCY_QUORUM).DeleteColumns(
		cfGlobalLock,
		gl.id,
		[][]byte{[]byte(gl.lockId[:])},
	)
	if err := writer.Run(); err != nil {
		log.Warnf("[Sync:GlobalLock] Unlock failed due to C*: %v", err)
	}
}
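The exit channel used above is a standard Go idiom: a background goroutine selects on a channel that is closed exactly once to tell it to stop. A minimal, standalone sketch of that pattern (not taken from the h2 codebase; all names here are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	exit := make(chan struct{})
	done := make(chan struct{})

	// background "refresher" loop, analogous to the one started by GlobalTimedLock
	go func() {
		defer close(done)
		for {
			select {
			case <-exit:
				fmt.Println("refresher stopping")
				return
			case <-time.After(100 * time.Millisecond):
				fmt.Println("refreshing…")
			}
		}
	}()

	time.Sleep(250 * time.Millisecond)
	close(exit) // a closed channel is always ready to receive, so the loop exits promptly
	<-done
}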
Example #5
File: iterator.go Project: armada-io/h2
func (i *itemIterator) fillBuffer() error {
	if i.buffer == nil {
		if err := i.initBuffer(); err != nil {
			return err
		}
	}

	// sanity check
	if i.BatchSize < 2 {
		panic("Invalid BatchSize - must be >= 2")
	}

	pool, err := cassandra.ConnectionPool(i.ts.Ks)
	if err != nil {
		return fmt.Errorf("Failed to get C* connection pool: %v", err)
	}

	// init buffer for THIS fetch
	i.buffer = i.buffer[0:0]
	i.bufferI = 0

	// loop rows
	for {
		// we may wish to skip fetching from a row, based on our "index" (which, for efficiency, keeps track of which rows contain ANY data)
		shouldSkip, err := i.index.skip(i.fetchTime)
		if err != nil {
			log.Warnf("Error loading index for timeseries, will proceed trying all rows: %v", err)
		}
		if !shouldSkip {
			// loop chunks of columns within this one row
			for {
				row, err := pool.Reader().Cf(i.ts.Cf).Slice(&gossie.Slice{
					Start:    i.startCol,
					End:      []byte{},
					Count:    i.BatchSize + 1,
					Reversed: i.Reverse,
				}).Get(i.ts.toRowKey(i.secondaryIndex, i.fetchTime))
				if err != nil {
					return fmt.Errorf("Error reading from C*: %v", err)
				}

				// strip off first column if we have already read it, or if it is the "lastId" column as part of a paginated fetch
				if row != nil && len(row.Columns) > 0 {
					if bytes.Equal(row.Columns[0].Name, i.lastFetchedCol) || bytes.Equal(row.Columns[0].Name, []byte(i.fromId)) {
						row.Columns = row.Columns[1:]
					}
				}

				if row == nil || len(row.Columns) == 0 {
					// nothing left in row -- go to the next row
					break
				}

				// add any returned columns to our buffer
				for _, c := range row.Columns {
					// set the startCol for the next fetch
					i.startCol = c.Name
					// column within our specified end time?
					_, ct, err := columnNameToTimeId(c.Name)
					if err != nil {
						return fmt.Errorf("Bad column name found: %v", err)
					}
					if i.isBeyondEndOfRange(ct, false) {
						// end of range
						i.endOfRange = true
						// @todo possibly return an error if no items here
						return nil
					}

					// ok - we'll have this column
					item, err := colToItem(c)
					if err != nil {
						return fmt.Errorf("Bad column found: %v", err)
					}
					i.buffer = append(i.buffer, item)
					i.lastFetchedCol = c.Name

					// got enough now?
					if len(i.buffer) >= i.BatchSize {
						return nil
					}
				}
			}
		}

		// go to next row -- if we're within timing
		i.startCol = []byte{} // reset startCol since we always want to start from the beginning of the row
		if i.Reverse {
			i.fetchTime = i.fetchTime.Add(-i.ts.RowGranularity)
		} else {
			i.fetchTime = i.fetchTime.Add(i.ts.RowGranularity)
		}
		if i.isBeyondEndOfRange(i.fetchTime, true) {
			// end of range
			i.endOfRange = true
			return nil
		}
	}

	// no return - unreachable
}
Example #6
File: globallock.go Project: armada-io/h2
// GlobalTimedLock attempts to acquire a global lock on `id`, waiting for up to `waitFor` in
// the case of contention (before giving up) and reserving the lock for at most `holdFor`
// in the event of failure.
// NOTE: locks can and will be held for longer than `holdFor`, but in the case of failure
// (e.g. the binary crashes) this is the maximum amount of time other programs will hang
// around contending for the now defunct lock
func GlobalTimedLock(id []byte, waitFor, holdFor time.Duration) (Lock, error) {
	if int64(holdFor) < int64(minHoldFor) {
		return nil, ErrHoldFor
	}

	u, err := gossie.NewTimeUUID()
	if err != nil {
		log.Warnf("[Sync:GlobalLock] Failed to generate time UUID: %v", err)
		return nil, ErrGenUuid
	}
	l := &globalLock{
		id:     id,
		lockId: u,
		exit:   make(chan struct{}),
	}

	// make my node in C*
	pool, err := cassandra.ConnectionPool(keyspace)
	if err != nil {
		return nil, fmt.Errorf("Error locking due to C*: %v", err)
	}
	writer := pool.Writer()
	writer.ConsistencyLevel(gossie.CONSISTENCY_QUORUM).Insert(cfGlobalLock, &gossie.Row{
		Key: l.id,
		Columns: []*gossie.Column{
			{
				Name:  []byte(l.lockId[:]),
				Value: []byte{}, // @todo could inject some data about who has the lock here
				Ttl:   durationToSeconds(holdFor, 1.0),
			},
		},
	})
	startTime := time.Now()
	err = writer.Run()
	if err != nil {
		inst.Timing(1.0, "sync.globaltimedlock.acquire", time.Since(startTime))
		inst.Counter(1.0, "sync.globaltimedlock.acquire.failure")
		return nil, err
	}

	// read all lock columns back and ensure ours is the lowest (i.e. we won the race)
	reader := pool.Reader().ConsistencyLevel(gossie.CONSISTENCY_QUORUM).Cf(cfGlobalLock)
	attempts := 0
	errs := multierror.New()
	start := time.Now()
	for {
		// break out if we've waited too long
		if attempts > 0 {
			if time.Now().After(start.Add(waitFor)) {
				inst.Timing(1.0, "sync.globaltimedlock.acquire", time.Since(startTime))
				inst.Counter(1.0, "sync.globaltimedlock.acquire.failure")
				l.Unlock()
				return nil, ErrContended
			}
			// delay a bit to avoid hammering C*
			time.Sleep(addJitter(delayFor))
		}

		attempts++

		row, err := reader.Get(l.id)
		if err != nil {
			errs.Add(fmt.Errorf("C* read back error: %v", err))
			continue
		}
		if row == nil || len(row.Columns) == 0 {
			errs.Add(fmt.Errorf("C* read back error: no columns returned from query"))
			continue
		}

		col := row.Columns[0]
		if bytes.Equal(col.Name, []byte(l.lockId[:])) {
			// we have the lock
			break
		}
	}

	inst.Timing(1.0, "sync.globaltimedlock.acquire", time.Since(startTime))
	inst.Counter(1.0, "sync.globaltimedlock.acquire.success")

	// put in place the refresher loop @todo
	go func() {
		for {
			log.Debug("[Sync:GlobalLock] Doing refresher loop…")
			refresh := time.Duration(float64(holdFor) * 0.75)
			select {
			case <-l.exit:
				log.Debugf("[Sync:GlobalLock] Breaking out of refresher loop")
				return
			case <-time.After(refresh):
				log.Debugf("[Sync:GlobalLock] Refreshing %s [%s]", string(l.id), l.lockId.String())
				writer.ConsistencyLevel(gossie.CONSISTENCY_QUORUM).Insert(cfGlobalLock, &gossie.Row{
					Key: l.id,
					Columns: []*gossie.Column{{
						Name:  []byte(l.lockId[:]),
						Value: []byte{},                        // @todo could inject some data about who has the lock here
						Ttl:   durationToSeconds(holdFor, 1.5), // 1.5 is because we renew the lock earlier than the timeout, so we need to cover that extra bit
					}},
				})
				if err := writer.Run(); err != nil {
					// @todo we could inform clients of this, somehow, eg: via a channel
					log.Warnf("[Sync:GlobalLock] failed to refresh lock .. cannot guarantee exclusivity")
				}
			}
		}
	}()

	return l, nil
}
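Putting the two globallock.go examples together, here is a minimal usage sketch, written as if it lived in the same package as GlobalTimedLock (the helper name and the timeout values below are hypothetical, not part of the h2 API):

func doExclusiveWork(id []byte) error {
	// wait up to 5s to acquire; reserve the lock for 30s in case this process dies (values are illustrative)
	lock, err := GlobalTimedLock(id, 5*time.Second, 30*time.Second)
	if err == ErrContended {
		// someone else currently holds the lock; back off rather than blocking forever
		return err
	}
	if err != nil {
		return fmt.Errorf("could not acquire global lock: %v", err)
	}
	defer lock.Unlock() // deletes our lock column from C* and stops the refresher loop

	// ... work that requires global exclusivity goes here ...
	return nil
}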