Example #1
func NewBlock(ctx context.Context, endKey, id, dsPath string) *Block {
	if id == "" {
		u, err := uuid.NewV4()
		if err != nil {
			openinstrument.Logf(ctx, "Error generating UUID for new datastore block filename: %s", err)
			return nil
		}
		id = u.String()
	}
	return &Block{
		LogStreams: make(map[string]*oproto.ValueStream),
		NewStreams: make([]*oproto.ValueStream, 0),
		dsPath:     dsPath,
		Block: &oproto.Block{
			Header: &oproto.BlockHeader{
				Version: uint32(2),
				Index:   make([]*oproto.BlockHeaderIndex, 0),
			},
			Id:     id,
			EndKey: endKey,
			State:  oproto.Block_UNKNOWN,
			Node:   store_config.Get().GetTaskName(),
		},
	}
}
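A minimal usage sketch (not from the source): the context, end key and datastore path below are placeholder values, and passing an empty id asks NewBlock to generate a UUID-based one.

ctx := context.Background()
block := NewBlock(ctx, "/some/end/key", "", "/tmp/datastore")
if block == nil {
	// UUID generation failed; NewBlock has already logged the error.
	return
}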
Example #2
func (block *Block) SetState(ctx context.Context, state oproto.Block_State) error {
	block.protoLock.Lock()
	defer block.protoLock.Unlock()
	//openinstrument.Logf(ctx, "Updating cluster block %s to state %s", block.Block.Id, state.String())
	block.Block.State = state
	return store_config.Get().UpdateBlock(context.Background(), *block.Block)
}
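A short sketch of the call site, assuming a *Block obtained elsewhere; oproto.Block_LIVE is the state also used by Compact below.

if err := block.SetState(ctx, oproto.Block_LIVE); err != nil {
	openinstrument.Logf(ctx, "Error updating block state: %s", err)
}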
func (s *server) UpdateRetentionPolicy(ctx context.Context, request *oproto.UpdateRetentionPolicyRequest) (*oproto.UpdateRetentionPolicyResponse, error) {
	cs := store_config.Get()
	policy, err := cs.GetRetentionPolicy(ctx)
	if err != nil {
		return nil, fmt.Errorf("Error fetching retention policy: %s", err)
	}

	switch request.Op {
	case oproto.UpdateRetentionPolicyRequest_APPEND:
		policy.Policy = append(policy.Policy, request.Item)

	case oproto.UpdateRetentionPolicyRequest_INSERT:
		if request.Position >= uint32(len(policy.Policy)) {
			return nil, fmt.Errorf("Invalid position for insert")
		}
		i := []*oproto.RetentionPolicyItem{request.Item}
		i = append(i, policy.Policy[request.Position:]...)
		policy.Policy = append(policy.Policy[:request.Position], i...)

	case oproto.UpdateRetentionPolicyRequest_REMOVE:
		if request.Position >= uint32(len(policy.Policy)) {
			return nil, fmt.Errorf("Invalid position for remove")
		}
		policy.Policy = append(policy.Policy[:request.Position], policy.Policy[request.Position+1:]...)

	default:
		return nil, fmt.Errorf("Invalid operation")
	}

	err = cs.UpdateRetentionPolicy(ctx, policy)
	if err != nil {
		return nil, fmt.Errorf("Error updating retention policy: %s", err)
	}
	return &oproto.UpdateRetentionPolicyResponse{&policy}, nil
}
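A hedged sketch of building a request for the APPEND operation; only the Op and Item fields are taken from the handler above, and the item contents are placeholders.

request := &oproto.UpdateRetentionPolicyRequest{
	Op:   oproto.UpdateRetentionPolicyRequest_APPEND,
	Item: &oproto.RetentionPolicyItem{},
}
if _, err := s.UpdateRetentionPolicy(ctx, request); err != nil {
	openinstrument.Logf(ctx, "Error updating retention policy: %s", err)
}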
Example #4
func (block *Block) Compact(ctx context.Context) error {
	openinstrument.Logf(ctx, "Compacting block %s\n", block)
	startTime := time.Now()

	// Update cached number of streams and values
	defer block.UpdateIndexedCount()
	defer block.UpdateLoggedCount()
	defer block.UpdateUnloggedCount()

	block.protoLock.Lock()
	defer block.protoLock.Unlock()
	block.Block.State = oproto.Block_COMPACTING
	block.compactStartTime = time.Now()

	block.newStreamsLock.Lock()
	defer block.newStreamsLock.Unlock()

	block.logLock.Lock()
	defer block.logLock.Unlock()

	streams := make(map[string]*oproto.ValueStream)

	// Apply the retention policy during compaction
	p, err := store_config.Get().GetRetentionPolicy(ctx)
	if err != nil {
		return fmt.Errorf("Error getting retention policy from config store: %s", err)
	}
	policy := retentionpolicy.New(&p)
	endKey := ""

	appendValues := func(stream *oproto.ValueStream) {
		if stream.Variable == nil {
			openinstrument.Logf(ctx, "Skipping reading stream that contains no variable")
			return
		}
		varName := variable.ProtoToString(stream.Variable)
		out := policy.Apply(stream)
		if len(out.Value) == 0 {
			//openinstrument.Logf(ctx, "Dropping stream for variable %s", varName)
			return
		}
		outstream, found := streams[varName]
		if found {
			// Merge the retention-filtered values into the existing stream for this variable.
			outstream.Value = append(outstream.Value, out.Value...)
		} else {
			streams[varName] = out
		}
		if varName > endKey {
			endKey = varName
		}
	}

	// Append logged streams
	for _, stream := range block.LogStreams {
		appendValues(stream)
	}
	openinstrument.Logf(ctx, "Block log contains %d streams", len(streams))

	// Append indexed streams
	reader, err := block.GetIndexedStreams(ctx)
	if err != nil {
		openinstrument.Logf(ctx, "Unable to read block: %s", err)
	} else {
		for stream := range reader {
			appendValues(stream)
		}
		openinstrument.Logf(ctx, "Compaction read block containing %d streams", len(streams))
	}

	// Append unlogged (new) streams
	if len(block.NewStreams) > 0 {
		for _, stream := range block.NewStreams {
			appendValues(stream)
		}
		openinstrument.Logf(ctx, "Compaction added %d unlogged streams, total: %d streams", len(block.NewStreams), len(streams))
	}

	// The end key may have changed if streams have been dropped
	block.Block.EndKey = endKey

	if err = block.Write(ctx, streams); err != nil {
		openinstrument.Logf(ctx, "Error writing: %s", err)
		return err
	}

	// Delete the log file
	if err := os.Remove(block.logFilename()); err != nil && !os.IsNotExist(err) {
		openinstrument.Logf(ctx, "Error deleting log file %s: %s", block.logFilename(), err)
	} else {
		openinstrument.Logf(ctx, "Deleted log file %s", block.logFilename())
	}
	block.LogStreams = make(map[string]*oproto.ValueStream)
	block.NewStreams = make([]*oproto.ValueStream, 0)

	block.compactEndTime = time.Now()
	block.Block.State = oproto.Block_LIVE
	block.UpdateSize()
	openinstrument.Logf(ctx, "Finished compaction of %s in %v", block, time.Since(startTime))

	return nil
}
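A sketch of driving a compaction from a caller, assuming the block has been created and populated elsewhere; Compact acquires the proto, log and new-stream locks itself, so the caller only checks the returned error.

if err := block.Compact(ctx); err != nil {
	openinstrument.Logf(ctx, "Compaction of block %s failed: %s", block.Block.Id, err)
}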
func (s *server) GetCluster(ctx context.Context, request *oproto.GetClusterRequest) (*oproto.GetClusterResponse, error) {
	cs := store_config.Get()
	config := cs.GetClusterConfig(ctx)
	return &oproto.GetClusterResponse{Config: config}, nil
}
func GetConfig(ctx context.Context, ds *datastore.Datastore, w http.ResponseWriter, req *http.Request) {
	returnResponse(w, req, store_config.Get().GetClusterConfig(ctx))
}
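GetConfig does not match http.HandlerFunc directly, so a caller would wrap it; this sketch assumes a *datastore.Datastore named ds is already in scope and the route name is arbitrary.

http.HandleFunc("/config", func(w http.ResponseWriter, req *http.Request) {
	GetConfig(req.Context(), ds, w, req)
})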