Example #1
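// updateValueFileRow records the final state of the value file in the node
// database: close time, MD5 digest, bytes and sequences written, the segment
// ID range, and the set of collection IDs stored in the file.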
func (valueFile *outputValueFile) updateValueFileRow() error {
	closeTime := tools.Timestamp()
	md5Digest := valueFile.md5Sum.Sum(nil)

	// convert the set of collection ids to a form postgres will accept:
	// {n1, n2, ...}
	collectionIDs := make([]int, len(valueFile.collectionIDSet))
	var n int
	for collectionID := range valueFile.collectionIDSet {
		collectionIDs[n] = int(collectionID)
		n++
	}
	sort.Ints(collectionIDs)
	collectionIDStrings := make([]string, len(collectionIDs))
	for n, collectionID := range collectionIDs {
		collectionIDStrings[n] = fmt.Sprintf("%d", collectionID)
	}
	collectionIDLiteral := fmt.Sprintf("{%s}",
		strings.Join(collectionIDStrings, ","))

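	// persist the closing metadata via the prepared "update-value-file" statement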
	stmt := nodedb.Stmts["update-value-file"]
	_, err := stmt.Exec(
		valueFile.creationTime,
		closeTime,
		valueFile.bytesWritten,
		md5Digest,
		valueFile.sequencesWritten,
		valueFile.minSegmentID,
		valueFile.maxSegmentID,
		len(collectionIDs),
		collectionIDLiteral,
		valueFile.valueFileID)
	return err
}
Example #2
// NewOutputValueFile creates an entity implementing the OutputValueFile interface
func NewOutputValueFile(fileSpaceInfo tools.FileSpaceInfo) (OutputValueFile, error) {
	var valueFile outputValueFile
	var err error

	valueFile.creationTime = tools.Timestamp()
	valueFile.md5Sum = md5.New()
	valueFile.collectionIDSet = make(map[uint32]struct{})
	repositoryPath := os.Getenv("NIMBUSIO_REPOSITORY_PATH")

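	// choose the journal file space with the most available room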
	if valueFile.spaceID, err = fileSpaceInfo.FindMaxAvailSpaceID(tools.FileSpaceJournal); err != nil {
		return nil, err
	}

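	// insert the database row for this value file, which supplies valueFile.valueFileID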
	if err = valueFile.insertValueFileRow(); err != nil {
		return nil, err
	}

	valueFile.filePath = tools.ComputeValueFilePath(repositoryPath, valueFile.spaceID,
		valueFile.valueFileID)

	fog.Debug("NewOutputValueFile %s", valueFile.filePath)

	dirPath := path.Dir(valueFile.filePath)
	if err = os.MkdirAll(dirPath, os.ModeDir|0755); err != nil {
		return nil, fmt.Errorf("os.MkdirAll(%s...", err)
	}

	valueFile.fileHandle, err = os.Create(valueFile.filePath)
	if err != nil {
		return nil, fmt.Errorf("os.Create(%s) %s", valueFile.filePath, err)
	}

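	// preallocate the maximum value file size on disk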
	err = syscall.Fallocate(int(valueFile.fileHandle.Fd()), 0, 0,
		int64(MaxValueFileSize))
	if err != nil {
		return nil, fmt.Errorf("Fallocate failed %s", err)
	}

	valueFile.enableFsync = os.Getenv("NIMBUSIO_ENABLE_FSYNC") == "1"
	fog.Info("NewOutputValueFile: NIMBUSIO_ENABLE_FSYNC = %t", valueFile.enableFsync)

	return &valueFile, nil
}
Example #3
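// handleStoreSequence writes one sequence of segment data to the current
// value file and records its location in the node database, rolling over to
// a new value file when the current one is full.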
func handleStoreSequence(state *writerState, request requestStoreSequence) {
	userRequestID := request.UserRequestID
	segment := request.Segment
	sequence := request.Sequence
	data := request.Data
	var err error
	var md5Digest []byte
	var offset uint64

	fog.Debug("%s StoreSequence #%d", userRequestID, sequence.SequenceNum)

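	// roll over to a new value file if this sequence would overflow the current one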
	if state.ValueFile.Size()+sequence.SegmentSize >= MaxValueFileSize {
		fog.Info("value file full")

		if state.SyncTimer != nil {
			state.SyncTimer.Stop()
		}
		state.SyncTimer = nil

		if err = state.ValueFile.Close(); err != nil {
			request.resultChan <- storeSequenceResult{Err: fmt.Errorf("error closing value file %s", err)}
			return
		}

		if state.ValueFile, err = NewOutputValueFile(state.FileSpaceInfo); err != nil {
			request.resultChan <- storeSequenceResult{Err: fmt.Errorf("error opening value file %s", err)}
			return
		}

		startSyncTimer(state)
	}

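	// the segment MD5 digest is delivered base64 encoded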
	md5Digest, err = base64.StdEncoding.DecodeString(sequence.EncodedSegmentMD5Digest)
	if err != nil {
		request.resultChan <- storeSequenceResult{Err: err}
		return
	}

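	// the segment must already have been registered by handleStartSegment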
	key := segmentKey{segment.UnifiedID, segment.ConjoinedPart,
		segment.SegmentNum}
	entry, ok := state.SegmentMap[key]
	if !ok {
		request.resultChan <- storeSequenceResult{Err: fmt.Errorf("StoreSequence unknown segment %s", key)}
		return
	}

	offset, err = state.ValueFile.Store(segment.CollectionID, entry.SegmentID,
		data)
	if err != nil {
		request.resultChan <- storeSequenceResult{Err: fmt.Errorf("ValueFile.Store %s", err)}
		return
	}

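	// record the sequence's location within the value file in the node database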
	stmt := nodedb.Stmts["new-segment-sequence"]
	_, err = stmt.Exec(
		segment.CollectionID,
		entry.SegmentID,
		sequence.ZfecPaddingSize,
		state.ValueFile.ID(),
		sequence.SequenceNum,
		offset,
		sequence.SegmentSize,
		md5Digest,
		sequence.SegmentAdler32)
	if err != nil {
		request.resultChan <- storeSequenceResult{Err: fmt.Errorf("new-segment-sequence %s", err)}
		return
	}

	state.StatGrabber.Accumulate("nimbusio_write_requests", 1)
	state.StatGrabber.Accumulate("nimbusio_write_bytes", len(data))

	entry.LastActionTime = tools.Timestamp()
	state.SegmentMap[key] = entry

	request.resultChan <- storeSequenceResult{ValueFileID: state.ValueFile.ID()}
}
Example #4
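// handleStartSegment inserts a new segment row (optionally marked as a
// handoff) and registers the segment in the writer's segment map.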
func handleStartSegment(state *writerState, request requestStartSegment) {
	userRequestID := request.UserRequestID
	segment := request.Segment
	nodeNames := request.NodeNames

	var entry segmentMapEntry
	var err error
	var sourceNodeID uint32
	var handoffNodeID uint32
	var ok bool
	var timestamp time.Time

	fog.Debug("%s StartSegment", userRequestID)

	if sourceNodeID, ok = state.NodeIDMap[nodeNames.SourceNodeName]; !ok {
		request.resultChan <- fmt.Errorf("unknown source node %s", nodeNames.SourceNodeName)
		return
	}

	if timestamp, err = tools.ParseTimestampRepr(segment.TimestampRepr); err != nil {
		request.resultChan <- fmt.Errorf("unable to parse timestamp %s", err)
		return
	}

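	// a non-empty handoff node name selects the handoff variant of the segment insert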
	if nodeNames.HandoffNodeName != "" {
		if handoffNodeID, ok = state.NodeIDMap[nodeNames.HandoffNodeName]; !ok {
			request.resultChan <- fmt.Errorf("unknown handoff node %s", nodeNames.HandoffNodeName)
			return
		}

		stmt := nodedb.Stmts["new-segment-for-handoff"]
		row := stmt.QueryRow(
			segment.CollectionID,
			segment.Key,
			segment.UnifiedID,
			timestamp,
			segment.SegmentNum,
			segment.ConjoinedPart,
			sourceNodeID,
			handoffNodeID)
		if err = row.Scan(&entry.SegmentID); err != nil {
			request.resultChan <- err
			return
		}
	} else {
		stmt := nodedb.Stmts["new-segment"]
		row := stmt.QueryRow(
			segment.CollectionID,
			segment.Key,
			segment.UnifiedID,
			timestamp,
			segment.SegmentNum,
			segment.ConjoinedPart,
			sourceNodeID)
		if err = row.Scan(&entry.SegmentID); err != nil {
			request.resultChan <- err
			return
		}
	}
	entry.LastActionTime = tools.Timestamp()

	key := segmentKey{segment.UnifiedID, segment.ConjoinedPart,
		segment.SegmentNum}

	state.SegmentMap[key] = entry

	request.resultChan <- nil
}