Example #1
func (reader *sizeReader) Read(p []byte) (int, error) {
	if reader.CurrentSize >= reader.MaxSize {
		fog.Debug("returning 0 bytes and EOF")
		return 0, io.EOF
	}

	bytesLeft := reader.MaxSize - reader.CurrentSize
	var bytesToSend uint64
	var err error

	if bytesLeft > uint64(len(p)) {
		bytesToSend = uint64(len(p))
	} else {
		bytesToSend = bytesLeft
		err = io.EOF
		fog.Debug("returning %d bytes and EOF", bytesToSend)
	}

	for i := 0; i < int(bytesToSend); i++ {
		p[i] = 'a'
	}
	reader.CurrentSize += bytesToSend

	currentMB := reader.CurrentSize / (1024 * 1024)
	if currentMB > reader.ReportedMB {
		fog.Debug("size reader %dmb", currentMB)
		reader.ReportedMB = currentMB
	}

	return int(bytesToSend), err
}
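The Read method above operates on a small bookkeeping struct that the test clients obtain via tools.NewSizeReader (Examples #2 and #3). A minimal sketch of what that struct and constructor might look like, offered as an assumption rather than the project's actual declaration:

type sizeReader struct {
	MaxSize     uint64 // total number of bytes to produce
	CurrentSize uint64 // bytes handed out so far
	ReportedMB  uint64 // last whole-megabyte mark that was logged
}

// NewSizeReader returns a reader that yields maxSize bytes of filler data.
func NewSizeReader(maxSize uint64) *sizeReader {
	return &sizeReader{MaxSize: maxSize}
}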
Example #2
func testPOST(serviceDomain string, useTLS bool) {
	var contentLength uint64 = 1024 * 1024 * 1024
	fog.Debug("start post %dmb", contentLength/(1024*1024))

	client, scheme := getClient(useTLS)

	url := fmt.Sprintf("%s://%s/admin", scheme, serviceDomain)
	bodyReader := tools.NewSizeReader(contentLength)
	request, err := http.NewRequest("POST", url, bodyReader)
	if err != nil {
		fog.Critical("NewRequest (POST) failed %s", err)
	}
	request.TransferEncoding = []string{"identity"}
	request.Header.Add("Content-Type", "text/plain")
	request.Header.Add("Content-Length", fmt.Sprintf("%s", contentLength))
	request.ContentLength = int64(contentLength)
	response, err := client.Do(request)
	if err != nil {
		fog.Error("post %s", err)
		return
	}
	tools.ReadAndDiscard(response.Body)
	response.Body.Close()

	fog.Debug("finished post %s", response.Status)
}
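testPOST above and testGET (Example #8) both call a getClient helper that picks an HTTP client and URL scheme from the useTLS flag. Its body is not shown in these examples; a plausible sketch, assuming a self-signed test certificate so verification is skipped:

import (
	"crypto/tls"
	"net/http"
)

func getClient(useTLS bool) (*http.Client, string) {
	if useTLS {
		transport := &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
		return &http.Client{Transport: transport}, "https"
	}
	return &http.Client{}, "http"
}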
Example #3
func (s serverImpl) handleGET(w http.ResponseWriter, req *http.Request) {
	fog.Debug("(%s) got request %s %s", s.Name, req.Method, req.URL)
	if req.Body != nil {
		tools.ReadAndDiscard(req.Body)
		req.Body.Close()
	}

	var contentLength uint64 = 1024 * 1024 * 1024
	fog.Debug("response body %dmb", contentLength/(1024*1024))
	bodyReader := tools.NewSizeReader(contentLength)

	http.ServeContent(w, req, "content.txt", time.Now(), bodyReader)
}
Example #4
// FindMaxAvailSpaceID returns the space id with the most space available
func (info FileSpaceMap) FindMaxAvailSpaceID(purpose string) (uint32, error) {
	var maxAvailSpace uint64
	var maxAvailSpaceID uint32
	var found bool
	var statfsBuffer syscall.Statfs_t

	for _, entry := range info[purpose] {
		if err := syscall.Statfs(entry.Path, &statfsBuffer); err != nil {
			return 0, fmt.Errorf("syscall.Statfs(%s, ...) %s", entry.Path, err)
		}
		availSpace := uint64(statfsBuffer.Bsize) * statfsBuffer.Bavail
		fog.Debug("(%d) %s available=%d", entry.SpaceID, entry.Path, availSpace)
		if availSpace > maxAvailSpace {
			found = true
			maxAvailSpace = availSpace
			maxAvailSpaceID = entry.SpaceID
		}
	}

	if !found {
		return 0, fmt.Errorf("no space found")
	}

	return maxAvailSpaceID, nil
}
Example #5
// NewEventSubSocketHandler returns a function that handles event notifications
func NewEventSubSocketHandler(eventSubSocket *zmq4.Socket) func(zmq4.State) error {
	var nodeIDMap map[string]uint32
	var err error

	if nodeIDMap, err = centraldb.GetNodeIDMap(); err != nil {
		fog.Critical("centraldb.GetNodeIDMap() failed %s", err)
	}

	return func(_ zmq4.State) error {
		var err error
		var ok bool
		var webWriterStart msg.WebWriterStart
		var timestamp time.Time
		var sourceNodeID uint32

		marshalledMessage, err := eventSubSocket.RecvMessage(0)
		if err != nil {
			return fmt.Errorf("RecvMessage %s", err)
		}

		// the 0th part should be the topic, we skip that

		err = json.Unmarshal([]byte(marshalledMessage[1]), &webWriterStart)
		if err != nil {
			return fmt.Errorf("Unmarshal %s", err)
		}

		if webWriterStart.MessageType != "web-writer-start" {
			return fmt.Errorf("unknown message type '%s'",
				webWriterStart.MessageType)
		}

		timestamp, err = tools.ParseTimestampRepr(webWriterStart.TimestampRepr)
		if err != nil {
			return fmt.Errorf("unable to parse %s %s",
				webWriterStart.TimestampRepr, err)
		}

		sourceNodeID, ok = nodeIDMap[webWriterStart.SourceNodeName]
		if !ok {
			return fmt.Errorf("unknown source_node_name %s",
				webWriterStart.SourceNodeName)
		}

		fog.Debug("cancel-segments-from-node %s", webWriterStart.SourceNodeName)

		// cancel all segment rows that
		//    * come from a specific source node
		//    * are in active status
		//    * have a timestamp earlier than the specified time.
		// This is triggered by a web server restart
		stmt := nodedb.Stmts["cancel-segments-from-node"]
		if _, err = stmt.Exec(sourceNodeID, timestamp); err != nil {
			return fmt.Errorf("cancel-segments-from-node %s", err)
		}

		return nil
	}
}
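The handler above unmarshals the second message part into msg.WebWriterStart. Only three fields are used; a sketch of the shape they imply (the real declaration in the msg package presumably carries json struct tags matching the wire names, which are not visible here):

type WebWriterStart struct {
	MessageType    string
	TimestampRepr  string
	SourceNodeName string
}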
Example #6
func (s serverImpl) handlePOST(w http.ResponseWriter, req *http.Request) {
	fog.Debug("(%s) got request %s %s", s.Name, req.Method, req.URL)
	if req.Body != nil {
		tools.ReadAndDiscard(req.Body)
		req.Body.Close()
	}
	io.WriteString(w, "hello, world!\n")
}
Example #7
// main entry point for data writer
func main() {
	var err error
	var writerSocket *zmq4.Socket
	var eventSubSocket *zmq4.Socket

	fog.Info("program starts")

	if writerSocket, err = createWriterSocket(); err != nil {
		fog.Critical("createWriterSocket %s", err)
	}
	defer writerSocket.Close()

	fog.Info("binding writer socket to %s", dataWriterAddress)
	if err = writerSocket.Bind(dataWriterAddress); err != nil {
		fog.Critical("Bind(%s) %s", dataWriterAddress, err)
	}

	if eventSubSocket, err = createEventSubSocket(); err != nil {
		fog.Critical("createEventSubSocket %s", err)
	}
	defer eventSubSocket.Close()

	fog.Info("connecting event sub socket to %s", eventAggregatorPubAddress)
	if err = eventSubSocket.Connect(eventAggregatorPubAddress); err != nil {
		fog.Critical("Connect(%s) %s", eventAggregatorPubAddress, err)
	}

	messageChan := NewMessageHandler()

	reactor := zmq4.NewReactor()
	reactor.AddChannel(tools.NewSignalWatcher(), 1, tools.SigtermHandler)
	reactor.AddSocket(writerSocket, zmq4.POLLIN,
		NewWriterSocketHandler(writerSocket, messageChan))
	reactor.AddSocket(eventSubSocket, zmq4.POLLIN,
		NewEventSubSocketHandler(eventSubSocket))

	fog.Debug("starting reactor.Run")
	reactor.SetVerbose(true)
	err = reactor.Run(reactorPollingInterval)
	if err == tools.SigtermError {
		fog.Info("program terminates normally due to SIGTERM")
	} else if errno, ok := err.(syscall.Errno); ok {
		// we can get 'interrupted system call' if we get SIGTERM while
		// a socket is waiting on a read. That's not too bad.
		if errno == syscall.EINTR {
			fog.Warn("reactor.Run returns '%s' assuming SIGTERM", errno)
		} else {
			fog.Error("reactor.Run returns %T '%s'", errno, errno)
		}
	} else {
		fog.Error("reactor.Run returns %T %s", err, err)
	}
}
Example #8
func testGET(serviceDomain string, useTLS bool) {

	fog.Debug("start get")

	client, scheme := getClient(useTLS)

	url := fmt.Sprintf("%s://%s/admin", scheme, serviceDomain)
	request, err := http.NewRequest("GET", url, nil)
	if err != nil {
		fog.Critical("NewRequest (GET) failed %s", err)
	}
	response, err := client.Do(request)
	if err != nil {
		fog.Error("get %s", err)
		return
	}

	fog.Debug("reading GET response body")
	tools.ReadAndDiscard(response.Body)
	response.Body.Close()

	fog.Debug("finished get %s", response.Status)
}
Example #9
// FinishConjoinedArchive completes a conjoined archive
func handleFinishConjoinedArchive(state *writerState, request requestFinishConjoinedArchive) {
	conjoined := request.Conjoined
	var err error
	var ok bool
	var handoffNodeID uint32
	var timestamp time.Time

	fog.Debug("%s FinishConjoinedArchive %s", conjoined.UserRequestID, conjoined)

	if timestamp, err = tools.ParseTimestampRepr(conjoined.TimestampRepr); err != nil {
		request.resultChan <- fmt.Errorf("unable to parse timestamp %s", err)
		return
	}

	if conjoined.HandoffNodeName != "" {
		if handoffNodeID, ok = state.NodeIDMap[conjoined.HandoffNodeName]; !ok {
			request.resultChan <- fmt.Errorf("unknown handoff node %s", conjoined.HandoffNodeName)
			return
		}
		stmt := nodedb.Stmts["finish-conjoined-for-handoff"]
		_, err = stmt.Exec(
			timestamp,
			conjoined.CollectionID,
			conjoined.Key,
			conjoined.UnifiedID,
			handoffNodeID)

		if err != nil {
			request.resultChan <- fmt.Errorf("finish-conjoined-for-handoff %s", err)
			return
		}
	} else {

		stmt := nodedb.Stmts["finish-conjoined"]
		_, err = stmt.Exec(
			timestamp,
			conjoined.CollectionID,
			conjoined.Key,
			conjoined.UnifiedID)

		if err != nil {
			request.resultChan <- fmt.Errorf("finish-conjoined %s", err)
			return
		}

	}

	request.resultChan <- nil
}
Example #10
// SanityCheck verifies that the file info map is valid
func (info FileSpaceMap) SanityCheck(repositoryPath string) error {
	for _, purpose := range FileSpacePurpose {
		for _, entry := range info[purpose] {
			fog.Debug("%s (%d) %s", purpose, entry.SpaceID, entry.Path)
			symlinkPath := path.Join(repositoryPath,
				fmt.Sprintf("%d", entry.SpaceID))
			destPath, err := os.Readlink(symlinkPath)
			if err != nil {
				return fmt.Errorf("(%d) os.Readlink(%s) %s", entry.SpaceID,
					symlinkPath, err)
			}
			if destPath != entry.Path {
				return fmt.Errorf("(%d) path mismatch %s != %s", entry.SpaceID,
					destPath, entry.Path)
			}
		}
	}
	return nil
}
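Both FileSpaceMap methods (Examples #4 and #10) walk entries grouped by purpose. A rough sketch of the types they imply; the field names and the uint32 space id come from the code above, while the purpose values are an assumption:

type FileSpaceEntry struct {
	SpaceID uint32 // matches the uint32 returned by FindMaxAvailSpaceID
	Path    string // filesystem path backing this space
}

// FileSpaceMap groups entries by purpose.
type FileSpaceMap map[string][]FileSpaceEntry

// FileSpacePurpose lists the purposes SanityCheck iterates over
// (the values shown are assumed, e.g. tools.FileSpaceJournal).
var FileSpacePurpose = []string{"journal", "storage"}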
Example #11
// NewOutputValueFile creates an entity implementing the OutputValueFile interface
func NewOutputValueFile(fileSpaceInfo tools.FileSpaceInfo) (OutputValueFile, error) {
	var valueFile outputValueFile
	var err error

	valueFile.creationTime = tools.Timestamp()
	valueFile.md5Sum = md5.New()
	valueFile.collectionIDSet = make(map[uint32]struct{})
	repositoryPath := os.Getenv("NIMBUSIO_REPOSITORY_PATH")

	if valueFile.spaceID, err = fileSpaceInfo.FindMaxAvailSpaceID(tools.FileSpaceJournal); err != nil {
		return nil, err
	}

	if err = valueFile.insertValueFileRow(); err != nil {
		return nil, err
	}

	valueFile.filePath = tools.ComputeValueFilePath(repositoryPath, valueFile.spaceID,
		valueFile.valueFileID)

	fog.Debug("NewOutputValueFile %s", valueFile.filePath)

	dirPath := path.Dir(valueFile.filePath)
	if err = os.MkdirAll(dirPath, os.ModeDir|0755); err != nil {
		return nil, fmt.Errorf("os.MkdirAll(%s...", err)
	}

	valueFile.fileHandle, err = os.Create(valueFile.filePath)
	if err != nil {
		return nil, fmt.Errorf("os.Create(%s) %s", valueFile.filePath, err)
	}

	err = syscall.Fallocate(int(valueFile.fileHandle.Fd()), 0, 0,
		int64(MaxValueFileSize))
	if err != nil {
		return nil, fmt.Errorf("Fallocate failed %s", err)
	}

	valueFile.enableFsync = os.Getenv("NIMBUSIO_ENABLE_FSYNC") == "1"
	fog.Info("NewOutputValueFile: NIMBUSIO_ENABLE_FSYNC = %t", valueFile.enableFsync)

	return &valueFile, nil
}
Example #12
// Close the underlying file and update the database row
func (valueFile *outputValueFile) Close() error {
	if err := valueFile.Sync(); err != nil {
		return fmt.Errorf("Sync() %s %s", valueFile.filePath, err)
	}

	if err := valueFile.fileHandle.Close(); err != nil {
		return fmt.Errorf("Close() %s %s", valueFile.filePath, err)
	}

	if valueFile.bytesWritten == 0 {
		fog.Debug("OutputValueFile removing empty file %s", valueFile.filePath)
		return os.Remove(valueFile.filePath)
	}

	if err := valueFile.updateValueFileRow(); err != nil {
		return err
	}

	return nil
}
Example #13
// CancelSegment stops storing the segment
func handleCancelSegment(state *writerState, request requestCancelSegment) {
	cancel := request.Cancel
	var err error

	fog.Debug("%s CancelSegment", cancel.UserRequestID)

	key := segmentKey{cancel.UnifiedID, cancel.ConjoinedPart,
		cancel.SegmentNum}
	delete(state.SegmentMap, key)

	stmt := nodedb.Stmts["cancel-segment"]
	_, err = stmt.Exec(
		cancel.UnifiedID,
		cancel.ConjoinedPart,
		cancel.SegmentNum)

	if err != nil {
		request.resultChan <- fmt.Errorf("cancel-segment %s", err)
		return
	}

	request.resultChan <- nil
}
Example #14
// DestroyKey makes a key inaccessible
func handleDestroyKey(state *writerState, request requestDestroyKey) {
	destroyKey := request.DestroyKey
	var err error
	var ok bool
	var timestamp time.Time
	var sourceNodeID uint32
	var handoffNodeID uint32

	fog.Debug("DestroyKey (%d)", destroyKey.UnifiedIDToDestroy)

	if sourceNodeID, ok = state.NodeIDMap[destroyKey.SourceNodeName]; !ok {
		request.resultChan <- fmt.Errorf("unknown source node %s", destroyKey.SourceNodeName)
		return
	}

	if timestamp, err = tools.ParseTimestampRepr(destroyKey.TimestampRepr); err != nil {
		request.resultChan <- fmt.Errorf("unable to parse timestamp %s", err)
		return
	}

	if destroyKey.UnifiedIDToDestroy > 0 {
		if destroyKey.HandoffNodeName != "" {
			if handoffNodeID, ok = state.NodeIDMap[destroyKey.HandoffNodeName]; !ok {
				request.resultChan <- fmt.Errorf("unknown handoff node %s", destroyKey.HandoffNodeName)
				return
			}
			stmt := nodedb.Stmts["new-tombstone-for-unified-id-for-handoff"]
			_, err = stmt.Exec(
				destroyKey.CollectionID,
				destroyKey.Key,
				destroyKey.UnifiedID,
				timestamp,
				destroyKey.SegmentNum,
				destroyKey.UnifiedIDToDestroy,
				sourceNodeID,
				handoffNodeID)

			if err != nil {
				request.resultChan <- fmt.Errorf("new-tombstone-for-unified-id-for-handoff %d %s",
					destroyKey.UnifiedIDToDestroy, err)
				return
			}
		} else {
			stmt := nodedb.Stmts["new-tombstone-for-unified-id"]
			_, err = stmt.Exec(
				destroyKey.CollectionID,
				destroyKey.Key,
				destroyKey.UnifiedID,
				timestamp,
				destroyKey.SegmentNum,
				destroyKey.UnifiedIDToDestroy,
				sourceNodeID)

			if err != nil {
				request.resultChan <- fmt.Errorf("new-tombstone-for-unified-id %d %s",
					destroyKey.UnifiedIDToDestroy, err)
				return
			}
		}

		stmt := nodedb.Stmts["delete-conjoined-for-unified-id"]
		_, err = stmt.Exec(
			timestamp,
			destroyKey.CollectionID,
			destroyKey.Key,
			destroyKey.UnifiedIDToDestroy)

		if err != nil {
			request.resultChan <- fmt.Errorf("delete-conjoined-for-unified-id %d %s",
				destroyKey.UnifiedIDToDestroy, err)
			return
		}
	} else {
		if destroyKey.HandoffNodeName != "" {
			if handoffNodeID, ok = state.NodeIDMap[destroyKey.HandoffNodeName]; !ok {
				request.resultChan <- fmt.Errorf("unknown handoff node %s", destroyKey.HandoffNodeName)
				return
			}
			stmt := nodedb.Stmts["new-tombstone-for-handoff"]
			_, err = stmt.Exec(
				destroyKey.CollectionID,
				destroyKey.Key,
				destroyKey.UnifiedID,
				timestamp,
				destroyKey.SegmentNum,
				sourceNodeID,
				handoffNodeID)

			if err != nil {
				request.resultChan <- fmt.Errorf("new-tombstone-for-handoff %s", err)
				return
			}
		} else {
			stmt := nodedb.Stmts["new-tombstone"]
			_, err = stmt.Exec(
				destroyKey.CollectionID,
				destroyKey.Key,
				destroyKey.UnifiedID,
				timestamp,
				destroyKey.SegmentNum,
				sourceNodeID)

			if err != nil {
				request.resultChan <- fmt.Errorf("new-tombstone %s", err)
				return
			}
		}

		stmt := nodedb.Stmts["delete-conjoined"]
		_, err = stmt.Exec(
			timestamp,
			destroyKey.CollectionID,
			destroyKey.Key,
			destroyKey.UnifiedID)

		if err != nil {
			request.resultChan <- fmt.Errorf("delete-conjoined %s", err)
			return
		}
	}

	request.resultChan <- nil
}
Example #15
// FinishSegment finishes storing the segment
func handleFinishSegment(state *writerState, request requestFinishSegment) {
	userRequestID := request.UserRequestID
	segment := request.Segment
	file := request.File
	metaData := request.MetaData
	var err error
	var md5Digest []byte
	var timestamp time.Time

	fog.Debug("%s FinishSegment", userRequestID)

	key := segmentKey{segment.UnifiedID, segment.ConjoinedPart,
		segment.SegmentNum}
	entry, ok := state.SegmentMap[key]
	if !ok {
		request.resultChan <- fmt.Errorf("FinishSegment unknown segment %s", key)
		return
	}

	delete(state.SegmentMap, key)

	md5Digest, err = base64.StdEncoding.DecodeString(file.EncodedFileMD5Digest)
	if err != nil {
		request.resultChan <- err
		return
	}

	if timestamp, err = tools.ParseTimestampRepr(segment.TimestampRepr); err != nil {
		request.resultChan <- fmt.Errorf("unable to parse timestamp %s", err)
		return
	}

	stmt := nodedb.Stmts["finish-segment"]
	_, err = stmt.Exec(
		file.FileSize,
		file.FileAdler32,
		md5Digest,
		entry.SegmentID)

	if err != nil {
		request.resultChan <- fmt.Errorf("finish-segment %s", err)
		return
	}

	for _, metaEntry := range metaData {
		stmt := nodedb.Stmts["new-meta-data"]
		_, err = stmt.Exec(
			segment.CollectionID,
			entry.SegmentID,
			metaEntry.Key,
			metaEntry.Value,
			timestamp)

		if err != nil {
			request.resultChan <- fmt.Errorf("new-meta-data %s", err)
			return
		}
	}

	request.resultChan <- nil
}
Example #16
// handleConnection manages one HTTP connection
// expected to be run in a goroutine
func handleConnection(router routing.Router, conn net.Conn) {
	defer conn.Close()
	const bufferSize = 64 * 1024
	var err error

	requestID, err := tools.CreateUUID()
	if err != nil {
		fog.Error("%s tools.CreateUUID(): %s", conn.RemoteAddr().String(), err)
		return
	}
	fog.Info("%s starts %s", requestID, conn.RemoteAddr().String())

	request, err := http.ReadRequest(bufio.NewReaderSize(conn, bufferSize))
	if err != nil {
		fog.Error("%s %s ReadRequest failed: %s", requestID,
			conn.RemoteAddr().String(), err)
		fog.Info("%s aborts", requestID)
		return
	}

	// change the URL to point to our internal host
	request.URL.Host, err = router.Route(requestID, request)
	if err != nil {
		routerErr, ok := err.(routing.RouterError)
		if ok {
			fog.Error("%s %s, %s router error: %s",
				requestID, request.Method, request.URL, err)
			sendErrorReply(conn, routerErr.HTTPCode(), routerErr.ErrorMessage())
		} else {
			fog.Error("%s %s, %s Unexpected error type: %T %s",
				requestID, request.Method, request.URL, err, err)
		}
		fog.Info("%s aborts", requestID)
		return
	}
	request.URL.Scheme = "http"

	// drop the incoming RequestURI: it can't be set in a client request
	request.RequestURI = ""

	modifyHeaders(request, conn.RemoteAddr().String(), requestID)
	fog.Debug("%s routing %s %s", requestID, request.Method, request.URL)

	// TODO: cache the connection to the internal server
	internalConn, err := net.Dial("tcp", request.URL.Host)
	if err != nil {
		fog.Error("%s %s, %s unable to dial internal server: %s",
			requestID, request.Method, request.URL, err)
		sendErrorReply(conn, http.StatusInternalServerError, err.Error())
		fog.Info("%s aborts", requestID)
		return
	}
	defer internalConn.Close()

	// request.Write goes through a bufio.Writer; flush it so the request
	// actually reaches the internal server
	internalWriter := bufio.NewWriterSize(internalConn, bufferSize)
	if err = request.Write(internalWriter); err == nil {
		err = internalWriter.Flush()
	}
	if err != nil {
		fog.Error("%s %s, %s request.Write: %s",
			requestID, request.Method, request.URL, err)
		sendErrorReply(conn, http.StatusInternalServerError, err.Error())
		fog.Info("%s aborts", requestID)
		return
	}
	request.Body.Close()

	response, err := http.ReadResponse(bufio.NewReaderSize(internalConn, bufferSize),
		request)
	if err != nil {
		fog.Error("%s %s, %s http.ReadResponse: %s",
			requestID, request.Method, request.URL, err)
		sendErrorReply(conn, http.StatusInternalServerError, err.Error())
		fog.Info("%s aborts", requestID)
		return
	}

	// buffer the response back to the client, then flush so it is actually sent
	clientWriter := bufio.NewWriterSize(conn, bufferSize)
	if err := response.Write(clientWriter); err != nil {
		fog.Error("%s %s, %s error sending response: %s",
			requestID, request.Method, request.URL, err)
	}
	clientWriter.Flush()
	response.Body.Close()

	fog.Info("%s ends (%d) %s", requestID, response.StatusCode, response.Status)
}
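handleConnection reports router and proxy failures through sendErrorReply, which is not shown in these examples. A minimal sketch under the assumption that it writes a bare HTTP/1.1 error response straight to the client connection:

import (
	"fmt"
	"net"
	"net/http"
)

func sendErrorReply(conn net.Conn, httpCode int, message string) {
	fmt.Fprintf(conn,
		"HTTP/1.1 %d %s\r\nContent-Type: text/plain\r\nContent-Length: %d\r\n\r\n%s",
		httpCode, http.StatusText(httpCode), len(message), message)
}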
Example #17
func handleStartSegment(state *writerState, request requestStartSegment) {
	userRequestID := request.UserRequestID
	segment := request.Segment
	nodeNames := request.NodeNames

	var entry segmentMapEntry
	var err error
	var sourceNodeID uint32
	var handoffNodeID uint32
	var ok bool
	var timestamp time.Time

	fog.Debug("%s StartSegment", userRequestID)

	if sourceNodeID, ok = state.NodeIDMap[nodeNames.SourceNodeName]; !ok {
		request.resultChan <- fmt.Errorf("unknown source node %s", nodeNames.SourceNodeName)
		return
	}

	if timestamp, err = tools.ParseTimestampRepr(segment.TimestampRepr); err != nil {
		request.resultChan <- fmt.Errorf("unable to parse timestamp %s", err)
		return
	}

	if nodeNames.HandoffNodeName != "" {
		if handoffNodeID, ok = state.NodeIDMap[nodeNames.HandoffNodeName]; !ok {
			request.resultChan <- fmt.Errorf("unknown handoff node %s", nodeNames.HandoffNodeName)
			return
		}

		stmt := nodedb.Stmts["new-segment-for-handoff"]
		row := stmt.QueryRow(
			segment.CollectionID,
			segment.Key,
			segment.UnifiedID,
			timestamp,
			segment.SegmentNum,
			segment.ConjoinedPart,
			sourceNodeID,
			handoffNodeID)
		if err = row.Scan(&entry.SegmentID); err != nil {
			request.resultChan <- err
			return
		}
	} else {
		stmt := nodedb.Stmts["new-segment"]
		row := stmt.QueryRow(
			segment.CollectionID,
			segment.Key,
			segment.UnifiedID,
			timestamp,
			segment.SegmentNum,
			segment.ConjoinedPart,
			sourceNodeID)
		if err = row.Scan(&entry.SegmentID); err != nil {
			request.resultChan <- err
			return
		}
	}
	entry.LastActionTime = tools.Timestamp()

	key := segmentKey{segment.UnifiedID, segment.ConjoinedPart,
		segment.SegmentNum}

	state.SegmentMap[key] = entry

	request.resultChan <- nil
}
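The segment handlers (Examples #13, #15, #17 and #21) share two bookkeeping types: a composite key identifying one segment and the entry kept in writerState.SegmentMap while that segment is open. A sketch with assumed field types:

import "time"

// segmentKey identifies one segment; the real type likely implements
// fmt.Stringer, since it is logged with %s in the error messages above.
type segmentKey struct {
	UnifiedID     uint64
	ConjoinedPart uint32
	SegmentNum    uint8
}

type segmentMapEntry struct {
	SegmentID      uint64    // id returned by the new-segment queries
	LastActionTime time.Time // refreshed via tools.Timestamp()
}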
Example #18
// Route reads a request and decides where it should go <host:port>
func (router *routerImpl) Route(requestID string, req *http.Request) (string, error) {
	var err error

	hostName := req.Host

	router.requestCounter += 1
	fog.Debug("%s host=%s, method=%s, URL=%s", requestID, hostName, req.Method,
		req.URL)

	// TODO: be able to handle http requests from http 1.0 clients w/o a
	// host header to at least the website, if nothing else.

	if hostName == "" {
		return "", routerErrorImpl{httpCode: http.StatusBadRequest,
			errorMessage: "HOST header not found"}
	}
	routingHostName := strings.Split(hostName, ":")[0]
	if !strings.HasSuffix(routingHostName, serviceDomain) {
		return "", routerErrorImpl{httpCode: http.StatusNotFound,
			errorMessage: fmt.Sprintf("Invalid HOST '%s'", routingHostName)}
	}

	var routingMethod string
	var routedHost string

	if routingHostName == serviceDomain {
		// this is not a request specific to any particular collection
		// TODO: figure out how to route these requests.
		// in production, this might not matter.
		routingMethod = "management API"
		routedHost = router.managmentAPIDests.Next()
		fog.Debug("%s %s routed to %s by %s", requestID, req.URL.Path,
			routedHost, routingMethod)
		return routedHost, nil
	}

	destPort, ok := destPortMap[req.Method]
	if !ok {
		return "", routerErrorImpl{httpCode: http.StatusBadRequest,
			errorMessage: fmt.Sprintf("Unknown method '%s'", req.Method)}
	}

	collectionName := parseCollectionFromHostName(routingHostName)
	if collectionName == "" {
		return "", routerErrorImpl{httpCode: http.StatusNotFound,
			errorMessage: fmt.Sprintf("Unparseable host name '%s'", hostName)}
	}

	hostsForCollection, err := router.centralDB.GetHostsForCollection(collectionName)
	if err != nil {
		fog.Error("database error: collection '%s' %s", collectionName, err)
		return "", routerErrorImpl{httpCode: http.StatusInternalServerError,
			errorMessage: fmt.Sprintf("database error: collection '%s'",
				collectionName)}
	}

	if len(hostsForCollection) == 0 {
		return "", routerErrorImpl{httpCode: http.StatusNotFound,
			errorMessage: fmt.Sprintf("no hosts for collection '%s'",
				collectionName)}
	}

	availableHosts, err := router.availability.AvailableHosts(
		hostsForCollection, destPort)
	if err != nil {
		return "", routerErrorImpl{httpCode: http.StatusInternalServerError,
			errorMessage: fmt.Sprintf("collection '%s': %s", collectionName, err)}
	}
	if len(availableHosts) == 0 {
		// XXX: the python web_director retries here, after a delay.
		// IMO, that's what HTTP Status 503 is for
		return "", routerErrorImpl{httpCode: http.StatusServiceUnavailable,
			errorMessage: fmt.Sprintf("no hosts available for collection '%s'",
				collectionName)}
	}

	switch {
	case alwaysRouteToFirstNode:
		routingMethod = "NIMBUSIO_WEB_DIRECTOR_ALWAYS_FIRST_NODE"
		routedHost = availableHosts[0]
	case (req.Method == "GET" || req.Method == "HEAD") &&
		strings.HasPrefix(req.URL.Path, "/data/") &&
		len(req.URL.Path) > len("/data/"):
		routedHost, err = consistentHashDest(hostsForCollection, availableHosts,
			collectionName, req.URL.Path)
		if err != nil {
			return "", routerErrorImpl{httpCode: http.StatusInternalServerError,
				errorMessage: fmt.Sprintf("collection '%s': %s", collectionName, err)}
		}
		routingMethod = "hash"
	default:
		if router.roundRobinCounter == 0 {
			// start the round robin dispatcher at a random number, so all the
			// workers don't start on the same point.
			n, err := rand.Int(rand.Reader, big.NewInt(int64(len(hostsForCollection))))
			if err != nil {
				return "", routerErrorImpl{httpCode: http.StatusInternalServerError,
					errorMessage: fmt.Sprintf("collection '%s': %s", collectionName, err)}
			}
			router.roundRobinCounter = n.Uint64()
		} else {
			router.roundRobinCounter += 1
		}

		// XXX: the python version works with hostsForCollection and then tries
		// to find one in availableHosts. IMO, assuming the group of
		// available hosts is fairly stable, we get the same result working
		// strictly with availableHosts
		i := int(router.roundRobinCounter % uint64(len(availableHosts)))
		routedHost = availableHosts[i]
		routingMethod = "round robin"
	}

	fog.Debug("%s %s %s routed to %s by %s", requestID, collectionName,
		req.URL.Path, routedHost, routingMethod)

	return fmt.Sprintf("%s:%s", routedHost, destPort), nil
}
Example #19
// Debug logs a debug-level message with the logData prefix prepended
func (l logData) Debug(text string, args ...interface{}) {
	fog.Debug(l.prefix()+text, args...)
}
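logData itself is not shown; Debug just prepends whatever its prefix method returns before delegating to fog.Debug. A purely hypothetical minimal shape, for illustration only:

type logData struct {
	prefixText string
}

func (l logData) prefix() string {
	return l.prefixText + " "
}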
Example #20
func ReadAndDiscard(reader io.Reader) {
	fog.Debug("ReadAndDiscard starts")
	n, err := io.Copy(ioutil.Discard, reader)
	fog.Debug("ReadAndDiscard: %d, %v", n, err)
}
Example #21
func handleStoreSequence(state *writerState, request requestStoreSequence) {
	userRequestID := request.UserRequestID
	segment := request.Segment
	sequence := request.Sequence
	data := request.Data
	var err error
	var md5Digest []byte
	var offset uint64

	fog.Debug("%s StoreSequence #%d", userRequestID, sequence.SequenceNum)

	if state.ValueFile.Size()+sequence.SegmentSize >= MaxValueFileSize {
		fog.Info("value file full")

		if state.SyncTimer != nil {
			state.SyncTimer.Stop()
		}
		state.SyncTimer = nil

		if err = state.ValueFile.Close(); err != nil {
			request.resultChan <- storeSequenceResult{Err: fmt.Errorf("error closing value file %s", err)}
			return
		}

		if state.ValueFile, err = NewOutputValueFile(state.FileSpaceInfo); err != nil {
			request.resultChan <- storeSequenceResult{Err: fmt.Errorf("error opening value file %s", err)}
			return
		}

		startSyncTimer(state)
	}

	md5Digest, err = base64.StdEncoding.DecodeString(sequence.EncodedSegmentMD5Digest)
	if err != nil {
		request.resultChan <- storeSequenceResult{Err: err}
		return
	}

	key := segmentKey{segment.UnifiedID, segment.ConjoinedPart,
		segment.SegmentNum}
	entry, ok := state.SegmentMap[key]
	if !ok {
		request.resultChan <- storeSequenceResult{Err: fmt.Errorf("StoreSequence unknown segment %s", key)}
		return
	}

	offset, err = state.ValueFile.Store(segment.CollectionID, entry.SegmentID,
		data)
	if err != nil {
		request.resultChan <- storeSequenceResult{Err: fmt.Errorf("ValueFile.Store %s", err)}
		return
	}

	stmt := nodedb.Stmts["new-segment-sequence"]
	_, err = stmt.Exec(
		segment.CollectionID,
		entry.SegmentID,
		sequence.ZfecPaddingSize,
		state.ValueFile.ID(),
		sequence.SequenceNum,
		offset,
		sequence.SegmentSize,
		md5Digest,
		sequence.SegmentAdler32)
	if err != nil {
		request.resultChan <- storeSequenceResult{Err: fmt.Errorf("new-segment-sequence %s", err)}
		return
	}

	state.StatGrabber.Accumulate("nimbusio_write_requests", 1)
	state.StatGrabber.Accumulate("nimbusio_write_bytes", len(data))

	entry.LastActionTime = tools.Timestamp()
	state.SegmentMap[key] = entry

	request.resultChan <- storeSequenceResult{ValueFileID: state.ValueFile.ID()}
}
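Results from handleStoreSequence travel back over request.resultChan as storeSequenceResult values. The shape is implied by the literals above; the ValueFileID type is an assumption and only needs to match whatever state.ValueFile.ID() returns:

type storeSequenceResult struct {
	Err         error
	ValueFileID uint32
}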
Example #22
// NewWriterSocketHandler returns a function suitable for use as a handler
// by zmq.Reactor
func NewWriterSocketHandler(writerSocket *zmq4.Socket,
	messageChan chan<- types.Message) func(zmq4.State) error {

	// these messages get only an ack, not a reply
	var ackOnlyMessages = map[string]ackOnlyMessageHandler{
		"ping": handlePing,
		"resilient-server-handshake": handleHandshake,
		"resilient-server-signoff":   handleSignoff}

	return func(_ zmq4.State) error {
		var err error
		var ok bool
		var rawMessage []string
		var message types.Message

		if rawMessage, err = writerSocket.RecvMessage(0); err != nil {
			return fmt.Errorf("RecvMessage %s", err)
		}

		message.Marshalled = rawMessage[0]
		message.Data = []byte(strings.Join(rawMessage[1:], ""))

		if message.Type, err = msg.GetMessageType(message.Marshalled); err != nil {
			return fmt.Errorf("GetMessageType %s", err)
		}

		if message.ID, err = msg.GetMessageID(message.Marshalled); err != nil {
			return fmt.Errorf("GetMessageID %s", err)
		}

		reply := map[string]interface{}{
			"message-type":  "resilient-server-ack",
			"message-id":    message.ID,
			"incoming-type": message.Type,
			"accepted":      true}

		marshalledReply, err := json.Marshal(reply)
		if err != nil {
			return fmt.Errorf("Marshal %s", err)
		}

		_, err = writerSocket.SendMessage([]string{string(marshalledReply)})
		if err != nil {
			return fmt.Errorf("SendMessage %s", err)
		}

		/* ---------------------------------------------------------------
		** 2014-05-21 dougfort:
		** The python version of ResiliantServer maintains a dict of
		** "client-address" for sending replies, but IMO we don't need
		** that here because every message contains "client-address"
		** so we can decouple the reply.
		** -------------------------------------------------------------*/
		handler, ok := ackOnlyMessages[message.Type]
		if ok {
			handler(message)
			return nil
		}

		fog.Debug("writer-socket-handler received %s %s",
			message.Type, message.ID)
		messageChan <- message

		return nil
	}
}
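The ack-only dispatch table at the top of NewWriterSocketHandler assumes a handler function type along these lines (a sketch; handlePing, handleHandshake and handleSignoff are defined elsewhere in the package):

type ackOnlyMessageHandler func(message types.Message)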