// Perform a CBFS sanity check by writing a small test file and reading it back, retrying with an increasing delay until it succeeds or the attempts are exhausted.
func CbfsSanityCheck(config Configuration) error {

	uuid := NewUuid() // use uuid so other nodes on cluster don't conflict
	numAttempts := 20
	for i := 0; i < numAttempts; i++ {
		filename := fmt.Sprintf("env_check_%v_%v", uuid, i)
		content := fmt.Sprintf("Hello %v_%v", uuid, i)
		err := CbfsReadWriteFile(config, filename, content)
		if err == nil {
			logg.LogTo("ELASTIC_THOUGHT", "Cbfs sanity ok: %v", filename)
			return nil
		}
		logg.LogTo("ELASTIC_THOUGHT", "Cbfs sanity failed # %v: %v", i, filename)
		if i >= (numAttempts - 1) {
			logg.LogTo("ELASTIC_THOUGHT", "Cbfs sanity check giving up")
			return err
		} else {
			logg.LogTo("ELASTIC_THOUGHT", "Cbfs sanity check sleeping ..")
			time.Sleep(time.Duration(i) * time.Second)
			logg.LogTo("ELASTIC_THOUGHT", "Cbfs sanity check done sleeping")
		}
	}
	return fmt.Errorf("Exhausted attempts")

}
Example #2
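// CreateSensors wires two sensors into the cortex: one exposing the current game state vector and one exposing the current possible move's vector representation.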
func (c *Checkerlution) CreateSensors() {

	sensorLayer := 0.0

	sensorFuncGameState := func(syncCounter int) []float64 {
		logg.LogTo("MAIN", "sensor func game state called")
		return c.currentGameState
	}
	sensorGameStateNodeId := ng.NewSensorId("SensorGameState", sensorLayer)
	sensorGameState := &ng.Sensor{
		NodeId:         sensorGameStateNodeId,
		VectorLength:   32,
		SensorFunction: sensorFuncGameState,
	}

	sensorFuncPossibleMove := func(syncCounter int) []float64 {
		logg.LogTo("MAIN", "sensor func possible move called")
		return c.currentPossibleMove.VectorRepresentation()
	}
	sensorPossibleMoveNodeId := ng.NewSensorId("SensorPossibleMove", sensorLayer)
	sensorPossibleMove := &ng.Sensor{
		NodeId:         sensorPossibleMoveNodeId,
		VectorLength:   5, // start_location, is_king, final_location, will_be_king, amt_would_capture
		SensorFunction: sensorFuncPossibleMove,
	}
	c.cortex.SetSensors([]*ng.Sensor{sensorGameState, sensorPossibleMove})

}
Example #3
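// TestUntarGzWithToc builds an in-memory test archive, extracts it into a temp directory via untarWithToc, and asserts that no error is returned.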
func TestUntarGzWithToc(t *testing.T) {

	// Create a test tar archive
	buf := new(bytes.Buffer)

	var files = []tarFile{
		{"foo/1.txt", "."},
		{"foo/2.txt", "."},
		{"bar/1.txt", "."},
		{"bar/2.txt", "."},
		{"bar/3.txt", "."},
		{"bar/4.txt", "."},
		{"bar/5.txt", "."},
	}
	createArchive(buf, files)
	reader := bytes.NewReader(buf.Bytes())

	tempDir := TempDir()
	logg.LogTo("TEST", "tempDir: %v", tempDir)
	toc, err := untarWithToc(reader, tempDir)
	assert.True(t, err == nil)

	logg.LogTo("TEST", "toc: %v, err: %v", toc, err)

	// TODO: add assertions

}
Example #4
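// handleRpcResponse scans incoming deliveries for the one whose correlation id matches this request and forwards its body to rpcResponseChan as an OcrResult.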
func (c OcrRpcClient) handleRpcResponse(deliveries <-chan amqp.Delivery, correlationUuid string, rpcResponseChan chan OcrResult) {
	logg.LogTo("OCR_CLIENT", "looping over deliveries..")
	for d := range deliveries {
		if d.CorrelationId == correlationUuid {
			logg.LogTo(
				"OCR_CLIENT",
				"got %dB delivery: [%v] %q.  Reply to: %v",
				len(d.Body),
				d.DeliveryTag,
				d.Body,
				d.ReplyTo,
			)

			ocrResult := OcrResult{
				Text: string(d.Body),
			}

			logg.LogTo("OCR_CLIENT", "send result to rpcResponseChan")
			rpcResponseChan <- ocrResult
			logg.LogTo("OCR_CLIENT", "sent result to rpcResponseChan")

			return

		} else {
			logg.LogTo("OCR_CLIENT", "ignoring delivery w/ correlation id: %v", d.CorrelationId)
		}

	}
}
Example #5
// Follow changes feed.  This will typically be run in its own goroutine.
func (c ChangesListener) FollowChangesFeed() {

	logg.LogTo("CHANGES", "going to follow changes feed")

	var since interface{}

	handleChange := func(reader io.Reader) interface{} {
		logg.LogTo("CHANGES", "handleChange() callback called")
		changes, err := decodeChanges(reader)
		if err != nil {
			// It's very common for this to time out while waiting for new changes.
			// Since we want to follow the changes feed forever, just log the error.
			logg.LogTo("CHANGES", "%T decoding changes: %v.", err, err)
			return since
		}
		c.processChanges(changes)

		since = changes.LastSequence
		logg.LogTo("CHANGES", "returning since: %v", since)
		return since

	}

	options := map[string]interface{}{}
	options["feed"] = "longpoll"

	logg.LogTo("CHANGES", "Following changes feed: %+v.", options)

	// this will block until the handleChange callback returns nil
	c.Database.Changes(handleChange, options)

	logg.LogPanic("Changes listener died -- this should never happen")

}
Example #6
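// handle processes each delivery, generates an OCR result, and sends it back to the reply queue; it signals on the done channel if a response cannot be sent or the deliveries channel closes.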
func (w *OcrRpcWorker) handle(deliveries <-chan amqp.Delivery, done chan error) {
	for d := range deliveries {
		logg.LogTo(
			"OCR_WORKER",
			"got %d byte delivery: [%v]. Routing key: %v  Reply to: %v",
			len(d.Body),
			d.DeliveryTag,
			d.RoutingKey,
			d.ReplyTo,
		)

		ocrResult, err := w.resultForDelivery(d)
		if err != nil {
			msg := "Error generating ocr result.  Error: %v"
			logg.LogError(fmt.Errorf(msg, err))
		}

		logg.LogTo("OCR_WORKER", "Sending rpc response: %v", ocrResult)
		err = w.sendRpcResponse(ocrResult, d.ReplyTo, d.CorrelationId)
		if err != nil {
			msg := "Error returning ocr result: %v.  Error: %v"
			logg.LogError(fmt.Errorf(msg, ocrResult, err))
			// if we can't send our response, let's just abort
			done <- err
			break
		}

	}
	logg.LogTo("OCR_WORKER", "handle: deliveries channel closed")
	done <- fmt.Errorf("handle: deliveries channel closed")
}
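Example #7
// TestChooseBestMove runs a Checkerlution cortex against a fake game document and asserts that the move it chooses is one of the possible moves.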
func TestChooseBestMove(t *testing.T) {

	ng.SeedRandom()
	logg.LogKeys["MAIN"] = true

	checkerlution := &Checkerlution{}
	checkerlution.ourTeamId = RED_TEAM

	checkerlution.CreateNeurgoCortex()
	cortex := checkerlution.cortex
	cortex.Run()

	gameState, possibleMoves := FakeGameDocument()
	bestMove := checkerlution.chooseBestMove(gameState, possibleMoves)
	logg.LogTo("TEST", "bestMove: %v", &bestMove)

	found := false
	for _, possibleMove := range possibleMoves {
		logg.LogTo("TEST", "possibleMove: %v", &possibleMove)
		if possibleMove.Equals(bestMove) {
			found = true
		}
	}
	assert.True(t, found)

	cortex.Shutdown()

}
Example #8
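// TopologyOrWeightMutator randomly selects a class of mutation (activation, weight, or topological), initializes the cortex, and retries randomly chosen mutators until one takes effect.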
func TopologyOrWeightMutator(cortex *ng.Cortex) (success bool, result MutateResult) {

	randomNumber := ng.RandomIntInRange(0, 100)
	didMutate := false
	var mutators []CortexMutator
	if randomNumber > 90 {
		mutators = []CortexMutator{MutateActivation}
	} else if randomNumber > 80 {
		mutators = []CortexMutator{MutateAllWeightsBellCurve}
	} else if randomNumber > 20 {
		// apply topological mutation
		includeNonTopological := false
		mutators = CortexMutatorsNonRecurrent(includeNonTopological)
	} else {
		mutators = CortexMutatorsNonTopological()
	}
	// before we mutate the cortex, we need to init it,
	// otherwise things like Outsplice will fail because
	// there are no DataChan's.
	cortex.Init()
	for i := 0; i <= 100; i++ {
		randInt := RandomIntInRange(0, len(mutators))
		mutator := mutators[randInt]
		didMutate, _ = mutator(cortex)
		if !didMutate {
			logg.LogTo("NEURVOLVE", "Mutate didn't work, retrying...")
			continue
		}
		break
	}
	logg.LogTo("NEURVOLVE", "did mutate: %v", didMutate)
	success = didMutate
	result = "nothing"
	return
}
Example #9
// Run this job
func (j *TrainingJob) Run(wg *sync.WaitGroup) {

	defer wg.Done()

	logg.LogTo("TRAINING_JOB", "Run() called!")

	updatedState, err := j.UpdateProcessingState(Processing)
	if err != nil {
		j.recordProcessingError(err)
		return
	}

	if !updatedState {
		logg.LogTo("TRAINING_JOB", "%+v already processed.  Ignoring.", j)
		return
	}

	j.StdOutUrl = j.getStdOutCbfsUrl()
	j.StdErrUrl = j.getStdErrCbfsUrl()

	if err := j.extractData(); err != nil {
		j.recordProcessingError(err)
		return
	}

	if err := j.runCaffe(); err != nil {
		j.recordProcessingError(err)
		return
	}

	j.FinishedSuccessfully(j.Configuration.DbConnection(), "")

}
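Example #10
// ServeHTTP extracts the multipart/related parts of the request, dispatches the OCR request over RabbitMQ, and writes the decoded text back to the client.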
func (s *OcrHttpMultipartHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {

	defer req.Body.Close()

	ocrRequest, err := s.extractParts(req)
	if err != nil {
		logg.LogError(err)
		errStr := fmt.Sprintf("Error extracting multipart/related parts: %v", err)
		http.Error(w, errStr, 500)
		return
	}

	logg.LogTo("OCR_HTTP", "ocrRequest: %v", ocrRequest)

	ocrResult, err := HandleOcrRequest(ocrRequest, s.RabbitConfig)

	if err != nil {
		msg := "Unable to perform OCR decode.  Error: %v"
		errMsg := fmt.Sprintf(msg, err)
		logg.LogError(fmt.Errorf(errMsg))
		http.Error(w, errMsg, 500)
		return
	}

	logg.LogTo("OCR_HTTP", "ocrResult: %v", ocrResult)

	fmt.Fprint(w, ocrResult.Text)

}
Example #11
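// RunPopulationTrainer trains an initial population against the scape, saves the fittest cortex to a JSON file, and verifies it against the XNOR training samples.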
func RunPopulationTrainer(pt *nv.PopulationTrainer) bool {

	population := getInitialPopulation()
	scape := getScape()

	fitPopulation, succeeded := pt.Train(population, scape, nv.NewNullRecorder())

	if succeeded {
		logg.LogTo("MAIN", "Successfully trained!")

		fittestCortex := fitPopulation[0]
		logg.LogTo("MAIN", "Fitness: %v", fittestCortex.Fitness)

		filename := fmt.Sprintf("/tmp/checkerlution-%v.json", time.Now().Unix())
		logg.LogTo("MAIN", "Saving Cortex to %v", filename)
		cortex := fittestCortex.Cortex
		cortex.MarshalJSONToFile(filename)

		// verify it can now solve the training set
		verified := cortex.Verify(ng.XnorTrainingSamples())
		if !verified {
			logg.LogTo("MAIN", "Failed to verify neural net")
			succeeded = false
		}

	}

	if !succeeded {
		logg.LogTo("MAIN", "Failed to train neural net")
	}

	return succeeded

}
Example #12
// Save a new version of Datafile to the db
func (d Datafile) Save(db couch.Database) (*Datafile, error) {

	idToRetrieve := ""

	switch d.HasValidId() {
	case true:
		logg.LogTo("MODEL", "calling db.Edit()")
		_, err := db.Edit(d)
		if err != nil {
			return nil, err
		}
		idToRetrieve = d.Id
	default:
		logg.LogTo("MODEL", "calling db.Insert()")
		id, _, err := db.Insert(d)
		if err != nil {
			return nil, err
		}
		idToRetrieve = id
	}

	// load latest version from db to get the _id and _rev fields
	datafile := &Datafile{}
	err := db.Retrieve(idToRetrieve, datafile)
	if err != nil {
		return nil, err
	}

	return datafile, nil

}
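Example #13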
// Creates datasets from a datafile
func (e EndpointContext) CreateDataSetsEndpoint(c *gin.Context) {

	user := c.MustGet(MIDDLEWARE_KEY_USER).(User)
	db := c.MustGet(MIDDLEWARE_KEY_DB).(couch.Database)
	logg.LogTo("REST", "user: %v db: %v", user, db)

	dataset := NewDataset(e.Configuration)

	// bind the input struct to the JSON request
	if ok := c.Bind(dataset); !ok {
		errMsg := fmt.Sprintf("Invalid input")
		c.String(400, errMsg)
		return
	}

	logg.LogTo("REST", "dataset: %+v", dataset)

	// save dataset in db
	if err := dataset.Insert(); err != nil {
		c.String(500, err.Error())
		return
	}

	// the changes listener will see new datafile and download to cbfs

	// update with urls of training/testing artifacts (which don't exist yet)
	if err := dataset.AddArtifactUrls(); err != nil {
		errMsg := fmt.Sprintf("Error updating dataset: %+v.  Err: %v", dataset, err)
		c.String(500, errMsg)
		return
	}

	c.JSON(201, dataset)

}
Example #14
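// RunPopulationTrainerLoop repeatedly runs the population trainer up to maxIterations times, returning false on the first failure.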
func RunPopulationTrainerLoop(maxIterations int) bool {

	// create population trainer ...
	pt := &nv.PopulationTrainer{
		FitnessThreshold: ng.FITNESS_THRESHOLD,
		MaxGenerations:   1000,
		CortexMutator:    nv.MutateAllWeightsBellCurve,
		// CortexMutator: nv.MutateWeights,
		// CortexMutator: RandomNeuronMutator,
		// CortexMutator:       nv.TopologyOrWeightMutator,
		NumOpponents:        5,
		SnapshotRequestChan: make(chan chan nv.EvaluatedCortexes),
	}
	nv.RegisterHandlers(pt)

	for i := 0; i < maxIterations; i++ {
		succeeded := RunPopulationTrainer(pt)
		if !succeeded {
			logg.LogTo("MAIN", "Population trainer succeeded %d times and then failed this time", i)
			return false
		} else {
			logg.LogTo("MAIN", "Population trainer succeeded %d times", i)

		}
	}
	logg.LogTo("MAIN", "Population succeeded %d times", maxIterations)
	return true
}
Example #15
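// TestTesseractEngineWithJson exercises the tesseract engine with several JSON request variants against a test image and asserts that each decode succeeds.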
func TestTesseractEngineWithJson(t *testing.T) {

	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	testJsons := []string{}
	testJsons = append(testJsons, `{"engine":"tesseract"}`)
	testJsons = append(testJsons, `{"engine":"tesseract", "engine_args":{}}`)
	testJsons = append(testJsons, `{"engine":"tesseract", "engine_args":null}`)
	testJsons = append(testJsons, `{"engine":"tesseract", "engine_args":{"config_vars":{"tessedit_char_whitelist":"0123456789"}, "psm":"1"}}`)
	testJsons = append(testJsons, `{"engine":"tesseract", "engine_args":{"config_vars":{"tessedit_create_hocr":"1", "tessedit_pageseg_mode":"1"}, "psm":"3"}}`)

	for _, testJson := range testJsons {
		logg.LogTo("TEST", "testJson: %v", testJson)
		ocrRequest := OcrRequest{}
		err := json.Unmarshal([]byte(testJson), &ocrRequest)
		assert.True(t, err == nil)
		bytes, err := ioutil.ReadFile("docs/testimage.png")
		assert.True(t, err == nil)
		ocrRequest.ImgBytes = bytes
		engine := NewOcrEngine(ocrRequest.EngineType)
		result, err := engine.ProcessRequest(ocrRequest)
		logg.LogTo("TEST", "err: %v", err)
		assert.True(t, err == nil)
		logg.LogTo("TEST", "result: %v", result)

	}

}
Example #16
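// stressTest submits randomly chosen image URLs to the OCR service for the configured number of iterations, logging each result, then signals completion on doneChannel.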
func stressTest(doneChannel chan<- bool) {

	imageUrls := imageUrls()
	logg.LogTo("CLI", "imageUrls: %v", imageUrls)
	logg.LogTo("CLI", "numIterations: %v", *numIterations)

	openOcrUrl := *ocrUrl
	client := ocrclient.NewHttpClient(openOcrUrl)

	for i := 0; i < *numIterations; i++ {
		index := randomIntInRange(0, numTestImages)
		imageUrl := imageUrls[index]
		logg.LogTo("CLI", "OCR decoding: %v.  index: %d", imageUrl, index)

		ocrRequest := ocrclient.OcrRequest{
			ImgUrl:     imageUrl,
			EngineType: ocrclient.ENGINE_TESSERACT,
		}

		ocrDecoded, err := client.DecodeImageUrl(ocrRequest)
		if err != nil {
			logg.LogError(fmt.Errorf("Error decoding image: %v", err))
		} else {
			logg.LogTo("CLI", "OCR decoded: %v", ocrDecoded)
		}
	}

	doneChannel <- true

}
Example #17
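// ProcessRequest writes the request image (from a URL or raw bytes) to a temp file, builds the tesseract engine arguments, and runs OCR on that file.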
func (t TesseractEngine) ProcessRequest(ocrRequest OcrRequest) (OcrResult, error) {

	tmpFileName, err := func() (string, error) {
		if ocrRequest.ImgUrl != "" {
			return t.tmpFileFromImageUrl(ocrRequest.ImgUrl)
		} else {
			return t.tmpFileFromImageBytes(ocrRequest.ImgBytes)
		}

	}()

	if err != nil {
		logg.LogTo("OCR_TESSERACT", "error getting tmpFileName")
		return OcrResult{}, err
	}

	defer os.Remove(tmpFileName)

	engineArgs, err := NewTesseractEngineArgs(ocrRequest)
	if err != nil {
		logg.LogTo("OCR_TESSERACT", "error getting engineArgs")
		return OcrResult{}, err
	}

	ocrResult, err := t.processImageFile(tmpFileName, *engineArgs)

	return ocrResult, err

}
Example #18
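// ServeHTTP decodes the JSON OCR request from the body, dispatches it over RabbitMQ, and writes the decoded text back to the client.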
func (s *OcrHttpHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {

	logg.LogTo("OCR_HTTP", "serveHttp called")
	defer req.Body.Close()

	ocrRequest := OcrRequest{}
	decoder := json.NewDecoder(req.Body)
	err := decoder.Decode(&ocrRequest)
	if err != nil {
		logg.LogError(err)
		http.Error(w, "Unable to unmarshal json", 500)
		return
	}

	ocrResult, err := HandleOcrRequest(ocrRequest, s.RabbitConfig)

	if err != nil {
		msg := "Unable to perform OCR decode.  Error: %v"
		errMsg := fmt.Sprintf(msg, err)
		logg.LogError(fmt.Errorf(errMsg))
		http.Error(w, errMsg, 500)
		return
	}

	logg.LogTo("OCR_HTTP", "ocrResult: %v", ocrResult)

	fmt.Fprint(w, ocrResult.Text)

}
Example #19
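// logWeights logs the weights of each inbound connection to the neuron, along with the neuron's bias.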
func logWeights(neuron *Neuron) {
	for _, inboundConnection := range neuron.Inbound {
		logmsg := fmt.Sprintf("%v -> %v weights: %v", inboundConnection.NodeId.UUID, neuron.NodeId.UUID, inboundConnection.Weights)
		logg.LogTo("NODE_STATE", logmsg)
		logmsg = fmt.Sprintf("%v bias: %v", neuron.NodeId.UUID, neuron.Bias)
		logg.LogTo("NODE_STATE", logmsg)
	}
}
Example #20
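// confirmDelivery blocks until the broker acks or nacks the published message and logs the outcome.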
func confirmDelivery(ack, nack chan uint64) {
	select {
	case tag := <-ack:
		logg.LogTo("OCR_CLIENT", "confirmed delivery, tag: %v", tag)
	case tag := <-nack:
		logg.LogTo("OCR_CLIENT", "failed to confirm delivery: %v", tag)
	}
}
Example #21
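// preprocess runs the DetectText stroke-width transform over the request image via temp files and replaces ImgBytes with the transformed output.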
func (s StrokeWidthTransformer) preprocess(ocrRequest *OcrRequest) error {

	// write bytes to a temp file

	tmpFileNameInput, err := createTempFileName()
	tmpFileNameInput = fmt.Sprintf("%s.png", tmpFileNameInput)
	if err != nil {
		return err
	}
	defer os.Remove(tmpFileNameInput)

	tmpFileNameOutput, err := createTempFileName()
	tmpFileNameOutput = fmt.Sprintf("%s.png", tmpFileNameOutput)
	if err != nil {
		return err
	}
	defer os.Remove(tmpFileNameOutput)

	err = saveBytesToFileName(ocrRequest.ImgBytes, tmpFileNameInput)
	if err != nil {
		return err
	}

	// run DecodeText binary on it (if not in path, print warning and do nothing)
	darkOnLightSetting := s.extractDarkOnLightParam(*ocrRequest)
	logg.LogTo(
		"PREPROCESSOR_WORKER",
		"DetectText on %s -> %s with %s",
		tmpFileNameInput,
		tmpFileNameOutput,
		darkOnLightSetting,
	)
	out, err := exec.Command(
		"DetectText",
		tmpFileNameInput,
		tmpFileNameOutput,
		darkOnLightSetting,
	).CombinedOutput()
	if err != nil {
		logg.LogFatal("Error running command: %s.  out: %s", err, out)
	}
	logg.LogTo("PREPROCESSOR_WORKER", "output: %v", string(out))

	// read bytes from output file into ocrRequest.ImgBytes
	resultBytes, err := ioutil.ReadFile(tmpFileNameOutput)
	if err != nil {
		return err
	}

	ocrRequest.ImgBytes = resultBytes

	return nil

}
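Example #22
// handleDelivery unmarshals the OCR request, applies this worker's preprocessing step, and republishes the request to the next routing key in the pipeline, preserving the reply-to and correlation id.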
func (w *PreprocessorRpcWorker) handleDelivery(d amqp.Delivery) error {

	ocrRequest := OcrRequest{}
	err := json.Unmarshal(d.Body, &ocrRequest)
	if err != nil {
		msg := "Error unmarshaling json: %v.  Error: %v"
		errMsg := fmt.Sprintf(msg, string(d.Body), err)
		logg.LogError(fmt.Errorf(errMsg))
		return err
	}

	logg.LogTo("PREPROCESSOR_WORKER", "ocrRequest before: %v", ocrRequest)
	routingKey := ocrRequest.nextPreprocessor(w.rabbitConfig.RoutingKey)
	logg.LogTo("PREPROCESSOR_WORKER", "publishing with routing key %q", routingKey)
	logg.LogTo("PREPROCESSOR_WORKER", "ocrRequest after: %v", ocrRequest)

	err = w.preprocessImage(&ocrRequest)
	if err != nil {
		msg := "Error preprocessing image: %v.  Error: %v"
		errMsg := fmt.Sprintf(msg, ocrRequest, err)
		logg.LogError(fmt.Errorf(errMsg))
		return err
	}

	ocrRequestJson, err := json.Marshal(ocrRequest)
	if err != nil {
		return err
	}

	logg.LogTo("PREPROCESSOR_WORKER", "sendRpcResponse to: %v", routingKey)

	if err := w.channel.Publish(
		w.rabbitConfig.Exchange, // publish to an exchange
		routingKey,              // routing to 0 or more queues
		false,                   // mandatory
		false,                   // immediate
		amqp.Publishing{
			Headers:         amqp.Table{},
			ContentType:     "text/plain",
			ContentEncoding: "",
			Body:            []byte(ocrRequestJson),
			DeliveryMode:    amqp.Transient, // 1=non-persistent, 2=persistent
			Priority:        0,              // 0-9
			ReplyTo:         d.ReplyTo,
			CorrelationId:   d.CorrelationId,
			// a bunch of application/implementation-specific fields
		},
	); err != nil {
		return err
	}
	logg.LogTo("PREPROCESSOR_WORKER", "handleDelivery succeeded")

	return nil
}
Example #23
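// computeScalarOutput sums the weighted inputs, adds the bias, and applies the activation function, logging each intermediate value.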
func (neuron *Neuron) computeScalarOutput(weightedInputs []*weightedInput) float64 {
	output := neuron.weightedInputDotProductSum(weightedInputs)
	logmsg := fmt.Sprintf("%v raw output: %v", neuron.NodeId.UUID, output)
	logg.LogTo("NODE_STATE", logmsg)
	output += neuron.Bias
	logmsg = fmt.Sprintf("%v raw output + bias: %v", neuron.NodeId.UUID, output)
	logg.LogTo("NODE_STATE", logmsg)
	output = neuron.ActivationFunction.ActivationFunction(output)
	logmsg = fmt.Sprintf("%v after activation: %v", neuron.NodeId.UUID, output)
	logg.LogTo("NODE_STATE", logmsg)
	return output
}
Example #24
func main() {
	switch kingpin.MustParse(app.Parse(os.Args[1:])) {
	case "stress":
		logg.LogTo("CLI", "do stress test")
		stressTestLauncher()
	case "upload":
		logg.LogTo("CLI", "do upload")
		uploadLauncher()
	default:
		logg.LogTo("CLI", "oops, nothing to do")
	}
}
Example #25
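// confirmDeliveryWorker waits for an ack or nack from the broker and panics if no confirmation arrives before RPC_RESPONSE_TIMEOUT.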
func confirmDeliveryWorker(ack, nack chan uint64) {
	logg.LogTo("OCR_WORKER", "awaiting delivery confirmation ...")
	select {
	case tag := <-ack:
		logg.LogTo("OCR_WORKER", "confirmed delivery, tag: %v", tag)
	case tag := <-nack:
		logg.LogTo("OCR_WORKER", "failed to confirm delivery: %v", tag)
	case <-time.After(RPC_RESPONSE_TIMEOUT):
		// this is bad, the worker will probably be dysfunctional
		// at this point, so panic
		logg.LogPanic("timeout trying to confirm delivery")
	}
}
Example #26
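// newFolderWatcher attaches an fsnotify watcher to each directory in dirList, syncing documents and attachments on chmod events and deleting documents on rename events.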
func newFolderWatcher(dirList []string) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	done := make(chan bool)
	go func() {
		for {
			select {
			case event := <-watcher.Events:
				logg.LogTo(TagLog, "New Event %v", event)

				if event.Op&fsnotify.Chmod == fsnotify.Chmod {
					f, _ := os.Stat(event.Name)
					if isJSON(f.Name()) && !isHidden(f.Name()) {
						err = NewLocalDocument(event.Name, &resources)
					} else if !isHidden(f.Name()) {
						err = NewLocalAttachment(event.Name, &resources)
					}

					if err != nil {
						logg.LogTo(TagError, "%v", err)
					} else {
						patchFiles(resources)
					}
				}

				if event.Op&fsnotify.Rename == fsnotify.Rename {
					documentID := getDocumentID(event.Name)
					err := deleteDocument(documentID)
					if err != nil {
						logg.LogTo(TagError, "Error deleting document : %v", err)
					}
				}
			case err := <-watcher.Errors:
				logg.LogTo(TagError, "%v", err)
			}
		}
	}()

	for _, dir := range dirList {
		logg.LogTo(TagLog, "attaching watcher to %s", dir)
		err = watcher.Add(dir)
		if err != nil {
			logg.LogPanic("Error attaching fs watcher : %v", err)
		}
	}
	<-done
}
Example #27
// Copy the contents of Datafile.Url to CBFS and return the cbfs dest path
func (d Datafile) CopyToBlobStore(db couch.Database, blobStore BlobStore) (string, error) {

	if !d.HasValidId() {
		errMsg := fmt.Errorf("Datafile: %+v must have an id", d)
		logg.LogError(errMsg)
		return "", errMsg
	}

	if len(d.Url) == 0 {
		errMsg := fmt.Errorf("Datafile: %+v must have a non empty url", d)
		logg.LogError(errMsg)
		return "", errMsg
	}

	logg.LogTo("MODEL", "datafile url: |%v|", d.Url)

	// figure out dest path to save to on blobStore
	u, err := url.Parse(d.Url)
	if err != nil {
		errMsg := fmt.Errorf("Error parsing: %v. Err %v", d.Url, err)
		logg.LogError(errMsg)
		return "", errMsg
	}
	urlPath := u.Path
	_, filename := path.Split(urlPath)
	destPath := fmt.Sprintf("%v/%v", d.Id, filename)

	// open input stream to url
	resp, err := http.Get(d.Url)
	if err != nil {
		errMsg := fmt.Errorf("Error opening: %v. Err %v", d.Url, err)
		logg.LogError(errMsg)
		return "", errMsg
	}
	defer resp.Body.Close()

	// write to blobStore
	options := BlobPutOptions{}
	options.ContentType = resp.Header.Get("Content-Type")
	if err := blobStore.Put("", destPath, resp.Body, options); err != nil {
		errMsg := fmt.Errorf("Error writing %v to blobStore: %v", destPath, err)
		logg.LogError(errMsg)
		return "", errMsg
	}

	logg.LogTo("MODEL", "copied datafile url %v to blobStore: %v", d.Url, destPath)

	return destPath, nil

}
Example #28
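// scatterOutput fans the data message out to every outbound connection's data channel, panicking if the message has no inputs.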
func (sensor *Sensor) scatterOutput(dataMessage *DataMessage) {

	if len(dataMessage.Inputs) == 0 {
		logg.LogPanic("cannot scatter empty data message")
	}

	for _, outboundConnection := range sensor.Outbound {
		logmsg := fmt.Sprintf("%v -> %v: %v", sensor.NodeId.UUID,
			outboundConnection.NodeId.UUID, dataMessage)
		logg.LogTo("NODE_PRE_SEND", logmsg)
		dataChan := outboundConnection.DataChan
		dataChan <- dataMessage
		logg.LogTo("NODE_POST_SEND", logmsg)
	}
}
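Example #29
// RunTopologyMutatingTrainer trains a basic cortex on the XNOR training set using a topology-mutating trainer backed by a stochastic hill climber, then verifies the result.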
func RunTopologyMutatingTrainer() bool {

	ng.SeedRandom()

	// training set
	examples := ng.XnorTrainingSamples()

	// create network with topology capable of solving XNOR
	cortex := ng.BasicCortex()

	// verify it can not yet solve the training set (since training would be useless in that case)
	verified := cortex.Verify(examples)
	if verified {
		panic("neural net already trained, nothing to do")
	}

	shc := &nv.StochasticHillClimber{
		FitnessThreshold:           ng.FITNESS_THRESHOLD,
		MaxIterationsBeforeRestart: 20000,
		MaxAttempts:                10,
		WeightSaturationRange:      []float64{-10000, 10000},
	}

	tmt := &nv.TopologyMutatingTrainer{
		MaxAttempts:                100,
		MaxIterationsBeforeRestart: 5,
		StochasticHillClimber:      shc,
	}
	cortexTrained, succeeded := tmt.TrainExamples(cortex, examples)
	if succeeded {
		logg.LogTo("MAIN", "Successfully trained net: %v", ng.JsonString(cortexTrained))

		// verify it can now solve the training set
		verified = cortexTrained.Verify(examples)
		if !verified {
			logg.LogTo("MAIN", "Failed to verify neural net")
			succeeded = false
		}

	}

	if !succeeded {
		logg.LogTo("MAIN", "Failed to train neural net")
	}

	return succeeded

}
Example #30
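// Run accumulates inbound data messages until all expected inputs have arrived, then computes the scalar output, invokes the actuator function, and signals the cortex sync channel; it exits when a close request is received.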
func (actuator *Actuator) Run() {

	defer actuator.wg.Done()

	actuator.checkRunnable()

	weightedInputs := createEmptyWeightedInputs(actuator.Inbound)

	closed := false

	for {

		select {
		case responseChan := <-actuator.Closing:
			closed = true
			responseChan <- true
			break
		case dataMessage := <-actuator.DataChan:
			actuator.logPostDataReceive(dataMessage)
			recordInput(weightedInputs, dataMessage)
		}

		if closed {
			actuator.Closing = nil
			actuator.DataChan = nil
			break
		}

		if receiveBarrierSatisfied(weightedInputs) {

			scalarOutput := actuator.computeScalarOutput(weightedInputs)
			actuator.ActuatorFunction(scalarOutput)

			if actuator.Cortex != nil && actuator.Cortex.SyncChan != nil {
				logmsg := fmt.Sprintf("%v -> %v", actuator.NodeId.UUID, actuator.Cortex.NodeId.UUID)
				logg.LogTo("ACTUATOR_SYNC", logmsg)

				actuator.Cortex.SyncChan <- actuator.NodeId
			} else {
				logg.LogTo("ACTUATOR_SYNC", "Could not sync actuator: %v", actuator)
			}
			weightedInputs = createEmptyWeightedInputs(actuator.Inbound)

		}

	}

}