func init() { //set logging logg.LogKeys[TagError] = true logg.LogKeys[TagDiff] = true if DEBUG == false { logg.LogKeys[TagLog] = true } kingpin.Parse() if *configFileName == "" { kingpin.Errorf("Config file name missing") return } configFile, err := os.Open(*configFileName) if err != nil { logg.LogPanic("Unable to open file: %v. Err: %v", *configFileName, err.Error()) return } defer configFile.Close() configReader := bufio.NewReader(configFile) err = parseConfigFile(configReader) if err != nil { logg.LogPanic("Erro parsing the config file: %v", err) } }
//authenticate uses a custom service to authenticate against a credentials repository like Active Directory //and returns a session from sync gateway func authenticate() AuthResponse { request, err := http.NewRequest("POST", authConfig.ServerURL, bytes.NewReader([]byte("{\"username\": \""+authConfig.Username+"\", \"password\": \""+authConfig.Password+"\"}"))) if err != nil { logg.LogPanic("Error creating request: %v", err) } logRequest(request) response, err := globalHTTP.Do(request) if err != nil { logg.LogPanic("Error authenticating: %v", err) } defer response.Body.Close() authResponse := AuthResponse{} document, err := ioutil.ReadAll(response.Body) if err != nil { logg.LogPanic("Error reading contents: %v", err) } json.Unmarshal(document, &authResponse) return authResponse }
func main() { var preprocessor string flagFunc := func() { flag.StringVar( &preprocessor, "preprocessor", "identity", "The preprocessor to use, eg, stroke-width-transform", ) } rabbitConfig := ocrworker.DefaultConfigFlagsOverride(flagFunc) // inifinite loop, since sometimes worker <-> rabbitmq connection // gets broken. see https://github.com/tleyden/open-ocr/issues/4 for { logg.LogTo("PREPROCESSOR_WORKER", "Creating new Preprocessor Worker") preprocessorWorker, err := ocrworker.NewPreprocessorRpcWorker( rabbitConfig, preprocessor, ) if err != nil { logg.LogPanic("Could not create rpc worker: %v", err) } preprocessorWorker.Run() // this happens when connection is closed err = <-preprocessorWorker.Done logg.LogError(fmt.Errorf("Preprocessor Worker failed with error: %v", err)) } }
func (game *Game) InitDbConnection() { db, error := couch.Connect(SERVER_URL) if error != nil { logg.LogPanic("Error connecting to %v: %v", SERVER_URL, error) } game.db = db }
func setAuth(request *http.Request) { if authConfig.Username != "" && authConfig.Password != "" { if authConfig.SimpleAuth { request.SetBasicAuth(authConfig.Username, authConfig.Password) } else { session := authenticate() layout := "2006-01-02T15:04:05Z07:00" expire, err := time.Parse(layout, session.Expires) if err != nil { logg.LogPanic("Error parsing time: %v", err) } rawCookie := []string{session.CookieName + "=" + session.SessionID} maxAge := 0 secure := true httpOnly := true path := "/" cookie := http.Cookie{session.CookieName, session.SessionID, path, config.SyncURL, expire, expire.Format(time.UnixDate), maxAge, secure, httpOnly, rawCookie[0], rawCookie} request.AddCookie(&cookie) } } }
func NewUuid() string { u4, err := uuid.NewV4() if err != nil { logg.LogPanic("Error generating uuid", err) } return fmt.Sprintf("%s", u4) }
// Follow changes feed. This will typically be run in its own goroutine.
func (c ChangesListener) FollowChangesFeed() {

	logg.LogTo("CHANGES", "going to follow changes feed")

	// Last sequence seen. Starts nil (server default starting point) and
	// is advanced after each successfully processed batch; the closure
	// below captures and mutates it across longpoll iterations.
	var since interface{}

	// Callback invoked by Database.Changes for each response body.
	// Returning a non-nil value continues the feed from that sequence.
	handleChange := func(reader io.Reader) interface{} {
		logg.LogTo("CHANGES", "handleChange() callback called")
		changes, err := decodeChanges(reader)
		if err != nil {
			// it's very common for this to timeout while waiting for new changes.
			// since we want to follow the changes feed forever, just log an error
			logg.LogTo("CHANGES", "%T decoding changes: %v.", err, err)
			return since
		}
		c.processChanges(changes)
		since = changes.LastSequence
		logg.LogTo("CHANGES", "returning since: %v", since)
		return since
	}

	options := map[string]interface{}{}
	options["feed"] = "longpoll"

	logg.LogTo("CHANGES", "Following changes feed: %+v.", options)

	// this will block until the handleChange callback returns nil
	c.Database.Changes(handleChange, options)

	logg.LogPanic("Changes listener died -- this should never happen")
}
func (pt *PopulationTrainer) generateOffspring(population []EvaluatedCortex) (withOffspring []EvaluatedCortex) { withOffspring = make([]EvaluatedCortex, 0) withOffspring = append(withOffspring, population...) for _, evaldCortex := range population { cortex := evaldCortex.Cortex offspringCortex := cortex.Copy() offspringNodeIdStr := fmt.Sprintf("cortex-%s", ng.NewUuid()) offspringCortex.NodeId = ng.NewCortexId(offspringNodeIdStr) succeeded, _ := pt.CortexMutator(offspringCortex) if !succeeded { logg.LogPanic("Unable to mutate cortex: %v", offspringCortex) } evaldCortexOffspring := EvaluatedCortex{ Cortex: offspringCortex, ParentId: cortex.NodeId.UUID, CreatedInGeneration: pt.CurrentGeneration, Fitness: 0.0, } withOffspring = append(withOffspring, evaldCortexOffspring) } return }
// Connect to db based on url stored in config, or panic if not able to connect func (c Configuration) DbConnection() couch.Database { db, err := couch.Connect(c.DbUrl) if err != nil { err = errors.New(fmt.Sprintf("Error %v | dbUrl: %v", err, c.DbUrl)) logg.LogPanic("%v", err) } return db }
func unmarshalCortex(pathToCortex string) *ng.Cortex { cortex, err := ng.NewCortexFromJSONFile(pathToCortex) if err != nil { logg.LogPanic("Error reading cortex from: %v. Err: %v", pathToCortex, err) } return cortex }
func makeHandler(method handlerMethod) http.Handler { return http.HandlerFunc(func(r http.ResponseWriter, rq *http.Request) { h := newHandler(r, rq) err := h.invoke(method) if err != nil { logg.LogPanic("Error creating http handler: %v", err) } }) }
func parseConfigFile(r io.Reader) { config = Config{} decoder := json.NewDecoder(r) if err := decoder.Decode(&config); err != nil { logg.LogPanic("Error parsing config file: %v", err) } }
// This test assumes that rabbit mq is running
// NOTE(review): disabled integration test — it binds a global mux on
// :8081 and sleeps 60s, so it is not safe to run in normal test suites.
func DisabledTestOcrHttpHandlerIntegration(t *testing.T) {
	rabbitConfig := rabbitConfigForTests()
	err := spawnOcrWorker(rabbitConfig)
	if err != nil {
		logg.LogPanic("Could not spawn ocr worker")
	}

	// add a handler to serve up an image from the filesystem.
	http.HandleFunc("/img", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "refactoring.png")
	})

	http.Handle("/ocr", NewOcrHttpHandler(rabbitConfig))

	go http.ListenAndServe(":8081", nil)

	logg.LogTo("TEST", "test1")

	// Build an OCR request that points the mock engine at the image
	// served by the handler registered above.
	ocrRequest := OcrRequest{
		ImgUrl:     "http://localhost:8081/img",
		EngineType: ENGINE_MOCK,
	}
	jsonBytes, err := json.Marshal(ocrRequest)
	if err != nil {
		logg.LogPanic("Could not marshal OcrRequest")
	}
	reader := bytes.NewReader(jsonBytes)
	resp, err := http.Post("http://localhost:8081/ocr", "application/json", reader)
	assert.True(t, err == nil)
	logg.LogTo("TEST", "resp: %v", resp)

	// connect to it via http client
	logg.LogTo("TEST", "Sleep for 60s")
	time.Sleep(time.Second * 60)

	// make sure get expected result
	assert.True(t, true)
}
func startWebServer() { sock, err := net.Listen("tcp", ":"+fmt.Sprintf("%d", webPort)) if err != nil { logg.LogPanic("Error starting web server : %v", err) } go func() { fmt.Println("HTTP now available at port ", webPort) http.Serve(sock, nil) }() }
func confirmDeliveryWorker(ack, nack chan uint64) { logg.LogTo("OCR_WORKER", "awaiting delivery confirmation ...") select { case tag := <-ack: logg.LogTo("OCR_WORKER", "confirmed delivery, tag: %v", tag) case tag := <-nack: logg.LogTo("OCR_WORKER", "failed to confirm delivery: %v", tag) case <-time.After(RPC_RESPONSE_TIMEOUT): // this is bad, the worker will probably be dsyfunctional // at this point, so panic logg.LogPanic("timeout trying to confirm delivery") } }
func main() { startWebServer() resources, _ := getLocalResources() patchFiles(resources) dirList, err := getDirectories() if err != nil { logg.LogPanic("Error scanning directories", err) } newFolderWatcher(dirList) }
func newFolderWatcher(dirList []string) { watcher, err := fsnotify.NewWatcher() if err != nil { log.Fatal(err) } defer watcher.Close() done := make(chan bool) go func() { for { select { case event := <-watcher.Events: logg.LogTo(TagLog, "New Event %v", event) if event.Op&fsnotify.Chmod == fsnotify.Chmod { f, _ := os.Stat(event.Name) if isJSON(f.Name()) && !isHidden(f.Name()) { err = NewLocalDocument(event.Name, &resources) } else if !isHidden(f.Name()) { err = NewLocalAttachment(event.Name, &resources) } if err != nil { logg.LogTo(TagError, "%v", err) } else { patchFiles(resources) } } if event.Op&fsnotify.Rename == fsnotify.Rename { documentID := getDocumentID(event.Name) err := deleteDocument(documentID) if err != nil { logg.LogTo(TagError, "Error deleting document : %v", err) } } case err := <-watcher.Errors: logg.LogTo(TagError, "%v", err) } } }() for _, dir := range dirList { logg.LogTo(TagLog, "attaching watcher to %s", dir) err = watcher.Add(dir) if err != nil { logg.LogPanic("Error attaching fs watcher : %v", err) } } <-done }
func (sensor *Sensor) scatterOutput(dataMessage *DataMessage) { if len(dataMessage.Inputs) == 0 { logg.LogPanic("cannot scatter empty data message") } for _, outboundConnection := range sensor.Outbound { logmsg := fmt.Sprintf("%v -> %v: %v", sensor.NodeId.UUID, outboundConnection.NodeId.UUID, dataMessage) logg.LogTo("NODE_PRE_SEND", logmsg) dataChan := outboundConnection.DataChan dataChan <- dataMessage logg.LogTo("NODE_POST_SEND", logmsg) } }
func (game *Game) CreateRemoteUser() { u4, err := uuid.NewV4() if err != nil { logg.LogPanic("Error generating uuid", err) } user := &User{ Id: fmt.Sprintf("user:%s", u4), TeamId: game.ourTeamId, } newId, newRevision, err := game.db.Insert(user) logg.LogTo("MAIN", "Inserted new user %v rev %v", newId, newRevision) user.Rev = newRevision game.user = *user }
func init() { // parse config file kingpin.Parse() if *configFileName == "" { kingpin.Errorf("Config file name missing") return } configFile, err := os.Open(*configFileName) if err != nil { logg.LogPanic("Unable to open file: %v. Err: %v", *configFileName, err.Error()) return } defer configFile.Close() configReader := bufio.NewReader(configFile) parseConfigFile(configReader) //set logging logg.LogKeys[logTag] = true }
func (pt *PopulationTrainer) cullPopulation(population []EvaluatedCortex) (culledPopulation []EvaluatedCortex) { population = pt.sortByFitness(population) if len(population)%2 != 0 { logg.LogPanic("population size must be even") } culledPopulationSize := len(population) / 2 culledPopulation = make([]EvaluatedCortex, 0) for i, evaldCortex := range population { culledPopulation = append(culledPopulation, evaldCortex) if i >= (culledPopulationSize - 1) { break } } return }
func (game *Game) PostChosenMove(validMove ValidMove) { logg.LogTo("MAIN", "post chosen move: %v", validMove) preMoveSleepSeconds := game.calculatePreMoveSleepSeconds() logg.LogTo("MAIN", "sleep %v (s) before posting move", preMoveSleepSeconds) time.Sleep(time.Second * time.Duration(preMoveSleepSeconds)) if len(validMove.Locations) == 0 { logg.LogTo("MAIN", "invalid move, ignoring: %v", validMove) } u4, err := uuid.NewV4() if err != nil { logg.LogPanic("Error generating uuid", err) } votes := &OutgoingVotes{} votes.Id = fmt.Sprintf("vote:%s", u4) votes.Turn = game.gameState.Turn votes.PieceId = validMove.PieceId votes.TeamId = game.ourTeamId votes.GameId = game.gameState.Number // TODO: this is actually a bug, because if there is a // double jump it will only send the first jump move endLocation := validMove.Locations[0] locations := []int{validMove.StartLocation, endLocation} votes.Locations = locations newId, newRevision, err := game.db.Insert(votes) logg.LogTo("MAIN", "newId: %v, newRevision: %v err: %v", newId, newRevision, err) if err != nil { logg.LogError(err) return } }
func main() { noOpFlagFunc := ocrworker.NoOpFlagFunction() rabbitConfig := ocrworker.DefaultConfigFlagsOverride(noOpFlagFunc) // inifinite loop, since sometimes worker <-> rabbitmq connection // gets broken. see https://github.com/tleyden/open-ocr/issues/4 for { logg.LogTo("OCR_WORKER", "Creating new OCR Worker") ocrWorker, err := ocrworker.NewOcrRpcWorker(rabbitConfig) if err != nil { logg.LogPanic("Could not create rpc worker") } ocrWorker.Run() // this happens when connection is closed err = <-ocrWorker.Done logg.LogError(fmt.Errorf("OCR Worker failed with error: %v", err)) } }
func TestAddNeuronRecurrent(t *testing.T) { ng.SeedRandom() numAdded := 0 numIterations := 100 for i := 0; i < numIterations; i++ { cortex := BasicCortex() numNeuronsBefore := len(cortex.Neurons) ok, mutateResult := AddNeuronRecurrent(cortex) neuron := mutateResult.(*ng.Neuron) if !ok { continue } else { numAdded += 1 } assert.True(t, neuron != nil) assert.True(t, neuron.ActivationFunction != nil) numNeuronsAfter := len(cortex.Neurons) addedNeuron := numNeuronsAfter == numNeuronsBefore+1 if !addedNeuron { logg.LogPanic("AddNeuronRecurrent %v did not add exactly one neuron. before: %v after: %v", i, numNeuronsBefore, numNeuronsAfter) } // run network make sure it runs examples := ng.XnorTrainingSamples() fitness := cortex.Fitness(examples) assert.True(t, fitness >= 0) } assert.True(t, numAdded > 0) }
// primeRecurrentOutbound sends a zero-valued data message over the given
// recurrent outbound connection. Returns closed=true if the neuron was
// asked to shut down while blocked on the send.
func (neuron *Neuron) primeRecurrentOutbound(cxn *OutboundConnection) (closed bool) {

	inputs := []float64{0}
	dataMessage := &DataMessage{
		SenderId: neuron.NodeId,
		Inputs:   inputs,
	}

	if cxn.NodeId.UUID == neuron.NodeId.UUID {
		// we are sending to ourselves, so short-circuit the
		// channel based messaging so we can use unbuffered channels
		neuron.receiveRecurrentDataMessage(dataMessage)
		// Sanity check: a single self-message must not complete the
		// receive barrier on its own.
		if neuron.receiveBarrierSatisfied() {
			msg := "Receive Barrier not expected to be satisfied yet"
			logg.LogPanic(msg)
		}
	} else {
		logPreSend(neuron.NodeId, cxn.NodeId, dataMessage)
		if cxn.DataChan == nil {
			log.Panicf("DataChan is nil for connection: %v", cxn)
		}
		// Block until the peer accepts the message, the one-second send
		// timeout fires (fatal), or a shutdown request arrives on the
		// Closing channel (acknowledged via responseChan).
		select {
		case cxn.DataChan <- dataMessage:
		case <-time.After(time.Second):
			log.Panicf("Timeout sending to %v", cxn)
		case responseChan := <-neuron.Closing:
			closed = true
			responseChan <- true
		}
		logWeights(neuron)
		logPostSend(neuron.NodeId, cxn.NodeId, dataMessage)
	}
	return
}
func (pt *PopulationTrainer) chooseRandomOpponents(cortex *ng.Cortex, population []EvaluatedCortex, numOpponents int) (opponents []*ng.Cortex) { if numOpponents >= len(population) { logg.LogPanic("Not enough members of population to choose %d opponents", numOpponents) } opponents = make([]*ng.Cortex, 0) for i := 0; i < numOpponents; i++ { for { randInt := RandomIntInRange(0, len(population)) randomEvaluatedCortex := population[randInt] if randomEvaluatedCortex.Cortex == cortex { continue } opponents = append(opponents, randomEvaluatedCortex.Cortex) break } } return }
// main parses command-line options via docopt, sanity-checks the
// environment, wires up the job scheduler and changes listener, and then
// serves the ElasticThought REST API on :8080 with gin.
func main() {

	// TODO: customize listen port (defaults to 8080)

	// NOTE(review): the usage raw string's line layout was reconstructed
	// here; docopt parses it by line, so verify it matches the original.
	usage := `ElasticThought REST API server.

Usage:
  elastic-thought [--sync-gw-url=<sgu>] [--blob-store-url=<bsu>]

Options:
  -h --help  Show this screen.
  --sync-gw-url=<sgu>  Sync Gateway DB URL [default: http://localhost:4985/elastic-thought].
  --blob-store-url=<bsu>  Blob store URL [default: file:///tmp].`

	parsedDocOptArgs, _ := docopt.Parse(usage, nil, true, "ElasticThought alpha", false)
	fmt.Println(parsedDocOptArgs)

	// Merge parsed CLI flags over the defaults.
	config := *(et.NewDefaultConfiguration())
	config, err := config.Merge(parsedDocOptArgs)
	if err != nil {
		logg.LogFatal("Error processing cmd line args: %v", err)
		return
	}

	if err := et.EnvironmentSanityCheck(config); err != nil {
		logg.LogFatal("Failed environment sanity check: %v", err)
		return
	}

	// Pick the job scheduler implementation from the configured queue type.
	var jobScheduler et.JobScheduler
	switch config.QueueType {
	case et.Nsq:
		jobScheduler = et.NewNsqJobScheduler(config)
	case et.Goroutine:
		jobScheduler = et.NewInProcessJobScheduler(config)
	default:
		logg.LogFatal("Unexpected queue type: %v", config.QueueType)
	}

	context := &et.EndpointContext{
		Configuration: config,
	}

	// Follow the changes feed in the background, feeding jobs to the scheduler.
	changesListener, err := et.NewChangesListener(config, jobScheduler)
	if err != nil {
		logg.LogPanic("Error creating changes listener: %v", err)
	}
	go changesListener.FollowChangesFeed()

	ginEngine := gin.Default()

	// all requests wrapped in database connection middleware
	ginEngine.Use(et.DbConnector(config.DbUrl))

	// endpoint to create a new user (db auth not required)
	ginEngine.POST("/users", context.CreateUserEndpoint)

	// TODO: bundle in static assets from ../../example directory into the
	// binary using gobin-data and then allow them to be served up
	// via the /example REST endpoint.
	// ginEngine.Static("/example", "../../example") <-- uncomment for quick hack rel path

	// all endpoints in the authorized group require Basic Auth credentials
	// which is enforced by the DbAuthRequired middleware.
	authorized := ginEngine.Group("/")
	authorized.Use(et.DbAuthRequired())
	{
		authorized.POST("/datafiles", context.CreateDataFileEndpoint)
		authorized.POST("/datasets", context.CreateDataSetsEndpoint)
		authorized.POST("/solvers", context.CreateSolverEndpoint)
		authorized.POST("/training-jobs", context.CreateTrainingJob)
		authorized.POST("/classifiers", context.CreateClassifierEndpoint)
		authorized.POST("/classifiers/:classifier-id/classify", context.CreateClassificationJobEndpoint)
	}

	// Listen and serve on 0.0.0.0:8080
	ginEngine.Run(":8080")
}
// FitnessAgainst is part of a two-player scape interface but is not
// supported for TrainingSampleScape; calling it always panics. The return
// is unreachable and exists only to satisfy the signature.
func (scape TrainingSampleScape) FitnessAgainst(cortex *ng.Cortex, opponentCortex *ng.Cortex) (fitness float64) {
	// return cortex.Fitness(scape.examples) - opponentCortex.Fitness(scape.examples)
	logg.LogPanic("Cannot calculate fitness against another cortex")
	return 0.0
}
// Fitness is required by the scape interface but is not implemented for
// the two-player XNOR scape; calling it always panics. The return is
// unreachable and exists only to satisfy the signature.
func (scape XnorScapeTwoPlayer) Fitness(cortex *ng.Cortex) float64 {
	logg.LogPanic("Fitness not implemented")
	return 0.0
}
// validate panics unless the hill climber's weight saturation range has
// been configured; a correctly configured instance passes silently.
func (shc *StochasticHillClimber) validate() {
	if len(shc.WeightSaturationRange) > 0 {
		return
	}
	logg.LogPanic("Invalid (empty) WeightSaturationRange")
}