// Run inputs
func ProvisionInputs(transmat integrity.Transmat, assemblerFn integrity.Assembler, inputs []def.Input, rootfs string, journal log15.Logger) integrity.Assembly {
	// start having all filesystems
	filesystems := make(map[def.Input]integrity.Arena, len(inputs))
	fsGather := make(chan map[def.Input]materializerReport)
	for _, in := range inputs {
		go func(in def.Input) {
			try.Do(func() {
				journal.Info(fmt.Sprintf("Starting materialize for %s hash=%s", in.Type, in.Hash))
				arena := transmat.Materialize(
					integrity.TransmatKind(in.Type),
					integrity.CommitID(in.Hash),
					[]integrity.SiloURI{integrity.SiloURI(in.URI)},
				)
				journal.Info(fmt.Sprintf("Finished materialize for %s hash=%s", in.Type, in.Hash))
				fsGather <- map[def.Input]materializerReport{
					in: {Arena: arena},
				}
			}).Catch(integrity.Error, func(err *errors.Error) {
				journal.Warn(fmt.Sprintf("Errored during materialize for %s hash=%s", in.Type, in.Hash), "error", err.Message())
				fsGather <- map[def.Input]materializerReport{
					in: {Err: err},
				}
			}).Done()
		}(in)
	}

	// (we don't have any output setup at this point, but if we do in the future, that'll be here.)

	// gather materialized inputs
	for range inputs {
		for in, report := range <-fsGather {
			if report.Err != nil {
				panic(report.Err)
			}
			filesystems[in] = report.Arena
		}
	}
	journal.Info("All inputs acquired... starting assembly")

	// assemble them into the final tree
	assemblyParts := make([]integrity.AssemblyPart, 0, len(filesystems))
	for input, arena := range filesystems {
		assemblyParts = append(assemblyParts, integrity.AssemblyPart{
			SourcePath: arena.Path(),
			TargetPath: input.Location,
			Writable:   true, // TODO input config should have a word about this
		})
	}
	assembly := assemblerFn(rootfs, assemblyParts)
	journal.Info("Assembly complete!")
	return assembly
}
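// materializerReport is referenced above but not defined in this excerpt.
// A minimal sketch inferred from how ProvisionInputs constructs it ({Arena: arena}
// on success, {Err: err} on failure); this is an assumption, not the project's
// actual declaration:
type materializerReport struct {
	Arena integrity.Arena // set when materialization succeeded
	Err   *errors.Error   // set when materialization failed
}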
func processPendingAllocations(srv *context.T, log log15.Logger) {
	pending := srv.DB.ListPendingRepositories()
	for i := range pending {
		r, err := srv.DB.GetRepository(pending[i])
		if err != nil {
			log.Error("error while processing pending repository", "repo", pending[i], "err", err)
			continue // can't inspect a repository that failed to load
		}
		c, err := srv.DB.GetTransactionConfirmations(r.NameAllocationTx)
		if err != nil {
			log.Error("error while calculating pending repository's allocation confirmations", "repo", pending[i], "txn", r.NameAllocationTx, "err", err)
			continue // confirmation count is unknown, so don't activate yet
		}
		if c >= ALLOCATION_CONFIRMATIONS_REQUIRED {
			r.Status = repository.ACTIVE
			srv.DB.PutRepository(r)
			log.Info("activated repository", "repo", pending[i], "alloc_txn", r.NameAllocationTx)
		}
	}
}
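// A hypothetical driver for the function above, assuming it is meant to be
// invoked periodically; the function name and interval are placeholders, not
// part of the original code:
func watchPendingAllocations(srv *context.T, log log15.Logger) {
	for range time.Tick(30 * time.Second) {
		processPendingAllocations(srv, log)
	}
}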
func listenAndServer(log log15.Logger, addr string, handler http.Handler) error {
	conf, err := config.GetConfig()
	if err != nil {
		return err
	}

	conf.Endless.DefaultHammerTime = strings.TrimSpace(conf.Endless.DefaultHammerTime)
	if conf.Endless.DefaultHammerTime != "" {
		duration, err := time.ParseDuration(conf.Endless.DefaultHammerTime)
		if err == nil {
			endless.DefaultHammerTime = duration
		} else {
			log.Error("Bad format", log15.Ctx{"module": "Endless", "DefaultHammerTime": conf.Endless.DefaultHammerTime, "error": err})
		}
	}

	var terminated int32
	srv := endless.NewServer(addr, handler)
	preHookFunc := func() {
		atomic.StoreInt32(&terminated, 1)
	}
	srv.RegisterSignalHook(endless.PRE_SIGNAL, syscall.SIGHUP, preHookFunc)
	srv.RegisterSignalHook(endless.PRE_SIGNAL, syscall.SIGINT, preHookFunc)
	srv.RegisterSignalHook(endless.PRE_SIGNAL, syscall.SIGTERM, preHookFunc)

	log.Info("Launching server")
	err = srv.ListenAndServe()
	if atomic.LoadInt32(&terminated) == 0 {
		if err != nil {
			log.Error("error occurred during startup", "error", err)
		}
		return err
	}
	log.Info("Server is going to shutdown")
	return nil
}
// Run inputs
func ProvisionInputs(transmat integrity.Transmat, assemblerFn integrity.Assembler, inputs []def.Input, rootfs string, journal log15.Logger) integrity.Assembly {
	// start having all filesystems
	// input names are used as keys, so must be unique
	inputsByName := make(map[string]def.Input, len(inputs))
	for _, in := range inputs {
		// TODO checks should also be sooner, up in cfg parse
		// but this check is for programmatic access as well (errors down the line can get nonobvious if you skip this).
		if _, ok := inputsByName[in.Name]; ok {
			panic(errors.ProgrammerError.New("duplicate name in input config"))
		}
		inputsByName[in.Name] = in
	}

	filesystems := make(map[string]integrity.Arena, len(inputs))
	fsGather := make(chan map[string]materializerReport)
	for _, in := range inputs {
		go func(in def.Input) {
			try.Do(func() {
				journal.Info(fmt.Sprintf("Starting materialize for %s hash=%s", in.Type, in.Hash))
				// todo: create validity checking api for URIs, check them all before launching anything
				warehouses := make([]integrity.SiloURI, len(in.Warehouses))
				for i, wh := range in.Warehouses {
					warehouses[i] = integrity.SiloURI(wh)
				}
				// invoke transmat (blocking, potentially long time)
				arena := transmat.Materialize(
					integrity.TransmatKind(in.Type),
					integrity.CommitID(in.Hash),
					warehouses,
				)
				// submit report
				journal.Info(fmt.Sprintf("Finished materialize for %s hash=%s", in.Type, in.Hash))
				fsGather <- map[string]materializerReport{
					in.Name: {Arena: arena},
				}
			}).Catch(integrity.Error, func(err *errors.Error) {
				journal.Warn(fmt.Sprintf("Errored during materialize for %s hash=%s", in.Type, in.Hash), "error", err.Message())
				fsGather <- map[string]materializerReport{
					in.Name: {Err: err},
				}
			}).Done()
		}(in)
	}

	// (we don't have any output setup at this point, but if we do in the future, that'll be here.)

	// gather materialized inputs
	for range inputs {
		for name, report := range <-fsGather {
			if report.Err != nil {
				panic(report.Err)
			}
			filesystems[name] = report.Arena
		}
	}
	journal.Info("All inputs acquired... starting assembly")

	// assemble them into the final tree
	assemblyParts := make([]integrity.AssemblyPart, 0, len(filesystems))
	for name, arena := range filesystems {
		assemblyParts = append(assemblyParts, integrity.AssemblyPart{
			SourcePath: arena.Path(),
			TargetPath: inputsByName[name].MountPath,
			Writable:   true, // TODO input config should have a word about this
		})
	}
	assembly := assemblerFn(rootfs, assemblyParts)
	journal.Info("Assembly complete!")
	return assembly
}
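// For reference, the integrity.Assembler argument is invoked above as
// assemblerFn(rootfs, assemblyParts) and returns an integrity.Assembly, so its
// shape is presumably along the lines of the following (an inference from the
// call site, not the library's actual declaration):
//
//	type Assembler func(basePath string, parts []AssemblyPart) Assembly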
// Run outputs
func PreserveOutputs(transmat integrity.Transmat, outputs []def.Output, rootfs string, journal log15.Logger) []def.Output {
	// run commit on the outputs
	scanGather := make(chan scanReport)
	for _, out := range outputs {
		go func(out def.Output) {
			filterOptions := make([]integrity.MaterializerConfigurer, 0, 3)
			out.Filters.InitDefaultsOutput()
			switch out.Filters.UidMode {
			case def.FilterKeep: // easy, just no filter.
			case def.FilterUse:
				f := filter.UidFilter{out.Filters.Uid}
				filterOptions = append(filterOptions, integrity.UseFilter(f))
			default:
				panic(errors.ProgrammerError.New("unhandled filter mode %v", out.Filters.UidMode))
			}
			switch out.Filters.GidMode {
			case def.FilterKeep: // easy, just no filter.
			case def.FilterUse:
				f := filter.GidFilter{out.Filters.Gid}
				filterOptions = append(filterOptions, integrity.UseFilter(f))
			default:
				panic(errors.ProgrammerError.New("unhandled filter mode %v", out.Filters.GidMode))
			}
			switch out.Filters.MtimeMode {
			case def.FilterKeep: // easy, just no filter.
			case def.FilterUse:
				f := filter.MtimeFilter{out.Filters.Mtime}
				filterOptions = append(filterOptions, integrity.UseFilter(f))
			default:
				panic(errors.ProgrammerError.New("unhandled filter mode %v", out.Filters.MtimeMode))
			}

			scanPath := filepath.Join(rootfs, out.MountPath)
			journal.Info(fmt.Sprintf("Starting scan on %q", scanPath))
			try.Do(func() {
				// todo: create validity checking api for URIs, check them all before launching anything
				warehouses := make([]integrity.SiloURI, len(out.Warehouses))
				for i, wh := range out.Warehouses {
					warehouses[i] = integrity.SiloURI(wh)
				}
				// invoke transmat (blocking, potentially long time)
				commitID := transmat.Scan(
					integrity.TransmatKind(out.Type),
					scanPath,
					warehouses,
					filterOptions...,
				)
				out.Hash = string(commitID)
				// submit report
				journal.Info(fmt.Sprintf("Finished scan on %q", scanPath))
				scanGather <- scanReport{Output: out}
			}).Catch(integrity.Error, func(err *errors.Error) {
				journal.Warn(fmt.Sprintf("Errored scan on %q", scanPath), "error", err.Message())
				scanGather <- scanReport{Err: err}
			}).Done()
		}(out)
	}

	// gather reports
	var results []def.Output
	for range outputs {
		report := <-scanGather
		if report.Err != nil {
			panic(report.Err)
		}
		results = append(results, report.Output)
	}
	return results
}
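// scanReport is likewise not defined in this excerpt. A minimal sketch inferred
// from how PreserveOutputs constructs it ({Output: out} on success, {Err: err}
// on failure); an assumption, not the project's actual declaration:
type scanReport struct {
	Output def.Output    // the output definition with Hash filled in after a successful scan
	Err    *errors.Error // set when the scan failed
}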
func listenAndServer(log log15.Logger, addr string, handler http.Handler) error {
	log.Info("Launching server")
	err := http.ListenAndServe(addr, handler)
	log.Error("error occurred during startup", "error", err)
	return err
}
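// A hypothetical call site for listenAndServer; the function name, route,
// address, and handler below are placeholders for illustration only:
func serveExample() {
	log := log15.New()
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	if err := listenAndServer(log, ":8080", mux); err != nil {
		log.Crit("server exited", "error", err)
	}
}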
// Run outputs
func PreserveOutputs(transmat integrity.Transmat, outputs []def.Output, rootfs string, journal log15.Logger) []def.Output {
	// run commit on the outputs
	scanGather := make(chan scanReport)
	for _, out := range outputs {
		go func(out def.Output) {
			filterOptions := make([]integrity.MaterializerConfigurer, 0, 4)
			for _, name := range out.Filters {
				cfg := strings.Fields(name)
				switch cfg[0] {
				case "uid":
					f := filter.UidFilter{}
					if len(cfg) > 1 {
						f.Value, _ = strconv.Atoi(cfg[1])
					}
					filterOptions = append(filterOptions, integrity.UseFilter(f))
				case "gid":
					f := filter.GidFilter{}
					if len(cfg) > 1 {
						f.Value, _ = strconv.Atoi(cfg[1])
					}
					filterOptions = append(filterOptions, integrity.UseFilter(f))
				case "mtime":
					f := filter.MtimeFilter{}
					if len(cfg) > 1 {
						f.Value, _ = time.Parse(time.RFC3339, cfg[1])
					}
					filterOptions = append(filterOptions, integrity.UseFilter(f))
				default:
					continue
				}
			}

			scanPath := filepath.Join(rootfs, out.Location)
			journal.Info(fmt.Sprintf("Starting scan on %q", scanPath))
			try.Do(func() {
				// TODO: following is hack; badly need to update config parsing to understand this first-class
				warehouseCoordsList := make([]integrity.SiloURI, 0)
				if out.URI != "" {
					warehouseCoordsList = append(warehouseCoordsList, integrity.SiloURI(out.URI))
				}
				// invoke transmat
				commitID := transmat.Scan(
					integrity.TransmatKind(out.Type),
					scanPath,
					warehouseCoordsList,
					filterOptions...,
				)
				out.Hash = string(commitID)
				journal.Info(fmt.Sprintf("Finished scan on %q", scanPath))
				scanGather <- scanReport{Output: out}
			}).Catch(integrity.Error, func(err *errors.Error) {
				journal.Warn(fmt.Sprintf("Errored scan on %q", scanPath), "error", err.Message())
				scanGather <- scanReport{Err: err}
			}).Done()
		}(out)
	}

	// gather reports
	var results []def.Output
	for range outputs {
		report := <-scanGather
		if report.Err != nil {
			panic(report.Err)
		}
		results = append(results, report.Output)
	}
	return results
}
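// Example of the out.Filters strings this version expects, inferred from the
// strings.Fields parsing above (the concrete values are placeholders):
//
//	Filters: []string{"uid 1000", "gid 1000", "mtime 2015-01-01T00:00:00Z"}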