Example no. 1
// Run inputs
func ProvisionInputs(transmat integrity.Transmat, assemblerFn integrity.Assembler, inputs []def.Input, rootfs string, journal log15.Logger) integrity.Assembly {
	// start materializing all input filesystems (one goroutine per input)
	filesystems := make(map[def.Input]integrity.Arena, len(inputs))
	fsGather := make(chan map[def.Input]materializerReport)
	for _, in := range inputs {
		go func(in def.Input) {
			try.Do(func() {
				journal.Info(fmt.Sprintf("Starting materialize for %s hash=%s", in.Type, in.Hash))
				arena := transmat.Materialize(
					integrity.TransmatKind(in.Type),
					integrity.CommitID(in.Hash),
					[]integrity.SiloURI{integrity.SiloURI(in.URI)},
				)
				journal.Info(fmt.Sprintf("Finished materialize for %s hash=%s", in.Type, in.Hash))
				fsGather <- map[def.Input]materializerReport{
					in: {Arena: arena},
				}
			}).Catch(integrity.Error, func(err *errors.Error) {
				journal.Warn(fmt.Sprintf("Errored during materialize for %s hash=%s", in.Type, in.Hash), "error", err.Message())
				fsGather <- map[def.Input]materializerReport{
					in: {Err: err},
				}
			}).Done()
		}(in)
	}

	// (we don't have any output setup at this point, but if we do in the future, that'll be here.)

	// gather materialized inputs
	for range inputs {
		for in, report := range <-fsGather {
			if report.Err != nil {
				panic(report.Err)
			}
			filesystems[in] = report.Arena
		}
	}
	journal.Info("All inputs acquired... starting assembly")

	// assemble them into the final tree
	assemblyParts := make([]integrity.AssemblyPart, 0, len(filesystems))
	for input, arena := range filesystems {
		assemblyParts = append(assemblyParts, integrity.AssemblyPart{
			SourcePath: arena.Path(),
			TargetPath: input.Location,
			Writable:   true, // TODO input config should have a word about this
		})
	}
	assembly := assemblerFn(rootfs, assemblyParts)
	journal.Info("Assembly complete!")
	return assembly
}
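The materializerReport type is not part of this listing; from the way it is used above (an Arena on success, an Err on failure), it is presumably a small result struct along the lines of the following sketch, not the project's actual definition:

// materializerReport carries the result of one materialize goroutine back to the
// gathering loop; exactly one of Arena or Err is expected to be set.
type materializerReport struct {
	Arena integrity.Arena // the materialized filesystem, nil on error
	Err   *errors.Error   // non-nil if the transmat raised integrity.Error
}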
Example no. 2
func WebsocketHandler(srv *context.T, log log15.Logger) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		log := log.New("cmp", "websocket")
		conn, err := upgrader.Upgrade(w, r, nil)
		if err != nil {
			log.Error("error upgrading websocket connetion", "err", err)
			return
		}

		ch := srv.Router.Sub("/block")
		defer srv.Router.Unsub(ch)

		for {
			blki := <-ch
			if blk, ok := blki.(*block.Block); ok {
				encoded, err := json.Marshal(blk)
				if err != nil {
					log.Error("error encoding block", "err", err)
					return
				}
				if err = conn.WriteMessage(websocket.TextMessage, encoded); err != nil {
					log.Error("error sending data", "err", err)
					return
				}
			}
		}

	}
}
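The upgrader referenced above is a package-level variable that is not included in this listing. A minimal sketch of how it and the handler might be wired up with gorilla/websocket and net/http (the variable initialization, buffer sizes, function name, and route path here are assumptions, not taken from the project):

// Assumed package-level upgrader; buffer sizes are illustrative only.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
}

// Hypothetical wiring: expose the handler on /websocket.
func registerWebsocket(mux *http.ServeMux, srv *context.T, log log15.Logger) {
	mux.HandleFunc("/websocket", WebsocketHandler(srv, log))
}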
Example no. 3
func mineBlock(status MiningStatus, srv *context.T, log log15.Logger, previousBlockHash types.Hash, transactions []*transaction.Envelope) {
	if bat := prepareBAT(srv, log); bat != nil {
		blk, err := block.NewBlock(previousBlockHash, targetBits(), append(transactions, bat))
		if err != nil {
			log.Error("error while creating a new block", "err", err)
		} else {
			// send off the new pool
			for i := range status.Miners {
				status.Miners[i].signallingChannel <- blk
				status.Miners[i].Block = blk
				status.Miners[i].StartTime = time.Now()
			}
		}
	}

}
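The MiningStatus and miner types are defined elsewhere; judging only from the field accesses above, each miner carries a signalling channel plus some bookkeeping, roughly as in this sketch (only the three fields touched above are grounded in the listing, the real definitions may differ):

// Inferred shape of the per-miner state updated by mineBlock.
type Miner struct {
	signallingChannel chan *block.Block // receives the next block to mine
	Block             *block.Block      // block currently being mined
	StartTime         time.Time         // when mining of Block started
}

type MiningStatus struct {
	Miners []Miner
}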
func listenAndServer(log log15.Logger, addr string, handler http.Handler) error {
	conf, err := config.GetConfig()
	if err != nil {
		return err
	}

	conf.Endless.DefaultHammerTime = strings.TrimSpace(conf.Endless.DefaultHammerTime)

	if conf.Endless.DefaultHammerTime != "" {
		duration, err := time.ParseDuration(conf.Endless.DefaultHammerTime)
		if err == nil {
			endless.DefaultHammerTime = duration
		} else {
			log.Error("Bad format", log15.Ctx{"module": "Endless", "DefaultHammerTime": conf.Endless.DefaultHammerTime, "error": err})
		}
	}

	var terminated int32
	srv := endless.NewServer(addr, handler)
	preHookFunc := func() {
		atomic.StoreInt32(&terminated, 1)
	}
	srv.RegisterSignalHook(endless.PRE_SIGNAL, syscall.SIGHUP, preHookFunc)
	srv.RegisterSignalHook(endless.PRE_SIGNAL, syscall.SIGINT, preHookFunc)
	srv.RegisterSignalHook(endless.PRE_SIGNAL, syscall.SIGTERM, preHookFunc)

	log.Info("Launching server")
	err = srv.ListenAndServe()
	if atomic.LoadInt32(&terminated) == 0 {
		if err != nil {
			log.Error("During startup, error has occurred", "error", err)
		}
		return err
	} else {
		log.Info("Server is going to shutdown")
		return nil
	}
}
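A minimal sketch of how this graceful-restart variant might be invoked; the handler, route, and address here are placeholders rather than code from the project:

// Hypothetical entry point: any http.Handler works, and endless handles
// SIGHUP/SIGINT/SIGTERM via the hooks registered in listenAndServer.
func serveAPI(log log15.Logger) {
	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("pong"))
	})
	if err := listenAndServer(log, "127.0.0.1:8080", mux); err != nil {
		log.Crit("server exited with error", "err", err)
	}
}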
Example no. 5
func processPendingAllocations(srv *context.T, log log15.Logger) {
	pending := srv.DB.ListPendingRepositories()
	for i := range pending {
		r, err := srv.DB.GetRepository(pending[i])
		if err != nil {
			log.Error("error while processing pending repository", "repo", pending[i], "err", err)
			continue
		}
		c, err := srv.DB.GetTransactionConfirmations(r.NameAllocationTx)
		if err != nil {
			log.Error("error while calculating pending repository's allocation confirmations",
				"repo", pending[i], "txn", r.NameAllocationTx, "err", err)
			continue
		}
		if c >= ALLOCATION_CONFIRMATIONS_REQUIRED {
			r.Status = repository.ACTIVE
			srv.DB.PutRepository(r)
			log.Info("activated repository", "repo", pending[i], "alloc_txn", r.NameAllocationTx)
		}

	}
}
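processPendingAllocations is a single pass over the pending set; in the surrounding daemon it would presumably be driven periodically. A hedged sketch of such a driver (the interval and the function name are invented for illustration; the real project may trigger this on new blocks instead):

// Hypothetical periodic driver for processPendingAllocations.
func watchPendingAllocations(srv *context.T, log log15.Logger) {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		processPendingAllocations(srv, log)
	}
}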
Example no. 6
func InfoHandler(srv *context.T, log log15.Logger) func(http.ResponseWriter, *http.Request) {
	return func(resp http.ResponseWriter, req *http.Request) {
		log := log.New("cmp", "http")
		lastBlock, err := srv.DB.GetLastBlock()
		if err != nil {
			log.Error("error serving /info", "err", err)
			resp.WriteHeader(500)
			return
		}
		info := Info{
			Mining:    server.GetMiningStatus(),
			LastBlock: lastBlock,
		}
		info.Debug.NumGoroutine = runtime.NumGoroutine()
		encoded, err := json.Marshal(info)
		if err != nil {
			log.Error("error serving /info", "err", err)
			resp.WriteHeader(500)
			return
		}
		resp.Header().Add("Content-Type", "application/json")
		resp.Write(encoded)
	}

}
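The Info type is not part of this listing; from the fields populated above and the JSON response it produces, it is presumably something like the following sketch (field types and layout are guesses based only on the usage shown):

// Inferred response payload for /info; only the fields referenced above are known.
type Info struct {
	Mining    MiningStatus // current mining status, from server.GetMiningStatus()
	LastBlock *block.Block // most recent block in the local database
	Debug     struct {
		NumGoroutine int // runtime.NumGoroutine() at the time of the request
	}
}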
Example no. 7
func prepareBAT(srv *context.T, log log15.Logger) *transaction.Envelope {
	key, err := srv.DB.GetMainKey()
	if err != nil {
		log.Error("error while attempting to retrieve main key", "err", err)
	}
	if key != nil {
		bat, err := transaction.NewBlockAttribution()
		if err != nil {
			log.Error("error while creating a BAT", "err", err)
		} else {
			hash, err := srv.DB.GetPreviousEnvelopeHashForPublicKey(&key.PublicKey)
			if err != nil {
				log.Error("error while creating a BAT", "err", err)
			}
			bate := transaction.NewEnvelope(hash, bat)
			bate.Sign(key)
			return bate
		}
	}
	return nil

}
Example no. 8
func SetupGitRoutes(r *mux.Router, srv *context.T, log log15.Logger) {
	log = log.New("cmp", "git")
	// Git Server
	r.Methods("POST").Path("/{repository:.+}/git-upload-pack").HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		log := log.New("cmp", "git-upload-pack")
		dec := pktline.NewDecoder(req.Body)
		resp.Header().Add("Cache-Control", "no-cache")
		resp.Header().Add("Content-Type", "application/x-git-upload-pack-result")
		enc := pktline.NewEncoder(resp)

		var wants, haves, common []git.Hash
		var objects []git.Object
		wantsRcvd := false

		for {
			var pktline []byte
			if err := dec.Decode(&pktline); err != nil {
				log.Error("error while decoding pkt-line", "err", err)
				return
			}

			switch {
			case pktline == nil:
				switch {
				case !wantsRcvd:
					wantsRcvd = true
				case wantsRcvd:
					for i := range haves {
						_, err := readObject(srv, haves[i])
						if err == nil {
							enc.Encode([]byte(fmt.Sprintf("ACK %x common\n", haves[i])))
							common = append(common, haves[i])
						} else {
							enc.Encode([]byte("NAK\n"))
						}
					}
					haves = make([]git.Hash, 0)
				}
			case bytes.Equal(pktline, []byte("done\n")):
				if len(common) == 0 {
					enc.Encode([]byte("NAK\n"))
				}
				goto done
			default:
				line := bytes.Split(pktline, []byte{' '})
				h := bytes.TrimSuffix(line[1], []byte{10})
				hash, err := hex.DecodeString(string(h))
				if err != nil {
					enc.Encode(append([]byte{3}, []byte(fmt.Sprintf("error parsing hash %s: %v\n", line[1], err))...))
					return
				}
				if string(line[0]) == "want" {
					wants = append(wants, hash)
				}
				if string(line[0]) == "have" {
					haves = append(haves, hash)
				}
			}
		}
	done:
		var err error
		for i := range wants {
			var objs []git.Object
			objs, err = processCommit(srv, wants[i], common)
			if err != nil {
				enc.Encode(append([]byte{3}, []byte(fmt.Sprintf("%s", err))...))
				return
			}
			objects = append(objects, objs...)
		}
		// filter out duplicates
		seen := make(map[string]bool)
		filteredObjects := make([]git.Object, 0)
		for i := range objects {
			hash := string(objects[i].Hash())
			if !seen[hash] {
				seen[hash] = true
				filteredObjects = append(filteredObjects, objects[i])
			}
		}
		//

		packfile := git.NewPackfile(filteredObjects)
		err = git.WritePackfile(&sideband64Writer{writer: &pktlineWriter{encoder: enc}, band: 1}, packfile)
		if err != nil {
			enc.Encode(append([]byte{3}, []byte(fmt.Sprintf("%s", err))...))
			return
		}

		enc.Encode(append([]byte{1}, pktlineToBytes(nil)...))
		enc.Encode(nil)
	})

	r.Methods("POST").Path("/{repository:.+}/git-receive-pack").HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		reponame := mux.Vars(req)["repository"]
		var lines [][]byte
		dec := pktline.NewDecoder(req.Body)
		dec.DecodeUntilFlush(&lines)
		resp.Header().Add("Cache-Control", "no-cache")
		resp.Header().Add("Content-Type", "application/x-git-receive-pack-result")
		enc := pktline.NewEncoder(resp)

		packfile, err := git.ReadPackfile(req.Body)
		if err != nil {
			enc.Encode(append([]byte{1}, pktlineToBytes([]byte(fmt.Sprintf("unpack %v\n", err)))...))
		} else {
			enc.Encode(append([]byte{1}, pktlineToBytes([]byte("unpack ok"))...))
			for i := range packfile.Objects {
				err = git.WriteObject(packfile.Objects[i], path.Join(srv.Config.General.DataPath, "objects"))
				if err != nil {
					enc.Encode(append([]byte{3}, []byte(fmt.Sprintf("Error while writing object: %v\n", err))...))
				} else {
					srv.Router.Pub(packfile.Objects[i], "/git/object")
				}
			}
			for i := range lines {
				split := strings.Split(string(lines[i]), " ")
				old := split[0]
				new := split[1]
				ref := strings.TrimRight(split[2], string([]byte{0}))
				oldHash, err := hex.DecodeString(old)
				if err != nil {
					enc.Encode(append([]byte{3}, []byte(fmt.Sprintf("Malformed hash %s\n", old))...))
					return
				}
				newHash, err := hex.DecodeString(new)
				if err != nil {
					enc.Encode(append([]byte{3}, []byte(fmt.Sprintf("Malformed hash %s\n", new))...))
					return
				}
				tx := transaction.NewReferenceUpdate(reponame, ref, oldHash, newHash)
				key, err := srv.DB.GetMainKey()
				if err != nil {
					enc.Encode(append([]byte{3}, []byte(fmt.Sprintf("Error while retrieving main key: %v", err))...))
					return
				}
				if key == nil {
					enc.Encode(append([]byte{3}, []byte("No main private key to sign the transaction")...))
					return
				}
				hash, err := srv.DB.GetPreviousEnvelopeHashForPublicKey(&key.PublicKey)
				if err != nil {
					enc.Encode(append([]byte{3}, []byte(fmt.Sprintf("Error while preparing transaction: %v", err))...))
					return
				}

				txe := transaction.NewEnvelope(hash, tx)
				txe.Sign(key)

				enc.Encode(append([]byte{2}, []byte(fmt.Sprintf("[gitchain] Transaction %s\n", txe.Hash()))...))
				srv.Router.Pub(txe, "/transaction")
				enc.Encode(append([]byte{1}, pktlineToBytes([]byte(fmt.Sprintf("ok %s\n", ref)))...))
			}
		}
		enc.Encode(append([]byte{1}, pktlineToBytes(nil)...))
		enc.Encode(nil)
	})

	r.Methods("GET").Path("/{repository:.+}/info/refs").HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		req.ParseForm()
		if len(req.Form["service"]) == 0 {
			resp.WriteHeader(400)
			return
		}
		service := req.Form["service"][0]

		reponame := mux.Vars(req)["repository"]
		repo, err := srv.DB.GetRepository(reponame)
		if err != nil {
			log.Error("error while retrieving repository", "repo", reponame, "err", err)
			resp.WriteHeader(500)
			return
		}
		if repo == nil || repo.Status == repository.PENDING {
			resp.WriteHeader(404)
			return
		}
		refs, err := srv.DB.ListRefs(reponame)
		if err != nil {
			log.Error("error listing refs", "repo", reponame, "err", err)
			resp.WriteHeader(500)
			return
		}
		reflines := make([][]byte, len(refs))
		for i := range refs {
			ref, err := srv.DB.GetRef(reponame, refs[i])
			if err != nil {
				log.Error("error getting ref", "repo", reponame, "err", err)
				resp.WriteHeader(500)
				return
			}
			refline := append(append([]byte(hex.EncodeToString(ref)), 32), []byte(refs[i])...)
			if i == 0 {
				// append capabilities
				refline = append(append(refline, 0), capabilities()...)
			}
			refline = append(refline, 10) // LF
			reflines[i] = refline
		}

		ref, err := srv.DB.GetRef(reponame, "refs/heads/master")
		if err == nil && !bytes.Equal(ref, make([]byte, 20)) {
			reflines = append(reflines, append(append(append([]byte(hex.EncodeToString(ref)), 32), []byte("HEAD")...), 10))
		}

		resp.Header().Add("Content-Type", fmt.Sprintf("application/x-%s-advertisement", service))
		resp.Header().Add("Cache-Control", "no-cache")
		enc := pktline.NewEncoder(resp)
		enc.Encode([]byte(fmt.Sprintf("# service=%s\n", service)))
		enc.Encode(nil)
		if len(reflines) == 0 {
			enc.Encode(append(append(append([]byte("0000000000000000000000000000000000000000"), 32), nulCapabilities()...), []byte{0, 32, 10}...))
		} else {
			for i := range reflines {
				enc.Encode(reflines[i])
			}
		}
		enc.Encode(nil)
	})

	r.Methods("GET").Path("/{repository:.+}/HEAD").HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		reponame := mux.Vars(req)["repository"]
		ref, err := srv.DB.GetRef(reponame, "refs/heads/master")
		if err != nil {
			log.Error("error while retrieving repository HEAD", "repo", reponame, "err", err)
			resp.WriteHeader(500)
			return
		}
		resp.Header().Add("Content-Type", "text/plain")
		resp.Header().Add("Cache-Control", "no-cache")
		resp.Write([]byte(hex.EncodeToString(ref)))
	})

	r.Methods("GET").Path("/{repository:.+}/objects/{hash:.+}").HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
	})

}
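A minimal sketch of how these git smart-HTTP routes might be mounted and served with gorilla/mux; the function name and listen address are placeholders, not the project's actual bootstrap code:

// Hypothetical bootstrap: mount the git endpoints and serve them.
func serveGit(srv *context.T, log log15.Logger) error {
	r := mux.NewRouter()
	SetupGitRoutes(r, srv, log)
	return http.ListenAndServe("127.0.0.1:3000", r)
}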
Example no. 9
// Run inputs
func ProvisionInputs(transmat integrity.Transmat, assemblerFn integrity.Assembler, inputs []def.Input, rootfs string, journal log15.Logger) integrity.Assembly {
	// start materializing all input filesystems (one goroutine per input)
	// input names are used as keys, so must be unique
	inputsByName := make(map[string]def.Input, len(inputs))
	for _, in := range inputs {
		// TODO checks should also be sooner, up in cfg parse
		// but this check is for programmatic access as well (errors down the line can get nonobvious if you skip this).
		if _, ok := inputsByName[in.Name]; ok {
			panic(errors.ProgrammerError.New("duplicate name in input config"))
		}
		inputsByName[in.Name] = in
	}
	filesystems := make(map[string]integrity.Arena, len(inputs))
	fsGather := make(chan map[string]materializerReport)
	for _, in := range inputs {
		go func(in def.Input) {
			try.Do(func() {
				journal.Info(fmt.Sprintf("Starting materialize for %s hash=%s", in.Type, in.Hash))
				// todo: create validity checking api for URIs, check them all before launching anything
				warehouses := make([]integrity.SiloURI, len(in.Warehouses))
				for i, wh := range in.Warehouses {
					warehouses[i] = integrity.SiloURI(wh)
				}
				// invoke transmat (blocking, potentially long time)
				arena := transmat.Materialize(
					integrity.TransmatKind(in.Type),
					integrity.CommitID(in.Hash),
					warehouses,
				)
				// submit report
				journal.Info(fmt.Sprintf("Finished materialize for %s hash=%s", in.Type, in.Hash))
				fsGather <- map[string]materializerReport{
					in.Name: {Arena: arena},
				}
			}).Catch(integrity.Error, func(err *errors.Error) {
				journal.Warn(fmt.Sprintf("Errored during materialize for %s hash=%s", in.Type, in.Hash), "error", err.Message())
				fsGather <- map[string]materializerReport{
					in.Name: {Err: err},
				}
			}).Done()
		}(in)
	}

	// (we don't have any output setup at this point, but if we do in the future, that'll be here.)

	// gather materialized inputs
	for range inputs {
		for name, report := range <-fsGather {
			if report.Err != nil {
				panic(report.Err)
			}
			filesystems[name] = report.Arena
		}
	}
	journal.Info("All inputs acquired... starting assembly")

	// assemble them into the final tree
	assemblyParts := make([]integrity.AssemblyPart, 0, len(filesystems))
	for name, arena := range filesystems {
		assemblyParts = append(assemblyParts, integrity.AssemblyPart{
			SourcePath: arena.Path(),
			TargetPath: inputsByName[name].MountPath,
			Writable:   true, // TODO input config should have a word about this
		})
	}
	assembly := assemblerFn(rootfs, assemblyParts)
	journal.Info("Assembly complete!")
	return assembly
}
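Compared to Example no. 1, this version keys everything by input name and accepts several warehouses per input. The def.Input fields it relies on suggest a shape roughly like this sketch; the real definition likely carries more fields and struct tags:

// Inferred subset of def.Input as used by this version of ProvisionInputs.
type Input struct {
	Name       string   // unique key; duplicates are rejected above
	Type       string   // transmat kind, fed to integrity.TransmatKind
	Hash       string   // commit ID to materialize
	Warehouses []string // zero or more silo URIs to fetch from
	MountPath  string   // where the arena is mounted into the rootfs
}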
Example no. 10
// Run outputs
func PreserveOutputs(transmat integrity.Transmat, outputs []def.Output, rootfs string, journal log15.Logger) []def.Output {
	// run commit on the outputs
	scanGather := make(chan scanReport)
	for _, out := range outputs {
		go func(out def.Output) {
			filterOptions := make([]integrity.MaterializerConfigurer, 0, 3)
			out.Filters.InitDefaultsOutput()
			switch out.Filters.UidMode {
			case def.FilterKeep: // easy, just no filter.
			case def.FilterUse:
				f := filter.UidFilter{out.Filters.Uid}
				filterOptions = append(filterOptions, integrity.UseFilter(f))
			default:
				panic(errors.ProgrammerError.New("unhandled filter mode %v", out.Filters.UidMode))
			}
			switch out.Filters.GidMode {
			case def.FilterKeep: // easy, just no filter.
			case def.FilterUse:
				f := filter.GidFilter{out.Filters.Gid}
				filterOptions = append(filterOptions, integrity.UseFilter(f))
			default:
				panic(errors.ProgrammerError.New("unhandled filter mode %v", out.Filters.GidMode))
			}
			switch out.Filters.MtimeMode {
			case def.FilterKeep: // easy, just no filter.
			case def.FilterUse:
				f := filter.MtimeFilter{out.Filters.Mtime}
				filterOptions = append(filterOptions, integrity.UseFilter(f))
			default:
				panic(errors.ProgrammerError.New("unhandled filter mode %v", out.Filters.MtimeMode))
			}

			scanPath := filepath.Join(rootfs, out.MountPath)
			journal.Info(fmt.Sprintf("Starting scan on %q", scanPath))
			try.Do(func() {
				// todo: create validity checking api for URIs, check them all before launching anything
				warehouses := make([]integrity.SiloURI, len(out.Warehouses))
				for i, wh := range out.Warehouses {
					warehouses[i] = integrity.SiloURI(wh)
				}
				// invoke transmat (blocking, potentially long time)
				commitID := transmat.Scan(
					integrity.TransmatKind(out.Type),
					scanPath,
					warehouses,
					filterOptions...,
				)
				out.Hash = string(commitID)
				// submit report
				journal.Info(fmt.Sprintf("Finished scan on %q", scanPath))
				scanGather <- scanReport{Output: out}
			}).Catch(integrity.Error, func(err *errors.Error) {
				journal.Warn(fmt.Sprintf("Errored scan on %q", scanPath), "error", err.Message())
				scanGather <- scanReport{Err: err}
			}).Done()
		}(out)
	}

	// gather reports
	var results []def.Output
	for range outputs {
		report := <-scanGather
		if report.Err != nil {
			panic(report.Err)
		}
		results = append(results, report.Output)
	}

	return results
}
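As with materializerReport, the scanReport type is not shown in this listing; its use above (an updated Output on success, an Err on failure) suggests a small struct like this sketch rather than the project's exact definition:

// scanReport carries one output scan result back to the gathering loop;
// either Output has its Hash filled in, or Err is non-nil.
type scanReport struct {
	Output def.Output    // the output definition with Hash set after the scan
	Err    *errors.Error // non-nil if the transmat raised integrity.Error
}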
func listenAndServer(log log15.Logger, addr string, handler http.Handler) error {
	log.Info("Launching server")
	err := http.ListenAndServe(addr, handler)
	log.Error("During startup, error has occurred", "error", err)
	return err
}
Example no. 12
// Run outputs
func PreserveOutputs(transmat integrity.Transmat, outputs []def.Output, rootfs string, journal log15.Logger) []def.Output {
	// run commit on the outputs
	scanGather := make(chan scanReport)
	for _, out := range outputs {
		go func(out def.Output) {
			filterOptions := make([]integrity.MaterializerConfigurer, 0, 4)
			for _, name := range out.Filters {
				cfg := strings.Fields(name)
				switch cfg[0] {
				case "uid":
					f := filter.UidFilter{}
					if len(cfg) > 1 {
						f.Value, _ = strconv.Atoi(cfg[1])
					}
					filterOptions = append(filterOptions, integrity.UseFilter(f))
				case "gid":
					f := filter.GidFilter{}
					if len(cfg) > 1 {
						f.Value, _ = strconv.Atoi(cfg[1])
					}
					filterOptions = append(filterOptions, integrity.UseFilter(f))
				case "mtime":
					f := filter.MtimeFilter{}
					if len(cfg) > 1 {
						f.Value, _ = time.Parse(time.RFC3339, cfg[1])
					}
					filterOptions = append(filterOptions, integrity.UseFilter(f))
				default:
					continue
				}
			}
			scanPath := filepath.Join(rootfs, out.Location)
			journal.Info(fmt.Sprintf("Starting scan on %q", scanPath))
			try.Do(func() {
				// TODO: following is hack; badly need to update config parsing to understand this first-class
				warehouseCoordsList := make([]integrity.SiloURI, 0)
				if out.URI != "" {
					warehouseCoordsList = append(warehouseCoordsList, integrity.SiloURI(out.URI))
				}
				// invoke transmat
				commitID := transmat.Scan(
					integrity.TransmatKind(out.Type),
					scanPath,
					warehouseCoordsList,
					filterOptions...,
				)
				out.Hash = string(commitID)
				journal.Info(fmt.Sprintf("Finished scan on %q", scanPath))
				scanGather <- scanReport{Output: out}
			}).Catch(integrity.Error, func(err *errors.Error) {
				journal.Warn(fmt.Sprintf("Errored scan on %q", scanPath), "error", err.Message())
				scanGather <- scanReport{Err: err}
			}).Done()
		}(out)
	}

	// gather reports
	var results []def.Output
	for range outputs {
		report := <-scanGather
		if report.Err != nil {
			panic(report.Err)
		}
		results = append(results, report.Output)
	}

	return results
}