// New returns a new Contractor.
func New(cs consensusSet, wallet walletShim, tpool transactionPool, hdb hostDB, persistDir string) (*Contractor, error) {
	// Check for nil inputs.
	if cs == nil {
		return nil, errNilCS
	}
	if wallet == nil {
		return nil, errNilWallet
	}
	if tpool == nil {
		return nil, errNilTpool
	}

	// Create the persist directory if it does not yet exist.
	err := os.MkdirAll(persistDir, 0700)
	if err != nil {
		return nil, err
	}

	// Create the logger.
	logger, err := persist.NewFileLogger(filepath.Join(persistDir, "contractor.log"))
	if err != nil {
		return nil, err
	}

	// Create Contractor using production dependencies.
	return newContractor(cs, &walletBridge{w: wallet}, tpool, hdb, newPersist(persistDir), logger)
}
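All of the constructors in this file share the same persistence-setup pattern: create the module's persist directory with os.MkdirAll, then open a log file inside it with persist.NewFileLogger. The standard-library-only sketch below approximates that pattern; newDirLogger is a hypothetical helper for illustration and is not how persist.NewFileLogger is actually implemented.

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// newDirLogger is a hypothetical, stdlib-only approximation of the
// "create persist dir, then create a file logger inside it" pattern.
func newDirLogger(persistDir, logName string) (*log.Logger, error) {
	// Create the persist directory if it does not yet exist.
	if err := os.MkdirAll(persistDir, 0700); err != nil {
		return nil, err
	}
	// Open (or create) the log file in append mode.
	f, err := os.OpenFile(filepath.Join(persistDir, logName), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		return nil, err
	}
	return log.New(f, "", log.LstdFlags), nil
}

func main() {
	logger, err := newDirLogger("testdir", "example.log")
	if err != nil {
		fmt.Println("setup failed:", err)
		return
	}
	logger.Println("logger initialized")
}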
// StartContinuousProfile will continuously print statistics about the cpu
// usage, memory usage, and runtime stats of the program.
func StartContinuousProfile(profileDir string) {
	// Create the folder for all of the profiling results.
	err := os.MkdirAll(profileDir, 0700)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Continuously log statistics about the running Sia application.
	go func() {
		// Create the logger.
		log, err := persist.NewFileLogger(filepath.Join(profileDir, "continuousProfiling.log"))
		if err != nil {
			fmt.Println("Profile logging failed:", err)
			return
		}

		// Collect statistics in an infinite loop.
		sleepTime := time.Second * 20
		for {
			// Sleep for an exponentially growing amount of time each
			// iteration; this keeps the size of the log small while still
			// providing lots of information.
			time.Sleep(sleepTime)
			sleepTime = time.Duration(1.5 * float64(sleepTime))

			var m runtime.MemStats
			runtime.ReadMemStats(&m)
			log.Printf("\n\tGoroutines: %v\n\tAlloc: %v\n\tTotalAlloc: %v\n\tHeapAlloc: %v\n\tHeapSys: %v\n",
				runtime.NumGoroutine(), m.Alloc, m.TotalAlloc, m.HeapAlloc, m.HeapSys)
		}
	}()
}
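The loop above grows the sampling interval by a factor of 1.5 each iteration, so a freshly started process is profiled frequently while a long-running one logs only occasionally. The standalone sketch below just reproduces that schedule to show how the interval grows; it is an illustration, not part of the profiling package.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Reproduce the interval growth from StartContinuousProfile: start at 20
	// seconds and multiply by 1.5 on every iteration.
	sleepTime := time.Second * 20
	for i := 0; i < 6; i++ {
		fmt.Printf("iteration %d: sleep %v\n", i, sleepTime)
		sleepTime = time.Duration(1.5 * float64(sleepTime))
	}
	// Prints 20s, 30s, 45s, 1m7.5s, 1m41.25s, 2m31.875s.
}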
// initPersist initializes the persistence of the miner.
func (m *Miner) initPersist() error {
	// Create the miner directory.
	err := os.MkdirAll(m.persistDir, 0700)
	if err != nil {
		return err
	}

	// Add a logger.
	m.log, err = persist.NewFileLogger(filepath.Join(m.persistDir, logFile))
	if err != nil {
		return err
	}

	return m.initSettings()
}
// initPersist handles all of the persistence initialization, such as creating
// the persistence directory and starting the logger.
func (r *Renter) initPersist() error {
	// Create the persist directory if it does not yet exist.
	err := os.MkdirAll(r.persistDir, 0700)
	if err != nil {
		return err
	}

	// Initialize the logger.
	r.log, err = persist.NewFileLogger(filepath.Join(r.persistDir, logFile))
	if err != nil {
		return err
	}

	// Load the prior persistence structures.
	err = r.load()
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}
// New returns a new HostDB.
func New(cs consensusSet, persistDir string) (*HostDB, error) {
	// Check for nil inputs.
	if cs == nil {
		return nil, errNilCS
	}

	// Create the persist directory if it does not yet exist.
	err := os.MkdirAll(persistDir, 0700)
	if err != nil {
		return nil, err
	}

	// Create the logger.
	logger, err := persist.NewFileLogger(filepath.Join(persistDir, "hostdb.log"))
	if err != nil {
		return nil, err
	}

	// Create HostDB using production dependencies.
	return newHostDB(cs, stdDialer{}, stdSleeper{}, newPersist(persistDir), logger)
}
// initPersist loads all of the wallet's persistence files into memory,
// creating them if they do not exist.
func (w *Wallet) initPersist() error {
	// Create a directory for the wallet without overwriting an existing
	// directory.
	err := os.MkdirAll(w.persistDir, 0700)
	if err != nil {
		return err
	}

	// Start logging.
	w.log, err = persist.NewFileLogger(filepath.Join(w.persistDir, logFile))
	if err != nil {
		return err
	}

	// Load the settings file.
	err = w.initSettings()
	if err != nil {
		return err
	}
	return nil
}
// initPersist initializes the persistence structures of the consensus set, in
// particular loading the database and preparing to manage subscribers.
func (cs *ConsensusSet) initPersist() error {
	// Create the consensus directory.
	err := os.MkdirAll(cs.persistDir, 0700)
	if err != nil {
		return err
	}

	// Initialize the logger.
	cs.log, err = persist.NewFileLogger(filepath.Join(cs.persistDir, logFile))
	if err != nil {
		return err
	}
	// Set up closing the logger.
	cs.tg.AfterStop(func() {
		err := cs.log.Close()
		if err != nil {
			// State of the logger is unknown, a println will suffice.
			fmt.Println("Error shutting down consensus set logger:", err)
		}
	})

	// Try to load an existing database from disk - a new one will be created
	// if one does not exist.
	err = cs.loadDB()
	if err != nil {
		return err
	}
	// Set up the closing of the database.
	cs.tg.AfterStop(func() {
		err := cs.db.Close()
		if err != nil {
			cs.log.Println("ERROR: Unable to close consensus set database at shutdown:", err)
		}
	})
	return nil
}
// newLogger creates a logger that the host can use to log messages and write
// critical statements.
func (productionDependencies) newLogger(s string) (*persist.Logger, error) {
	return persist.NewFileLogger(s)
}
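The host routes logger creation through its dependencies value rather than calling persist.NewFileLogger directly, which lets tests substitute a logger that never touches disk. The sketch below shows that dependency-injection pattern in isolation; the loggerDependencies and testDependencies names are hypothetical illustrations built on the standard library, not the host's real dependency interface.

package main

import (
	"bytes"
	"fmt"
	"log"
)

// loggerDependencies is a hypothetical stand-in for the host's dependency
// interface; the production implementation would return a file-backed logger.
type loggerDependencies interface {
	newLogger(path string) (*log.Logger, error)
}

// testDependencies returns a logger that writes to an in-memory buffer so
// tests do not create files on disk.
type testDependencies struct {
	buf bytes.Buffer
}

func (t *testDependencies) newLogger(path string) (*log.Logger, error) {
	return log.New(&t.buf, "", log.LstdFlags), nil
}

func main() {
	td := &testDependencies{}
	var deps loggerDependencies = td

	logger, err := deps.newLogger("unused.log")
	if err != nil {
		fmt.Println("failed to create logger:", err)
		return
	}
	logger.Println("message captured in memory")
	fmt.Print(td.buf.String())
}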
// New returns an initialized Gateway.
func New(addr string, bootstrap bool, persistDir string) (*Gateway, error) {
	// Create the directory if it doesn't exist.
	err := os.MkdirAll(persistDir, 0700)
	if err != nil {
		return nil, err
	}

	g := &Gateway{
		handlers: make(map[rpcID]modules.RPCFunc),
		initRPCs: make(map[string]modules.RPCFunc),

		peers: make(map[modules.NetAddress]*peer),
		nodes: make(map[modules.NetAddress]struct{}),

		persistDir: persistDir,
	}

	// Create the logger.
	g.log, err = persist.NewFileLogger(filepath.Join(g.persistDir, logFile))
	if err != nil {
		return nil, err
	}
	// Establish the closing of the logger.
	g.threads.AfterStop(func() {
		if err := g.log.Close(); err != nil {
			// The logger may or may not be working here, so use a println
			// instead.
			fmt.Println("Failed to close the gateway logger:", err)
		}
	})

	// Establish that the peerTG must complete shutdown before the primary
	// thread group completes shutdown.
	g.threads.OnStop(func() {
		err = g.peerTG.Stop()
		if err != nil {
			g.log.Println("ERROR: peerTG experienced errors while shutting down:", err)
		}
	})

	// Register RPCs.
	g.RegisterRPC("ShareNodes", g.shareNodes)
	g.RegisterConnectCall("ShareNodes", g.requestNodes)
	// Establish the de-registration of the RPCs.
	g.threads.OnStop(func() {
		g.UnregisterRPC("ShareNodes")
		g.UnregisterConnectCall("ShareNodes")
	})

	// Load the old node list. If it doesn't exist, no problem, but if it does,
	// we want to know about any errors preventing us from loading it.
	if loadErr := g.load(); loadErr != nil && !os.IsNotExist(loadErr) {
		return nil, loadErr
	}

	// Add the bootstrap peers to the node list.
	if bootstrap {
		for _, addr := range modules.BootstrapPeers {
			err := g.addNode(addr)
			if err != nil && err != errNodeExists {
				g.log.Printf("WARN: failed to add the bootstrap node '%v': %v", addr, err)
			}
		}
	}

	// Create the listener which will listen for new connections from peers.
	permanentListenClosedChan := make(chan struct{})
	g.listener, err = net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}
	// Automatically close the listener when g.threads.Stop() is called.
	g.threads.OnStop(func() {
		err := g.listener.Close()
		if err != nil {
			g.log.Println("WARN: closing the listener failed:", err)
		}
		<-permanentListenClosedChan
	})

	// Set the address and port of the gateway.
	_, g.port, err = net.SplitHostPort(g.listener.Addr().String())
	if err != nil {
		return nil, err
	}
	// Set myAddr equal to the address returned by the listener. It will be
	// overwritten by threadedLearnHostname later on.
	g.myAddr = modules.NetAddress(g.listener.Addr().String())

	// Spawn the peer connection listener.
	go g.permanentListen(permanentListenClosedChan)

	// Spawn the peer manager and provide tools for ensuring clean shutdown.
	peerManagerClosedChan := make(chan struct{})
	g.threads.OnStop(func() {
		<-peerManagerClosedChan
	})
	go g.permanentPeerManager(peerManagerClosedChan)

	// Spawn the node manager and provide tools for ensuring clean shutdown.
	nodeManagerClosedChan := make(chan struct{})
	g.threads.OnStop(func() {
		<-nodeManagerClosedChan
	})
	go g.permanentNodeManager(nodeManagerClosedChan)

	// Spawn the node purger and provide tools for ensuring clean shutdown.
	nodePurgerClosedChan := make(chan struct{})
	g.threads.OnStop(func() {
		<-nodePurgerClosedChan
	})
	go g.permanentNodePurger(nodePurgerClosedChan)

	// Spawn threads to take care of port forwarding and hostname discovery.
	go g.threadedForwardPort(g.port)
	go g.threadedLearnHostname()

	g.log.Println("INFO: gateway created, started logging")
	return g, nil
}
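This gateway constructor repeatedly pairs a background goroutine with a "closed" channel and an OnStop hook that blocks on it, so shutdown does not complete until every goroutine has exited. The sketch below shows that idiom in isolation using only the standard library; the stopFuncs slice is an assumption standing in for g.threads.OnStop, not the real sync.ThreadGroup API.

package main

import (
	"fmt"
	"time"
)

func main() {
	var stopFuncs []func() // hypothetical stand-in for g.threads.OnStop registrations
	quit := make(chan struct{})

	// Spawn a worker and provide tools for ensuring clean shutdown.
	workerClosedChan := make(chan struct{})
	stopFuncs = append(stopFuncs, func() {
		<-workerClosedChan // shutdown waits here until the worker has exited
	})
	go func() {
		defer close(workerClosedChan)
		for {
			select {
			case <-quit:
				fmt.Println("worker: shutting down cleanly")
				return
			case <-time.After(10 * time.Millisecond):
				// Placeholder for the worker's periodic task.
			}
		}
	}()

	// Simulate Stop(): signal the worker, then run the stop hooks in order.
	time.Sleep(25 * time.Millisecond)
	close(quit)
	for _, f := range stopFuncs {
		f()
	}
	fmt.Println("shutdown complete")
}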
// New returns an initialized Gateway.
func New(addr string, persistDir string) (g *Gateway, err error) {
	// Create the directory if it doesn't exist.
	err = os.MkdirAll(persistDir, 0700)
	if err != nil {
		return
	}

	g = &Gateway{
		handlers: make(map[rpcID]modules.RPCFunc),
		initRPCs: make(map[string]modules.RPCFunc),

		peers: make(map[modules.NetAddress]*peer),
		nodes: make(map[modules.NetAddress]struct{}),

		persistDir: persistDir,
	}

	// Create the logger.
	g.log, err = persist.NewFileLogger(filepath.Join(g.persistDir, logFile))
	if err != nil {
		return nil, err
	}

	// Register RPCs.
	g.RegisterRPC("ShareNodes", g.shareNodes)
	g.RegisterConnectCall("ShareNodes", g.requestNodes)

	// Load the old node list. If it doesn't exist, no problem, but if it does,
	// we want to know about any errors preventing us from loading it.
	if loadErr := g.load(); loadErr != nil && !os.IsNotExist(loadErr) {
		return nil, loadErr
	}
	// Add the bootstrap peers to the node list.
	if build.Release == "standard" {
		for _, addr := range modules.BootstrapPeers {
			err := g.addNode(addr)
			if err != nil && err != errNodeExists {
				g.log.Printf("WARN: failed to add the bootstrap node '%v': %v", addr, err)
			}
		}
		g.save()
	}

	// Create listener and set address.
	g.listener, err = net.Listen("tcp", addr)
	if err != nil {
		return
	}
	// Automatically close the listener when g.threads.Stop() is called.
	g.threads.OnStop(func() {
		err := g.listener.Close()
		if err != nil {
			g.log.Println("WARN: closing the listener failed:", err)
		}
	})

	_, g.port, err = net.SplitHostPort(g.listener.Addr().String())
	if err != nil {
		return nil, err
	}
	if build.Release == "testing" {
		g.myAddr = modules.NetAddress(g.listener.Addr().String())
	}

	g.log.Println("INFO: gateway created, started logging")

	// Forward the RPC port, if possible.
	go g.threadedForwardPort(g.port)
	// Learn our external IP.
	go g.threadedLearnHostname()

	// Spawn the peer and node managers. These will attempt to keep the peer
	// and node lists healthy.
	go g.threadedPeerManager()
	go g.threadedNodeManager()

	// Spawn the primary listener.
	go g.threadedListen()

	return
}