Example #1
// NewLDBDatabase returns a LevelDB wrapped object.
func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
	// Calculate the cache and file descriptor allowance for this particular database
	cache = int(float64(cache) * cacheRatio[filepath.Base(file)])
	if cache < 16 {
		cache = 16
	}
	handles = int(float64(handles) * handleRatio[filepath.Base(file)])
	if handles < 16 {
		handles = 16
	}
	glog.V(logger.Info).Infof("Allotted %dMB cache and %d file handles to %s", cache, handles, file)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handles,
		BlockCacheCapacity:     cache / 2 * opt.MiB,
		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
		Filter:                 filter.NewBloomFilter(10),
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	return &LDBDatabase{
		fn: file,
		db: db,
	}, nil
}
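The pattern shared by most examples on this page — open the database, detect corruption, then fall back to leveldb.RecoverFile — can be distilled into a small standalone sketch. It is not taken from any project above; it uses goleveldb's errors.IsCorrupted helper (equivalent to the *errors.ErrCorrupted type assertion in Example #1), and the helper name and the path in main are illustrative only.

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

// openWithRecovery opens a goleveldb database and, if the open fails because
// of corruption, attempts an in-place recovery before giving up.
func openWithRecovery(path string, opts *opt.Options) (*leveldb.DB, error) {
	db, err := leveldb.OpenFile(path, opts)
	if errors.IsCorrupted(err) {
		// RecoverFile rebuilds the manifest from the table files; data that
		// cannot be read back may be dropped in the process.
		db, err = leveldb.RecoverFile(path, opts)
	}
	return db, err
}

func main() {
	db, err := openWithRecovery("/tmp/example-db", nil) // illustrative path
	if err != nil {
		log.Fatalf("open/recover failed: %v", err)
	}
	defer db.Close()
}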
Example #2
func (pdb *PDB) Recover() {
	db, err := leveldb.RecoverFile(DB_PATH, nil)
	// Check the error before dereferencing: RecoverFile returns a nil *DB on failure.
	if err != nil {
		log.Fatal("Error recovering db!", err)
	}
	pdb.db = *db
}
Example #3
func Open(file string) (*Instance, error) {
	opts := &opt.Options{
		OpenFilesCacheCapacity: 100,
		WriteBuffer:            4 << 20,
	}

	db, err := leveldb.OpenFile(file, opts)
	if leveldbIsCorrupted(err) {
		db, err = leveldb.RecoverFile(file, opts)
	}
	if leveldbIsCorrupted(err) {
		// The database is corrupted, and we've tried to recover it but it
		// didn't work. At this point there isn't much to do beyond dropping
		// the database and reindexing...
		l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
		if err := os.RemoveAll(file); err != nil {
			return nil, err
		}
		db, err = leveldb.OpenFile(file, opts)
	}
	if err != nil {
		return nil, err
	}

	return newDBInstance(db, file), nil
}
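Examples #3 and #15 call a leveldbIsCorrupted helper that is not shown on this page. A minimal sketch of what such a helper could look like, assuming goleveldb's errors package (the actual helper in the source project may also match on error strings or other conditions):

import dberrors "github.com/syndtr/goleveldb/leveldb/errors"

// leveldbIsCorrupted reports whether an error returned by OpenFile or
// RecoverFile indicates database corruption, as opposed to, say, a locked
// or missing directory.
func leveldbIsCorrupted(err error) bool {
	if err == nil {
		return false
	}
	return dberrors.IsCorrupted(err)
}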
Example #4
func (l *LevelDb) open() error {
	l.Lock()
	defer l.Unlock()

	var err error
	if l.db == nil {
		l.G().Log.Debug("+ LevelDb.open")
		fn := l.GetFilename()
		l.G().Log.Debug("| Opening LevelDB for local cache: %v %s", l, fn)
		l.db, err = leveldb.OpenFile(fn, nil)
		if err != nil {
			if _, ok := err.(*errors.ErrCorrupted); ok {
				l.G().Log.Debug("| LevelDB was corrupted; attempting recovery (%v)", err)
				l.db, err = leveldb.RecoverFile(fn, nil)
				if err != nil {
					l.G().Log.Debug("| Recovery failed: %v", err)
				} else {
					l.G().Log.Debug("| Recovery succeeded!")
				}
			}
		}
		l.G().Log.Debug("- LevelDb.open -> %s", ErrToOk(err))
	}
	return err
}
Example #5
File: db.go Project: parkghost/nodb
func (s Store) Repair(path string, cfg *config.Config) error {
	db, err := leveldb.RecoverFile(path, newOptions(&cfg.LevelDB))
	if err != nil {
		return err
	}

	db.Close()
	return nil
}
Example #6
func dumpLeveldb(path string) error {
	db, err := leveldb.OpenFile(path, &opt.Options{
		ErrorIfMissing: true,
	})
	if err != nil {
		if _, ok := err.(*leveldb_errors.ErrCorrupted); !ok {
			return err
		}
		log.Printf("Database is corrupted, trying to recover\n")
		db, err = leveldb.RecoverFile(path, nil)
		if err != nil {
			return fmt.Errorf("Could not recover database: %v\n", err)
		}
	}
	defer db.Close()

	i := db.NewIterator(nil, nil)
	defer i.Release()

	var rlog raft.Log

	if i.Last() {
		for bytes.HasPrefix(i.Key(), []byte("stablestore-")) {
			i.Prev()
		}
		for {
			if err := json.Unmarshal(i.Value(), &rlog); err != nil {
				log.Fatalf("Corrupted database: %v\n", err)
			}
			if rlog.Type == raft.LogCommand {
				rmsg := types.NewRobustMessageFromBytes(rlog.Data)
				lastModified = time.Unix(0, rmsg.Id.Id)
				break
			}
			i.Prev()
		}
	}
	i.First()

	fmt.Printf(fmt.Sprintf("%%%ds", padding)+"\tValue\n", "Key")

	for i.Next() {
		if bytes.HasPrefix(i.Key(), []byte("stablestore-")) {
			// TODO: also dump the stablestore values
		} else {
			if err := json.Unmarshal(i.Value(), &rlog); err != nil {
				log.Fatalf("Corrupted database: %v\n", err)
			}
			dumpLog(binary.BigEndian.Uint64(i.Key()), &rlog)
		}
	}

	return nil
}
Example #7
// NewLDBDatabase returns a LevelDB wrapped object. LDBDatabase does not persist data by
// itself but requires a background poller which syncs every X. `Flush` should be called
// when data needs to be stored and written to disk.
func NewLDBDatabase(file string) (*LDBDatabase, error) {
	// Open the db
	db, err := leveldb.OpenFile(file, &opt.Options{OpenFilesCacheCapacity: OpenFileLimit})
	// check for corruption and attempt to recover
	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	// (re) check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	return &LDBDatabase{
		fn: file,
		db: db,
	}, nil
}
Example #8
func (db *GoLevelDB) init(path string, conf *Config, repair bool) error {
	if conf == nil {
		conf = NewDefaultConfig()
	}

	// Create path if not exists first
	if err := os.MkdirAll(path, 0700); err != nil {
		return errors.Trace(err)
	}

	opts := &opt.Options{}
	opts.ErrorIfMissing = false
	opts.ErrorIfExist = false

	opts.Filter = filter.NewBloomFilter(conf.BloomFilterSize)

	opts.Compression = opt.SnappyCompression

	opts.BlockSize = conf.BlockSize
	opts.WriteBuffer = conf.WriteBufferSize
	opts.OpenFilesCacheCapacity = conf.MaxOpenFiles

	opts.CompactionTableSize = 32 * 1024 * 1024
	opts.WriteL0SlowdownTrigger = 16
	opts.WriteL0PauseTrigger = 64

	db.path = path
	db.opts = opts
	db.ropt = nil
	db.wopt = nil

	if repair {
		if rdb, err := leveldb.RecoverFile(db.path, db.opts); err != nil {
			return errors.Trace(err)
		} else {
			db.lvdb = rdb
			return nil
		}
	}

	var err error
	if db.lvdb, err = leveldb.OpenFile(path, db.opts); err != nil {
		return errors.Trace(err)
	}
	return nil
}
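Example #8 gates recovery behind an explicit repair flag instead of inspecting the open error. A hypothetical caller (not part of the original project; the function name and retry policy are assumptions) could fall back to a repair pass when the normal open fails:

// initWithRepairFallback first tries a normal open, then retries in repair
// mode, which routes through leveldb.RecoverFile as shown in Example #8.
// Note that this retries on any open error, not only on corruption.
func initWithRepairFallback(db *GoLevelDB, path string, conf *Config) error {
	if err := db.init(path, conf, false); err == nil {
		return nil
	}
	return db.init(path, conf, true)
}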
Example #9
// NewLevelDBStore opens a leveldb at the given directory to be used as a log-
// and stable storage for raft.
func NewLevelDBStore(dir string, errorIfExist bool) (*LevelDBStore, error) {
	db, err := leveldb.OpenFile(dir, &opt.Options{ErrorIfExist: errorIfExist})
	if err != nil {
		if errorIfExist && err == os.ErrExist {
			return nil, fmt.Errorf("You specified -singlenode or -join, but %q already contains data, indicating this node is already part of a RobustIRC network. THIS IS UNSAFE! It will lead to split-brain scenarios and data-loss. Please see http://robustirc.net/docs/adminguide.html#_healing_partitions if you are trying to heal a network partition.", dir)
		}
		if _, ok := err.(*leveldb_errors.ErrCorrupted); !ok {
			return nil, fmt.Errorf("could not open: %v", err)
		}
		db, err = leveldb.RecoverFile(dir, nil)
		if err != nil {
			return nil, fmt.Errorf("could not recover: %v", err)
		}
	}

	return &LevelDBStore{db: db}, nil
}
Example #10
// New creates a new *info as Info.
//
// dir is the directory where the leveldb that caches responses will be written.
// client must be an authenticated client to make requests to the Android Build API.
func New(dir string, client *http.Client) (Info, error) {
	db, err := leveldb.OpenFile(dir, nil)
	if err != nil && errors.IsCorrupted(err) {
		db, err = leveldb.RecoverFile(dir, nil)
	}
	if err != nil {
		return nil, fmt.Errorf("Failed to open leveldb at %s: %s", dir, err)
	}
	c, err := newAndroidCommits(client)
	if err != nil {
		return nil, fmt.Errorf("Failed to create commits: %s", err)
	}

	i := &info{db: db, commits: c}
	go i.poll()

	return i, nil
}
Example #11
func (this *Index) LoadOrCreateOf(file string) (*Index, error) {

	file = this.getFilenameFromFile(file)

	//log.Printf("for debug removing : %s", file)
	//TODO renewable or recache analyze
	//os.RemoveAll(file)
	//	return this, errors.New("Not implemented")
	var err error
	//TODO re-get last states
	// Use RecoverFile in order to also handle databases that were not closed cleanly
	//Test upgrade size file to limit operation ???

	this.db, err = leveldb.OpenFile(file, &opt.Options{
		BlockSize:                     32 * opt.KiB,
		CompactionTableSize:           2 * opt.MiB,
		CompactionTableSizeMultiplier: 8,
		CompactionTotalSize:           20 * opt.MiB,
		CompactionTotalSizeMultiplier: 10,
		CompactionL0Trigger:           8,
	})
	if err != nil {
		log.Printf("There seem to have a problem with the index. Will try to correct that %v", err)

		this.db, err = leveldb.RecoverFile(file, &opt.Options{
			BlockSize:                     32 * opt.KiB,
			CompactionTableSize:           2 * opt.MiB,
			CompactionTableSizeMultiplier: 8,
			CompactionTotalSize:           20 * opt.MiB,
			CompactionTotalSizeMultiplier: 10,
			CompactionL0Trigger:           8,
		})
	}
	this.batch = new(leveldb.Batch)
	this.batch_size = 32 * opt.KiB / (8 * 5)
	if err != nil {
		log.Printf("%v", err)
	}

	//	stats, _ := this.db.GetProperty("leveldb.stats")
	//	log.Printf("stats : %v", stats)
	return this, nil
}
Example #12
func openLevelDB(filepath string) (*leveldb.DB, error) {
	o := &opt.Options{
		Filter: filter.NewBloomFilter(10),
		Strict: opt.StrictAll,
	}
	db, err := leveldb.OpenFile(filepath, o)
	if err == nil {
		return db, nil
	}
	if _, ok := err.(*errors.ErrCorrupted); ok {
		log.Printf("recovering leveldb: %v", err)
		db, err = leveldb.RecoverFile(filepath, o)
		if err != nil {
			log.Printf("failed to recover leveldb: %v", err)
			return nil, err
		}
		return db, nil
	}
	log.Printf("failed to open leveldb: %v", err)
	return nil, err
}
Example #13
// newPersistentNodeDB creates/opens a leveldb backed persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
	opts := &opt.Options{OpenFilesCacheCapacity: 5}
	db, err := leveldb.OpenFile(path, opts)
	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
		db, err = leveldb.RecoverFile(path, nil)
	}
	if err != nil {
		return nil, err
	}
	// The nodes contained in the cache correspond to a certain protocol version.
	// Flush all nodes if the version doesn't match.
	currentVer := make([]byte, binary.MaxVarintLen64)
	currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]

	blob, err := db.Get(nodeDBVersionKey, nil)
	switch err {
	case leveldb.ErrNotFound:
		// Version not found (i.e. empty cache), insert it
		if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil {
			db.Close()
			return nil, err
		}

	case nil:
		// Version present, flush if different
		if !bytes.Equal(blob, currentVer) {
			db.Close()
			if err = os.RemoveAll(path); err != nil {
				return nil, err
			}
			return newPersistentNodeDB(path, version, self)
		}
	}
	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}, nil
}
Example #14
// NewDriver create a leveldb store driver
func NewDriver(dbpath string) Driver {
	var db *leveldb.DB
	var err error
	var cache *lru.Cache

	// If the database directory already exists, open it via RecoverFile
	// (which rebuilds the manifest); otherwise create it with a normal OpenFile.
	_, err = os.Stat(dbpath)
	if err == nil || os.IsExist(err) {
		db, err = leveldb.RecoverFile(dbpath, nil)
	} else {
		db, err = leveldb.OpenFile(dbpath, nil)
	}
	if err != nil {
		log.Fatal(err)
	}
	cache = lru.New(1000)
	var RWLocker = new(sync.Mutex)
	return Driver{
		db:       db,
		cache:    cache,
		RWLocker: RWLocker,
	}
}
Example #15
func Open(file string) (*Instance, error) {
	opts := &opt.Options{
		OpenFilesCacheCapacity: 100,
		WriteBuffer:            4 << 20,
	}

	if _, err := os.Stat(file); os.IsNotExist(err) {
		// The file we are looking to open does not exist. This may be the
		// first launch so we should look for an old version and try to
		// convert it.
		if err := checkConvertDatabase(file); err != nil {
			l.Infoln("Converting old database:", err)
			l.Infoln("Will rescan from scratch.")
		}
	}

	db, err := leveldb.OpenFile(file, opts)
	if leveldbIsCorrupted(err) {
		db, err = leveldb.RecoverFile(file, opts)
	}
	if leveldbIsCorrupted(err) {
		// The database is corrupted, and we've tried to recover it but it
		// didn't work. At this point there isn't much to do beyond dropping
		// the database and reindexing...
		l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
		if err := os.RemoveAll(file); err != nil {
			return nil, err
		}
		db, err = leveldb.OpenFile(file, opts)
	}
	if err != nil {
		return nil, err
	}

	return newDBInstance(db), nil
}
Example #16
func syncthingMain() {
	// Create a main service manager. We'll add things to this as we go along.
	// We want any logging it does to go through our log system.
	mainSvc := suture.New("main", suture.Spec{
		Log: func(line string) {
			if debugSuture {
				l.Debugln(line)
			}
		},
	})
	mainSvc.ServeBackground()

	// Set a log prefix similar to the ID we will have later on, or early log
	// lines look ugly.
	l.SetPrefix("[start] ")

	if auditEnabled {
		startAuditing(mainSvc)
	}

	if verbose {
		mainSvc.Add(newVerboseSvc())
	}

	// Event subscription for the API; must start early to catch the early events.
	apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	// Ensure that we have a certificate and key.
	cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
	if err != nil {
		cert, err = newCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName)
		if err != nil {
			l.Fatalln("load cert:", err)
		}
	}

	// We reinitialize the predictable RNG with our device ID, to get a
	// sequence that is always the same but unique to this syncthing instance.
	predictableRandom.Seed(seedFromBytes(cert.Certificate[0]))

	myID = protocol.NewDeviceID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Emit the Starting event, now that we know who we are.

	events.Default.Log(events.Starting, map[string]string{
		"home": baseDirs["config"],
		"myID": myID.String(),
	})

	// Prepare to be able to save configuration

	cfgFile := locations[locConfigFile]

	var myName string

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	if info, err := os.Stat(cfgFile); err == nil {
		if !info.Mode().IsRegular() {
			l.Fatalln("Config file is not a file?")
		}
		cfg, err = config.Load(cfgFile, myID)
		if err == nil {
			myCfg := cfg.Devices()[myID]
			if myCfg.Name == "" {
				myName, _ = os.Hostname()
			} else {
				myName = myCfg.Name
			}
		} else {
			l.Fatalln("Configuration:", err)
		}
	} else {
		l.Infoln("No config file; starting with empty defaults")
		myName, _ = os.Hostname()
		newCfg := defaultConfig(myName)
		cfg = config.Wrap(cfgFile, newCfg)
		cfg.Save()
		l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
	}

	if cfg.Raw().OriginalVersion != config.CurrentVersion {
		l.Infoln("Archiving a copy of old config file format")
		// Archive a copy
		osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion))
		// Save the new version
		cfg.Save()
	}

	if err := checkShortIDs(cfg); err != nil {
		l.Fatalln("Short device IDs are in conflict. Unlucky!\n  Regenerate the device ID of one if the following:\n  ", err)
	}

	if len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			runtime.SetBlockProfileRate(1)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.

	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{bepProtocolName},
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		},
	}

	// If the read or write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.

	opts := cfg.Options()

	if !opts.SymlinksEnabled {
		symlinks.Supported = false
	}

	protocol.PingTimeout = time.Duration(opts.PingTimeoutS) * time.Second
	protocol.PingIdleTime = time.Duration(opts.PingIdleTimeS) * time.Second

	if opts.MaxSendKbps > 0 {
		writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps))
	}
	if opts.MaxRecvKbps > 0 {
		readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
	}

	if (opts.MaxRecvKbps > 0 || opts.MaxSendKbps > 0) && !opts.LimitBandwidthInLan {
		lans, _ = osutil.GetLans()
		networks := make([]string, 0, len(lans))
		for _, lan := range lans {
			networks = append(networks, lan.String())
		}
		l.Infoln("Local networks:", strings.Join(networks, ", "))
	}

	dbFile := locations[locDatabase]
	ldb, err := leveldb.OpenFile(dbFile, dbOpts())
	if err != nil && errors.IsCorrupted(err) {
		ldb, err = leveldb.RecoverFile(dbFile, dbOpts())
	}
	if err != nil {
		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
	}

	// Remove database entries for folders that no longer exist in the config
	folders := cfg.Folders()
	for _, folder := range db.ListFolders(ldb) {
		if _, ok := folders[folder]; !ok {
			l.Infof("Cleaning data for dropped folder %q", folder)
			db.DropFolder(ldb, folder)
		}
	}

	m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
	cfg.Subscribe(m)

	if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
		it, err := strconv.Atoi(t)
		if err == nil {
			m.StartDeadlockDetector(time.Duration(it) * time.Second)
		}
	} else if !IsRelease || IsBeta {
		m.StartDeadlockDetector(20 * 60 * time.Second)
	}

	// Clear out old indexes for other devices. Otherwise we'll start up and
	// start needing a bunch of files which are nowhere to be found. This
	// needs to be changed when we correctly do persistent indexes.
	for _, folderCfg := range cfg.Folders() {
		m.AddFolder(folderCfg)
		for _, device := range folderCfg.DeviceIDs() {
			if device == myID {
				continue
			}
			m.Index(device, folderCfg.ID, nil, 0, nil)
		}
		// Routine to pull blocks from other devices to synchronize the local
		// folder. Does not run when we are in read only (publish only) mode.
		if folderCfg.ReadOnly {
			l.Okf("Ready to synchronize %s (read only; no external updates accepted)", folderCfg.ID)
			m.StartFolderRO(folderCfg.ID)
		} else {
			l.Okf("Ready to synchronize %s (read-write)", folderCfg.ID)
			m.StartFolderRW(folderCfg.ID)
		}
	}

	mainSvc.Add(m)

	// GUI

	setupGUI(mainSvc, cfg, m, apiSub)

	// The default port we announce, possibly modified by setupUPnP next.

	addr, err := net.ResolveTCPAddr("tcp", opts.ListenAddress[0])
	if err != nil {
		l.Fatalln("Bad listen address:", err)
	}

	// Start discovery

	localPort := addr.Port
	discoverer = discovery(localPort)

	// Start UPnP. The UPnP service will restart global discovery if the
	// external port changes.

	if opts.UPnPEnabled {
		upnpSvc := newUPnPSvc(cfg, localPort)
		mainSvc.Add(upnpSvc)
	}

	connectionSvc := newConnectionSvc(cfg, myID, m, tlsCfg)
	cfg.Subscribe(connectionSvc)
	mainSvc.Add(connectionSvc)

	if cpuProfile {
		f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	for _, device := range cfg.Devices() {
		if len(device.Name) > 0 {
			l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses)
		}
	}

	if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion {
		l.Infoln("Anonymous usage report has changed; revoking acceptance")
		opts.URAccepted = 0
		opts.URUniqueID = ""
		cfg.SetOptions(opts)
	}
	if opts.URAccepted >= usageReportVersion {
		if opts.URUniqueID == "" {
			// Previously the ID was generated from the node ID. We now need
			// to generate a new one.
			opts.URUniqueID = randomString(8)
			cfg.SetOptions(opts)
			cfg.Save()
		}
	}

	// The usageReportingManager registers itself to listen to configuration
	// changes, and there's nothing more we need to tell it from the outside.
	// Hence we don't keep the returned pointer.
	newUsageReportingManager(m, cfg)

	if opts.RestartOnWakeup {
		go standbyMonitor()
	}

	if opts.AutoUpgradeIntervalH > 0 {
		if noUpgrade {
			l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
		} else if IsRelease {
			go autoUpgrade()
		} else {
			l.Infof("No automatic upgrades; %s is not a release version.", Version)
		}
	}

	events.Default.Log(events.StartupComplete, map[string]string{
		"myID": myID.String(),
	})
	go generatePingEvents()

	cleanConfigDirectory()

	code := <-stop

	mainSvc.Stop()

	l.Okln("Exiting")
	os.Exit(code)
}
Example #17
func syncthingMain() {
	// Create a main service manager. We'll add things to this as we go along.
	// We want any logging it does to go through our log system.
	mainSvc := suture.New("main", suture.Spec{
		Log: func(line string) {
			l.Debugln(line)
		},
	})
	mainSvc.ServeBackground()

	// Set a log prefix similar to the ID we will have later on, or early log
	// lines look ugly.
	l.SetPrefix("[start] ")

	if auditEnabled {
		startAuditing(mainSvc)
	}

	if verbose {
		mainSvc.Add(newVerboseSvc())
	}

	errors := logger.NewRecorder(l, logger.LevelWarn, maxSystemErrors, 0)
	systemLog := logger.NewRecorder(l, logger.LevelDebug, maxSystemLog, initialSystemLog)

	// Event subscription for the API; must start early to catch the early events.
	apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	// Attempt to increase the limit on number of open files to the maximum
	// allowed, in case we have many peers. We don't really care enough to
	// report the error if there is one.
	osutil.MaximizeOpenFileLimit()

	// Ensure that we have a certificate and key.
	cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
	if err != nil {
		l.Infof("Generating RSA key and certificate for %s...", tlsDefaultCommonName)
		cert, err = tlsutil.NewCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName, tlsRSABits)
		if err != nil {
			l.Fatalln(err)
		}
	}

	// We reinitialize the predictable RNG with our device ID, to get a
	// sequence that is always the same but unique to this syncthing instance.
	predictableRandom.Seed(seedFromBytes(cert.Certificate[0]))

	myID = protocol.NewDeviceID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Emit the Starting event, now that we know who we are.

	events.Default.Log(events.Starting, map[string]string{
		"home": baseDirs["config"],
		"myID": myID.String(),
	})

	// Prepare to be able to save configuration

	cfgFile := locations[locConfigFile]

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	cfg, myName, err := loadConfig(cfgFile)
	if err != nil {
		if os.IsNotExist(err) {
			l.Infoln("No config file; starting with empty defaults")
			myName, _ = os.Hostname()
			newCfg := defaultConfig(myName)
			cfg = config.Wrap(cfgFile, newCfg)
			cfg.Save()
			l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
		} else {
			l.Fatalln("Loading config:", err)
		}
	}

	if cfg.Raw().OriginalVersion != config.CurrentVersion {
		l.Infoln("Archiving a copy of old config file format")
		// Archive a copy
		osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion))
		// Save the new version
		cfg.Save()
	}

	if err := checkShortIDs(cfg); err != nil {
		l.Fatalln("Short device IDs are in conflict. Unlucky!\n  Regenerate the device ID of one if the following:\n  ", err)
	}

	if len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			runtime.SetBlockProfileRate(1)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.

	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{bepProtocolName},
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		},
	}

	// If the read or write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.

	opts := cfg.Options()

	if !opts.SymlinksEnabled {
		symlinks.Supported = false
	}

	if (opts.MaxRecvKbps > 0 || opts.MaxSendKbps > 0) && !opts.LimitBandwidthInLan {
		lans, _ = osutil.GetLans()
		networks := make([]string, 0, len(lans))
		for _, lan := range lans {
			networks = append(networks, lan.String())
		}
		for _, lan := range opts.AlwaysLocalNets {
			_, ipnet, err := net.ParseCIDR(lan)
			if err != nil {
				l.Infoln("Network", lan, "is malformed:", err)
				continue
			}
			networks = append(networks, ipnet.String())
		}
		l.Infoln("Local networks:", strings.Join(networks, ", "))
	}

	dbFile := locations[locDatabase]
	dbOpts := dbOpts(cfg)
	ldb, err := leveldb.OpenFile(dbFile, dbOpts)
	if leveldbIsCorrupted(err) {
		ldb, err = leveldb.RecoverFile(dbFile, dbOpts)
	}
	if leveldbIsCorrupted(err) {
		// The database is corrupted, and we've tried to recover it but it
		// didn't work. At this point there isn't much to do beyond dropping
		// the database and reindexing...
		l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
		if err := resetDB(); err != nil {
			l.Fatalln("Remove database:", err)
		}
		ldb, err = leveldb.OpenFile(dbFile, dbOpts)
	}
	if err != nil {
		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
	}

	// Remove database entries for folders that no longer exist in the config
	folders := cfg.Folders()
	for _, folder := range db.ListFolders(ldb) {
		if _, ok := folders[folder]; !ok {
			l.Infof("Cleaning data for dropped folder %q", folder)
			db.DropFolder(ldb, folder)
		}
	}

	m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
	cfg.Subscribe(m)

	if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
		it, err := strconv.Atoi(t)
		if err == nil {
			m.StartDeadlockDetector(time.Duration(it) * time.Second)
		}
	} else if !IsRelease || IsBeta {
		m.StartDeadlockDetector(20 * time.Minute)
	}

	if paused {
		for device := range cfg.Devices() {
			m.PauseDevice(device)
		}
	}

	// Clear out old indexes for other devices. Otherwise we'll start up and
	// start needing a bunch of files which are nowhere to be found. This
	// needs to be changed when we correctly do persistent indexes.
	for _, folderCfg := range cfg.Folders() {
		m.AddFolder(folderCfg)
		for _, device := range folderCfg.DeviceIDs() {
			if device == myID {
				continue
			}
			m.Index(device, folderCfg.ID, nil, 0, nil)
		}
		// Routine to pull blocks from other devices to synchronize the local
		// folder. Does not run when we are in read only (publish only) mode.
		if folderCfg.ReadOnly {
			m.StartFolderRO(folderCfg.ID)
		} else {
			m.StartFolderRW(folderCfg.ID)
		}
	}

	mainSvc.Add(m)

	// The default port we announce, possibly modified by setupUPnP next.

	uri, err := url.Parse(opts.ListenAddress[0])
	if err != nil {
		l.Fatalf("Failed to parse listen address %s: %v", opts.ListenAddress[0], err)
	}

	addr, err := net.ResolveTCPAddr("tcp", uri.Host)
	if err != nil {
		l.Fatalln("Bad listen address:", err)
	}

	// The externalAddr tracks our external addresses for discovery purposes.

	var addrList *addressLister

	// Start UPnP

	if opts.UPnPEnabled {
		upnpSvc := newUPnPSvc(cfg, addr.Port)
		mainSvc.Add(upnpSvc)

		// The external address tracker needs to know about the UPnP service
		// so it can check for an external mapped port.
		addrList = newAddressLister(upnpSvc, cfg)
	} else {
		addrList = newAddressLister(nil, cfg)
	}

	// Start relay management

	var relaySvc *relay.Svc
	if opts.RelaysEnabled && (opts.GlobalAnnEnabled || opts.RelayWithoutGlobalAnn) {
		relaySvc = relay.NewSvc(cfg, tlsCfg)
		mainSvc.Add(relaySvc)
	}

	// Start discovery

	cachedDiscovery := discover.NewCachingMux()
	mainSvc.Add(cachedDiscovery)

	if cfg.Options().GlobalAnnEnabled {
		for _, srv := range cfg.GlobalDiscoveryServers() {
			l.Infoln("Using discovery server", srv)
			gd, err := discover.NewGlobal(srv, cert, addrList, relaySvc)
			if err != nil {
				l.Warnln("Global discovery:", err)
				continue
			}

			// Each global discovery server gets its results cached for five
			// minutes, and is not asked again for a minute when it's returned
			// unsuccessfully.
			cachedDiscovery.Add(gd, 5*time.Minute, time.Minute, globalDiscoveryPriority)
		}
	}

	if cfg.Options().LocalAnnEnabled {
		// v4 broadcasts
		bcd, err := discover.NewLocal(myID, fmt.Sprintf(":%d", cfg.Options().LocalAnnPort), addrList, relaySvc)
		if err != nil {
			l.Warnln("IPv4 local discovery:", err)
		} else {
			cachedDiscovery.Add(bcd, 0, 0, ipv4LocalDiscoveryPriority)
		}
		// v6 multicasts
		mcd, err := discover.NewLocal(myID, cfg.Options().LocalAnnMCAddr, addrList, relaySvc)
		if err != nil {
			l.Warnln("IPv6 local discovery:", err)
		} else {
			cachedDiscovery.Add(mcd, 0, 0, ipv6LocalDiscoveryPriority)
		}
	}

	// GUI

	setupGUI(mainSvc, cfg, m, apiSub, cachedDiscovery, relaySvc, errors, systemLog)

	// Start connection management

	connectionSvc := connections.NewConnectionSvc(cfg, myID, m, tlsCfg, cachedDiscovery, relaySvc, bepProtocolName, tlsDefaultCommonName, lans)
	mainSvc.Add(connectionSvc)

	if cpuProfile {
		f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
	}

	for _, device := range cfg.Devices() {
		if len(device.Name) > 0 {
			l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses)
		}
	}

	if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion {
		l.Infoln("Anonymous usage report has changed; revoking acceptance")
		opts.URAccepted = 0
		opts.URUniqueID = ""
		cfg.SetOptions(opts)
	}
	if opts.URAccepted >= usageReportVersion {
		if opts.URUniqueID == "" {
			// Previously the ID was generated from the node ID. We now need
			// to generate a new one.
			opts.URUniqueID = randomString(8)
			cfg.SetOptions(opts)
			cfg.Save()
		}
	}

	// The usageReportingManager registers itself to listen to configuration
	// changes, and there's nothing more we need to tell it from the outside.
	// Hence we don't keep the returned pointer.
	newUsageReportingManager(cfg, m)

	if opts.RestartOnWakeup {
		go standbyMonitor()
	}

	if opts.AutoUpgradeIntervalH > 0 {
		if noUpgrade {
			l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
		} else if IsRelease {
			go autoUpgrade(cfg)
		} else {
			l.Infof("No automatic upgrades; %s is not a release version.", Version)
		}
	}

	events.Default.Log(events.StartupComplete, map[string]string{
		"myID": myID.String(),
	})
	go generatePingEvents()

	cleanConfigDirectory()

	code := <-stop

	mainSvc.Stop()

	l.Okln("Exiting")

	if cpuProfile {
		pprof.StopCPUProfile()
	}

	os.Exit(code)
}
Example #18
func main() {
	defer common.LogPanic()

	var err error
	// Setup flags.
	dbConf := buildbot.DBConfigFromFlags() // Global init to initialize glog and parse arguments.

	// Global init to initialize glog and parse arguments.
	common.InitWithMetrics("internal", graphiteServer)

	if !*local {
		*targetList = metadata.Must(metadata.ProjectGet("datahopper_internal_targets"))
	}
	targets := strings.Split(*targetList, " ")
	glog.Infof("Targets: %#v", targets)

	codenameDB, err = leveldb.OpenFile(*codenameDbDir, nil)
	if err != nil && errors.IsCorrupted(err) {
		codenameDB, err = leveldb.RecoverFile(*codenameDbDir, nil)
	}
	if err != nil {
		glog.Fatalf("Failed to open codename leveldb at %s: %s", *codenameDbDir, err)
	}
	// Initialize the buildbot database.
	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	if err := dbConf.InitDB(); err != nil {
		glog.Fatal(err)
	}

	var cookieSalt = "notverysecret"
	var clientID = "31977622648-1873k0c1e5edaka4adpv1ppvhr5id3qm.apps.googleusercontent.com"
	var clientSecret = "cw0IosPu4yjaG2KWmppj2guj"
	var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		redirectURL = "https://internal.skia.org/oauth2callback/"
	}
	login.Init(clientID, clientSecret, redirectURL, cookieSalt, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST, *local)

	// Ingest Android framework builds.
	go func() {
		glog.Infof("Starting.")
		repos := gitinfo.NewRepoMap(*workdir)

		// In this case we don't want a backoff transport since the Apiary backend
		// seems to fail a lot, so we basically want to fall back to polling if a
		// call fails.
		client, err := auth.NewClientWithTransport(*local, *oauthCacheFile, "", &http.Transport{Dial: util.DialTimeout}, androidbuildinternal.AndroidbuildInternalScope, storage.CloudPlatformScope)
		if err != nil {
			glog.Fatalf("Unable to create installed app oauth client:%s", err)
		}

		buildService, err := androidbuildinternal.New(client)
		if err != nil {
			glog.Fatalf("Failed to obtain Android build service: %v", err)
		}
		glog.Infof("Ready to start loop.")
		step(targets, buildService, repos)
		for _ = range time.Tick(*period) {
			step(targets, buildService, repos)
		}
	}()

	r := mux.NewRouter()
	r.HandleFunc("/", indexHandler)
	r.HandleFunc("/builders/{codename}/builds/{buildNumber}", redirectHandler)
	r.HandleFunc("/builders/{codename}", builderRedirectHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Fatal(http.ListenAndServe(*port, nil))
}
Example #19
func main() {
	defer common.LogPanic()

	var err error

	// Global init to initialize glog and parse arguments.
	common.InitWithMetrics("internal", graphiteServer)

	if !*local {
		*targetList = metadata.Must(metadata.ProjectGet("datahopper_internal_targets"))
	}
	targets := strings.Split(*targetList, " ")
	glog.Infof("Targets: %#v", targets)

	codenameDB, err = leveldb.OpenFile(*codenameDbDir, nil)
	if err != nil && errors.IsCorrupted(err) {
		codenameDB, err = leveldb.RecoverFile(*codenameDbDir, nil)
	}
	if err != nil {
		glog.Fatalf("Failed to open codename leveldb at %s: %s", *codenameDbDir, err)
	}
	// Initialize the buildbot database.
	db, err = buildbot.NewRemoteDB(*buildbotDbHost)
	if err != nil {
		glog.Fatal(err)
	}

	var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		redirectURL = "https://internal.skia.org/oauth2callback/"
	}
	if err := login.InitFromMetadataOrJSON(redirectURL, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST); err != nil {
		glog.Fatalf("Failed to initialize login system: %s", err)

	}

	if *local {
		webhook.InitRequestSaltForTesting()
	} else {
		webhook.MustInitRequestSaltFromMetadata()
	}

	repos = gitinfo.NewRepoMap(*workdir)

	// Ingest Android framework builds.
	go func() {
		glog.Infof("Starting.")

		// In this case we don't want a backoff transport since the Apiary backend
		// seems to fail a lot, so we basically want to fall back to polling if a
		// call fails.
		client, err := auth.NewJWTServiceAccountClient("", "", &http.Transport{Dial: util.DialTimeout}, androidbuildinternal.AndroidbuildInternalScope, storage.CloudPlatformScope)
		if err != nil {
			glog.Fatalf("Unable to create authenticated client: %s", err)
		}

		buildService, err := androidbuildinternal.New(client)
		if err != nil {
			glog.Fatalf("Failed to obtain Android build service: %v", err)
		}
		glog.Infof("Ready to start loop.")
		step(targets, buildService)
		for _ = range time.Tick(*period) {
			step(targets, buildService)
		}
	}()

	r := mux.NewRouter()
	r.HandleFunc("/", indexHandler)
	r.HandleFunc("/builders/{codename}/builds/{buildNumber}", redirectHandler)
	r.HandleFunc("/builders/{codename}", builderRedirectHandler)
	r.HandleFunc("/ingestBuild", ingestBuildHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Fatal(http.ListenAndServe(*port, nil))
}