func (db *DB) compactionError() {
	var (
		err     error
		wlocked bool
	)
noerr:
	// No error.
	for {
		select {
		case err = <-db.compErrSetC:
			switch {
			case err == nil:
			case errors.IsCorrupted(err):
				goto hasperr
			default:
				goto haserr
			}
		case <-db.closeC:
			return
		}
	}
haserr:
	// Transient error.
	for {
		select {
		case db.compErrC <- err:
		case err = <-db.compErrSetC:
			switch {
			case err == nil:
				goto noerr
			case errors.IsCorrupted(err):
				goto hasperr
			default:
			}
		case <-db.closeC:
			return
		}
	}
hasperr:
	// Persistent error.
	for {
		select {
		case db.compErrC <- err:
		case db.compPerErrC <- err:
		case db.writeLockC <- struct{}{}:
			// Hold write lock, so that write won't pass-through.
			wlocked = true
		case <-db.closeC:
			if wlocked {
				// We should release the lock or Close will hang.
				<-db.writeLockC
			}
			return
		}
	}
}
func (i *mergedIterator) iterErr(iter Iterator) bool {
	if err := iter.Error(); err != nil {
		if i.errf != nil {
			i.errf(err)
		}
		if i.strict || !errors.IsCorrupted(err) {
			i.err = err
			return true
		}
	}
	return false
}
func (i *indexedIterator) dataErr() bool {
	if err := i.data.Error(); err != nil {
		if i.errf != nil {
			i.errf(err)
		}
		if i.strict || !errors.IsCorrupted(err) {
			i.err = err
			return true
		}
	}
	return false
}
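Both iterator helpers above apply the same gate: in strict mode any error aborts iteration, otherwise corrupted entries are skipped and only non-corruption errors abort. A minimal standalone sketch of that condition, assuming the goleveldb errors package; the helper name shouldStop is ours, not goleveldb's:

// shouldStop mirrors the gate used by iterErr and dataErr above: in strict
// mode any error stops iteration; otherwise corruption is tolerated and only
// other errors stop it.
func shouldStop(strict bool, err error) bool {
	return err != nil && (strict || !errors.IsCorrupted(err))
}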
// A "better" version of leveldb's errors.IsCorrupted. func leveldbIsCorrupted(err error) bool { switch { case err == nil: return false case errors.IsCorrupted(err): return true case strings.Contains(err.Error(), "corrupted"): return true } return false }
// New creates a new *info as Info.
//
// dir is the directory where the leveldb that caches responses will be written.
// client must be an authenticated client to make requests to the Android Build API.
func New(dir string, client *http.Client) (Info, error) {
	db, err := leveldb.OpenFile(dir, nil)
	if err != nil && errors.IsCorrupted(err) {
		db, err = leveldb.RecoverFile(dir, nil)
	}
	if err != nil {
		return nil, fmt.Errorf("Failed to open leveldb at %s: %s", dir, err)
	}
	c, err := newAndroidCommits(client)
	if err != nil {
		return nil, fmt.Errorf("Failed to create commits: %s", err)
	}
	i := &info{db: db, commits: c}
	go i.poll()
	return i, nil
}
func syncthingMain() {
	// Create a main service manager. We'll add things to this as we go along.
	// We want any logging it does to go through our log system.
	mainSvc := suture.New("main", suture.Spec{
		Log: func(line string) {
			if debugSuture {
				l.Debugln(line)
			}
		},
	})
	mainSvc.ServeBackground()

	// Set a log prefix similar to the ID we will have later on, or early log
	// lines look ugly.
	l.SetPrefix("[start] ")

	if auditEnabled {
		startAuditing(mainSvc)
	}

	if verbose {
		mainSvc.Add(newVerboseSvc())
	}

	// Event subscription for the API; must start early to catch the early events.
	apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	// Ensure that we have a certificate and key.
	cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
	if err != nil {
		cert, err = newCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName)
		if err != nil {
			l.Fatalln("load cert:", err)
		}
	}

	// We reinitialize the predictable RNG with our device ID, to get a
	// sequence that is always the same but unique to this syncthing instance.
	predictableRandom.Seed(seedFromBytes(cert.Certificate[0]))

	myID = protocol.NewDeviceID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Emit the Starting event, now that we know who we are.
	events.Default.Log(events.Starting, map[string]string{
		"home": baseDirs["config"],
		"myID": myID.String(),
	})

	// Prepare to be able to save configuration
	cfgFile := locations[locConfigFile]

	var myName string

	// Load the configuration file, if it exists.
	// If it does not, create a template.
	if info, err := os.Stat(cfgFile); err == nil {
		if !info.Mode().IsRegular() {
			l.Fatalln("Config file is not a file?")
		}
		cfg, err = config.Load(cfgFile, myID)
		if err == nil {
			myCfg := cfg.Devices()[myID]
			if myCfg.Name == "" {
				myName, _ = os.Hostname()
			} else {
				myName = myCfg.Name
			}
		} else {
			l.Fatalln("Configuration:", err)
		}
	} else {
		l.Infoln("No config file; starting with empty defaults")
		myName, _ = os.Hostname()
		newCfg := defaultConfig(myName)
		cfg = config.Wrap(cfgFile, newCfg)
		cfg.Save()
		l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
	}

	if cfg.Raw().OriginalVersion != config.CurrentVersion {
		l.Infoln("Archiving a copy of old config file format")
		// Archive a copy
		osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion))
		// Save the new version
		cfg.Save()
	}

	if err := checkShortIDs(cfg); err != nil {
		l.Fatalln("Short device IDs are in conflict. Unlucky!\n  Regenerate the device ID of one of the following:\n  ", err)
	}

	if len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			runtime.SetBlockProfileRate(1)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.
	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{bepProtocolName},
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		},
	}

	// If the read or write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.
	opts := cfg.Options()

	if !opts.SymlinksEnabled {
		symlinks.Supported = false
	}

	protocol.PingTimeout = time.Duration(opts.PingTimeoutS) * time.Second
	protocol.PingIdleTime = time.Duration(opts.PingIdleTimeS) * time.Second

	if opts.MaxSendKbps > 0 {
		writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps))
	}
	if opts.MaxRecvKbps > 0 {
		readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
	}

	if (opts.MaxRecvKbps > 0 || opts.MaxSendKbps > 0) && !opts.LimitBandwidthInLan {
		lans, _ = osutil.GetLans()
		networks := make([]string, 0, len(lans))
		for _, lan := range lans {
			networks = append(networks, lan.String())
		}
		l.Infoln("Local networks:", strings.Join(networks, ", "))
	}

	dbFile := locations[locDatabase]
	ldb, err := leveldb.OpenFile(dbFile, dbOpts())
	if err != nil && errors.IsCorrupted(err) {
		ldb, err = leveldb.RecoverFile(dbFile, dbOpts())
	}
	if err != nil {
		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
	}

	// Remove database entries for folders that no longer exist in the config
	folders := cfg.Folders()
	for _, folder := range db.ListFolders(ldb) {
		if _, ok := folders[folder]; !ok {
			l.Infof("Cleaning data for dropped folder %q", folder)
			db.DropFolder(ldb, folder)
		}
	}

	m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
	cfg.Subscribe(m)

	if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
		it, err := strconv.Atoi(t)
		if err == nil {
			m.StartDeadlockDetector(time.Duration(it) * time.Second)
		}
	} else if !IsRelease || IsBeta {
		m.StartDeadlockDetector(20 * 60 * time.Second)
	}

	// Clear out old indexes for other devices. Otherwise we'll start up and
	// start needing a bunch of files which are nowhere to be found. This
	// needs to be changed when we correctly do persistent indexes.
	for _, folderCfg := range cfg.Folders() {
		m.AddFolder(folderCfg)
		for _, device := range folderCfg.DeviceIDs() {
			if device == myID {
				continue
			}
			m.Index(device, folderCfg.ID, nil, 0, nil)
		}
		// Routine to pull blocks from other devices to synchronize the local
		// folder. Does not run when we are in read only (publish only) mode.
		if folderCfg.ReadOnly {
			l.Okf("Ready to synchronize %s (read only; no external updates accepted)", folderCfg.ID)
			m.StartFolderRO(folderCfg.ID)
		} else {
			l.Okf("Ready to synchronize %s (read-write)", folderCfg.ID)
			m.StartFolderRW(folderCfg.ID)
		}
	}

	mainSvc.Add(m)

	// GUI
	setupGUI(mainSvc, cfg, m, apiSub)

	// The default port we announce, possibly modified by setupUPnP next.
	addr, err := net.ResolveTCPAddr("tcp", opts.ListenAddress[0])
	if err != nil {
		l.Fatalln("Bad listen address:", err)
	}

	// Start discovery
	localPort := addr.Port
	discoverer = discovery(localPort)

	// Start UPnP. The UPnP service will restart global discovery if the
	// external port changes.
	if opts.UPnPEnabled {
		upnpSvc := newUPnPSvc(cfg, localPort)
		mainSvc.Add(upnpSvc)
	}

	connectionSvc := newConnectionSvc(cfg, myID, m, tlsCfg)
	cfg.Subscribe(connectionSvc)
	mainSvc.Add(connectionSvc)

	if cpuProfile {
		f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	for _, device := range cfg.Devices() {
		if len(device.Name) > 0 {
			l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses)
		}
	}

	if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion {
		l.Infoln("Anonymous usage report has changed; revoking acceptance")
		opts.URAccepted = 0
		opts.URUniqueID = ""
		cfg.SetOptions(opts)
	}
	if opts.URAccepted >= usageReportVersion {
		if opts.URUniqueID == "" {
			// Previously the ID was generated from the node ID. We now need
			// to generate a new one.
			opts.URUniqueID = randomString(8)
			cfg.SetOptions(opts)
			cfg.Save()
		}
	}

	// The usageReportingManager registers itself to listen to configuration
	// changes, and there's nothing more we need to tell it from the outside.
	// Hence we don't keep the returned pointer.
	newUsageReportingManager(m, cfg)

	if opts.RestartOnWakeup {
		go standbyMonitor()
	}

	if opts.AutoUpgradeIntervalH > 0 {
		if noUpgrade {
			l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
		} else if IsRelease {
			go autoUpgrade()
		} else {
			l.Infof("No automatic upgrades; %s is not a release version.", Version)
		}
	}

	events.Default.Log(events.StartupComplete, map[string]string{
		"myID": myID.String(),
	})
	go generatePingEvents()

	cleanConfigDirectory()

	code := <-stop

	mainSvc.Stop()
	l.Okln("Exiting")

	os.Exit(code)
}
func main() {
	flag.Parse()

	if enableBufferPool {
		bpool = util.NewBufferPool(opt.DefaultBlockSize + 128)
	}

	log.Printf("Test DB stored at %q", dbPath)
	if httpProf != "" {
		log.Printf("HTTP pprof listening at %q", httpProf)
		runtime.SetBlockProfileRate(1)
		go func() {
			if err := http.ListenAndServe(httpProf, nil); err != nil {
				log.Fatalf("HTTPPROF: %v", err)
			}
		}()
	}

	runtime.GOMAXPROCS(runtime.NumCPU())

	os.RemoveAll(dbPath)
	stor, err := storage.OpenFile(dbPath, false)
	if err != nil {
		log.Fatal(err)
	}
	tstor := &testingStorage{stor}
	defer tstor.Close()

	fatalf := func(err error, format string, v ...interface{}) {
		atomic.StoreUint32(&fail, 1)
		atomic.StoreUint32(&done, 1)
		log.Printf("FATAL: "+format, v...)
		if err != nil && errors.IsCorrupted(err) {
			cerr := err.(*errors.ErrCorrupted)
			if !cerr.Fd.Zero() && cerr.Fd.Type == storage.TypeTable {
				log.Print("FATAL: corruption detected, scanning...")
				if !tstor.scanTable(storage.FileDesc{Type: storage.TypeTable, Num: cerr.Fd.Num}, false) {
					log.Printf("FATAL: unable to find corrupted key/value pair in table %v", cerr.Fd)
				}
			}
		}
		runtime.Goexit()
	}

	if openFilesCacheCapacity == 0 {
		openFilesCacheCapacity = -1
	}
	o := &opt.Options{
		OpenFilesCacheCapacity: openFilesCacheCapacity,
		DisableBufferPool:      !enableBufferPool,
		DisableBlockCache:      !enableBlockCache,
		ErrorIfExist:           true,
		Compression:            opt.NoCompression,
	}
	if enableCompression {
		o.Compression = opt.DefaultCompression
	}

	db, err := leveldb.Open(tstor, o)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var (
		mu               = &sync.Mutex{}
		gGetStat         = &latencyStats{}
		gIterStat        = &latencyStats{}
		gWriteStat       = &latencyStats{}
		gTransactionStat = &latencyStats{}

		startTime = time.Now()

		writeReq    = make(chan *leveldb.Batch)
		writeAck    = make(chan error)
		writeAckAck = make(chan struct{})
	)

	go func() {
		for b := range writeReq {
			var err error
			if mrand.Float64() < transactionProb {
				log.Print("> Write using transaction")
				gTransactionStat.start()
				var tr *leveldb.Transaction
				if tr, err = db.OpenTransaction(); err == nil {
					if err = tr.Write(b, nil); err == nil {
						if err = tr.Commit(); err == nil {
							gTransactionStat.record(b.Len())
						}
					} else {
						tr.Discard()
					}
				}
			} else {
				gWriteStat.start()
				if err = db.Write(b, nil); err == nil {
					gWriteStat.record(b.Len())
				}
			}
			writeAck <- err
			<-writeAckAck
		}
	}()

	go func() {
		for {
			time.Sleep(3 * time.Second)

			log.Print("------------------------")

			log.Printf("> Elapsed=%v", time.Since(startTime))
			mu.Lock()
			log.Printf("> GetLatencyMin=%v GetLatencyMax=%v GetLatencyAvg=%v GetRatePerSec=%d",
				gGetStat.min, gGetStat.max, gGetStat.avg(), gGetStat.ratePerSec())
			log.Printf("> IterLatencyMin=%v IterLatencyMax=%v IterLatencyAvg=%v IterRatePerSec=%d",
				gIterStat.min, gIterStat.max, gIterStat.avg(), gIterStat.ratePerSec())
			log.Printf("> WriteLatencyMin=%v WriteLatencyMax=%v WriteLatencyAvg=%v WriteRatePerSec=%d",
				gWriteStat.min, gWriteStat.max, gWriteStat.avg(), gWriteStat.ratePerSec())
			log.Printf("> TransactionLatencyMin=%v TransactionLatencyMax=%v TransactionLatencyAvg=%v TransactionRatePerSec=%d",
				gTransactionStat.min, gTransactionStat.max, gTransactionStat.avg(), gTransactionStat.ratePerSec())
			mu.Unlock()

			cachedblock, _ := db.GetProperty("leveldb.cachedblock")
			openedtables, _ := db.GetProperty("leveldb.openedtables")
			alivesnaps, _ := db.GetProperty("leveldb.alivesnaps")
			aliveiters, _ := db.GetProperty("leveldb.aliveiters")
			blockpool, _ := db.GetProperty("leveldb.blockpool")
			log.Printf("> BlockCache=%s OpenedTables=%s AliveSnaps=%s AliveIter=%s BlockPool=%q",
				cachedblock, openedtables, alivesnaps, aliveiters, blockpool)
log.Print("------------------------") } }() for ns, numKey := range numKeys { func(ns, numKey int) { log.Printf("[%02d] STARTING: numKey=%d", ns, numKey) keys := make([][]byte, numKey) for i := range keys { keys[i] = randomData(nil, byte(ns), 1, uint32(i), keyLen) } wg.Add(1) go func() { var wi uint32 defer func() { log.Printf("[%02d] WRITER DONE #%d", ns, wi) wg.Done() }() var ( b = new(leveldb.Batch) k2, v2 []byte nReader int32 ) for atomic.LoadUint32(&done) == 0 { log.Printf("[%02d] WRITER #%d", ns, wi) b.Reset() for _, k1 := range keys { k2 = randomData(k2, byte(ns), 2, wi, keyLen) v2 = randomData(v2, byte(ns), 3, wi, valueLen) b.Put(k2, v2) b.Put(k1, k2) } writeReq <- b if err := <-writeAck; err != nil { writeAckAck <- struct{}{} fatalf(err, "[%02d] WRITER #%d db.Write: %v", ns, wi, err) } snap, err := db.GetSnapshot() if err != nil { writeAckAck <- struct{}{} fatalf(err, "[%02d] WRITER #%d db.GetSnapshot: %v", ns, wi, err) } writeAckAck <- struct{}{} wg.Add(1) atomic.AddInt32(&nReader, 1) go func(snapwi uint32, snap *leveldb.Snapshot) { var ( ri int iterStat = &latencyStats{} getStat = &latencyStats{} ) defer func() { mu.Lock() gGetStat.add(getStat) gIterStat.add(iterStat) mu.Unlock() atomic.AddInt32(&nReader, -1) log.Printf("[%02d] READER #%d.%d DONE Snap=%v Alive=%d IterLatency=%v GetLatency=%v", ns, snapwi, ri, snap, atomic.LoadInt32(&nReader), iterStat.avg(), getStat.avg()) snap.Release() wg.Done() }() stopi := snapwi + 3 for (ri < 3 || atomic.LoadUint32(&wi) < stopi) && atomic.LoadUint32(&done) == 0 { var n int iter := snap.NewIterator(dataPrefixSlice(byte(ns), 1), nil) iterStat.start() for iter.Next() { k1 := iter.Key() k2 := iter.Value() iterStat.record(1) if dataNS(k2) != byte(ns) { fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key NS: want=%d got=%d", ns, snapwi, ri, n, ns, dataNS(k2)) } kwritei := dataI(k2) if kwritei != snapwi { fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key iter num: %d", ns, snapwi, ri, n, kwritei) } getStat.start() v2, err := snap.Get(k2, nil) if err != nil { fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2) } getStat.record(1) if checksum0, checksum1 := dataChecksum(v2); checksum0 != checksum1 { err := &errors.ErrCorrupted{Fd: storage.FileDesc{0xff, 0}, Err: fmt.Errorf("v2: %x: checksum mismatch: %v vs %v", v2, checksum0, checksum1)} fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2) } n++ iterStat.start() } iter.Release() if err := iter.Error(); err != nil { fatalf(err, "[%02d] READER #%d.%d K%d iter.Error: %v", ns, snapwi, ri, numKey, err) } if n != numKey { fatalf(nil, "[%02d] READER #%d.%d missing keys: want=%d got=%d", ns, snapwi, ri, numKey, n) } ri++ } }(wi, snap) atomic.AddUint32(&wi, 1) } }() delB := new(leveldb.Batch) wg.Add(1) go func() { var ( i int iterStat = &latencyStats{} ) defer func() { log.Printf("[%02d] SCANNER DONE #%d", ns, i) wg.Done() }() time.Sleep(2 * time.Second) for atomic.LoadUint32(&done) == 0 { var n int delB.Reset() iter := db.NewIterator(dataNsSlice(byte(ns)), nil) iterStat.start() for iter.Next() && atomic.LoadUint32(&done) == 0 { k := iter.Key() v := iter.Value() iterStat.record(1) for ci, x := range [...][]byte{k, v} { checksum0, checksum1 := dataChecksum(x) if checksum0 != checksum1 { if ci == 0 { fatalf(nil, "[%02d] SCANNER %d.%d invalid key checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v) } else { fatalf(nil, "[%02d] SCANNER %d.%d invalid value checksum: want %d, got 
%d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v) } } } if dataPrefix(k) == 2 || mrand.Int()%999 == 0 { delB.Delete(k) } n++ iterStat.start() } iter.Release() if err := iter.Error(); err != nil { fatalf(err, "[%02d] SCANNER #%d.%d iter.Error: %v", ns, i, n, err) } if n > 0 { log.Printf("[%02d] SCANNER #%d IterLatency=%v", ns, i, iterStat.avg()) } if delB.Len() > 0 && atomic.LoadUint32(&done) == 0 { t := time.Now() writeReq <- delB if err := <-writeAck; err != nil { writeAckAck <- struct{}{} fatalf(err, "[%02d] SCANNER #%d db.Write: %v", ns, i, err) } else { writeAckAck <- struct{}{} } log.Printf("[%02d] SCANNER #%d Deleted=%d Time=%v", ns, i, delB.Len(), time.Now().Sub(t)) } i++ } }() }(ns, numKey) } go func() { sig := make(chan os.Signal) signal.Notify(sig, os.Interrupt, os.Kill) log.Printf("Got signal: %v, exiting...", <-sig) atomic.StoreUint32(&done, 1) }() wg.Wait() }
func (ts *testingStorage) scanTable(fd storage.FileDesc, checksum bool) (corrupted bool) {
	r, err := ts.Open(fd)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	size, err := r.Seek(0, os.SEEK_END)
	if err != nil {
		log.Fatal(err)
	}

	o := &opt.Options{
		DisableLargeBatchTransaction: true,
		Strict:                       opt.NoStrict,
	}
	if checksum {
		o.Strict = opt.StrictBlockChecksum | opt.StrictReader
	}

	tr, err := table.NewReader(r, size, fd, nil, bpool, o)
	if err != nil {
		log.Fatal(err)
	}
	defer tr.Release()

	checkData := func(i int, t string, data []byte) bool {
		if len(data) == 0 {
			panic(fmt.Sprintf("[%v] nil data: i=%d t=%s", fd, i, t))
		}

		checksum0, checksum1 := dataChecksum(data)
		if checksum0 != checksum1 {
			atomic.StoreUint32(&fail, 1)
			atomic.StoreUint32(&done, 1)
			corrupted = true

			data0, data1 := dataSplit(data)
			data0c0, data0c1 := dataChecksum(data0)
			data1c0, data1c1 := dataChecksum(data1)
			log.Printf("FATAL: [%v] Corrupted data i=%d t=%s (%#x != %#x): %x(%v) vs %x(%v)",
				fd, i, t, checksum0, checksum1, data0, data0c0 == data0c1, data1, data1c0 == data1c1)
			return true
		}
		return false
	}

	iter := tr.NewIterator(nil, nil)
	defer iter.Release()
	for i := 0; iter.Next(); i++ {
		ukey, _, kt, kerr := parseIkey(iter.Key())
		if kerr != nil {
			atomic.StoreUint32(&fail, 1)
			atomic.StoreUint32(&done, 1)
			corrupted = true

			log.Printf("FATAL: [%v] Corrupted ikey i=%d: %v", fd, i, kerr)
			return
		}
		if checkData(i, "key", ukey) {
			return
		}
		if kt == ktVal && checkData(i, "value", iter.Value()) {
			return
		}
	}
	if err := iter.Error(); err != nil {
		if errors.IsCorrupted(err) {
			atomic.StoreUint32(&fail, 1)
			atomic.StoreUint32(&done, 1)
			corrupted = true

			log.Printf("FATAL: [%v] Corruption detected: %v", fd, err)
		} else {
			log.Fatal(err)
		}
	}

	return
}
// NewReader creates a new initialized table reader for the file.
// The cache and bpool are optional and can be nil.
//
// The returned table reader instance is goroutine-safe.
func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
	if f == nil {
		return nil, errors.New("leveldb/table: nil file")
	}

	r := &Reader{
		fd:             fd,
		reader:         f,
		cache:          cache,
		bpool:          bpool,
		o:              o,
		cmp:            o.GetComparer(),
		verifyChecksum: o.GetStrict(opt.StrictBlockChecksum),
	}

	if size < footerLen {
		r.err = r.newErrCorrupted(0, size, "table", "too small")
		return r, nil
	}

	footerPos := size - footerLen
	var footer [footerLen]byte
	if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF {
		return nil, err
	}
	if string(footer[footerLen-len(magic):footerLen]) != magic {
		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number")
		return r, nil
	}

	var n int
	// Decode the metaindex block handle.
	r.metaBH, n = decodeBlockHandle(footer[:])
	if n == 0 {
		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle")
		return r, nil
	}

	// Decode the index block handle.
	r.indexBH, n = decodeBlockHandle(footer[n:])
	if n == 0 {
		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle")
		return r, nil
	}

	// Read metaindex block.
	metaBlock, err := r.readBlock(r.metaBH, true)
	if err != nil {
		if errors.IsCorrupted(err) {
			r.err = err
			return r, nil
		}
		return nil, err
	}

	// Set data end.
	r.dataEnd = int64(r.metaBH.offset)

	// Read metaindex.
	metaIter := r.newBlockIter(metaBlock, nil, nil, true)
	for metaIter.Next() {
		key := string(metaIter.Key())
		if !strings.HasPrefix(key, "filter.") {
			continue
		}
		fn := key[7:]
		if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
			r.filter = f0
		} else {
			for _, f0 := range o.GetAltFilters() {
				if f0.Name() == fn {
					r.filter = f0
					break
				}
			}
		}
		if r.filter != nil {
			filterBH, n := decodeBlockHandle(metaIter.Value())
			if n == 0 {
				continue
			}
			r.filterBH = filterBH
			// Update data end.
			r.dataEnd = int64(filterBH.offset)
			break
		}
	}
	metaIter.Release()
	metaBlock.Release()

	// Cache index and filter block locally, since we don't have global cache.
	if cache == nil {
		r.indexBlock, err = r.readBlock(r.indexBH, true)
		if err != nil {
			if errors.IsCorrupted(err) {
				r.err = err
				return r, nil
			}
			return nil, err
		}
		if r.filter != nil {
			r.filterBlock, err = r.readFilterBlock(r.filterBH)
			if err != nil {
				if !errors.IsCorrupted(err) {
					return nil, err
				}
				// Don't use the filter then.
				r.filter = nil
			}
		}
	}

	return r, nil
}
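A sketch of how NewReader is typically fed: an io.ReaderAt plus its size, a FileDesc identifying the table, and nil for the optional cache and buffer pool. The helper name openTable is hypothetical, and the code assumes it sits in the same package as NewReader:

// openTable opens a table file from disk and wraps it in a Reader.
func openTable(path string, num int64, o *opt.Options) (*Reader, error) {
	f, err := os.Open(path) // *os.File implements io.ReaderAt
	if err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}
	fd := storage.FileDesc{Type: storage.TypeTable, Num: num}
	// Note: corruption found while parsing the footer is recorded in r.err
	// rather than returned, so a non-nil *Reader may still be unusable.
	return NewReader(f, st.Size(), fd, nil, nil, o)
}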
func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) {
	r.mu.RLock()
	defer r.mu.RUnlock()

	if r.err != nil {
		err = r.err
		return
	}

	indexBlock, rel, err := r.getIndexBlock(true)
	if err != nil {
		return
	}
	defer rel.Release()

	index := r.newBlockIter(indexBlock, nil, nil, true)
	defer index.Release()

	if !index.Seek(key) {
		err = index.Error()
		if err == nil {
			err = ErrNotFound
		}
		return
	}

	dataBH, n := decodeBlockHandle(index.Value())
	if n == 0 {
		r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
		return
	}

	if filtered && r.filter != nil {
		filterBlock, frel, ferr := r.getFilterBlock(true)
		if ferr == nil {
			if !filterBlock.contains(r.filter, dataBH.offset, key) {
				frel.Release()
				return nil, nil, ErrNotFound
			}
			frel.Release()
		} else if !errors.IsCorrupted(ferr) {
			err = ferr
			return
		}
	}

	data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache())
	defer data.Release()
	if !data.Seek(key) {
		err = data.Error()
		if err == nil {
			err = ErrNotFound
		}
		return
	}

	// The key doesn't use the block buffer, so there is no need to copy it.
	rkey = data.Key()
	if !noValue {
		if r.bpool == nil {
			value = data.Value()
		} else {
			// The value uses the block buffer, and since that buffer will be
			// recycled, it needs to be copied.
			value = append([]byte{}, data.Value()...)
		}
	}
	return
}
func main() {
	defer common.LogPanic()

	var err error

	// Setup flags.
	dbConf := buildbot.DBConfigFromFlags()

	// Global init to initialize glog and parse arguments.
	common.InitWithMetrics("internal", graphiteServer)

	if !*local {
		*targetList = metadata.Must(metadata.ProjectGet("datahopper_internal_targets"))
	}
	targets := strings.Split(*targetList, " ")
	glog.Infof("Targets: %#v", targets)

	codenameDB, err = leveldb.OpenFile(*codenameDbDir, nil)
	if err != nil && errors.IsCorrupted(err) {
		codenameDB, err = leveldb.RecoverFile(*codenameDbDir, nil)
	}
	if err != nil {
		glog.Fatalf("Failed to open codename leveldb at %s: %s", *codenameDbDir, err)
	}

	// Initialize the buildbot database.
	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	if err := dbConf.InitDB(); err != nil {
		glog.Fatal(err)
	}

	var cookieSalt = "notverysecret"
	var clientID = "31977622648-1873k0c1e5edaka4adpv1ppvhr5id3qm.apps.googleusercontent.com"
	var clientSecret = "cw0IosPu4yjaG2KWmppj2guj"
	var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		redirectURL = "https://internal.skia.org/oauth2callback/"
	}
	login.Init(clientID, clientSecret, redirectURL, cookieSalt, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST, *local)

	// Ingest Android framework builds.
	go func() {
		glog.Infof("Starting.")
		repos := gitinfo.NewRepoMap(*workdir)

		// In this case we don't want a backoff transport since the Apiary backend
		// seems to fail a lot, so we basically want to fall back to polling if a
		// call fails.
		client, err := auth.NewClientWithTransport(*local, *oauthCacheFile, "", &http.Transport{Dial: util.DialTimeout}, androidbuildinternal.AndroidbuildInternalScope, storage.CloudPlatformScope)
		if err != nil {
			glog.Fatalf("Unable to create installed app oauth client: %s", err)
		}

		buildService, err := androidbuildinternal.New(client)
		if err != nil {
			glog.Fatalf("Failed to obtain Android build service: %v", err)
		}
		glog.Infof("Ready to start loop.")
		step(targets, buildService, repos)
		for range time.Tick(*period) {
			step(targets, buildService, repos)
		}
	}()

	r := mux.NewRouter()
	r.HandleFunc("/", indexHandler)
	r.HandleFunc("/builders/{codename}/builds/{buildNumber}", redirectHandler)
	r.HandleFunc("/builders/{codename}", builderRedirectHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Fatal(http.ListenAndServe(*port, nil))
}
func recoverTable(s *session, o *opt.Options) error {
	o = dupOptions(o)
	// Mask StrictReader; let StrictRecovery do its job.
	o.Strict &= ^opt.StrictReader

	// Get all tables and sort them by file number.
	tableFiles_, err := s.getFiles(storage.TypeTable)
	if err != nil {
		return err
	}
	tableFiles := files(tableFiles_)
	tableFiles.sort()

	var (
		maxSeq                                                            uint64
		recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int

		// We will drop corrupted table.
		strict = o.GetStrict(opt.StrictRecovery)

		rec   = &sessionRecord{numLevel: o.GetNumLevel()}
		bpool = util.NewBufferPool(o.GetBlockSize() + 5)
	)
	buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
		tmp = s.newTemp()
		writer, err := tmp.Create()
		if err != nil {
			return
		}
		defer func() {
			writer.Close()
			if err != nil {
				tmp.Remove()
				tmp = nil
			}
		}()

		// Copy entries.
		tw := table.NewWriter(writer, o)
		for iter.Next() {
			key := iter.Key()
			if validIkey(key) {
				err = tw.Append(key, iter.Value())
				if err != nil {
					return
				}
			}
		}
		err = iter.Error()
		if err != nil {
			return
		}
		err = tw.Close()
		if err != nil {
			return
		}
		err = writer.Sync()
		if err != nil {
			return
		}
		size = int64(tw.BytesLen())
		return
	}
	recoverTable := func(file storage.File) error {
		s.logf("table@recovery recovering @%d", file.Num())
		reader, err := file.Open()
		if err != nil {
			return err
		}
		var closed bool
		defer func() {
			if !closed {
				reader.Close()
			}
		}()

		// Get file size.
		size, err := reader.Seek(0, 2)
		if err != nil {
			return err
		}

		var (
			tSeq                                     uint64
			tgoodKey, tcorruptedKey, tcorruptedBlock int
			imin, imax                               []byte
		)
		tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o)
		if err != nil {
			return err
		}
		iter := tr.NewIterator(nil, nil)
		iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
			if errors.IsCorrupted(err) {
				s.logf("table@recovery block corruption @%d %q", file.Num(), err)
				tcorruptedBlock++
			}
		})

		// Scan the table.
		for iter.Next() {
			key := iter.Key()
			_, seq, _, kerr := parseIkey(key)
			if kerr != nil {
				tcorruptedKey++
				continue
			}
			tgoodKey++
			if seq > tSeq {
				tSeq = seq
			}
			if imin == nil {
				imin = append([]byte{}, key...)
			}
			imax = append(imax[:0], key...)
		}
		if err := iter.Error(); err != nil {
			iter.Release()
			return err
		}
		iter.Release()

		goodKey += tgoodKey
		corruptedKey += tcorruptedKey
		corruptedBlock += tcorruptedBlock

		if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
			droppedTable++
			s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
			return nil
		}

		if tgoodKey > 0 {
			if tcorruptedKey > 0 || tcorruptedBlock > 0 {
				// Rebuild the table.
				s.logf("table@recovery rebuilding @%d", file.Num())
				iter := tr.NewIterator(nil, nil)
				tmp, newSize, err := buildTable(iter)
				iter.Release()
				if err != nil {
					return err
				}
				closed = true
				reader.Close()
				if err := file.Replace(tmp); err != nil {
					return err
				}
				size = newSize
			}
			if tSeq > maxSeq {
				maxSeq = tSeq
			}
			recoveredKey += tgoodKey
			// Add table to level 0.
			rec.addTable(0, file.Num(), uint64(size), imin, imax)
			s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
		} else {
			droppedTable++
			s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size)
		}

		return nil
	}

	// Recover all tables.
	if len(tableFiles) > 0 {
		s.logf("table@recovery F·%d", len(tableFiles))

		// Mark file number as used.
		s.markFileNum(tableFiles[len(tableFiles)-1].Num())

		for _, file := range tableFiles {
			if err := recoverTable(file); err != nil {
				return err
			}
		}

		s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq)
	}

	// Set sequence number.
	rec.setSeqNum(maxSeq)

	// Create new manifest.
	if err := s.create(); err != nil {
		return err
	}

	// Commit.
	return s.commit(rec)
}
func (db *DB) recoverJournalRO() error {
	// Get all journals and sort them by file number.
	rawFds, err := db.s.stor.List(storage.TypeJournal)
	if err != nil {
		return err
	}
	sortFds(rawFds)

	// Journals that will be recovered.
	var fds []storage.FileDesc
	for _, fd := range rawFds {
		if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
			fds = append(fds, fd)
		}
	}

	var (
		// Options.
		strict      = db.s.o.GetStrict(opt.StrictJournal)
		checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
		writeBuffer = db.s.o.GetWriteBuffer()

		mdb = memdb.New(db.s.icmp, writeBuffer)
	)

	// Recover journals.
	if len(fds) > 0 {
		db.logf("journal@recovery RO·Mode F·%d", len(fds))

		var (
			jr    *journal.Reader
			buf   = &util.Buffer{}
			batch = &Batch{}
		)

		for _, fd := range fds {
			db.logf("journal@recovery recovering @%d", fd.Num)

			fr, err := db.s.stor.Open(fd)
			if err != nil {
				return err
			}

			// Create or reset journal reader instance.
			if jr == nil {
				jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
			} else {
				jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
			}

			// Replay journal to memdb.
			for {
				r, err := jr.Next()
				if err != nil {
					if err == io.EOF {
						break
					}

					fr.Close()
					return errors.SetFd(err, fd)
				}

				buf.Reset()
				if _, err := buf.ReadFrom(r); err != nil {
					if err == io.ErrUnexpectedEOF {
						// This error is returned due to corruption, with strict == false.
						continue
					}

					fr.Close()
					return errors.SetFd(err, fd)
				}
				if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
					if !strict && errors.IsCorrupted(err) {
						db.s.logf("journal error: %v (skipped)", err)
						// We won't apply sequence number as it might be corrupted.
						continue
					}

					fr.Close()
					return errors.SetFd(err, fd)
				}

				// Save sequence number.
				db.seq = batch.seq + uint64(batch.Len())
			}

			fr.Close()
		}
	}

	// Set memDB.
	db.mem = &memDB{db: db, DB: mdb, ref: 1}

	return nil
}
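The journal, table, and manifest recovery paths above are all driven by the opt.Strict bit set: recoverJournalRO skips corrupted chunks only when StrictJournal is unset, while recoverTable drops damaged tables when StrictRecovery is set. A sketch of how a caller composes those bits when opening a database; the function name and flag combination are illustrative, not a recommendation:

// openStrictly opens a leveldb with relaxed journal handling (corrupted
// journal chunks are skipped, since StrictJournal is unset) but strict
// recovery (tables with corrupted keys or blocks are dropped rather than
// rebuilt; see recoverTable above).
func openStrictly(dir string) (*leveldb.DB, error) {
	o := &opt.Options{
		Strict: opt.StrictRecovery | opt.StrictBlockChecksum,
	}
	return leveldb.OpenFile(dir, o)
}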
func (db *DB) recoverJournal() error {
	// Get all journals and sort them by file number.
	rawFds, err := db.s.stor.List(storage.TypeJournal)
	if err != nil {
		return err
	}
	sortFds(rawFds)

	// Journals that will be recovered.
	var fds []storage.FileDesc
	for _, fd := range rawFds {
		if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
			fds = append(fds, fd)
		}
	}

	var (
		ofd storage.FileDesc // Obsolete file.
		rec = &sessionRecord{}
	)

	// Recover journals.
	if len(fds) > 0 {
		db.logf("journal@recovery F·%d", len(fds))

		// Mark file number as used.
		db.s.markFileNum(fds[len(fds)-1].Num)

		var (
			// Options.
			strict      = db.s.o.GetStrict(opt.StrictJournal)
			checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
			writeBuffer = db.s.o.GetWriteBuffer()

			jr    *journal.Reader
			mdb   = memdb.New(db.s.icmp, writeBuffer)
			buf   = &util.Buffer{}
			batch = &Batch{}
		)

		for _, fd := range fds {
			db.logf("journal@recovery recovering @%d", fd.Num)

			fr, err := db.s.stor.Open(fd)
			if err != nil {
				return err
			}

			// Create or reset journal reader instance.
			if jr == nil {
				jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
			} else {
				jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
			}

			// Flush memdb and remove obsolete journal file.
			if !ofd.Nil() {
				if mdb.Len() > 0 {
					if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
						fr.Close()
						return err
					}
				}

				rec.setJournalNum(fd.Num)
				rec.setSeqNum(db.seq)
				if err := db.s.commit(rec); err != nil {
					fr.Close()
					return err
				}
				rec.resetAddedTables()

				db.s.stor.Remove(ofd)
				ofd = storage.FileDesc{}
			}

			// Replay journal to memdb.
			mdb.Reset()
			for {
				r, err := jr.Next()
				if err != nil {
					if err == io.EOF {
						break
					}

					fr.Close()
					return errors.SetFd(err, fd)
				}

				buf.Reset()
				if _, err := buf.ReadFrom(r); err != nil {
					if err == io.ErrUnexpectedEOF {
						// This error is returned due to corruption, with strict == false.
						continue
					}

					fr.Close()
					return errors.SetFd(err, fd)
				}
				if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
					if !strict && errors.IsCorrupted(err) {
						db.s.logf("journal error: %v (skipped)", err)
						// We won't apply sequence number as it might be corrupted.
						continue
					}

					fr.Close()
					return errors.SetFd(err, fd)
				}

				// Save sequence number.
				db.seq = batch.seq + uint64(batch.Len())

				// Flush it if large enough.
				if mdb.Size() >= writeBuffer {
					if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
						fr.Close()
						return err
					}

					mdb.Reset()
				}
			}

			fr.Close()
			ofd = fd
		}

		// Flush the last memdb.
		if mdb.Len() > 0 {
			if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal.
	if _, err := db.newMem(0); err != nil {
		return err
	}

	// Commit.
	rec.setJournalNum(db.journalFd.Num)
	rec.setSeqNum(db.seq)
	if err := db.s.commit(rec); err != nil {
		// Close journal on error.
		if db.journal != nil {
			db.journal.Close()
			db.journalWriter.Close()
		}
		return err
	}

	// Remove the last obsolete journal file.
	if !ofd.Nil() {
		db.s.stor.Remove(ofd)
	}

	return nil
}
// Recover a database session; need external synchronization.
func (s *session) recover() (err error) {
	defer func() {
		if os.IsNotExist(err) {
			// Don't return os.ErrNotExist if the underlying storage contains
			// other files that belong to LevelDB. So the DB won't get trashed.
			if fds, _ := s.stor.List(storage.TypeAll); len(fds) > 0 {
				err = &errors.ErrCorrupted{Fd: storage.FileDesc{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
			}
		}
	}()

	fd, err := s.stor.GetMeta()
	if err != nil {
		return
	}

	reader, err := s.stor.Open(fd)
	if err != nil {
		return
	}
	defer reader.Close()

	var (
		// Options.
		strict = s.o.GetStrict(opt.StrictManifest)

		jr      = journal.NewReader(reader, dropper{s, fd}, strict, true)
		rec     = &sessionRecord{}
		staging = s.stVersion.newStaging()
	)
	for {
		var r io.Reader
		r, err = jr.Next()
		if err != nil {
			if err == io.EOF {
				err = nil
				break
			}
			return errors.SetFd(err, fd)
		}

		err = rec.decode(r)
		if err == nil {
			// Save compact pointers.
			for _, r := range rec.compPtrs {
				s.setCompPtr(r.level, internalKey(r.ikey))
			}
			// Commit record to version staging.
			staging.commit(rec)
		} else {
			err = errors.SetFd(err, fd)
			if strict || !errors.IsCorrupted(err) {
				return
			}
			s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd))
		}
		rec.resetCompPtrs()
		rec.resetAddedTables()
		rec.resetDeletedTables()
	}

	switch {
	case !rec.has(recComparer):
		return newErrManifestCorrupted(fd, "comparer", "missing")
	case rec.comparer != s.icmp.uName():
		return newErrManifestCorrupted(fd, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
	case !rec.has(recNextFileNum):
		return newErrManifestCorrupted(fd, "next-file-num", "missing")
	case !rec.has(recJournalNum):
		return newErrManifestCorrupted(fd, "journal-file-num", "missing")
	case !rec.has(recSeqNum):
		return newErrManifestCorrupted(fd, "seq-num", "missing")
	}

	s.manifestFd = fd
	s.setVersion(staging.finish())
	s.setNextFileNum(rec.nextFileNum)
	s.recordCommited(rec)
	return nil
}
func (db *DB) recoverJournal() error {
	// Get all journals and sort them by file number.
	journalFiles_, err := db.s.getFiles(storage.TypeJournal)
	if err != nil {
		return err
	}
	journalFiles := files(journalFiles_)
	journalFiles.sort()

	// Discard older journal.
	prev := -1
	for i, file := range journalFiles {
		if file.Num() >= db.s.stJournalNum {
			if prev >= 0 {
				i--
				journalFiles[i] = journalFiles[prev]
			}
			journalFiles = journalFiles[i:]
			break
		} else if file.Num() == db.s.stPrevJournalNum {
			prev = i
		}
	}

	var jr *journal.Reader
	var of storage.File
	var mem *memdb.DB
	batch := new(Batch)
	cm := newCMem(db.s)
	buf := new(util.Buffer)
	// Options.
	strict := db.s.o.GetStrict(opt.StrictJournal)
	checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
	writeBuffer := db.s.o.GetWriteBuffer()
	recoverJournal := func(file storage.File) error {
		db.logf("journal@recovery recovering @%d", file.Num())
		reader, err := file.Open()
		if err != nil {
			return err
		}
		defer reader.Close()

		// Create/reset journal reader instance.
		if jr == nil {
			jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
		} else {
			jr.Reset(reader, dropper{db.s, file}, strict, checksum)
		}

		// Flush memdb and remove obsolete journal file.
		if of != nil {
			if mem.Len() > 0 {
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
			}
			if err := cm.commit(file.Num(), db.seq); err != nil {
				return err
			}
			cm.reset()
			of.Remove()
			of = nil
		}

		// Replay journal to memdb.
		mem.Reset()
		for {
			r, err := jr.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				return errors.SetFile(err, file)
			}

			buf.Reset()
			if _, err := buf.ReadFrom(r); err != nil {
				if err == io.ErrUnexpectedEOF {
					// This error is returned due to corruption, with strict == false.
					continue
				}
				return errors.SetFile(err, file)
			}
			if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil {
				if strict || !errors.IsCorrupted(err) {
					return errors.SetFile(err, file)
				}
				db.s.logf("journal error: %v (skipped)", err)
				// We won't apply sequence number as it might be corrupted.
				continue
			}

			// Save sequence number.
			db.seq = batch.seq + uint64(batch.Len())

			// Flush it if large enough.
			if mem.Size() >= writeBuffer {
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
				mem.Reset()
			}
		}

		of = file
		return nil
	}

	// Recover all journals.
	if len(journalFiles) > 0 {
		db.logf("journal@recovery F·%d", len(journalFiles))

		// Mark file number as used.
		db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())

		mem = memdb.New(db.s.icmp, writeBuffer)
		for _, file := range journalFiles {
			if err := recoverJournal(file); err != nil {
				return err
			}
		}

		// Flush the last journal.
		if mem.Len() > 0 {
			if err := cm.flush(mem, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal.
	if _, err := db.newMem(0); err != nil {
		return err
	}

	// Commit.
	if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
		// Close journal.
		if db.journal != nil {
			db.journal.Close()
			db.journalWriter.Close()
		}
		return err
	}

	// Remove the last obsolete journal file.
	if of != nil {
		of.Remove()
	}

	return nil
}
func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
	defer func() {
		if x := recover(); x != nil {
			if x == errCompactionTransactExiting {
				if err := t.revert(); err != nil {
					db.logf("%s revert error %q", name, err)
				}
			}
			panic(x)
		}
	}()

	const (
		backoffMin = 1 * time.Second
		backoffMax = 8 * time.Second
		// The multiplier must be unitless: multiplying a Duration by a
		// Duration would square the unit instead of doubling the delay.
		backoffMul = 2
	)
	var (
		backoff  = backoffMin
		backoffT = time.NewTimer(backoff)
		lastCnt  = compactionTransactCounter(0)

		disableBackoff = db.s.o.GetDisableCompactionBackoff()
	)
	for n := 0; ; n++ {
		// Check whether the DB is closed.
		if db.isClosed() {
			db.logf("%s exiting", name)
			db.compactionExitTransact()
		} else if n > 0 {
			db.logf("%s retrying N·%d", name, n)
		}

		// Execute.
		cnt := compactionTransactCounter(0)
		err := t.run(&cnt)
		if err != nil {
			db.logf("%s error I·%d %q", name, cnt, err)
		}

		// Set compaction error status.
		select {
		case db.compErrSetC <- err:
		case perr := <-db.compPerErrC:
			if err != nil {
				db.logf("%s exiting (persistent error %q)", name, perr)
				db.compactionExitTransact()
			}
		case <-db.closeC:
			db.logf("%s exiting", name)
			db.compactionExitTransact()
		}
		if err == nil {
			return
		}
		if errors.IsCorrupted(err) {
			db.logf("%s exiting (corruption detected)", name)
			db.compactionExitTransact()
		}

		if !disableBackoff {
			// Reset backoff duration if counter is advancing.
			if cnt > lastCnt {
				backoff = backoffMin
				lastCnt = cnt
			}

			// Backoff.
			backoffT.Reset(backoff)
			if backoff < backoffMax {
				backoff *= backoffMul
				if backoff > backoffMax {
					backoff = backoffMax
				}
			}
			select {
			case <-backoffT.C:
			case <-db.closeC:
				db.logf("%s exiting", name)
				db.compactionExitTransact()
			}
		}
	}
}
func main() {
	defer common.LogPanic()

	var err error

	// Global init to initialize glog and parse arguments.
	common.InitWithMetrics("internal", graphiteServer)

	if !*local {
		*targetList = metadata.Must(metadata.ProjectGet("datahopper_internal_targets"))
	}
	targets := strings.Split(*targetList, " ")
	glog.Infof("Targets: %#v", targets)

	codenameDB, err = leveldb.OpenFile(*codenameDbDir, nil)
	if err != nil && errors.IsCorrupted(err) {
		codenameDB, err = leveldb.RecoverFile(*codenameDbDir, nil)
	}
	if err != nil {
		glog.Fatalf("Failed to open codename leveldb at %s: %s", *codenameDbDir, err)
	}

	// Initialize the buildbot database.
	db, err = buildbot.NewRemoteDB(*buildbotDbHost)
	if err != nil {
		glog.Fatal(err)
	}

	var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		redirectURL = "https://internal.skia.org/oauth2callback/"
	}
	if err := login.InitFromMetadataOrJSON(redirectURL, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST); err != nil {
		glog.Fatalf("Failed to initialize login system: %s", err)
	}

	if *local {
		webhook.InitRequestSaltForTesting()
	} else {
		webhook.MustInitRequestSaltFromMetadata()
	}

	repos = gitinfo.NewRepoMap(*workdir)

	// Ingest Android framework builds.
	go func() {
		glog.Infof("Starting.")

		// In this case we don't want a backoff transport since the Apiary backend
		// seems to fail a lot, so we basically want to fall back to polling if a
		// call fails.
		client, err := auth.NewJWTServiceAccountClient("", "", &http.Transport{Dial: util.DialTimeout}, androidbuildinternal.AndroidbuildInternalScope, storage.CloudPlatformScope)
		if err != nil {
			glog.Fatalf("Unable to create authenticated client: %s", err)
		}

		buildService, err := androidbuildinternal.New(client)
		if err != nil {
			glog.Fatalf("Failed to obtain Android build service: %v", err)
		}
		glog.Infof("Ready to start loop.")
		step(targets, buildService)
		for range time.Tick(*period) {
			step(targets, buildService)
		}
	}()

	r := mux.NewRouter()
	r.HandleFunc("/", indexHandler)
	r.HandleFunc("/builders/{codename}/builds/{buildNumber}", redirectHandler)
	r.HandleFunc("/builders/{codename}", builderRedirectHandler)
	r.HandleFunc("/ingestBuild", ingestBuildHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Fatal(http.ListenAndServe(*port, nil))
}