// main dumps one or more seriesly databases concurrently: database
// names are fanned out over a channel to *concurrency dump workers.
func main() {
	flag.Parse()

	httputil.InitHTTPTracker(false)

	if flag.NArg() == 0 {
		log.Fatalf("Seriesly URL required")
	}

	s, err := serieslyclient.New(flag.Arg(0))
	maybeFatal(err, "Parsing %v: %v", flag.Arg(0), err)

	wg := &sync.WaitGroup{}
	ch := make(chan string)

	for i := 0; i < *concurrency; i++ {
		wg.Add(1)
		go dump(wg, s, ch)
	}

	if *dbName == "" {
		dbs, err := s.List()
		maybeFatal(err, "Error listing: %v", err)
		for _, db := range dbs {
			ch <- db
		}
	} else {
		ch <- *dbName
	}
	close(ch)

	wg.Wait()
}
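// maybeFatal is called above but not defined in this excerpt. A minimal
// sketch of what the dump tool's variant plausibly looks like, assuming
// it simply formats and exits on a non-nil error (hypothetical
// reconstruction, not the project's actual definition; note the loader
// below uses a one-argument variant of the same idea):
func maybeFatal(err error, fmtStr string, args ...interface{}) {
	if err != nil {
		log.Fatalf(fmtStr, args...)
	}
}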
// main restores a seriesly database from a JSON stream on stdin,
// sending each key/value pair to the database URL given on the command
// line, optionally skipping documents older than a minimum timestamp.
func main() {
	flag.Parse()

	if flag.NArg() < 1 {
		log.Fatalf("Usage: gzip -dc backup.gz | %v http://seriesly:3133/dbname",
			os.Args[0])
	}

	httputil.InitHTTPTracker(false)

	u := flag.Arg(0)
	setupDb(u)

	minTime := parseMinTime()

	t := time.Tick(5 * time.Second)
	i := 0
	var latestKey string
	d := json.NewDecoder(os.Stdin)
	for {
		kv := map[string]*json.RawMessage{}

		err := d.Decode(&kv)
		if err == io.EOF {
			log.Printf("Done! Processed %v items. Last was %v", i, latestKey)
			break
		}
		maybeFatal(err)

		for k, v := range kv {
			// Skip documents older than the requested minimum timestamp.
			if !minTime.IsZero() {
				thist, err := timelib.ParseTime(k)
				if err == nil && minTime.After(thist) {
					continue
				}
			}

			body := []byte(*v)
			sendOne(u, k, body)
			latestKey = k
		}
		i++

		// Periodic progress report, at most once per five-second tick.
		select {
		case <-t:
			log.Printf("Processed %v items, latest was %v", i, latestKey)
		default:
		}
	}
}
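// sendOne and setupDb are not shown here. A rough sketch of sendOne,
// assuming seriesly's POST-with-?ts= interface for storing a document
// under an explicit timestamp (hypothetical; error handling reduced to
// a fatal log, and it assumes bytes and net/url in the import set):
func sendOne(u, k string, body []byte) {
	res, err := http.Post(u+"?ts="+url.QueryEscape(k),
		"application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatalf("Error posting %v: %v", k, err)
	}
	defer res.Body.Close()
	if res.StatusCode >= 300 {
		log.Fatalf("HTTP error posting %v: %v", k, res.Status)
	}
}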
// downloadCommand fetches a tree of files from cbfs, de-duplicating by
// OID: each unique blob is pulled once and written to every destination
// path that references it.
func downloadCommand(u string, args []string) {
	src := dlFlags.Arg(0)
	destbase := dlFlags.Arg(1)
	if destbase == "" {
		destbase = filepath.Base(src)
	}

	for len(src) > 0 && src[0] == '/' {
		src = src[1:]
	}

	httputil.InitHTTPTracker(false)

	client, err := cbfsclient.New(u)
	cbfstool.MaybeFatal(err, "Can't build a client: %v", err)

	things, err := client.ListDepth(src, 4096)
	cbfstool.MaybeFatal(err, "Can't list things: %v", err)

	start := time.Now()
	oids := []string{}
	dests := map[string][]string{}
	for fn, inf := range things.Files {
		fn = fn[len(src):]
		dests[inf.OID] = append(dests[inf.OID], filepath.Join(destbase, fn))
		oids = append(oids, inf.OID)
	}

	err = client.Blobs(*totalConcurrency, *nodeConcurrency,
		func(oid string, r io.Reader) error {
			return saveDownload(dests[oid], oid, r)
		}, oids...)
	cbfstool.MaybeFatal(err, "Error getting blobs: %v", err)

	b := atomic.AddInt64(&totalBytes, 0) // atomic read of the byte counter
	d := time.Since(start)
	cbfstool.Verbose(*dlverbose, "Moved %s in %v (%s/s)",
		humanize.Bytes(uint64(b)), d,
		humanize.Bytes(uint64(float64(b)/d.Seconds())))
}
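// saveDownload (not shown) receives each blob exactly once and must fan
// it out to every destination path that references it. A hypothetical
// sketch using io.MultiWriter, assuming totalBytes is the package-level
// counter read atomically above:
func saveDownload(fns []string, oid string, r io.Reader) error {
	ws := []io.Writer{}
	for _, fn := range fns {
		if err := os.MkdirAll(filepath.Dir(fn), 0777); err != nil {
			return err
		}
		f, err := os.Create(fn)
		if err != nil {
			return err
		}
		defer f.Close()
		ws = append(ws, f)
	}
	n, err := io.Copy(io.MultiWriter(ws...), r)
	atomic.AddInt64(&totalBytes, n)
	return err
}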
// findCommand walks a cbfs tree and prints matching files and
// directories through a user-supplied template, similar to find(1).
func findCommand(u string, args []string) {
	if *findDashName != "" && *findDashIName != "" {
		log.Fatalf("Can't specify both -name and -iname")
	}

	src := findFlags.Arg(0)
	// Guard against an empty argument before trimming trailing slashes.
	for len(src) > 0 && src[len(src)-1] == '/' {
		src = src[:len(src)-1]
	}

	tmpl := cbfstool.GetTemplate(*findTemplate, *findTemplateFile,
		defaultFindTemplate)

	httputil.InitHTTPTracker(false)

	client, err := cbfsclient.New(u)
	cbfstool.MaybeFatal(err, "Can't build a client: %v", err)

	things, err := client.ListDepth(src, *findDashDepth)
	cbfstool.MaybeFatal(err, "Can't list things: %v", err)

	metaMatcher := findGetRefTimeMatch(time.Now())

	matcher := newDirAndFileMatcher()
	for fn, inf := range things.Files {
		if !metaMatcher(inf.Modified) {
			continue
		}
		if len(fn) > len(src)+1 {
			fn = fn[len(src)+1:] // strip the source prefix and its slash
		}

		for _, match := range matcher.matches(fn) {
			if err := tmpl.Execute(os.Stdout, struct {
				Name  string
				IsDir bool
				Meta  cbfsclient.FileMeta
			}{match.path, match.isDir, inf}); err != nil {
				log.Fatalf("Error executing template: %v", err)
			}
		}
	}
}
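// The matching logic itself lives in newDirAndFileMatcher (not shown).
// The core predicate it needs is a shell-style pattern match on the
// base name, case-folded for -iname. A minimal sketch of that test,
// under the assumption that -name/-iname follow find(1)'s glob
// semantics (hypothetical helper, not the project's actual code):
func nameMatches(fn string) bool {
	switch {
	case *findDashName != "":
		m, _ := filepath.Match(*findDashName, filepath.Base(fn))
		return m
	case *findDashIName != "":
		m, _ := filepath.Match(strings.ToLower(*findDashIName),
			strings.ToLower(filepath.Base(fn)))
		return m
	}
	return true // no pattern given: everything matches
}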
// main wires up the cbfs server: logging, config, hashing, couchbase,
// the storage root, background tasks, and finally the HTTP listener.
func main() {
	flag.Parse()

	rand.Seed(time.Now().UnixNano())

	initLogger(*useSyslog)
	initNodeListKeys()

	http.DefaultTransport = TimeoutTransport(*internodeTimeout)

	expvar.Publish("httpclients", httputil.InitHTTPTracker(false))

	if getHash() == nil {
		fmt.Fprintf(os.Stderr,
			"Unsupported hash specified: %v. Supported hashes:\n",
			globalConfig.Hash)
		for h := range hashBuilders {
			fmt.Fprintf(os.Stderr, " * %v\n", h)
		}
		os.Exit(1)
	}

	err := initServerId()
	if err != nil {
		log.Fatalf("Error initializing server ID: %v", err)
	}

	if *maxStorageString != "" {
		ms, err := humanize.ParseBytes(*maxStorageString)
		if err != nil {
			log.Fatalf("Error parsing max storage parameter: %v", err)
		}
		maxStorage = int64(ms)
	}

	couchbase, err = dbConnect()
	if err != nil {
		log.Fatalf("Can't connect to couchbase: %v", err)
	}

	if err = os.MkdirAll(*root, 0777); err != nil {
		log.Fatalf("Couldn't create storage dir: %v", err)
	}

	err = updateConfig()
	if err != nil && !gomemcached.IsNotFound(err) {
		log.Printf("Error updating initial config, using default: %v", err)
	}

	if *verbose {
		log.Printf("Server config:")
		globalConfig.Dump(os.Stdout)
	}

	go reloadConfig()
	go dnsServices()

	internodeTaskQueue = make(chan internodeTask, *taskWorkers*1024)
	initTaskQueueWorkers()

	go heartbeat()
	go startTasks()
	// Stagger the initial data grab by a random 5-35s delay.
	time.AfterFunc(time.Second*time.Duration(rand.Intn(30)+5), grabSomeData)

	go serveFrame()

	s := &http.Server{
		Addr:        *bindAddr,
		Handler:     http.HandlerFunc(httpHandler),
		ReadTimeout: *readTimeout,
	}
	log.Printf("Listening to web requests on %s as server %s",
		*bindAddr, serverId)
	l, err := rateListen("tcp", *bindAddr)
	if err != nil {
		log.Fatalf("Error listening: %v", err)
	}
	log.Fatal(s.Serve(l))
}
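// TimeoutTransport bounds internode HTTP calls above but is not part of
// this excerpt. A plausible minimal version, assuming it wraps
// net.DialTimeout in an otherwise default http.Transport (hypothetical
// reconstruction; assumes net in the import set):
func TimeoutTransport(timeout time.Duration) http.RoundTripper {
	return &http.Transport{
		Dial: func(network, addr string) (net.Conn, error) {
			return net.DialTimeout(network, addr, timeout)
		},
	}
}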
// uploadCommand uploads a file, a directory tree, stdin, or a remote
// URL into cbfs.
func uploadCommand(u string, args []string) {
	initCrypto()

	httputil.InitHTTPTracker(false)

	uploadFlags.Visit(func(f *flag.Flag) {
		if f.Name == "revs" {
			uploadRevsSet = true
		}
	})

	if *uploadIgnore != "" {
		err := loadIgnorePatternsFromFile(*uploadIgnore)
		cbfstool.MaybeFatal(err, "Error loading ignores: %v", err)
	}

	client, err := cbfsclient.New(u)
	cbfstool.MaybeFatal(err, "Error setting up client: %v", err)

	srcFn := uploadFlags.Arg(0)
	dest := uploadFlags.Arg(1)

	// Special case stdin.
	if srcFn == "-" {
		err := uploadStream(client, os.Stdin, "", dest, "")
		cbfstool.MaybeFatal(err, "Error uploading stdin: %v", err)
		return
	}

	// Special case http as well.
	if strings.HasPrefix(srcFn, "http:") || strings.HasPrefix(srcFn, "https:") {
		req, err := http.NewRequest("GET", srcFn, nil)
		cbfstool.MaybeFatal(err, "Error creating request: %v", err)
		// Disable compression so we have a chance at getting a res length.
		req.Header.Set("Accept-Encoding", "")
		res, err := http.DefaultClient.Do(req)
		cbfstool.MaybeFatal(err, "Error making http request to %v: %v",
			srcFn, err)
		defer res.Body.Close()
		if res.StatusCode != 200 {
			log.Printf("HTTP error fetching %v: %v", srcFn, res.Status)
			io.Copy(os.Stderr, res.Body)
			os.Exit(1)
		}

		var r io.ReadCloser = res.Body
		if res.ContentLength > 0 {
			r = newProgressReader(res.Body, res.ContentLength)
			defer r.Close()
		}

		err = uploadStream(client, r, srcFn, dest, "")
		cbfstool.MaybeFatal(err, "Error uploading from URL: %v", err)
		return
	}

	fi, err := os.Stat(srcFn)
	cbfstool.MaybeFatal(err, "Error statting %v: %v", srcFn, err)

	if fi.IsDir() {
		ch := make(chan uploadReq, 1000)
		ech := make(chan error, *uploadWorkers)

		for i := 0; i < *uploadWorkers; i++ {
			uploadWg.Add(1)
			go uploadWorker(client, ch, ech)
		}

		start := time.Now()
		syncUp(client, srcFn, dest, ch)

		close(ch)
		cbfstool.Verbose(*uploadVerbose, "Finished traversal in %v",
			time.Since(start))

		// Wait for the workers to drain, collecting any permanent
		// upload errors along the way.
		waitch := make(chan bool)
		go func() {
			uploadWg.Wait()
			close(waitch)
		}()

		rc := 0
	collect:
		for {
			select {
			case <-waitch:
				close(ech)
				waitch = nil
			case err, ok := <-ech:
				if !ok {
					break collect
				}
				log.Printf("Permanent upload error: %v", err)
				rc = 1
			}
		}
		cbfstool.Verbose(*uploadVerbose, "Finished sync in %v",
			time.Since(start))
		os.Exit(rc)
	} else {
		err = uploadFile(client, srcFn, dest, localHash(srcFn))
		cbfstool.MaybeFatal(err, "Error uploading file: %v", err)
	}
}
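// newProgressReader (not shown) only needs to wrap a ReadCloser and
// track how many of the expected bytes have passed through. A
// hypothetical bare-bones version that logs a running percentage as it
// reads:
type progressReader struct {
	rc    io.ReadCloser
	read  int64
	total int64
}

func newProgressReader(rc io.ReadCloser, total int64) io.ReadCloser {
	return &progressReader{rc: rc, total: total}
}

func (p *progressReader) Read(b []byte) (int, error) {
	n, err := p.rc.Read(b)
	p.read += int64(n)
	log.Printf("Read %v/%v (%.1f%%)", p.read, p.total,
		100*float64(p.read)/float64(p.total))
	return n, err
}

func (p *progressReader) Close() error { return p.rc.Close() }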