func (c *Config) Install(logLevel string) (err error) { // check hook _, hookErr := c.Git.GetHook("pre-commit") if hookErr == nil { err = fmt.Errorf("pre-commit hook already exists") } h, err := c.runInfoTpl(preCommitHook, logLevel, c.ServerInfo) if err != nil { return } if err = c.Git.SetHook("pre-commit", h); err != nil { return } for k, v := range c.getConfigLines() { var v1 string if v1, err = c.runInfoTpl(v, logLevel, c.ServerInfo); err != nil { return } if err = c.Git.SetConfig(k, v1); err != nil { return } logx.Debugf("set %s = %s", k, v1) } return }
// Abort diversion func (d *Divert) Cleanup(spec DivertSpec) (err error) { // Remove orphan diverted files orphans := map[string]struct{}{} for _, f := range spec.TargetFiles { orphans[f] = struct{}{} } for _, f := range spec.ToRecover { _, ok := orphans[f] if ok { delete(orphans, f) } } for f, _ := range orphans { os.Remove(lists.OSFromSlash(lists.OSJoin(d.Git.Root, f))) logx.Debugf("removed orphan %s", f) } // Reset to Head if err = d.Git.Reset(spec.Head); err != nil { return } if err = d.Git.Checkout(spec.Head, spec.ToRecover...); err != nil { return } logx.Info("cleanup finished") return }
func (c *GitSmudgeCmd) Run(args ...string) (err error) { name := args[0] m, err := proto.NewFromAny(c.Stdin, c.ChunkSize) logx.Debugf("smudge manifest for %s (%s)", name, m.ID) err = m.Serialize(c.Stdout) return }
func (s *ThriftServer) Start() (err error) { processor := wire.NewBarProcessor(s.Handler) var transport t_thrift.TServerTransport if transport, err = t_thrift.NewTServerSocket(s.options.Bind); err != nil { return } protoFactory := t_thrift.NewTBinaryProtocolFactoryDefault() transportFactory := t_thrift.NewTBufferedTransportFactory( s.BufferSize) s.TServer = t_thrift.NewTSimpleServer4(processor, transport, transportFactory, protoFactory) logx.Debugf("thrift listening at %s", s.options.Info.RPCEndpoints[0]) errChan := make(chan error, 1) go func() { errChan <- s.TServer.Serve() }() defer s.TServer.Stop() defer s.cancel() select { case <-s.ctx.Done(): return case err = <-errChan: return } return }
func (c *GitDivertBeginCmd) Run(args ...string) (err error) { logx.Debugf("beginning covert op %s", args) if len(args) == 0 { err = fmt.Errorf("no branch") return } branch := args[0] names := args[1:] mod, err := model.New(c.WD, true, c.ChunkSize, c.PoolSize) if err != nil { return } divert := git.NewDivert(mod.Git) var spec git.DivertSpec if spec, err = divert.PrepareBegin(branch, names...); err != nil { return } if err = divert.Begin(spec); err == nil { return } return }
// Refresh files in git index (use after squash or blow) func (g *Git) UpdateIndex(what ...string) (err error) { rooted, err := g.ToRoot(what...) if _, err = g.Run("update-index", rooted...); err != nil { return } logx.Debugf("git index updated for %s", what) return }
func (c *Config) Uninstall() (err error) { c.Git.CleanHook("pre-commit") for k, _ := range c.getConfigLines() { err = c.Git.UnsetConfig(k) logx.Debugf("unset %s", k) } return }
func (r *Reporter) connect() (err error) { if r.conn == nil { r.conn, err = net.DialTimeout("udp", r.options.Endpoint, r.options.ConnTimeout) logx.OnFatal(err) logx.Debugf("connected to datapoints endpoint %s", r.options.Endpoint) } return }
func (h *Handler) GetManifests(ids [][]byte) (r []*wire.Manifest, err error) { var req proto.IDSlice if err = (&req).UnmarshalThrift(ids); err != nil { return } logx.Debugf("serving manifests %s", req) res, err := h.Storage.GetManifests(req) if err != nil { logx.Error(err) return } r, err = proto.ManifestSlice(res).MarshalThrift() logx.Debugf("manifests served %s", req) return }
func (h *Handler) FinishUpload(uploadId []byte) (err error) { reqUploadID, err := uuid.Parse(uploadId) if err != nil { return } if err = h.Storage.FinishUploadSession(*reqUploadID); err != nil { return } logx.Debugf("upload %s finished successfully", reqUploadID) return }
func (r *Reporter) Drop(rep *report.Report) (err error) { if !r.noop { if err = r.connect(); err != nil { logx.Error(err) return } _, err = fmt.Fprintf(r.conn, "%s.%s %s %d", strings.Join(rep.Meta(), "."), rep.Key(), rep.Value(), rep.When().Unix()) logx.Debugf("datapoint sent: %+v", rep) if err != nil { logx.Error(err) r.conn.Close() r.conn = nil } } else { logx.Debugf("datapoint: %s.%s %s %d", strings.Join(rep.Meta(), "."), rep.Key(), rep.Value(), rep.When().Unix()) } return }
// Try to get reader from git OID // If git status is dirty or file not in git - just return nil func (m *Model) getGitReader(name string) (res io.Reader) { dirty, _, err := m.Check(name) if err != nil { logx.Debug(err) return } if dirty { err = nil logx.Debugf("%s is dirty", name) return } oid, err := m.Git.GetOID(name) if err != nil { logx.Debug(err) return } res, err = m.Git.Cat(oid) if err != nil { logx.Debug(err) res = nil } logx.Debugf("manifest for %s parsed from git %s", name, oid) return }
func (c *UpCmd) Run(args ...string) (err error) { var mod *model.Model if mod, err = model.New(c.WD, c.UseGit, c.ChunkSize, c.PoolSize); err != nil { return } feed := lists.NewFileList(args...).ListDir(c.WD) isDirty, dirty, err := mod.Check(feed...) if err != nil { return } if isDirty { err = fmt.Errorf("dirty files in working tree %s", dirty) return } if c.UseGit { // filter by attrs feed, err = mod.Git.FilterByAttr("bar", feed...) } blobs, err := mod.FeedManifests(true, false, true, feed...) if err != nil { return } logx.Debugf("collected blobs %s", blobs.IDMap()) trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize) err = trans.Upload(blobs) if err != nil { return } if c.Squash { if err = mod.SquashBlobs(blobs); err != nil { return } if c.UseGit { err = mod.Git.UpdateIndex(blobs.Names()...) } } return }
func (c *GitDivertPushCmd) Run(args ...string) (err error) { mod, err := model.New(c.WD, true, c.ChunkSize, c.PoolSize) if err != nil { return } var upstream, branch string if len(args) == 0 { err = fmt.Errorf("no upstream and/or branch provided") return } if len(args) == 1 { upstream = "origin" branch = args[0] } else { upstream = args[0] branch = args[1] } // checks current, branches, err := mod.Git.GetBranches() if err != nil { return } if branch == current { err = fmt.Errorf("cannot push current branch. use `git push ...`") return } var exists bool for _, i := range branches { if branch == i { exists = true break } } if !exists { err = fmt.Errorf("branch %s is not exists") return } if err = mod.Git.Push(upstream, branch); err != nil { return } logx.Debugf("%s/%s pushed", upstream, branch) return }
func (m *Model) SquashBlobs(blobs lists.BlobMap) (err error) { logx.Tracef("squashing blobs %s", blobs.IDMap()) var req, res []interface{} for _, v := range blobs.ToSlice() { req = append(req, v) } err = m.BatchPool.Do( func(ctx context.Context, in interface{}) (out interface{}, err error) { r := in.(lists.BlobLink) lock, err := m.FdLocks.Take() if err != nil { return } defer lock.Release() absname := filepath.Join(m.WD, r.Name) backName := absname + ".bar-backup" os.Rename(absname, absname+".bar-backup") os.MkdirAll(filepath.Dir(absname), 0755) w, err := os.Create(absname) if err != nil { return } err = r.Manifest.Serialize(w) if err != nil { os.Remove(absname) os.Rename(backName, absname) return } defer os.Remove(backName) logx.Debugf("squashed %s", r.Name) return }, &req, &res, concurrency.DefaultBatchOptions().AllowErrors(), ) if err != nil { return } logx.Infof("blob %s squashed successfully", blobs.Names()) return }
func (c *GitCleanCmd) Run(args ...string) (err error) { mod, err := model.New(c.WD, true, c.ChunkSize, c.PoolSize) var name string if len(args) > 0 { name = args[0] } // check divert divert := git.NewDivert(mod.Git) isInProgress, err := divert.IsInProgress() if err != nil { return } if isInProgress { var spec git.DivertSpec if spec, err = divert.ReadSpec(); err != nil { return } var exists bool for _, n := range spec.TargetFiles { if n == name { exists = true break } } if !exists { err = fmt.Errorf("wan't clean non-target file %s while divert in progress", name) return } } s, err := mod.GetManifest(name, c.Stdin) if err != nil { return } logx.Debugf("%s %s", name, s.ID) if c.Id { fmt.Fprintf(c.Stdout, "%s", s.ID) } else { err = s.Serialize(c.Stdout) } return }
func (h *Handlers) HandleSpec(w http.ResponseWriter, r *http.Request) { id := proto.ID(strings.TrimPrefix(r.URL.Path, "/v1/spec/")) logx.Debugf("serving spec %s", id) ok, err := h.Storage.IsSpecExists(id) if err != nil { logx.Error(err) w.WriteHeader(500) return } if !ok { logx.Errorf("bad spec id %s", id) w.WriteHeader(404) return } w.Header().Set("Content-Type", "text/html; charset=utf-8") h.handleTpl(w, "spec", map[string]interface{}{ "Info": h.options.Info, "ID": id, "ShortID": id[:12], }) }
func (s *Server) Start() (err error) { if s.Listener, err = net.Listen("tcp", s.options.Bind); err != nil { return } hs, err := NewHandlers(s.ctx, s.options, s.storage) if err != nil { return } // make http frontend mux := http.NewServeMux() mux.HandleFunc("/", hs.HandleFront) mux.HandleFunc("/v1/win/bar-export.bat", hs.HandleExportBat) mux.HandleFunc("/v1/win/bar-import/", hs.HandleImportBat) mux.HandleFunc("/v1/win/bar.exe", hs.HandleBarExe) mux.HandleFunc("/v1/spec/", hs.HandleSpec) logx.Debugf("bard http serving at http://%s/v1", s.options.Bind) srv := &http.Server{Handler: mux} errChan := make(chan error, 1) go func() { errChan <- srv.Serve(s.Listener) }() defer s.Listener.Close() defer s.cancel() select { case <-s.ctx.Done(): break case err = <-errChan: break } return }
// Stop shuts the HTTP server down by cancelling the server context; the
// serve loop in Start observes the cancellation and returns.
func (s *Server) Stop() (err error) {
	s.cancel()
	logx.Debugf("http %s closed", s.options.Bind)
	return
}
// Stop shuts the thrift server down by cancelling the server context.
// NOTE(review): this logs s.RPCEndpoints[0] while Start logs
// s.options.Info.RPCEndpoints[0] - confirm both refer to the same data.
func (s *ThriftServer) Stop() (err error) {
	s.cancel()
	logx.Debugf("thrift stopped at %s", s.RPCEndpoints[0])
	return
}
func (c *SpecImportCmd) Run(args ...string) (err error) { var spec proto.Spec mod, err := model.New(c.WD, c.UseGit, c.ChunkSize, c.PoolSize) if err != nil { return } trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize) if c.Raw { if err = json.NewDecoder(c.Stdin).Decode(&spec); err != nil { return } } else { // tree spec types id := proto.ID(args[0]) if spec, err = trans.GetSpec(id); err != nil { logx.Debug(spec, err) return } } idm := lists.IDMap{} for n, id := range spec.BLOBs { idm[id] = append(idm[id], n) } // request manifests and mans, err := trans.GetManifests(idm.IDs()) if err != nil { return } feed := idm.ToBlobMap(mans) names := feed.Names() if len(names) == 0 { logx.Fatalf("no manifests on server %s", names) } logx.Debugf("importing %s", names) if c.UseGit { // If git is used - check names for attrs byAttr, err := mod.Git.FilterByAttr("bar", names...) if err != nil { return err } diff := []string{} attrs := map[string]struct{}{} for _, x := range byAttr { attrs[x] = struct{}{} } for _, x := range names { if _, ok := attrs[x]; !ok { diff = append(diff, x) } } if len(diff) > 0 { return fmt.Errorf("some spec blobs is not under bar control %s", diff) } } // get stored links, ignore errors stored, _ := mod.FeedManifests(true, true, false, names...) logx.Debugf("already stored %s", stored.Names()) // squash present toSquash := lists.BlobMap{} for n, m := range feed { m1, ok := stored[filepath.FromSlash(n)] if !ok || m.ID != m1.ID { toSquash[n] = feed[n] } } if c.Squash { if err = mod.SquashBlobs(toSquash); err != nil { return } } for k, _ := range feed { fmt.Fprintf(c.Stdout, "%s ", filepath.FromSlash(k)) } return }
func (s *BlockStorage) FinishUploadSession(uploadID uuid.UUID) (err error) { hexid := proto.ID(hex.EncodeToString(uploadID[:])) base := s.idPath(upload_ns, hexid) defer os.RemoveAll(base) // load manifests manifests_base := filepath.Join(base, manifests_ns) var manifests []proto.Manifest if err = func() (err error) { lock, err := s.FDLocks.Take() if err != nil { return } defer lock.Release() err = filepath.Walk(manifests_base, func(path string, info os.FileInfo, ferr error) (err error) { if strings.HasSuffix(path, "-manifest.json") { var man proto.Manifest if man, err = s.readManifest(path); err != nil { return } manifests = append(manifests, man) } return }) return }(); err != nil { return } // collect all manifests var req, res []interface{} for _, v := range manifests { req = append(req, v) } err = s.BatchPool.Do( func(ctx context.Context, in interface{}) (out interface{}, err error) { lock, err := s.FDLocks.Take() if err != nil { return } defer lock.Release() m := in.(proto.Manifest) target := s.idPath(blob_ns, m.ID) f, fErr := s.getCAFile(target) if os.IsExist(fErr) { return } else if fErr != nil { err = fErr return } defer f.Close() logx.Debugf("assembling %s", m.ID) for _, chunk := range m.Chunks { if err = func(chunk proto.Chunk) (err error) { lock, err := s.FDLocks.Take() if err != nil { return } defer lock.Release() r, err := os.Open(filepath.Join(base, chunk.ID.String())) if err != nil { return } defer r.Close() _, err = io.Copy(f, r) return }(chunk); err != nil { return } } err = f.Accept() // move manifest manTarget := s.idPath(manifests_ns, m.ID) + ".json" os.MkdirAll(filepath.Dir(manTarget), 0755) err = os.Rename(filepath.Join(manifests_base, m.ID.String()+"-manifest.json"), manTarget) return }, &req, &res, concurrency.DefaultBatchOptions().AllowErrors(), ) return }
func (s *BlockStorage) CreateUploadSession(uploadID uuid.UUID, in []proto.Manifest, ttl time.Duration) (missing []proto.ID, err error) { hexid := proto.ID(hex.EncodeToString(uploadID[:])) // take lock lock, err := s.FDLocks.Take() if err != nil { return } defer lock.Release() // Create directories and write support data base := filepath.Join(s.idPath(upload_ns, proto.ID(hex.EncodeToString(uploadID[:]))), manifests_ns) if err = os.MkdirAll(base, 0755); err != nil { return } var missingBlobs []proto.Manifest for _, m := range in { if err = func(m proto.Manifest) (err error) { var statErr error _, statErr = os.Stat(s.idPath(manifests_ns, m.ID)) if os.IsNotExist(statErr) { missingBlobs = append(missingBlobs, m) } else if statErr != nil { err = statErr return } else { // exists - ok return } w, err := os.OpenFile(filepath.Join(base, m.ID.String()+"-manifest.json"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644) if err != nil { return } defer w.Close() err = json.NewEncoder(w).Encode(&m) return }(m); err != nil { return } } missing = proto.ManifestSlice(missingBlobs).GetChunkSlice() w, err := os.OpenFile(filepath.Join(s.idPath(upload_ns, hexid), "expires.timestamp"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644) if err != nil { return } defer w.Close() if _, err = w.Write([]byte(fmt.Sprintf("%d", time.Now().Add(ttl).UnixNano()))); err != nil { return } logx.Debugf("upload session %s created succefully", hexid) return }