func main() { flag.Parse() glog.Infof("Starting Goship...") ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() auth.Initialize(auth.User{Name: *defaultUser, Avatar: *defaultAvatar}, []byte(*cookieSessionHash)) h, err := buildHandler(ctx) if err != nil { glog.Fatal(err) } w := io.WriteCloser(os.Stdout) if *requestLog != "-" { w, err = os.OpenFile(*requestLog, os.O_APPEND|os.O_CREATE, 0644) if err != nil { glog.Fatalf("Cannot open request log %s: %v", *requestLog, err) } defer w.Close() } h = ghandlers.CombinedLoggingHandler(w, h) fmt.Printf("Running on %s\n", *bindAddress) s := &http.Server{ Addr: *bindAddress, Handler: h, } if err := s.ListenAndServe(); err != nil { glog.Fatal(err) } }
func NewUnixgramServer(t lotf.Tail, raddr *net.UnixAddr) (*DgramServer, error) { conn, err := net.DialUnix("unixgram", nil, raddr) if err != nil { return nil, err } return &DgramServer{t, io.WriteCloser(conn)}, nil }
func NewUDPServer(t lotf.Tail, raddr *net.UDPAddr) (*DgramServer, error) { conn, err := net.DialUDP("udp4", nil, raddr) if err != nil { return nil, err } return &DgramServer{t, io.WriteCloser(conn)}, nil }
// convert LastPass export to KeePass 1.0 XML format func main() { r := csv.NewReader(os.Stdin) head, err := r.Read() // `url,username,password,extra,name,grouping,fav` if err != nil { log.Fatal(err) } order := make(map[string]int, 7) for i, nm := range head { switch nm { case "url", "username", "password", "extra", "name", "grouping", "fav": order[nm] = i default: log.Printf("WARN: unknown field %q", nm) } } w := io.WriteCloser(os.Stdout) defer w.Close() io.WriteString(w, "<!DOCTYPE KEEPASSX_DATABASE>\n<database>\n<group><title>Import</title><icon>1</icon>\n") defer io.WriteString(w, "</group></database>") enc := xml.NewEncoder(w) defer enc.Flush() enc.Indent("", " ") for { row, err := r.Read() if err != nil { if err == io.EOF { break } log.Fatal(err) } log.Printf("ROW=%q", row) var rec Entry for nm, i := range order { switch nm { case "url": rec.URL = row[i] case "username": rec.Username = row[i] case "password": rec.Password = row[i] case "extra": rec.Comment = row[i] case "name": rec.Title = row[i] //case "grouping": //rec.Group = row[i] } } rec.Expire = NotExpire if err = enc.Encode(rec); err != nil { log.Fatal(err) } enc.Flush() } }
// write synchronously writes the key-value pair to disk, // making it immediately available for reads. write optionally // performs a Sync on the relevant file descriptor. func (d *Diskv) write(key string, r io.Reader, sync bool) error { if len(key) <= 0 { return fmt.Errorf("empty key") } // TODO use atomic FS ops in write() d.Lock() defer d.Unlock() if err := d.ensurePath(key); err != nil { return fmt.Errorf("ensure path: %s", err) } mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm) if err != nil { return fmt.Errorf("open file: %s", err) } var wc = io.WriteCloser(&nopWriteCloser{f}) if d.Compression != nil { wc, err = d.Compression.Writer(f) if err != nil { f.Close() // error deliberately ignored return fmt.Errorf("compression writer: %s", err) } } if _, err := io.Copy(wc, r); err != nil { f.Close() // error deliberately ignored return fmt.Errorf("i/o copy: %s", err) } if err := wc.Close(); err != nil { return fmt.Errorf("compression close: %s", err) } if sync { if err := f.Sync(); err != nil { f.Close() // error deliberately ignored return fmt.Errorf("file sync: %s", err) } } if err := f.Close(); err != nil { return fmt.Errorf("file close: %s", err) } if d.Index != nil { d.Index.Insert(key) } delete(d.cache, key) // cache only on read return nil }
func main() { flag.Usage = func() { _, program := filepath.Split(os.Args[0]) fmt.Fprintf(os.Stderr, "usage: %s [OPTIONS] [FILE]\n", program) flag.PrintDefaults() } shouldCompress := flag.Bool("compress", false, "compress output using gzip") flag.Parse() if flag.NArg() != 1 { flag.Usage() os.Exit(2) } root, err := filepath.Abs(flag.Arg(0)) if err != nil { fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error()) os.Exit(-1) } f, err := os.OpenFile(root, os.O_RDONLY, os.ModeDir) if err != nil { fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error()) os.Exit(-1) } output := io.WriteCloser(os.Stdout) if *shouldCompress { output = gzip.NewWriter(output) } _, rootArchivePath := filepath.Split(root) ctx := &creationContext{rootArchivePath, tar.NewWriter(output), make(map[string]bool)} err = ctx.addDir(root, rootArchivePath, f, true) if err != nil { fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error()) os.Exit(-1) } err = ctx.archive.Close() if err != nil { fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error()) os.Exit(-1) } err = output.Close() if err != nil { fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error()) os.Exit(-1) } }
// RsyncRecv sets up the receiving half of the websocket to rsync (the other
// half set up by RsyncSend), putting the contents in the directory specified
// by path.
func RsyncRecv(path string, conn *websocket.Conn, writeWrapper func(io.WriteCloser) io.WriteCloser) error {
	// Spawn rsync in server mode; it reads the protocol stream on stdin
	// and sends its replies on stdout.
	cmd := exec.Command("rsync", "--server", "-vlogDtpre.iLsfx", "--numeric-ids", "--devices", "--partial", ".", path)
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return err
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	// Optionally wrap stdin (e.g. for progress accounting) before wiring
	// both pipes to the websocket.
	writePipe := io.WriteCloser(stdin)
	if writeWrapper != nil {
		writePipe = writeWrapper(stdin)
	}
	readDone, writeDone := shared.WebsocketMirror(conn, writePipe, stdout)
	// Drain stderr fully so cmd.Wait below cannot block on a full pipe;
	// the captured output is only used for the failure log message.
	data, err2 := ioutil.ReadAll(stderr)
	if err2 != nil {
		// NOTE(review): returning here skips cmd.Wait and the
		// readDone/writeDone receives, leaving a zombie process and
		// blocked mirror goroutines — confirm whether this path matters.
		shared.LogDebugf("error reading rsync stderr: %s", err2)
		return err2
	}
	err = cmd.Wait()
	if err != nil {
		shared.LogDebugf("rsync recv error for path %s: %s: %s", path, err, string(data))
	}
	// Wait for both halves of the websocket mirror to finish before
	// reporting rsync's exit status.
	<-readDone
	<-writeDone
	return err
}
// writeStream does no input validation checking. // TODO: use atomic FS ops. func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error { if err := d.ensurePathWithLock(key); err != nil { return fmt.Errorf("ensure path: %s", err) } mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm) if err != nil { return fmt.Errorf("open file: %s", err) } wc := io.WriteCloser(&nopWriteCloser{f}) if d.Compression != nil { wc, err = d.Compression.Writer(f) if err != nil { f.Close() // error deliberately ignored return fmt.Errorf("compression writer: %s", err) } } if _, err := io.Copy(wc, r); err != nil { f.Close() // error deliberately ignored return fmt.Errorf("i/o copy: %s", err) } if err := wc.Close(); err != nil { f.Close() // error deliberately ignored return fmt.Errorf("compression close: %s", err) } if sync { if err := f.Sync(); err != nil { f.Close() // error deliberately ignored return fmt.Errorf("file sync: %s", err) } } if err := f.Close(); err != nil { return fmt.Errorf("file close: %s", err) } if d.Index != nil { d.Index.Insert(key) } d.bustCacheWithLock(key) // cache only on read return nil }
// MigrationSink receives a container (and optionally its snapshots) over
// the websocket conn and writes them into this ZFS pool via `zfs receive`.
func (s *storageZfs) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet, op *operation) error {
	// zfsRecv pipes the websocket stream into `zfs receive -F -u` for the
	// named dataset, optionally wrapping stdin (e.g. progress reporting).
	zfsRecv := func(zfsName string, writeWrapper func(io.WriteCloser) io.WriteCloser) error {
		zfsFsName := fmt.Sprintf("%s/%s", s.zfsPool, zfsName)
		args := []string{"receive", "-F", "-u", zfsFsName}
		cmd := exec.Command("zfs", args...)
		stdin, err := cmd.StdinPipe()
		if err != nil {
			return err
		}
		stderr, err := cmd.StderrPipe()
		if err != nil {
			return err
		}
		if err := cmd.Start(); err != nil {
			return err
		}
		writePipe := io.WriteCloser(stdin)
		if writeWrapper != nil {
			writePipe = writeWrapper(stdin)
		}
		// Block until the entire stream has been forwarded to zfs recv.
		<-shared.WebsocketRecvStream(writePipe, conn)
		output, err := ioutil.ReadAll(stderr)
		if err != nil {
			shared.LogDebug("problem reading zfs recv stderr %s", log.Ctx{"err": err})
		}
		err = cmd.Wait()
		if err != nil {
			shared.LogError("problem with zfs recv", log.Ctx{"output": string(output)})
		}
		return err
	}

	/* In some versions of zfs we can write `zfs recv -F` to mounted
	 * filesystems, and in some versions we can't. So, let's always unmount
	 * this fs (it's empty anyway) before we zfs recv. N.B. that `zfs recv`
	 * of a snapshot also needs the actual fs that it has snapshotted
	 * unmounted, so we do this before receiving anything.
	 */
	zfsName := fmt.Sprintf("containers/%s", container.Name())
	err := s.zfsUnmount(zfsName)
	if err != nil {
		return err
	}

	// Receive each snapshot first: create the empty snapshot record, recv
	// the data into containers/<name>@snapshot-<snap>, and drop the
	// on-zfs marker symlink under the snapshots directory.
	for _, snap := range snapshots {
		args := snapshotProtobufToContainerArgs(container.Name(), snap)
		_, err := containerCreateEmptySnapshot(container.Daemon(), args)
		if err != nil {
			return err
		}
		wrapper := StorageProgressWriter(op, "fs_progress", snap.GetName())
		name := fmt.Sprintf("containers/%s@snapshot-%s", container.Name(), snap.GetName())
		if err := zfsRecv(name, wrapper); err != nil {
			return err
		}
		err = os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", container.Name())), 0700)
		if err != nil {
			return err
		}
		err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", container.Name(), snap.GetName())))
		if err != nil {
			return err
		}
	}

	defer func() {
		/* clean up our migration-send snapshots that we got from recv. */
		zfsSnapshots, err := s.zfsListSnapshots(fmt.Sprintf("containers/%s", container.Name()))
		if err != nil {
			shared.LogError("failed listing snapshots post migration", log.Ctx{"err": err})
			return
		}
		for _, snap := range zfsSnapshots {
			// If we received a bunch of snapshots, remove the migration-send-* ones, if not, wipe any snapshot we got
			if snapshots != nil && len(snapshots) > 0 && !strings.HasPrefix(snap, "migration-send") {
				continue
			}
			s.zfsSnapshotDestroy(fmt.Sprintf("containers/%s", container.Name()), snap)
		}
	}()

	/* finally, do the real container */
	wrapper := StorageProgressWriter(op, "fs_progress", container.Name())
	if err := zfsRecv(zfsName, wrapper); err != nil {
		return err
	}

	if live {
		/* and again for the post-running snapshot if this was a live migration */
		wrapper := StorageProgressWriter(op, "fs_progress", container.Name())
		if err := zfsRecv(zfsName, wrapper); err != nil {
			return err
		}
	}

	/* Sometimes, zfs recv mounts this anyway, even if we pass -u
	 * (https://forums.freebsd.org/threads/zfs-receive-u-shouldnt-mount-received-filesystem-right.36844/)
	 * but sometimes it doesn't. Let's try to mount, but not complain about
	 * failure.
	 */
	s.zfsMount(zfsName)
	return nil
}
"strings" ) var Noout = NopWriteCloser{} type NopWriteCloser struct{} func (w NopWriteCloser) Write(b []byte) (int, error) { return len(b), nil } func (w NopWriteCloser) Close() error { return nil } var _ = io.WriteCloser(Noout) func ExecTee(stream io.WriteCloser, command string, args ...string) (stdout []byte, stderr error) { cmd := exec.Command(command, args...) read, write, _ := os.Pipe() defer func() { read.Close() }() cmd.Stdout = os.Stdout cmd.Stderr = os.Stdout stderr = cmd.Run() write.Close() out, readErr := ioutil.ReadAll(read) if readErr != nil {
// Convert converts from srcFn to dstFn, into the given format. // Either filenames can be empty or "-" which treated as stdin/stdout func Convert(srcFn, dstFn, format string) error { tempDir, err := ioutil.TempDir("", filepath.Base(srcFn)) if err != nil { return fmt.Errorf("cannot create temporary directory: %s", err) } defer os.RemoveAll(tempDir) if srcFn == "-" || srcFn == "" { srcFn = filepath.Join(tempDir, "source") fh, err := os.Create(srcFn) if err != nil { return fmt.Errorf("error creating temp file %q: %s", srcFn, err) } if _, err = io.Copy(fh, os.Stdin); err != nil { fh.Close() return fmt.Errorf("error writing stdout to %q: %s", srcFn, err) } fh.Close() } c := exec.Command(Loffice, "--nolockcheck", "--norestore", "--headless", "--convert-to", format, "--outdir", tempDir, srcFn) c.Stderr = os.Stderr c.Stdout = c.Stderr Log.Info("calling", "args", c.Args) if err = proc.RunWithTimeout(Timeout, c); err != nil { return fmt.Errorf("error running %q: %s", c.Args, err) } dh, err := os.Open(tempDir) if err != nil { return fmt.Errorf("error opening dest dir %q: %s", tempDir, err) } defer dh.Close() names, err := dh.Readdirnames(3) if err != nil { return fmt.Errorf("error listing %q: %s", tempDir, err) } if len(names) > 2 { return fmt.Errorf("too many files in %q: %q", tempDir, names) } var tfn string for _, fn := range names { if fn != "source" { tfn = filepath.Join(dh.Name(), fn) break } } src, err := os.Open(tfn) if err != nil { return fmt.Errorf("cannot open %q: %s", tfn, err) } defer src.Close() var dst = io.WriteCloser(os.Stdout) if !(dstFn == "-" || dstFn == "") { if dst, err = os.Create(dstFn); err != nil { return fmt.Errorf("cannot create dest file %q: %s", dstFn, err) } } if _, err = io.Copy(dst, src); err != nil { return fmt.Errorf("error copying from %v to %v: %v", src, dst, err) } return nil }
//Writer returns the writer for the conn func (p *StreamConn) Writer() io.WriteCloser { return io.WriteCloser(p.src) }
func main() { opts := parseFlags() if len(opts.args) != 1 { dieWithUsage() } if opts.compress && opts.decompress { dieWithUsage() } if !opts.compress && !opts.decompress { dieWithUsage() } srcPath := opts.args[0] algo, err := compress.FromString(opts.algo) if err != nil { die(err) } src := openSrc(srcPath) defer src.Close() dstPath := dstFilename(opts.compress, srcPath, opts.algo) if opts.useDevNull { dstPath = os.DevNull } dst := openDst(dstPath, opts.forceDstOverwrite) defer dst.Close() key := derivateAesKey([]byte("defaultpassword"), nil, 32) if key == nil { die(err) } var chiper uint16 = aeadCipherAES if opts.encalgo == "chacha" { chiper = aeadCipherChaCha } if opts.encalgo == "aes" { chiper = aeadCipherAES } if opts.encalgo == "none" { opts.encrypt = false } if opts.compress { ew := io.WriteCloser(dst) if opts.encrypt { ew, err = encrypt.NewWriterWithTypeAndBlockSize(dst, key, chiper, opts.maxblocksize) if err != nil { die(err) } } zw, err := compress.NewWriter(ew, algo) if err != nil { die(err) } _, err = io.Copy(zw, src) if err != nil { die(err) } if err := zw.Close(); err != nil { die(err) } if err := ew.Close(); err != nil { die(err) } } if opts.decompress { var reader io.ReadSeeker = src if opts.encrypt { er, err := encrypt.NewReader(src, key) if err != nil { die(err) } reader = er } zr := compress.NewReader(reader) _, err = io.Copy(dst, zr) if err != nil { die(err) } } }
// MigrationSink receives a container (and optionally its snapshots) over
// the websocket conn via `btrfs receive`. Inside a user namespace btrfs
// send/receive is unavailable, so it falls back to the rsync sink.
func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet, op *operation) error {
	if runningInUserns {
		return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap, op)
	}

	cName := container.Name()

	snapshotsPath := shared.VarPath(fmt.Sprintf("snapshots/%s", cName))
	if !shared.PathExists(snapshotsPath) {
		err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", cName)), 0700)
		if err != nil {
			return err
		}
	}

	// btrfsRecv streams the websocket into `btrfs receive -e` after first
	// deleting the pre-created subvolume at targetPath. For the main
	// container (isSnapshot == false), the received read-only subvolume
	// (<cName>/.root) is then snapshotted into place and the intermediate
	// copy removed.
	btrfsRecv := func(btrfsPath string, targetPath string, isSnapshot bool, writeWrapper func(io.WriteCloser) io.WriteCloser) error {
		args := []string{"receive", "-e", btrfsPath}
		cmd := exec.Command("btrfs", args...)

		// Remove the existing pre-created subvolume
		err := s.subvolsDelete(targetPath)
		if err != nil {
			return err
		}

		stdin, err := cmd.StdinPipe()
		if err != nil {
			return err
		}

		stderr, err := cmd.StderrPipe()
		if err != nil {
			return err
		}

		if err := cmd.Start(); err != nil {
			return err
		}

		// Optionally wrap stdin (e.g. progress reporting) before wiring
		// it to the websocket stream.
		writePipe := io.WriteCloser(stdin)
		if writeWrapper != nil {
			writePipe = writeWrapper(stdin)
		}

		// Block until the entire stream has been forwarded.
		<-shared.WebsocketRecvStream(writePipe, conn)

		output, err := ioutil.ReadAll(stderr)
		if err != nil {
			shared.LogDebugf("problem reading btrfs receive stderr %s", err)
		}

		err = cmd.Wait()
		if err != nil {
			shared.LogError("problem with btrfs receive", log.Ctx{"output": string(output)})
			return err
		}

		if !isSnapshot {
			cPath := containerPath(fmt.Sprintf("%s/.root", cName), true)

			err := s.subvolSnapshot(cPath, targetPath, false)
			if err != nil {
				shared.LogError("problem with btrfs snapshot", log.Ctx{"err": err})
				return err
			}

			err = s.subvolsDelete(cPath)
			if err != nil {
				shared.LogError("problem with btrfs delete", log.Ctx{"err": err})
				return err
			}
		}

		return nil
	}

	for _, snap := range snapshots {
		args := snapshotProtobufToContainerArgs(container.Name(), snap)
		// NOTE(review): this `s` shadows the *storageBtrfs receiver for
		// the rest of the loop body (s.Path() below is the snapshot's
		// path); inside btrfsRecv the receiver is still the one visible.
		// Works, but easy to misread.
		s, err := containerCreateEmptySnapshot(container.Daemon(), args)
		if err != nil {
			return err
		}

		wrapper := StorageProgressWriter(op, "fs_progress", snap.GetName())
		if err := btrfsRecv(containerPath(cName, true), s.Path(), true, wrapper); err != nil {
			return err
		}
	}

	/* finally, do the real container */
	wrapper := StorageProgressWriter(op, "fs_progress", container.Name())
	if err := btrfsRecv(containerPath(cName, true), container.Path(), false, wrapper); err != nil {
		return err
	}

	if live {
		// Live migration sends a second stream for the post-running state.
		wrapper := StorageProgressWriter(op, "fs_progress", container.Name())
		if err := btrfsRecv(containerPath(cName, true), container.Path(), false, wrapper); err != nil {
			return err
		}
	}

	// Cleanup
	if ok, _ := shared.PathIsEmpty(snapshotsPath); ok {
		err := os.Remove(snapshotsPath)
		if err != nil {
			return err
		}
	}

	return nil
}