// Stat returns information about a blob.
func (b *restBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
	if err := h.Valid(); err != nil {
		return restic.FileInfo{}, err
	}

	// take a token from the channel to limit concurrent connections,
	// return it as soon as the request is done
	<-b.connChan
	resp, err := b.client.Head(restPath(b.url, h))
	b.connChan <- struct{}{}
	if err != nil {
		return restic.FileInfo{}, errors.Wrap(err, "client.Head")
	}

	// drain and close the body so the HTTP connection can be reused
	io.Copy(ioutil.Discard, resp.Body)
	if err = resp.Body.Close(); err != nil {
		return restic.FileInfo{}, errors.Wrap(err, "Close")
	}

	if resp.StatusCode != 200 {
		return restic.FileInfo{}, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
	}

	// a HEAD response without a valid Content-Length is useless here,
	// since the size is the only information returned
	if resp.ContentLength < 0 {
		return restic.FileInfo{}, errors.New("negative content length")
	}

	bi := restic.FileInfo{
		Size: resp.ContentLength,
	}

	return bi, nil
}
// Rename temp file to final name according to type and name.
func (r *SFTP) renameFile(oldname string, t restic.FileType, name string) error {
	filename := r.filename(t, name)

	// create directories if necessary
	if t == restic.DataFile {
		err := r.mkdirAll(path.Dir(filename), backend.Modes.Dir)
		if err != nil {
			return err
		}
	}

	// test if new file exists
	if _, err := r.c.Lstat(filename); err == nil {
		return errors.Errorf("Close(): file %v already exists", filename)
	}

	err := r.c.Rename(oldname, filename)
	if err != nil {
		return errors.Wrap(err, "Rename")
	}

	// set mode to read-only: clear all write bits (0222) while keeping the
	// remaining permission bits the file already has
	fi, err := r.c.Lstat(filename)
	if err != nil {
		return errors.Wrap(err, "Lstat")
	}

	err = r.c.Chmod(filename, fi.Mode()&os.FileMode(^uint32(0222)))
	return errors.Wrap(err, "Chmod")
}
func (node Node) restoreMetadata(path string) error { var err error err = lchown(path, int(node.UID), int(node.GID)) if err != nil { return errors.Wrap(err, "Lchown") } if node.Type != "symlink" { err = fs.Chmod(path, node.Mode) if err != nil { return errors.Wrap(err, "Chmod") } } if node.Type != "dir" { err = node.RestoreTimestamps(path) if err != nil { debug.Log("error restoring timestamps for dir %v: %v", path, err) return err } } return nil }
// ReadPassword reads the password from a password file, the environment // variable RESTIC_PASSWORD or prompts the user. func ReadPassword(opts GlobalOptions, prompt string) (string, error) { if opts.PasswordFile != "" { s, err := ioutil.ReadFile(opts.PasswordFile) return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile") } if pwd := os.Getenv("RESTIC_PASSWORD"); pwd != "" { return pwd, nil } var ( password string err error ) if stdinIsTerminal() { password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt) } else { password, err = readPassword(os.Stdin) } if err != nil { return "", errors.Wrap(err, "unable to read password") } if len(password) == 0 { return "", errors.Fatal("an empty password is not a password") } return password, nil }
// Stat returns information about a blob.
func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
	debug.Log("%v", h)

	path := be.s3path(h.Type, h.Name)
	var obj *minio.Object
	obj, err = be.client.GetObject(be.bucketname, path)
	if err != nil {
		debug.Log("GetObject() err %v", err)
		return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
	}

	// make sure that the object is closed properly.
	// a Close error is only reported when nothing else failed, via the
	// named return value err
	defer func() {
		e := obj.Close()
		if err == nil {
			err = errors.Wrap(e, "Close")
		}
	}()

	fi, err := obj.Stat()
	if err != nil {
		debug.Log("Stat() err %v", err)
		return restic.FileInfo{}, errors.Wrap(err, "Stat")
	}

	return restic.FileInfo{Size: fi.Size}, nil
}
// Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet.
func Open(cfg Config) (restic.Backend, error) {
	debug.Log("open, config %#v", cfg)

	// the last argument selects secure (HTTPS) transport unless plain HTTP
	// was explicitly requested in the config
	client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
	if err != nil {
		return nil, errors.Wrap(err, "minio.New")
	}

	be := &s3{client: client, bucketname: cfg.Bucket, prefix: cfg.Prefix}
	be.createConnections()

	ok, err := client.BucketExists(cfg.Bucket)
	if err != nil {
		debug.Log("BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)
		return nil, errors.Wrap(err, "client.BucketExists")
	}

	if !ok {
		// create new bucket with default ACL in default region
		err = client.MakeBucket(cfg.Bucket, "")
		if err != nil {
			return nil, errors.Wrap(err, "client.MakeBucket")
		}
	}

	return be, nil
}
// Remove removes the blob with the given name and type. func (b *restBackend) Remove(t restic.FileType, name string) error { h := restic.Handle{Type: t, Name: name} if err := h.Valid(); err != nil { return err } req, err := http.NewRequest("DELETE", restPath(b.url, h), nil) if err != nil { return errors.Wrap(err, "http.NewRequest") } <-b.connChan resp, err := b.client.Do(req) b.connChan <- struct{}{} if err != nil { return errors.Wrap(err, "client.Do") } if resp.StatusCode != 200 { return errors.New("blob not removed") } io.Copy(ioutil.Discard, resp.Body) return resp.Body.Close() }
// Save stores data in the backend at the handle.
func (b *restBackend) Save(h restic.Handle, p []byte) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	// take a token from the channel to limit concurrent connections,
	// return it as soon as the request is done
	<-b.connChan
	resp, err := b.client.Post(restPath(b.url, h), "binary/octet-stream", bytes.NewReader(p))
	b.connChan <- struct{}{}

	// resp may be non-nil even when err != nil; in that case the body still
	// has to be drained and closed so the connection can be reused
	if resp != nil {
		defer func() {
			io.Copy(ioutil.Discard, resp.Body)
			e := resp.Body.Close()

			// a Close error is only reported when nothing else failed
			if err == nil {
				err = errors.Wrap(e, "Close")
			}
		}()
	}

	if err != nil {
		return errors.Wrap(err, "client.Post")
	}

	if resp.StatusCode != 200 {
		return errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
	}

	return nil
}
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (r *SFTP) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
	debug.Log("load %v, %d bytes, offset %v", h, len(p), off)
	if err := r.clientError(); err != nil {
		return 0, err
	}

	if err := h.Valid(); err != nil {
		return 0, err
	}

	f, err := r.c.Open(r.filename(h.Type, h.Name))
	if err != nil {
		return 0, errors.Wrap(err, "Open")
	}

	// a Close error is only reported when nothing else failed, via the
	// named return value err
	defer func() {
		e := f.Close()
		if err == nil && e != nil {
			err = errors.Wrap(e, "Close")
		}
	}()

	// positive offsets seek from the start of the file (whence 0), negative
	// offsets are interpreted relative to the end (whence 2)
	switch {
	case off > 0:
		_, err = f.Seek(off, 0)
	case off < 0:
		_, err = f.Seek(off, 2)
	}
	if err != nil {
		return 0, errors.Wrap(err, "Seek")
	}

	return io.ReadFull(f, p)
}
// writeToTempfile saves p into a tempfile in tempdir. func writeToTempfile(tempdir string, p []byte) (filename string, err error) { tmpfile, err := ioutil.TempFile(tempdir, "temp-") if err != nil { return "", errors.Wrap(err, "TempFile") } n, err := tmpfile.Write(p) if err != nil { return "", errors.Wrap(err, "Write") } if n != len(p) { return "", errors.New("not all bytes writen") } if err = tmpfile.Sync(); err != nil { return "", errors.Wrap(err, "Syncn") } err = tmpfile.Close() if err != nil { return "", errors.Wrap(err, "Close") } return tmpfile.Name(), nil }
// Load returns the data stored in the backend for h at the given offset and
// saves it in p. Load has the same semantics as io.ReaderAt, with one
// exception: when off is lower than zero, it is treated as an offset relative
// to the end of the file.
func (b *Local) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
	debug.Log("Load %v, length %v at %v", h, len(p), off)
	if err := h.Valid(); err != nil {
		return 0, err
	}

	f, err := fs.Open(filename(b.p, h.Type, h.Name))
	if err != nil {
		return 0, errors.Wrap(err, "Open")
	}

	// a Close error is only reported when nothing else failed, via the
	// named return value err
	defer func() {
		e := f.Close()
		if err == nil {
			err = errors.Wrap(e, "Close")
		}
	}()

	// positive offsets seek from the start of the file (whence 0), negative
	// offsets are interpreted relative to the end (whence 2)
	switch {
	case off > 0:
		_, err = f.Seek(off, 0)
	case off < 0:
		_, err = f.Seek(off, 2)
	}
	if err != nil {
		return 0, errors.Wrap(err, "Seek")
	}

	return io.ReadFull(f, p)
}
func (node Node) createFileAt(path string, repo Repository) error { f, err := fs.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600) defer f.Close() if err != nil { return errors.Wrap(err, "OpenFile") } var buf []byte for _, id := range node.Content { size, err := repo.LookupBlobSize(id, DataBlob) if err != nil { return err } buf = buf[:cap(buf)] if uint(len(buf)) < size { buf = make([]byte, size) } n, err := repo.LoadBlob(DataBlob, id, buf) if err != nil { return err } buf = buf[:n] _, err = f.Write(buf) if err != nil { return errors.Wrap(err, "Write") } } return nil }
// savePacker stores p in the backend. func (r *Repository) savePacker(p *pack.Packer) error { debug.Log("save packer with %d blobs\n", p.Count()) n, err := p.Finalize() if err != nil { return err } tmpfile := p.Writer().(*os.File) f, err := fs.Open(tmpfile.Name()) if err != nil { return errors.Wrap(err, "Open") } data := make([]byte, n) m, err := io.ReadFull(f, data) if err != nil { return errors.Wrap(err, "ReadFul") } if uint(m) != n { return errors.Errorf("read wrong number of bytes from %v: want %v, got %v", tmpfile.Name(), n, m) } if err = f.Close(); err != nil { return errors.Wrap(err, "Close") } id := restic.Hash(data) h := restic.Handle{Type: restic.DataFile, Name: id.String()} err = r.be.Save(h, data) if err != nil { debug.Log("Save(%v) error: %v", h, err) return err } debug.Log("saved as %v", h) err = fs.Remove(tmpfile.Name()) if err != nil { return errors.Wrap(err, "Remove") } // update blobs in the index for _, b := range p.Blobs() { debug.Log(" updating blob %v to pack %v", b.ID.Str(), id.Str()) r.idx.Current().Store(restic.PackedBlob{ Blob: restic.Blob{ Type: b.Type, ID: b.ID, Offset: b.Offset, Length: uint(b.Length), }, PackID: id, }) } return nil }
func (node *Node) UnmarshalJSON(data []byte) error { type nodeJSON Node nj := (*nodeJSON)(node) err := json.Unmarshal(data, nj) if err != nil { return errors.Wrap(err, "Unmarshal") } nj.Name, err = strconv.Unquote(`"` + nj.Name + `"`) return errors.Wrap(err, "Unquote") }
// startClient starts program with args as a subprocess and opens an SFTP
// session over its stdin/stdout pipes. Stderr output of the subprocess is
// forwarded, prefixed with the program name.
func startClient(program string, args ...string) (*SFTP, error) {
	// Connect to a remote host and request the sftp subsystem via the 'ssh'
	// command. This assumes that passwordless login is correctly configured.
	cmd := exec.Command(program, args...)

	// prefix the errors with the program name
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, errors.Wrap(err, "cmd.StderrPipe")
	}

	go func() {
		sc := bufio.NewScanner(stderr)
		for sc.Scan() {
			fmt.Fprintf(os.Stderr, "subprocess %v: %v\n", program, sc.Text())
		}
	}()

	// ignore signals sent to the parent (e.g. SIGINT)
	cmd.SysProcAttr = ignoreSigIntProcAttr()

	// get stdin and stdout
	wr, err := cmd.StdinPipe()
	if err != nil {
		return nil, errors.Wrap(err, "cmd.StdinPipe")
	}
	rd, err := cmd.StdoutPipe()
	if err != nil {
		return nil, errors.Wrap(err, "cmd.StdoutPipe")
	}

	// start the process
	if err := cmd.Start(); err != nil {
		return nil, errors.Wrap(err, "cmd.Start")
	}

	// wait in a different goroutine; the buffered channel holds the exit
	// status until someone reads it
	ch := make(chan error, 1)
	go func() {
		err := cmd.Wait()
		debug.Log("ssh command exited, err %v", err)
		ch <- errors.Wrap(err, "cmd.Wait")
	}()

	// open the SFTP session
	client, err := sftp.NewClientPipe(rd, wr)
	if err != nil {
		return nil, errors.Errorf("unable to start the sftp session, error: %v", err)
	}

	return &SFTP{c: client, cmd: cmd, result: ch}, nil
}
// readDirNames reads the directory named by dirname and returns // a sorted list of directory entries. // taken from filepath/path.go func readDirNames(dirname string) ([]string, error) { f, err := fs.Open(dirname) if err != nil { return nil, errors.Wrap(err, "Open") } names, err := f.Readdirnames(-1) f.Close() if err != nil { return nil, errors.Wrap(err, "Readdirnames") } sort.Strings(names) return names, nil }
// UnmarshalJSON parses the JSON-encoded data and stores the result in id. func (id *ID) UnmarshalJSON(b []byte) error { var s string err := json.Unmarshal(b, &s) if err != nil { return errors.Wrap(err, "Unmarshal") } _, err = hex.Decode(id[:], []byte(s)) if err != nil { return errors.Wrap(err, "hex.Decode") } return nil }
// uidGidInt returns uid, gid of the user as a number. func uidGidInt(u user.User) (uid, gid uint32, err error) { var ui, gi int64 ui, err = strconv.ParseInt(u.Uid, 10, 32) if err != nil { return uid, gid, errors.Wrap(err, "ParseInt") } gi, err = strconv.ParseInt(u.Gid, 10, 32) if err != nil { return uid, gid, errors.Wrap(err, "ParseInt") } uid = uint32(ui) gid = uint32(gi) return }
func readdir(d string) (fileInfos []os.FileInfo, err error) { f, e := fs.Open(d) if e != nil { return nil, errors.Wrap(e, "Open") } defer func() { e := f.Close() if err == nil { err = errors.Wrap(e, "Close") } }() return f.Readdir(-1) }
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (b *restBackend) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
	if err := h.Valid(); err != nil {
		return 0, err
	}

	// invert offset: a negative offset is relative to the end of the file,
	// so the current size is needed to compute the absolute position
	if off < 0 {
		info, err := b.Stat(h)
		if err != nil {
			return 0, errors.Wrap(err, "Stat")
		}

		if -off > info.Size {
			off = 0
		} else {
			off = info.Size + off
		}
	}

	req, err := http.NewRequest("GET", restPath(b.url, h), nil)
	if err != nil {
		return 0, errors.Wrap(err, "http.NewRequest")
	}
	// only request the byte range that fits into p
	req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))))

	// take a token from the channel to limit concurrent connections,
	// return it as soon as the request is done
	<-b.connChan
	resp, err := b.client.Do(req)
	b.connChan <- struct{}{}

	// resp may be non-nil even when err != nil; drain and close the body in
	// all cases so the connection can be reused
	if resp != nil {
		defer func() {
			io.Copy(ioutil.Discard, resp.Body)
			e := resp.Body.Close()

			// a Close error is only reported when nothing else failed
			if err == nil {
				err = errors.Wrap(e, "Close")
			}
		}()
	}

	if err != nil {
		return 0, errors.Wrap(err, "client.Do")
	}

	// 206 (Partial Content) is the expected answer to a Range request
	if resp.StatusCode != 200 && resp.StatusCode != 206 {
		return 0, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
	}

	return io.ReadFull(resp.Body, p)
}
// Create creates all the necessary files and directories for a new sftp
// backend at dir. Afterwards a new config blob should be created. `dir` must
// be delimited by forward slashes ("/"), which is required by sftp.
func Create(dir string, program string, args ...string) (*SFTP, error) {
	debug.Log("%v %v", program, args)
	sftp, err := startClient(program, args...)
	if err != nil {
		return nil, err
	}

	// test if config file already exists
	_, err = sftp.c.Lstat(Join(dir, backend.Paths.Config))
	if err == nil {
		return nil, errors.New("config file already exists")
	}

	// create paths for data, refs and temp blobs
	for _, d := range paths(dir) {
		err = sftp.mkdirAll(d, backend.Modes.Dir)
		if err != nil {
			return nil, err
		}
	}

	// close the connection used for setup; Open below establishes a new one
	err = sftp.Close()
	if err != nil {
		return nil, errors.Wrap(err, "Close")
	}

	// open backend
	return Open(dir, program, args...)
}
// writeHeader constructs and writes the header to wr. func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) { for _, b := range p.blobs { entry := headerEntry{ Length: uint32(b.Length), ID: b.ID, } switch b.Type { case restic.DataBlob: entry.Type = 0 case restic.TreeBlob: entry.Type = 1 default: return 0, errors.Errorf("invalid blob type %v", b.Type) } err := binary.Write(wr, binary.LittleEndian, entry) if err != nil { return bytesWritten, errors.Wrap(err, "binary.Write") } bytesWritten += entrySize } return }
// ParseConfig parses the string s and extracts the s3 config. The two
// supported configuration formats are s3://host/bucketname/prefix and
// s3:host:bucketname/prefix. The host can also be a valid s3 region
// name. If no prefix is given the prefix "restic" will be used.
func ParseConfig(s string) (interface{}, error) {
	switch {
	case strings.HasPrefix(s, "s3:http"):
		// assume that a URL has been specified, parse it and
		// use the host as the endpoint and the path as the
		// bucket name and prefix
		url, err := url.Parse(s[3:])
		if err != nil {
			return nil, errors.Wrap(err, "url.Parse")
		}

		if url.Path == "" {
			return nil, errors.New("s3: bucket name not found")
		}

		// drop the leading slash, then split into bucket name and prefix
		path := strings.SplitN(url.Path[1:], "/", 2)
		return createConfig(url.Host, path, url.Scheme == "http")
	case strings.HasPrefix(s, "s3://"):
		s = s[5:]
	case strings.HasPrefix(s, "s3:"):
		s = s[3:]
	default:
		return nil, errors.New("s3: invalid format")
	}
	// use the first entry of the path as the endpoint and the
	// remainder as bucket name and prefix
	path := strings.SplitN(s, "/", 3)
	return createConfig(path[0], path[1:], false)
}
// readHeader reads the header at the end of rd. size is the length of the // whole data accessible in rd. func readHeader(rd io.ReaderAt, size int64) ([]byte, error) { hl, err := readHeaderLength(rd, size) if err != nil { return nil, err } if int64(hl) > size-int64(binary.Size(hl)) { return nil, errors.New("header is larger than file") } if int64(hl) > maxHeaderSize { return nil, errors.New("header is larger than maxHeaderSize") } buf := make([]byte, int(hl)) n, err := rd.ReadAt(buf, size-int64(hl)-int64(binary.Size(hl))) if err != nil { return nil, errors.Wrap(err, "ReadAt") } if n != len(buf) { return nil, errors.New("not enough bytes read") } return buf, nil }
// findPacker returns a packer for a new blob of size bytes. Either a new one is
// created or one is returned that already has some blobs.
func (r *packerManager) findPacker(size uint) (packer *pack.Packer, err error) {
	r.pm.Lock()
	defer r.pm.Unlock()

	// search for a suitable packer
	if len(r.packs) > 0 {
		debug.Log("searching packer for %d bytes\n", size)
		for i, p := range r.packs {
			// only reuse a pack when the new blob still fits below the
			// maximum pack size
			if p.Size()+size < maxPackSize {
				debug.Log("found packer %v", p)
				// remove from list
				r.packs = append(r.packs[:i], r.packs[i+1:]...)
				return p, nil
			}
		}
	}

	// no suitable packer found, return new
	debug.Log("create new pack for %d bytes", size)
	tmpfile, err := ioutil.TempFile("", "restic-temp-pack-")
	if err != nil {
		return nil, errors.Wrap(err, "ioutil.TempFile")
	}

	return pack.NewPacker(r.key, tmpfile), nil
}
// DecodeOldIndex loads and unserializes an index in the old format from rd. func DecodeOldIndex(rd io.Reader) (idx *Index, err error) { debug.Log("Start decoding old index") list := []*packJSON{} dec := json.NewDecoder(rd) err = dec.Decode(&list) if err != nil { debug.Log("Error %#v", err) return nil, errors.Wrap(err, "Decode") } idx = NewIndex() for _, pack := range list { for _, blob := range pack.Blobs { idx.store(restic.PackedBlob{ Blob: restic.Blob{ Type: blob.Type, ID: blob.ID, Offset: blob.Offset, Length: blob.Length, }, PackID: pack.ID, }) } } idx.final = true debug.Log("done") return idx, nil }
// Dump writes the pretty-printed JSON representation of the index to w. func (idx *Index) Dump(w io.Writer) error { debug.Log("dumping index") idx.m.Lock() defer idx.m.Unlock() list, err := idx.generatePackList() if err != nil { return err } outer := jsonIndex{ Supersedes: idx.Supersedes(), Packs: list, } buf, err := json.MarshalIndent(outer, "", " ") if err != nil { return err } _, err = w.Write(append(buf, '\n')) if err != nil { return errors.Wrap(err, "Write") } debug.Log("done") return nil }
// Save stores data in the backend at the handle.
func (be s3) Save(h restic.Handle, p []byte) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	debug.Log("%v with %d bytes", h, len(p))

	path := be.s3path(h.Type, h.Name)

	// Check key does not already exist
	_, err = be.client.StatObject(be.bucketname, path)
	if err == nil {
		debug.Log("%v already exists", h)
		return errors.New("key already exists")
	}

	// take a token from the channel to limit concurrent uploads, return it
	// when the upload is done
	<-be.connChan
	defer func() {
		be.connChan <- struct{}{}
	}()

	debug.Log("PutObject(%v, %v, %v, %v)", be.bucketname, path, int64(len(p)), "binary/octet-stream")
	n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream")
	debug.Log("%v -> %v bytes, err %#v", path, n, err)

	return errors.Wrap(err, "client.PutObject")
}
func (node Node) createDirAt(path string) error { err := fs.Mkdir(path, node.Mode) if err != nil && !os.IsExist(err) { return errors.Wrap(err, "Mkdir") } return nil }
// OpenKey tries do decrypt the key specified by name with the given password.
func OpenKey(s *Repository, name string, password string) (*Key, error) {
	k, err := LoadKey(s, name)
	if err != nil {
		debug.Log("LoadKey(%v) returned error %v", name[:12], err)
		return nil, err
	}

	// check KDF
	if k.KDF != "scrypt" {
		return nil, errors.New("only supported KDF is scrypt()")
	}

	// derive user key from the password and the stored KDF parameters
	params := crypto.KDFParams{
		N: k.N,
		R: k.R,
		P: k.P,
	}
	k.user, err = crypto.KDF(params, k.Salt, password)
	if err != nil {
		return nil, errors.Wrap(err, "crypto.KDF")
	}

	// decrypt master keys
	buf := make([]byte, len(k.Data))
	n, err := crypto.Decrypt(k.user, buf, k.Data)
	if err != nil {
		return nil, err
	}
	buf = buf[:n]

	// restore json
	k.master = &crypto.Key{}
	err = json.Unmarshal(buf, k.master)
	if err != nil {
		debug.Log("Unmarshal() returned error %v", err)
		return nil, errors.Wrap(err, "Unmarshal")
	}
	k.name = name

	// a wrong password yields keys that fail the validity check
	if !k.Valid() {
		return nil, errors.New("Invalid key for repository")
	}

	return k, nil
}