func main() {
	dat, err := ioutil.ReadFile(os.Args[0])
	check(err)

	// Calculate sums
	s256 := sha256.Sum256(dat)
	s512 := sha512.Sum512(dat)

	// Calculate multihash
	h, err := multihash.Encode(s256[:], multihash.SHA2_256)
	check(err)
	multiSha256 := b58.Encode(h)
	fmt.Println("Multihash256/58: ", len(multiSha256), string(multiSha256))

	// Base 58 encoding
	s256b58 := b58.Encode(s256[:])
	s512b58 := b58.Encode(s512[:])

	// Base 64 encoding
	enc := base64.RawURLEncoding
	s256b64 := enc.EncodeToString(s256[:])
	s512b64 := enc.EncodeToString(s512[:])

	fmt.Println("SHA256/base58 : ", len(s256b58), string(s256b58))
	fmt.Println("SHA256/base64 : ", len(s256b64), string(s256b64))
	fmt.Println("SHA512/base58 : ", len(s512b58), string(s512b58))
	fmt.Println("SHA512/base64 : ", len(s512b64), string(s512b64))
}
func run() error {
	opts, err := parseOpts()
	if err != nil {
		return err
	}
	if opts.keygen {
		k, err := senc.RandomKey()
		if err != nil {
			return err
		}
		fmt.Println(b58.Encode(k))
		return nil
	}
	var r io.Reader
	switch {
	case opts.encrypt:
		r, err = senc.Encrypt(opts.key, os.Stdin)
	case opts.decrypt:
		r, err = senc.Decrypt(opts.key, os.Stdin)
	default:
		return fmt.Errorf("must choose either -e or -d")
	}
	if err != nil {
		return err
	}
	_, err = io.Copy(os.Stdout, r)
	return err
}
// ConvertKey returns a B58 encoded Datastore key
// TODO: this is hacky because it encodes every path component. some
// path components may be proper strings already...
func (b58KeyConverter) ConvertKey(dsk ds.Key) ds.Key {
	k := ds.NewKey("/")
	for _, n := range dsk.Namespaces() {
		k = k.ChildString(b58.Encode([]byte(n)))
	}
	return k
}
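// A minimal usage sketch for ConvertKey above, assuming the go-datastore
// package is imported as ds and that b58KeyConverter is a plain struct type;
// convertKeyExample is a hypothetical helper, not part of the original code.
// Each namespace of the input key is replaced by its base58 encoding.
func convertKeyExample() {
	in := ds.NewKey("/blocks/somekey")
	out := b58KeyConverter{}.ConvertKey(in)
	fmt.Println(in, "->", out) // every path component is now base58-encoded
}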
func Encode(encoding string, hash mh.Multihash) (string, error) {
	switch encoding {
	case "raw":
		return string(hash), nil
	case "hex":
		return hex.EncodeToString(hash), nil
	case "base58":
		return base58.Encode(hash), nil
	case "base64":
		return base64.StdEncoding.EncodeToString(hash), nil
	default:
		return "", fmt.Errorf("unknown encoding: %s", encoding)
	}
}
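// A minimal usage sketch for Encode above, assuming the go-multihash package
// is imported as mh; encodeExample is a hypothetical helper, not part of the
// original code.
func encodeExample(data []byte) (string, error) {
	// Hash the data into a multihash (SHA2-256, default digest length).
	h, err := mh.Sum(data, mh.SHA2_256, -1)
	if err != nil {
		return "", err
	}
	// Render the multihash in base58, the usual textual form for content hashes.
	return Encode("base58", h)
}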
func entryToLine(prefix string, e Entry) string {
	if e.Drop {
		return ""
	}
	path := e.Path
	if prefix != "" {
		path = filepath.Join(prefix, path)
		if strings.HasSuffix(e.Path, "/") {
			path = path + "/"
		}
	}
	if e.Uuid != "" || e.Device != 0 || e.Inode != 0 {
		return "-\n" +
			formatKeyVal("p", path) +
			formatKeyVal("h", base58.Encode(e.Hash)) +
			formatKeyVal("u", e.Uuid) +
			formatKeyVal("I", DeviceInodeString(e.Device, e.Inode)) +
			"\n"
	} else {
		return fmt.Sprintf("%s\t%s\n", base58.Encode(e.Hash), EncodePath(path))
	}
}
func streamCipherReaderForKeyGenerator(kg KeyDataGenerator, r io.ReadCloser) (stream io.ReadCloser, key string, err error) {
	keyData := make([]byte, 1+keyTypeDefaultLen)
	keyData[0] = keyTypeDefault
	r2, err := kg.GenerateKeyData(r, keyData[1:])
	if err != nil {
		r.Close()
		return nil, "", err
	}
	// Ignore error below, we control arguments and pass valid sizes
	stream, _ = streamCipherReaderForKeyData(keyData[0], keyData[1:], r2)
	key = base58.Encode(keyData)
	return stream, key, nil
}
func (ks *keyset) generate() error {
	var err error
	ks.sk, ks.pk, err = tu.RandTestKeyPair(512)
	if err != nil {
		return err
	}
	bpk, err := ks.pk.Bytes()
	if err != nil {
		return err
	}
	ks.hpk = string(u.Hash(bpk))
	ks.hpkp = b58.Encode([]byte(ks.hpk))
	return nil
}
// GenIdentity creates a random keypair and returns the associated
// peerID and private key encoded to match config values
func GenIdentity() (string, string, error) {
	k, pub, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 512, u.NewTimeSeededRand())
	if err != nil {
		return "", "", err
	}
	b, err := k.Bytes()
	if err != nil {
		return "", "", err
	}
	privkey := b64.StdEncoding.EncodeToString(b)
	pubkeyb, err := pub.Bytes()
	if err != nil {
		return "", "", err
	}
	id := b58.Encode(u.Hash(pubkeyb))
	return id, privkey, nil
}
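// A minimal usage sketch for GenIdentity above; printIdentity is a
// hypothetical helper, not part of the original code. The peer ID is the
// base58-encoded multihash of the public key, and the private key is
// base64-encoded so it can be stored directly in a config file.
func printIdentity() error {
	id, privkey, err := GenIdentity()
	if err != nil {
		return err
	}
	fmt.Println("PeerID: ", id)
	fmt.Println("PrivKey:", privkey)
	return nil
}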
func TestListenAnyDialAnyUnix(t *testing.T) {
	assert := assert.New(t)
	bytes := make([]byte, 8)
	_, err := rand.Read(bytes)
	assert.Nil(err)
	ipc_file := "/tmp/" + b58.Encode(bytes)
	ipc := "unix://" + ipc_file
	results := make(chan error, 1)
	listener, err := listenAny(ipc, tls.Config{})
	assert.Nil(err)
	go func() {
		_, lerr := listener.Accept()
		results <- lerr
	}()
	_, err = dialAny(ipc, tls.Config{})
	assert.Nil(err)
	assert.Nil(<-results)
	err = os.Remove(ipc_file)
	assert.Nil(err)
}
// Return a conflict filename to use. Return the empty string if the conflict
// file already exists for the same hash.
func FindConflictFileName(path string, digest []byte) string {
	hashname := base58.Encode(digest)
	hashext := ""
	if len(hashname) != 0 {
		hashext = "." + hashname
	}
	ext := filepath.Ext(path)
	dstname := fmt.Sprintf("%s%s%s", path, hashext, ext)
	i := 0
	for {
		info, err := os.Lstat(dstname)
		if os.IsNotExist(err) {
			return dstname
		}
		hash, err := GetHash(dstname, info, false)
		if err == nil && bytes.Equal(hash, digest) {
			return ""
		}
		dstname = fmt.Sprintf("%s%s.%d%s", path, hashext, i, ext)
		i++
	}
}
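// A minimal usage sketch for FindConflictFileName above; saveConflictCopy is
// a hypothetical helper, not part of the original code. For "notes.txt" the
// candidate names follow the pattern "notes.txt.<base58 digest>.txt", then
// "notes.txt.<base58 digest>.0.txt", and so on until a free name is found.
func saveConflictCopy(path string, digest, data []byte) error {
	dst := FindConflictFileName(path, digest)
	if dst == "" {
		// A conflict file with the same content already exists.
		return nil
	}
	return ioutil.WriteFile(dst, data, 0644)
}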
func (ks *keyset) load(hpkp, skBytesStr string) error {
	skBytes, err := base64.StdEncoding.DecodeString(skBytesStr)
	if err != nil {
		return err
	}
	ks.sk, err = ic.UnmarshalPrivateKey(skBytes)
	if err != nil {
		return err
	}
	ks.pk = ks.sk.GetPublic()
	bpk, err := ks.pk.Bytes()
	if err != nil {
		return err
	}
	ks.hpk = string(u.Hash(bpk))
	ks.hpkp = b58.Encode([]byte(ks.hpk))
	if ks.hpkp != hpkp {
		return fmt.Errorf("hpkp doesn't match key. %s", hpkp)
	}
	return nil
}
func mainDupes(args []string) int {
	f := flag.NewFlagSet("dupes", flag.ExitOnError)
	opt_show_links := f.Bool("l", false, "Show group of files that share the same inode")
	opt_progress := f.Bool("p", false, "Show progress")
	opt_hash := f.Bool("c", false, "Check real hash in case the file is updated")
	opt_dedup := f.Bool("d", false, "Deduplicate files (make links)")
	f.Usage = func() {
		fmt.Print(dupesUsage)
		f.PrintDefaults()
	}
	f.Parse(args)

	srcs := f.Args()
	if len(srcs) == 0 {
		srcs = append(srcs, ".")
	}

	dupes := map[string]sameFile{}
	num := 0
	errors := 0

	for _, src := range srcs {
		e := repo.Walk(src, func(path string, info os.FileInfo) error {
			// Skip symlinks
			if info.Mode()&os.ModeSymlink != 0 {
				return nil
			}
			hash, err := repo.GetHash(path, info, *opt_hash)
			if err != nil {
				return err
			}
			if hash == nil {
				return nil
			}
			sys, ok := info.Sys().(*syscall.Stat_t)
			if !ok {
				// No stat information: fall back to zero device/inode
				sys = &syscall.Stat_t{}
			}
			f := dupes[string(hash)]
			f.hash = hash
			f.paths = append(f.paths, path)
			f.inodes = append(f.inodes, sys.Ino)
			f.devices = append(f.devices, sys.Dev)
			dupes[string(hash)] = f
			num = num + 1
			if *opt_progress {
				fmt.Printf("\r\x1b[2K%d %s\r", num, path)
			}
			return nil
		}, func(path string, info os.FileInfo, err error) bool {
			fmt.Fprintf(os.Stderr, "%s: %v\n", path, err.Error())
			return true
		})
		errors = errors + len(e)
	}

	for _, f := range dupes {
		if len(f.paths) <= 1 {
			continue
		}
		files := map[uint64][]string{}
		for i, ino := range f.inodes {
			files[ino] = append(files[ino], f.paths[i])
		}
		if len(files) == 1 && !*opt_show_links {
			continue
		}
		fmt.Println()
		hash := base58.Encode(f.hash)
		for _, paths := range files {
			for _, path := range paths {
				fmt.Printf("%s\t%d\t%s\n", hash, len(paths), path)
			}
		}
		if len(files) > 1 && *opt_dedup {
			err := deduplicate(f)
			if err != nil {
				fmt.Fprintf(os.Stderr, "%s", err.Error())
				errors = errors + 1
			}
		}
	}

	if errors > 0 {
		return 1
	}
	return 0
}
// MarshalJSON returns a JSON-encoded Key (string)
func (k *Key) MarshalJSON() ([]byte, error) {
	return json.Marshal(b58.Encode([]byte(*k)))
}
// B58KeyEncode returns Key in a b58 encoded string
func B58KeyEncode(k Key) string {
	return b58.Encode([]byte(k))
}
func (e *Entry) HashText() string {
	return base58.Encode(e.Hash)
}
func mainCheck(args []string) int {
	f := flag.NewFlagSet("check", flag.ExitOnError)
	opt_all := f.Bool("a", false, "Check all files, including modified")
	f.Usage = func() {
		fmt.Print(checkUsage)
		f.PrintDefaults()
	}
	f.Parse(args)

	dir := f.Arg(0)
	if dir == "" {
		dir = "."
	}

	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Skip .dirstore/ at root
		if filepath.Base(path) == attrs.DirStoreName && filepath.Dir(path) == dir && info.IsDir() {
			return filepath.SkipDir
		} else if info.IsDir() {
			return nil
		}
		hashTimeStr, err := attrs.Get(path, repo.XattrHashTime)
		if err != nil {
			return nil
		}
		hashTime, err := time.Parse(time.RFC3339Nano, string(hashTimeStr))
		if err != nil {
			return err
		}
		timeEqual := hashTime == info.ModTime()
		if *opt_all || timeEqual {
			hash, err := attrs.Get(path, repo.XattrHash)
			if err != nil {
				return err
			}
			digest, err := repo.HashFile(path, info)
			if err != nil {
				return err
			}
			hashEqual := bytes.Equal(hash, digest)
			if !timeEqual && !hashEqual {
				fmt.Printf("+\t%s\t%s\n", base58.Encode(digest), path)
			} else if !hashEqual {
				fmt.Printf("!\t%s\t%s\n", base58.Encode(digest), path)
			} else if !timeEqual {
				fmt.Printf("=\t%s\t%s\n", base58.Encode(digest), path)
			}
		}
		return nil
	})
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v", err)
		return 1
	}
	return 0
}
func (r *Par2Repo) HashFile(digest []byte) string {
	return filepath.Join(r.repoPath, base58.Encode(digest))
}
// IDB58Encode returns b58-encoded string
func IDB58Encode(id ID) string {
	return b58.Encode([]byte(id))
}
func mainInfo(args []string) int {
	f := flag.NewFlagSet("info", flag.ExitOnError)
	opt_check := f.Bool("c", false, "Run integrity check")
	f.Usage = func() {
		fmt.Print(infoUsage)
		f.PrintDefaults()
	}
	f.Parse(args)

	dir := f.Arg(0)
	if dir == "" {
		dir = "."
	}

	rep := repo.GetRepo(dir)
	status := 0
	first := true

	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			fmt.Fprintf(os.Stderr, "%s: %v\n", path, err.Error())
			status = 1
			return err
		}
		// Skip .dirstore/ at root
		if filepath.Base(path) == attrs.DirStoreName && filepath.Dir(path) == dir && info.IsDir() {
			return filepath.SkipDir
		} else if !info.Mode().IsRegular() {
			return nil
		}

		if first {
			first = false
		} else {
			fmt.Println()
		}

		fmt.Printf("File: %s\n", path)

		if conflict := repo.ConflictFile(path); conflict != "" {
			fmt.Printf("Conflict With: %s\n", conflict)
		}
		for _, alt := range repo.ConflictFileAlternatives(path) {
			fmt.Printf("Conflict Alternatives: %s\n", alt)
		}

		var realHash mh.Multihash
		if *opt_check {
			realHash, err = repo.HashFile(path, info)
			if err != nil {
				fmt.Fprintf(os.Stderr, "%s: %v\n", path, err)
				return nil
			}
		}

		hashTime, err := repo.GetHashTime(path)
		if repo.IsNoData(err) {
			if *opt_check {
				fmt.Printf("Actual Hash: %s\n", base58.Encode(realHash))
			}
			fmt.Printf("Status: New\n")
		} else {
			if err != nil {
				fmt.Fprintf(os.Stderr, "%s: %v\n", path, err.Error())
				return nil
			}
			fmt.Printf("Hash Time: %v\n", hashTime.Format(time.RFC3339Nano))
			hash, err := attrs.Get(path, repo.XattrHash)
			if err != nil {
				fmt.Fprintf(os.Stderr, "%s: %v\n", path, err)
				return nil
			}
			var par2exists = false
			if rep != nil {
				par2exists, _ = rep.Par2Exists(hash)
			}
			fmt.Printf("Recorded Hash: %s (redundancy %s)\n", base58.Encode(hash), boolToAvailableStr(par2exists))
			if *opt_check {
				par2exists = false
				if rep != nil {
					par2exists, _ = rep.Par2Exists(realHash)
				}
				fmt.Printf("Actual Hash: %s (redundancy %s)\n", base58.Encode(realHash), boolToAvailableStr(par2exists))
			}
			if hashTime != info.ModTime() {
				fmt.Printf("Status: Dirty\n")
			} else {
				if *opt_check && !bytes.Equal(realHash, hash) {
					fmt.Printf("Status: Corrupted\n")
				} else {
					fmt.Printf("Status: Clean\n")
				}
			}
		}
		return nil
	})
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v", err)
		os.Exit(1)
	}
	return status
}
func mainStatus(args []string) int {
	f := flag.NewFlagSet("status", flag.ExitOnError)
	opt_no_par2 := f.Bool("n", false, "Do not show files missing PAR2 redundancy data")
	opt_show_only_hash := f.Bool("c", false, "Show only unchanged committed files with their hash")
	opt_no_docignore := f.Bool("no-docignore", false, "Don't treat .docignore files specially")
	f.Usage = func() {
		fmt.Print(usageStatus)
		f.PrintDefaults()
	}
	f.Parse(args)

	dir := f.Arg(0)
	if dir == "" {
		dir = "."
	}

	rep := repo.GetRepo(dir)
	status := 0

	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			fmt.Fprintf(os.Stderr, "%s: %v\n", path, err.Error())
			status = 1
			return err
		}
		// Skip directories containing an empty .docignore file
		if !*opt_no_docignore && ignore.IsIgnored(path) {
			return filepath.SkipDir
		}
		// Skip .dirstore/ at root
		if filepath.Base(path) == attrs.DirStoreName && filepath.Dir(path) == dir && info.IsDir() {
			return filepath.SkipDir
		} else if !info.Mode().IsRegular() {
			return nil
		}

		if *opt_show_only_hash {
			hash, err := repo.GetHash(path, info, false)
			if err != nil {
				fmt.Fprintf(os.Stderr, "%s: %v\n", path, err.Error())
				return nil
			}
			if hash != nil {
				fmt.Printf("%s\t%s\n", base58.Encode(hash), path)
			}
		} else {
			var conflict string
			if repo.ConflictFile(path) != "" {
				conflict = " c"
			} else if len(repo.ConflictFileAlternatives(path)) > 0 {
				conflict = " C"
			}

			hashTime, err := repo.GetHashTime(path)
			if repo.IsNoData(err) {
				if info.Mode()&os.FileMode(0200) == 0 {
					fmt.Printf("?%s (ro)\t%s\n", conflict, path)
				} else {
					fmt.Printf("?%s\t%s\n", conflict, path)
				}
				return nil
			} else if err != nil {
				fmt.Fprintf(os.Stderr, "%s: %v\n", path, err.Error())
				return nil
			}

			redundancy := "*"
			if rep != nil {
				digest, err := repo.GetHash(path, info, true)
				if err != nil {
					fmt.Fprintf(os.Stderr, "%s: %v\n", path, err.Error())
					return nil
				}
				if par2exists, _ := rep.Par2Exists(digest); par2exists {
					redundancy = ""
				}
			}

			if hashTime != info.ModTime() {
				fmt.Printf("+%s%s\t%s\n", conflict, redundancy, path)
			} else if conflict != "" || (redundancy != "" && !*opt_no_par2) {
				fmt.Printf("%s%s\t%s\n", conflict, redundancy, path)
			}
		}
		return nil
	})
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v", err)
		os.Exit(1)
	}
	return status
}
func mainSave(args []string) int {
	f := flag.NewFlagSet("save", flag.ExitOnError)
	opt_force := f.Bool("force", false, "Force writing xattrs on read only files")
	opt_nodocignore := f.Bool("no-docignore", false, "Don't respect .docignore")
	f.Usage = func() {
		fmt.Print(saveUsage)
		f.PrintDefaults()
	}
	f.Parse(args)

	dir := f.Arg(0)
	if dir == "" {
		dir = "."
	}

	dirstore := repo.GetRepo(dir)
	if dirstore == nil {
		fmt.Fprintf(os.Stderr, "%s: Could not find repository, please run doc init\n", dir)
		os.Exit(1)
	}

	status := 0

	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			fmt.Fprintf(os.Stderr, "%s: %v\n", path, err.Error())
			status = 1
			return err
		}
		if !*opt_nodocignore && ignore.IsIgnored(path) {
			return filepath.SkipDir
		}
		// Skip .dirstore/ at root
		if filepath.Base(path) == attrs.DirStoreName && filepath.Dir(path) == dir && info.IsDir() {
			return filepath.SkipDir
		} else if info.IsDir() || !info.Mode().IsRegular() {
			return nil
		}

		hashTime, err := repo.GetHashTime(path)
		if err != nil && !repo.IsNoData(err) {
			fmt.Fprintf(os.Stderr, "%s: %v\n", path, err.Error())
			return nil
		}

		var digest []byte
		if err != nil || hashTime != info.ModTime() {
			digest, err = commitFile(path, info, *opt_force)
			if err != nil {
				status = 1
				fmt.Fprintf(os.Stderr, "%s: %v\n", path, err.Error())
			} else if digest != nil {
				fmt.Printf("%s %s\n", base58.Encode(digest), path)
			}
		} else {
			digest, err = attrs.Get(path, repo.XattrHash)
			if err != nil {
				fmt.Fprintf(os.Stderr, "%s: %v\n", path, err)
				return nil
			}
		}

		err = dirstore.Create(path, digest)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%s: %v\n", path, err)
			return nil
		}
		return nil
	})
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v", err)
		os.Exit(1)
	}
	return status
}