// This will show a simple stream encoder where we encode from // a []io.Reader which contain a reader for each shard. // // Input and output can be exchanged with files, network streams // or what may suit your needs. func ExampleStreamEncoder() { dataShards := 5 parityShards := 2 // Create a StreamEncoder with the number of data and // parity shards. rs, err := reedsolomon.NewStream(dataShards, parityShards) if err != nil { log.Fatal(err) } shardSize := 50000 // Create input data shards. input := make([][]byte, dataShards) for s := range input { input[s] = make([]byte, shardSize) fillRandom(input[s]) } // Convert our buffers to io.Readers readers := make([]io.Reader, dataShards) for i := range readers { readers[i] = io.Reader(bytes.NewBuffer(input[i])) } // Create our output io.Writers out := make([]io.Writer, parityShards) for i := range out { out[i] = ioutil.Discard } // Encode from input to output. err = rs.Encode(readers, out) if err != nil { log.Fatal(err) } fmt.Println("ok") // OUTPUT: ok }
// main splits the file named on the command line into *dataShards
// data-shard files plus *parShards parity-shard files, each written
// as "<basename>.<index>" into *outDir (or the input's directory).
func main() {
	// Parse command line parameters.
	flag.Parse()
	args := flag.Args()
	if len(args) != 1 {
		fmt.Fprintf(os.Stderr, "Error: No input filename given\n")
		flag.Usage()
		os.Exit(1)
	}
	// NOTE(review): bound looks like it should track the codec's shard
	// limit; confirm whether 257 (rather than 256) is intentional.
	if *dataShards > 257 {
		fmt.Fprintf(os.Stderr, "Error: Too many data shards\n")
		os.Exit(1)
	}
	fname := args[0]

	// Create encoding matrix.
	enc, err := reedsolomon.NewStream(*dataShards, *parShards)
	checkErr(err)

	fmt.Println("Opening", fname)
	f, err := os.Open(fname)
	checkErr(err)

	// Stat gives the total size, which Split needs to divide evenly.
	instat, err := f.Stat()
	checkErr(err)

	shards := *dataShards + *parShards
	out := make([]*os.File, shards)

	// Create the resulting files — one per shard, named "<file>.<i>".
	dir, file := filepath.Split(fname)
	if *outDir != "" {
		dir = *outDir
	}
	for i := range out {
		outfn := fmt.Sprintf("%s.%d", file, i)
		fmt.Println("Creating", outfn)
		out[i], err = os.Create(filepath.Join(dir, outfn))
		checkErr(err)
	}

	// Split into files. Only the first *dataShards files receive data;
	// the remaining files are filled with parity below.
	data := make([]io.Writer, *dataShards)
	for i := range data {
		data[i] = out[i]
	}
	// Do the split
	err = enc.Split(f, data, instat.Size())
	checkErr(err)

	// Close and re-open the files. Encode needs readers positioned at
	// the start of each data shard, so the just-written files are
	// reopened for reading.
	input := make([]io.Reader, *dataShards)

	for i := range data {
		out[i].Close()
		f, err := os.Open(out[i].Name())
		checkErr(err)
		input[i] = f
		// Deferred until main returns — acceptable here since the
		// process exits right after encoding.
		defer f.Close()
	}

	// Create parity output writers
	parity := make([]io.Writer, *parShards)
	for i := range parity {
		parity[i] = out[*dataShards+i]
		defer out[*dataShards+i].Close()
	}

	// Encode parity
	err = enc.Encode(input, parity)
	checkErr(err)
	fmt.Printf("File split into %d data + %d parity shards.\n", *dataShards, *parShards)
}
// main verifies the shard files belonging to the file named on the
// command line, reconstructs any missing/corrupt shards, and joins the
// data shards back into *outFile (or fname itself when the flag is
// empty). Stream readers are single-use, so openInput is called again
// before every pass over the shards.
func main() {
	// Parse flags
	flag.Parse()
	args := flag.Args()
	if len(args) != 1 {
		fmt.Fprintf(os.Stderr, "Error: No filenames given\n")
		flag.Usage()
		os.Exit(1)
	}
	fname := args[0]

	// Create matrix
	enc, err := reedsolomon.NewStream(*dataShards, *parShards)
	checkErr(err)

	// Open the inputs
	shards, size, err := openInput(*dataShards, *parShards, fname)
	checkErr(err)

	// Verify the shards.
	// NOTE(review): err from this Verify is deliberately not passed to
	// checkErr — a missing shard is the normal trigger for the
	// reconstruction branch below, not a fatal condition. Confirm that
	// is the intent before "fixing" it.
	ok, err := enc.Verify(shards)
	if ok {
		fmt.Println("No reconstruction needed")
	} else {
		fmt.Println("Verification failed. Reconstructing data")
		// Verify consumed the readers; re-open them for Reconstruct.
		shards, size, err = openInput(*dataShards, *parShards, fname)
		checkErr(err)
		// Create out destination writers — only for the shards that are
		// missing (nil); present shards keep a nil writer.
		out := make([]io.Writer, len(shards))
		for i := range out {
			if shards[i] == nil {
				dir, _ := filepath.Split(fname)
				// NOTE(review): fname may already contain dir, so the
				// Join below can prepend the directory twice; the
				// encoder builds names from the base name instead —
				// verify against openInput's expected paths.
				outfn := fmt.Sprintf("%s.%d", fname, i)
				fmt.Println("Creating", outfn)
				out[i], err = os.Create(filepath.Join(dir, outfn))
				checkErr(err)
			}
		}
		err = enc.Reconstruct(shards, out)
		if err != nil {
			fmt.Println("Reconstruct failed -", err)
			os.Exit(1)
		}
		// Close output. Only the slots we created are non-nil; the
		// type assertion recovers the *os.File to close it.
		for i := range out {
			if out[i] != nil {
				err := out[i].(*os.File).Close()
				checkErr(err)
			}
		}
		// Re-open everything once more and re-verify the repaired set.
		shards, size, err = openInput(*dataShards, *parShards, fname)
		ok, err = enc.Verify(shards)
		if !ok {
			fmt.Println("Verification failed after reconstruction, data likely corrupted:", err)
			os.Exit(1)
		}
		checkErr(err)
	}
	// Join the shards and write them
	outfn := *outFile
	if outfn == "" {
		outfn = fname
	}
	fmt.Println("Writing data to", outfn)
	f, err := os.Create(outfn)
	checkErr(err)

	// Fresh readers again: the verify pass above consumed the streams.
	shards, size, err = openInput(*dataShards, *parShards, fname)
	checkErr(err)
	// We don't know the exact filesize, so the upper bound
	// dataShards*shardSize is written (may include padding).
	err = enc.Join(f, shards, int64(*dataShards)*size)
	checkErr(err)
}