Example #1
func DirectAddCat(data []byte, conf testutil.LatencyConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	const numPeers = 2

	// create network
	mn, err := mocknet.FullMeshLinked(ctx, numPeers)
	if err != nil {
		return err
	}
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency: conf.NetworkLatency,
		// TODO add to conf. This is tricky because we want 0 values to be functional.
		Bandwidth: math.MaxInt32,
	})

	peers := mn.Peers()
	if len(peers) < numPeers {
		return errors.New("test initialization error")
	}

	adder, err := core.NewIPFSNode(ctx, core.ConfigOption(MocknetTestRepo(peers[0], mn.Host(peers[0]), conf, core.DHTOption)))
	if err != nil {
		return err
	}
	defer adder.Close()
	catter, err := core.NewIPFSNode(ctx, core.ConfigOption(MocknetTestRepo(peers[1], mn.Host(peers[1]), conf, core.DHTOption)))
	if err != nil {
		return err
	}
	defer catter.Close()

	bs1 := []peer.PeerInfo{adder.Peerstore.PeerInfo(adder.Identity)}
	bs2 := []peer.PeerInfo{catter.Peerstore.PeerInfo(catter.Identity)}

	if err := catter.Bootstrap(core.BootstrapConfigWithPeers(bs1)); err != nil {
		return err
	}
	if err := adder.Bootstrap(core.BootstrapConfigWithPeers(bs2)); err != nil {
		return err
	}

	added, err := coreunix.Add(adder, bytes.NewReader(data))
	if err != nil {
		return err
	}

	readerCatted, err := coreunix.Cat(catter, added)
	if err != nil {
		return err
	}

	// verify
	bufout := new(bytes.Buffer)
	io.Copy(bufout, readerCatted)
	if 0 != bytes.Compare(bufout.Bytes(), data) {
		return errors.New("catted data does not match added data")
	}
	return nil
}
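The verification block above ignores the error returned by io.Copy and uses bytes.Compare where bytes.Equal is the usual idiom; the same applies to the verification blocks in the later examples. A slightly stricter drop-in variant (a sketch only, not code from the project) could look like this:

	// copy the catted stream, checking the copy error, then compare byte-for-byte
	bufout := new(bytes.Buffer)
	if _, err := io.Copy(bufout, readerCatted); err != nil {
		return err
	}
	if !bytes.Equal(bufout.Bytes(), data) {
		return errors.New("catted data does not match added data")
	}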
Example #2
func runFileCattingWorker(ctx context.Context, n *core.IpfsNode) error {
	conf, err := config.Init(ioutil.Discard, *nBitsForKeypair)
	if err != nil {
		return err
	}

	r := &repo.Mock{
		D: ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore())),
		C: *conf,
	}
	dummy, err := core.NewNode(ctx, &core.BuildCfg{
		Repo: r,
	})
	if err != nil {
		return err
	}

	errs := make(chan error)

	go func() {
		defer dummy.Close()
		var i int64 = 1
		for {
			buf := new(bytes.Buffer)
			if err := random.WritePseudoRandomBytes(sizeOfIthFile(i), buf, *seed); err != nil {
				errs <- err
			}
			// add to a dummy node to discover the key
			k, err := coreunix.Add(dummy, bytes.NewReader(buf.Bytes()))
			if err != nil {
				errs <- err
			}
			e := elog.EventBegin(ctx, "cat", logging.LoggableF(func() map[string]interface{} {
				return map[string]interface{}{
					"key":       k,
					"localPeer": n.Identity,
				}
			}))
			if r, err := coreunix.Cat(ctx, n, k); err != nil {
				e.Done()
				log.Printf("failed to cat file. seed: %d #%d key: %s err: %s", *seed, i, k, err)
			} else {
				log.Println("found file", "seed", *seed, "#", i, "key", k, "size", unit.Information(sizeOfIthFile(i)))
				io.Copy(ioutil.Discard, r)
				e.Done()
				log.Println("catted file", "seed", *seed, "#", i, "key", k, "size", unit.Information(sizeOfIthFile(i)))
				i++
			}
			time.Sleep(time.Second)
		}
	}()

	err = <-errs
	if err != nil {
		log.Fatal(err)
	}

	return nil
}
Example #3
func clientHandlerSync(w http.ResponseWriter, ctx context.Context, n *core.IpfsNode, dspath string, targethash string, reloadindex chan *Entry) {
	target, err := peer.IDB58Decode(targethash)
	if err != nil {
		panic(err)
	}

	curentries := loadIndex(n, ctx, dspath)
	entrymap := make(map[string]bool)
	if len(curentries.Entries) != 0 {
		for i := range curentries.Entries {
			key := fmt.Sprintf("%v", curentries.Entries[i])
			//fmt.Println(key)
			//entrymap[curentries.Entries[i].Hash] = curentries.Entries[i]
			entrymap[key] = true
		}
	}

	fmt.Fprintln(w, "Syncing...", target)
	entrylist := getEntryList(n, target)

	for i := range entrylist.Entries {
		fmt.Fprintln(w, "Downloading ", entrylist.Entries[i].Name)
		reader, err := coreunix.Cat(ctx, n, entrylist.Entries[i].Hash)
		if err != nil {
			panic(err)
		}
		ioutil.ReadAll(reader)

		if len(curentries.Entries) != 0 {
			_, ok := entrymap[fmt.Sprintf("%v", entrylist.Entries[i])]
			if ok {
				fmt.Fprintln(w, "Already have", entrylist.Entries[i].Hash)
			} else {
				fmt.Fprintln(w, "Appending", entrylist.Entries[i].Hash)
				curentries.Entries = append(curentries.Entries, entrylist.Entries[i])
			}
		}

		//FIXME: potential data corruption because it could collide with the main startserver thread running
		err = pin(n, ctx, entrylist.Entries[i].Hash)
		if err != nil {
			panic(err)
		}
		fmt.Fprintln(w, "Pinned", entrylist.Entries[i].Hash, entrylist.Entries[i].Name)
	}

	if len(curentries.Entries) != 0 {
		saveIndex(curentries, dspath)
	} else {
		saveIndex(entrylist, dspath)
	}

	fmt.Fprintln(w, "Sync complete.")
	reloadindex <- &Entry{}
}
Example #4
File: cat.go Project: malei/go-ipfs
func cat(ctx context.Context, node *core.IpfsNode, paths []string) ([]io.Reader, uint64, error) {
	readers := make([]io.Reader, 0, len(paths))
	length := uint64(0)
	for _, fpath := range paths {
		read, err := coreunix.Cat(ctx, node, fpath)
		if err != nil {
			return nil, 0, err
		}
		readers = append(readers, read)
		length += uint64(read.Size())
	}
	return readers, length, nil
}
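A hedged sketch of how a caller might consume this helper's result, for instance when serving the concatenated content over HTTP; the http.ResponseWriter w here is an assumption, not part of the example:

	readers, length, err := cat(ctx, node, paths)
	if err != nil {
		return err
	}
	// announce the total size, then stream all paths back-to-back
	w.Header().Set("Content-Length", fmt.Sprintf("%d", length))
	if _, err := io.Copy(w, io.MultiReader(readers...)); err != nil {
		return err
	}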
Example #5
// Cat returns an io.Reader that reads from ipfs.
func Cat(node *Node, hash multihash.Multihash) (Reader, error) {
	nd, err := node.proc()
	if err != nil {
		return nil, err
	}

	reader, err := coreunix.Cat(node.Context, nd, hash.B58String())
	if err != nil {
		log.Warningf("ipfs cat: %v", err)
		return nil, err
	}

	return reader, nil
}
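A minimal usage sketch for this wrapper, assuming an already initialized *Node nd, a multihash.Multihash h, and that the returned Reader satisfies io.Reader (all three are assumptions, not part of the example):

	r, err := Cat(nd, h)
	if err != nil {
		return err
	}
	// drain the stream into memory and report how much was read
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	fmt.Printf("catted %d bytes\n", len(data))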
Example #6
func parseList(n *core.IpfsNode, ctx context.Context, hash string) ([]string, error) {
	fmt.Println("Fetching list")
	reader, err := coreunix.Cat(ctx, n, hash)
	if err != nil {
		return nil, err
	}

	rawbytes, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	list := strings.Split(string(rawbytes), "\n")
	fmt.Println("Parsed list")
	return list, nil
}
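Note that strings.Split on newline-terminated content leaves a trailing empty string, so callers will usually want to skip empty entries. A hedged usage sketch (listHash is a hypothetical variable):

	lines, err := parseList(n, ctx, listHash)
	if err != nil {
		return err
	}
	for _, entry := range lines {
		if entry == "" {
			continue // skip the empty element left after the final newline
		}
		fmt.Println("entry:", entry)
	}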
Example #7
func main() {
	nd, err := SetupIpfs()
	if err != nil {
		fmt.Println(err)
		return
	}

	if len(os.Args) < 2 {
		fmt.Println("Please pass in an argument!")
		return
	}
	keytofetch := os.Args[1]

	read, err := coreunix.Cat(nd, keytofetch)
	if err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(CountChars(read))
}
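CountChars is not defined in this example; a plausible minimal sketch (an assumption, not the original helper) just drains the reader and counts the bytes:

	// hypothetical sketch of CountChars: count how many bytes the stream yields
	func CountChars(r io.Reader) int64 {
		n, err := io.Copy(ioutil.Discard, r)
		if err != nil {
			fmt.Println("read error:", err)
		}
		return n
	}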
Example #8
func clientHandlerCat(ctx context.Context, w http.ResponseWriter, n *core.IpfsNode, hash, targethash string) {
	target, err := peer.IDB58Decode(targethash)
	if err != nil {
		http.Error(w, fmt.Sprintf("%s", err), 500)
		return
	}

	// FIXME: validate this in case there is a 46 len name!
	foundhash := false
	w.Header().Set("Content-Disposition", fmt.Sprintf("filename=\"%s\"", hash))
	if len(hash) != 46 {
		entrylist := getEntryList(n, target)
		//fmt.Println(entrylist)
		for i := len(entrylist.Entries) - 1; i >= 0; i-- {
			if entrylist.Entries[i].Name == hash {
				hash = entrylist.Entries[i].Hash
				foundhash = true
				break
			}
		}
	} else {
		foundhash = true
	}

	if !foundhash {
		http.Error(w, "No entry found.", 500)
		return
	}

	reader, err := coreunix.Cat(ctx, n, hash)
	if err != nil {
		panic(err)
	}

	_, err = io.Copy(w, reader)
	if err != nil {
		http.Error(w, fmt.Sprintf("Error reading or writing entry:", err), 500)
		return
	}
}
Example #9
func RunSupernodeBootstrappedAddCat(data []byte, conf testutil.LatencyConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	servers, clients, err := InitializeSupernodeNetwork(ctx, 8, 2, conf)
	if err != nil {
		return err
	}
	for _, n := range append(servers, clients...) {
		defer n.Close()
	}

	adder := clients[0]
	catter := clients[1]

	log.Info("adder is", adder.Identity)
	log.Info("catter is", catter.Identity)

	keyAdded, err := coreunix.Add(adder, bytes.NewReader(data))
	if err != nil {
		return err
	}

	readerCatted, err := coreunix.Cat(ctx, catter, keyAdded)
	if err != nil {
		return err
	}

	// verify
	bufout := new(bytes.Buffer)
	io.Copy(bufout, readerCatted)
	if 0 != bytes.Compare(bufout.Bytes(), data) {
		return errors.New("catted data does not match added data")
	}
	cancel()
	return nil
}
Example #10
// Perform a large number of concurrent reads to stress the system
func TestIpfsStressRead(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	nd, mnt := setupIpfsTest(t, nil)
	defer mnt.Close()

	var ks []key.Key
	var paths []string

	nobj := 50
	ndiriter := 50

	// Make a bunch of objects
	for i := 0; i < nobj; i++ {
		fi, _ := randObj(t, nd, rand.Int63n(50000))
		k, err := fi.Key()
		if err != nil {
			t.Fatal(err)
		}

		ks = append(ks, k)
		paths = append(paths, k.String())
	}

	// Now make a bunch of dirs
	for i := 0; i < ndiriter; i++ {
		db := uio.NewDirectory(nd.DAG)
		for j := 0; j < 1+rand.Intn(10); j++ {
			name := fmt.Sprintf("child%d", j)
			err := db.AddChild(nd.Context(), name, ks[rand.Intn(len(ks))])
			if err != nil {
				t.Fatal(err)
			}
		}
		newdir := db.GetNode()
		k, err := nd.DAG.Add(newdir)
		if err != nil {
			t.Fatal(err)
		}

		ks = append(ks, k)
		npaths := getPaths(t, nd, k.String(), newdir)
		paths = append(paths, npaths...)
	}

	// Now read a bunch, concurrently
	wg := sync.WaitGroup{}
	errs := make(chan error)

	for s := 0; s < 4; s++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			for i := 0; i < 2000; i++ {
				item := paths[rand.Intn(len(paths))]
				fname := path.Join(mnt.Dir, item)
				rbuf, err := ioutil.ReadFile(fname)
				if err != nil {
					errs <- err
					continue
				}

				read, err := coreunix.Cat(nd.Context(), nd, item)
				if err != nil {
					errs <- err
					continue
				}

				data, err := ioutil.ReadAll(read)
				if err != nil {
					errs <- err
					continue
				}

				if !bytes.Equal(rbuf, data) {
					errs <- errors.New("Incorrect Read!")
				}
			}
		}()
	}

	go func() {
		wg.Wait()
		close(errs)
	}()

	for err := range errs {
		if err != nil {
			t.Fatal(err)
		}
	}
}
Example #11
func RunThreeLeggedCat(data []byte, conf testutil.LatencyConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	const numPeers = 3

	// create network
	mn := mocknet.New(ctx)
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency: conf.NetworkLatency,
		// TODO add to conf. This is tricky because we want 0 values to be functional.
		Bandwidth: math.MaxInt32,
	})

	bootstrap, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer bootstrap.Close()

	adder, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer adder.Close()

	catter, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer catter.Close()
	if err := mn.LinkAll(); err != nil {
		return err
	}

	bis := bootstrap.Peerstore.PeerInfo(bootstrap.PeerHost.ID())
	bcfg := core.BootstrapConfigWithPeers([]peer.PeerInfo{bis})
	if err := adder.Bootstrap(bcfg); err != nil {
		return err
	}
	if err := catter.Bootstrap(bcfg); err != nil {
		return err
	}

	added, err := coreunix.Add(adder, bytes.NewReader(data))
	if err != nil {
		return err
	}

	readerCatted, err := coreunix.Cat(ctx, catter, added)
	if err != nil {
		return err
	}

	// verify
	bufout := new(bytes.Buffer)
	io.Copy(bufout, readerCatted)
	if 0 != bytes.Compare(bufout.Bytes(), data) {
		return errors.New("catted data does not match added data")
	}
	return nil
}
Example #12
func benchCat(b *testing.B, data []byte, conf testutil.LatencyConfig) error {
	b.StopTimer()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// create network
	mn := mocknet.New(ctx)
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency: conf.NetworkLatency,
		// TODO add to conf. This is tricky because we want 0 values to be functional.
		Bandwidth: math.MaxInt32,
	})

	adder, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer adder.Close()

	catter, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer catter.Close()

	err = mn.LinkAll()
	if err != nil {
		return err
	}

	bs1 := []peer.PeerInfo{adder.Peerstore.PeerInfo(adder.Identity)}
	bs2 := []peer.PeerInfo{catter.Peerstore.PeerInfo(catter.Identity)}

	if err := catter.Bootstrap(core.BootstrapConfigWithPeers(bs1)); err != nil {
		return err
	}
	if err := adder.Bootstrap(core.BootstrapConfigWithPeers(bs2)); err != nil {
		return err
	}

	added, err := coreunix.Add(adder, bytes.NewReader(data))
	if err != nil {
		return err
	}

	b.StartTimer()
	readerCatted, err := coreunix.Cat(ctx, catter, added)
	if err != nil {
		return err
	}

	// verify
	bufout := new(bytes.Buffer)
	io.Copy(bufout, readerCatted)
	if 0 != bytes.Compare(bufout.Bytes(), data) {
		return errors.New("catted data does not match added data")
	}
	return nil
}