Example #1
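// TestInitialization verifies that NewIPFSNode succeeds for valid
// configs and fails for configs missing required fields.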
func TestInitialization(t *testing.T) {
	ctx := context.TODO()
	id := testIdentity

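	// Configs that should construct successfully: one backed by an
	// in-memory datastore, one backed by leveldb.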
	good := []*config.Config{
		{
			Identity: id,
			Datastore: config.Datastore{
				Type: "memory",
			},
			Addresses: config.Addresses{
				Swarm: []string{"/ip4/0.0.0.0/tcp/4001"},
				API:   "/ip4/127.0.0.1/tcp/8000",
			},
		},

		{
			Identity: id,
			Datastore: config.Datastore{
				Type: "leveldb",
				Path: ".testdb",
			},
			Addresses: config.Addresses{
				Swarm: []string{"/ip4/0.0.0.0/tcp/4001"},
				API:   "/ip4/127.0.0.1/tcp/8000",
			},
		},
	}

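	// Configs that should fail to construct: neither sets an identity.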
	bad := []*config.Config{
		{},
		{Datastore: config.Datastore{Type: "memory"}},
	}

	for i, c := range good {
		r := &repo.Mock{
			C: *c,
			D: testutil.ThreadSafeCloserMapDatastore(),
		}
		n, err := NewIPFSNode(ctx, Standard(r, false))
		if n == nil || err != nil {
			t.Error("Should have constructed.", i, err)
		}
	}

	for i, c := range bad {
		r := &repo.Mock{
			C: *c,
			D: testutil.ThreadSafeCloserMapDatastore(),
		}
		n, err := NewIPFSNode(ctx, Standard(r, false))
		if n != nil || err == nil {
			t.Error("Should have failed to construct.", i)
		}
	}
}
Example #2
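// TestInitialization verifies that the BuildCfg-based NewNode succeeds
// for valid configs and fails for an empty one.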
func TestInitialization(t *testing.T) {
	ctx := context.Background()
	id := testIdentity

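	// Valid configs: the mock repo supplies the datastore, so only
	// identity and addresses are needed here.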
	good := []*config.Config{
		{
			Identity: id,
			Addresses: config.Addresses{
				Swarm: []string{"/ip4/0.0.0.0/tcp/4001"},
				API:   "/ip4/127.0.0.1/tcp/8000",
			},
		},

		{
			Identity: id,
			Addresses: config.Addresses{
				Swarm: []string{"/ip4/0.0.0.0/tcp/4001"},
				API:   "/ip4/127.0.0.1/tcp/8000",
			},
		},
	}

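	// An empty config has no identity and should fail to construct.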
	bad := []*config.Config{
		{},
	}

	for i, c := range good {
		r := &repo.Mock{
			C: *c,
			D: testutil.ThreadSafeCloserMapDatastore(),
		}
		n, err := NewNode(ctx, &BuildCfg{Repo: r})
		if n == nil || err != nil {
			t.Error("Should have constructed.", i, err)
		}
	}

	for i, c := range bad {
		r := &repo.Mock{
			C: *c,
			D: testutil.ThreadSafeCloserMapDatastore(),
		}
		n, err := NewNode(ctx, &BuildCfg{Repo: r})
		if n != nil || err == nil {
			t.Error("Should have failed to construct.", i)
		}
	}
}
Example #3
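// newNodeWithMockNamesys builds a node from a mock repo, swaps in the
// given mock name system, and returns any construction error.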
func newNodeWithMockNamesys(ns mockNamesys) (*core.IpfsNode, error) {
	c := config.Config{
		Identity: config.Identity{
			PeerID: "Qmfoo", // required by offline node
		},
	}
	r := &repo.Mock{
		C: c,
		D: testutil.ThreadSafeCloserMapDatastore(),
	}
	n, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
	if err != nil {
		return nil, err
	}
	n.Namesys = ns
	return n, nil
}
Example #4
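// newNodeWithMockNamesys builds an offline node from a mock repo and
// swaps in the given mock name system, failing the test on error.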
func newNodeWithMockNamesys(t *testing.T, ns mockNamesys) *core.IpfsNode {
	c := config.Config{
		Identity: config.Identity{
			PeerID: "Qmfoo", // required by offline node
		},
	}
	r := &repo.Mock{
		C: c,
		D: testutil.ThreadSafeCloserMapDatastore(),
	}
	n, err := core.NewIPFSNode(context.Background(), core.Offline(r))
	if err != nil {
		t.Fatal(err)
	}
	n.Namesys = ns
	return n
}
Example #5
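// TestAddRecursive adds the test_data directory recursively and checks
// the resulting key against a known root hash.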
func TestAddRecursive(t *testing.T) {
	r := &repo.Mock{
		C: config.Config{
			Identity: config.Identity{
				PeerID: "Qmfoo", // required by offline node
			},
		},
		D: testutil.ThreadSafeCloserMapDatastore(),
	}
	node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
	if err != nil {
		t.Fatal(err)
	}
	if k, err := AddR(node, "test_data"); err != nil {
		t.Fatal(err)
	} else if k != "QmWCCga8AbTyfAQ7pTnGT6JgmRMAB3Qp8ZmTEFi5q5o8jC" {
		t.Fatal("keys do not match: ", k)
	}
}
Example #6
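// TestAddRecursive adds test_data (resolved against the current working
// directory) and checks the resulting key against a known root hash.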
func TestAddRecursive(t *testing.T) {
	here, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	r := &repo.Mock{
		C: config.Config{
			Identity: config.Identity{
				PeerID: "Qmfoo", // required by offline node
			},
		},
		D: testutil.ThreadSafeCloserMapDatastore(),
	}
	node, err := core.NewIPFSNode(context.Background(), core.Offline(r))
	if err != nil {
		t.Fatal(err)
	}
	if k, err := AddR(node, path.Join(here, "test_data")); err != nil {
		t.Fatal(err)
	} else if k != "QmWCCga8AbTyfAQ7pTnGT6JgmRMAB3Qp8ZmTEFi5q5o8jC" {
		t.Fatal("keys do not match")
	}
}
Example #7
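// TestAddGCLive runs a garbage collection concurrently with an
// in-progress add and verifies that GC never collects a hash the adder
// just produced.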
func TestAddGCLive(t *testing.T) {
	r := &repo.Mock{
		C: config.Config{
			Identity: config.Identity{
				PeerID: "Qmfoo", // required by offline node
			},
		},
		D: testutil.ThreadSafeCloserMapDatastore(),
	}
	node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
	if err != nil {
		t.Fatal(err)
	}

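	// Channels for adder output and for errors from background goroutines.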
	errs := make(chan error)
	out := make(chan interface{})
	adder, err := NewAdder(context.Background(), node, out)
	if err != nil {
		t.Fatal(err)
	}

	dataa := ioutil.NopCloser(bytes.NewBufferString("testfileA"))
	rfa := files.NewReaderFile("a", "a", dataa, nil)

	// make two files with pipes so we can 'pause' the add for timing of the test
	piper, pipew := io.Pipe()
	hangfile := files.NewReaderFile("b", "b", piper, nil)

	datad := ioutil.NopCloser(bytes.NewBufferString("testfileD"))
	rfd := files.NewReaderFile("d", "d", datad, nil)

	slf := files.NewSliceFile("files", "files", []files.File{rfa, hangfile, rfd})

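	// Run the add in the background so the test can interleave a GC run with it.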
	addDone := make(chan struct{})
	go func() {
		defer close(addDone)
		defer close(out)
		err := adder.AddFile(slf)

		if err != nil {
			// t.Fatal must only be called from the test goroutine;
			// t.Error is safe to call from here.
			t.Error(err)
		}

	}()

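	// The first file's hash should arrive on out before the adder blocks on the pipe.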
	addedHashes := make(map[string]struct{})
	select {
	case o := <-out:
		addedHashes[o.(*AddedObject).Hash] = struct{}{}
	case <-addDone:
		t.Fatal("add shouldnt complete yet")
	}

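	// Start GC in the background; gcstarted is closed once gc.GC returns.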
	var gcout <-chan key.Key
	gcstarted := make(chan struct{})
	go func() {
		defer close(gcstarted)
		gcchan, err := gc.GC(context.Background(), node.Blockstore, node.Pinning)
		if err != nil {
			log.Error("GC ERROR:", err)
			errs <- err
			return
		}

		gcout = gcchan
	}()

	// gc shouldn't start until we let the add finish its current file.
	pipew.Write([]byte("some data for file b"))

	select {
	case <-gcstarted:
		t.Fatal("gc shouldnt have started yet")
	case err := <-errs:
		t.Fatal(err)
	default:
	}

	time.Sleep(time.Millisecond * 100) // make sure gc gets to requesting lock

	// finish write and unblock gc
	pipew.Close()

	// receive next object from adder
	select {
	case o := <-out:
		addedHashes[o.(*AddedObject).Hash] = struct{}{}
	case err := <-errs:
		t.Fatal(err)
	}

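	// Wait for GC to actually begin (or for it to report an error).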
	select {
	case <-gcstarted:
	case err := <-errs:
		t.Fatal(err)
	}

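	// Nothing GC removed may be a hash the adder just produced.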
	for k := range gcout {
		if _, ok := addedHashes[k.B58String()]; ok {
			t.Fatal("gc'ed a hash we just added")
		}
	}

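	// Drain the remaining adder output; the last hash emitted is the root.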
	var last key.Key
	for a := range out {
		// wait for it to finish
		last = key.B58KeyDecode(a.(*AddedObject).Hash)
	}

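	// Verify the root and all of its children survived the GC.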
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	root, err := node.DAG.Get(ctx, last)
	if err != nil {
		t.Fatal(err)
	}

	err = dag.EnumerateChildren(ctx, node.DAG, root, key.NewKeySet())
	if err != nil {
		t.Fatal(err)
	}
}