Example #1
// TestNilInputs tries supplying the renter with nil inputs and checks for
// correct rejection.
func TestNilInputs(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	rt, err := newRenterTester("TestNilInputs")
	if err != nil {
		t.Fatal(err)
	}
	_, err = New(rt.cs, rt.hostdb, rt.wallet, rt.tpool, rt.renter.saveDir+"1")
	if err != nil {
		t.Error(err)
	}
	_, err = New(nil, nil, nil, nil, rt.renter.saveDir+"2")
	if err == nil {
		t.Error("no error returned for nil inputs")
	}
	_, err = New(nil, rt.hostdb, rt.wallet, rt.tpool, rt.renter.saveDir+"3")
	if err != ErrNilCS {
		t.Error(err)
	}
	_, err = New(rt.cs, nil, rt.wallet, rt.tpool, rt.renter.saveDir+"5")
	if err != ErrNilHostDB {
		t.Error(err)
	}
	_, err = New(rt.cs, rt.hostdb, nil, rt.tpool, rt.renter.saveDir+"6")
	if err != ErrNilWallet {
		t.Error(err)
	}
	_, err = New(rt.cs, rt.hostdb, rt.wallet, nil, rt.renter.saveDir+"6")
	if err != ErrNilTpool {
		t.Error(err)
	}
}
Example #2
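// TestDockerHostNet starts a redis task with network_mode set to "host" and
// checks that the Docker driver returns a valid handle.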
func TestDockerHostNet(t *testing.T) {
	if !dockerLocated() {
		t.SkipNow()
	}

	task := &structs.Task{
		Config: map[string]string{
			"image":        "redis",
			"network_mode": "host",
		},
		Resources: &structs.Resources{
			MemoryMB: 256,
			CPU:      512,
		},
	}
	driverCtx := testDriverContext(task.Name)
	ctx := testDriverExecContext(task, driverCtx)
	defer ctx.AllocDir.Destroy()
	d := NewDockerDriver(driverCtx)

	handle, err := d.Start(ctx, task)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle == nil {
		t.Fatalf("missing handle")
	}
	defer handle.Kill()
}
Example #3
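// TestZookeeperBackend runs the generic physical backend tests against a live
// ZooKeeper server; it is skipped unless ZOOKEEPER_ADDR is set.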
func TestZookeeperBackend(t *testing.T) {
	addr := os.Getenv("ZOOKEEPER_ADDR")
	if addr == "" {
		t.SkipNow()
	}

	client, _, err := zk.Connect([]string{addr}, time.Second)

	if err != nil {
		t.Fatalf("err: %v", err)
	}

	randPath := fmt.Sprintf("/vault-%d", time.Now().Unix())
	acl := zk.WorldACL(zk.PermAll)
	_, err = client.Create(randPath, []byte("hi"), int32(0), acl)

	if err != nil {
		t.Fatalf("err: %v", err)
	}

	defer func() {
		client.Delete(randPath, -1)
	}()

	b, err := NewBackend("zookeeper", map[string]string{
		"address": addr + "," + addr,
		"path":    randPath,
	})
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testBackend(t, b)
	testBackend_ListPrefix(t, b)
}
Example #4
// TestThreadGroupOnStop tests that Stop calls functions registered with
// OnStop.
func TestThreadGroupOnStop(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}

	// create ThreadGroup and register the closer
	var tg ThreadGroup
	tg.OnStop(func() { l.Close() })

	// record whether the listener was closed
	var closed bool
	tg.Add()
	go func() {
		defer tg.Done()
		_, err := l.Accept()
		closed = err != nil
	}()

	tg.Stop()
	if !closed {
		t.Fatal("Stop did not close listener")
	}
}
Example #5
// NewAuthenticatedClient creates a new vim25.Client, authenticates the user
// specified in the test URL, and returns it.
func NewAuthenticatedClient(t *testing.T) *vim25.Client {
	u := URL()
	if u == nil {
		t.SkipNow()
	}

	soapClient := soap.NewClient(u, true)
	vimClient, err := vim25.NewClient(context.Background(), soapClient)
	if err != nil {
		t.Fatal(err)
	}

	req := types.Login{
		This: *vimClient.ServiceContent.SessionManager,
	}

	req.UserName = u.User.Username()
	if pw, ok := u.User.Password(); ok {
		req.Password = pw
	}

	_, err = methods.Login(context.Background(), vimClient, &req)
	if err != nil {
		t.Fatal(err)
	}

	return vimClient
}
Example #6
// TestTryInvalidTransactionSet submits an invalid transaction set to the
// TryTransaction method.
func TestTryInvalidTransactionSet(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	cst, err := createConsensusSetTester("TestValidTransaction")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()
	initialHash := cst.cs.dbConsensusChecksum()

	// Try a valid transaction followed by an invalid transaction.
	_, err = cst.wallet.SendSiacoins(types.NewCurrency64(1), types.UnlockHash{})
	if err != nil {
		t.Fatal(err)
	}
	txns := cst.tpool.TransactionList()
	txn := types.Transaction{
		SiacoinInputs: []types.SiacoinInput{{}},
	}
	txns = append(txns, txn)
	cc, err := cst.cs.TryTransactionSet(txns)
	if err == nil {
		t.Error("bad transaction survived filter")
	}
	if cst.cs.dbConsensusChecksum() != initialHash {
		t.Error("TryTransactionSet did not restore order")
	}
	if len(cc.SiacoinOutputDiffs) != 0 {
		t.Error("consensus change was not empty despite an error being returned")
	}
}
Example #7
// TestThreadGroupStopEarly tests that a thread group can correctly interrupt
// an ongoing process.
func TestThreadGroupStopEarly(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	var tg ThreadGroup
	for i := 0; i < 10; i++ {
		err := tg.Add()
		if err != nil {
			t.Fatal(err)
		}

		go func() {
			defer tg.Done()
			select {
			case <-time.After(1 * time.Second):
			case <-tg.StopChan():
			}
		}()
	}
	start := time.Now()
	err := tg.Stop()
	elapsed := time.Since(start)
	if err != nil {
		t.Fatal(err)
	} else if elapsed > 100*time.Millisecond {
		t.Fatal("Stop did not interrupt goroutines")
	}
}
Example #8
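// TestSizeSplitterIsDeterministic splits two copies of the same random buffer
// and checks that both produce identical chunk streams.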
func TestSizeSplitterIsDeterministic(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	test := func() {
		bufR := randBuf(t, 10000000) // crank this up to satisfy yourself.
		bufA := copyBuf(bufR)
		bufB := copyBuf(bufR)

		chunksA := DefaultSplitter.Split(bytes.NewReader(bufA))
		chunksB := DefaultSplitter.Split(bytes.NewReader(bufB))

		for n := 0; ; n++ {
			a, moreA := <-chunksA
			b, moreB := <-chunksB

			if !moreA {
				if moreB {
					t.Fatal("A ended, B didnt.")
				}
				return
			}

			if !bytes.Equal(a, b) {
				t.Fatalf("chunk %d not equal", n)
			}
		}
	}

	for run := 0; run < 1; run++ { // crank this up to satisfy yourself.
		test()
	}
}
Example #9
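// TestSizeSplitterFillsChunks checks that every chunk except the last is
// filled to the splitter's Size and that the chunks reassemble into the
// original buffer.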
func TestSizeSplitterFillsChunks(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	max := 10000000
	b := randBuf(t, max)
	r := &clipReader{r: bytes.NewReader(b), size: 4000}
	s := SizeSplitter{Size: 1024 * 256}
	c := s.Split(r)

	sofar := 0
	whole := make([]byte, max)
	for chunk := range c {

		bc := b[sofar : sofar+len(chunk)]
		if !bytes.Equal(bc, chunk) {
			t.Fatalf("chunk not correct: (sofar: %d) %d != %d, %v != %v", sofar, len(bc), len(chunk), bc[:100], chunk[:100])
		}

		copy(whole[sofar:], chunk)

		sofar += len(chunk)
		if sofar != max && len(chunk) < s.Size {
			t.Fatal("sizesplitter split at a smaller size")
		}
	}

	if !bytes.Equal(b, whole) {
		t.Fatal("splitter did not split right")
	}
}
Example #10
// TestIntegrationMinerGET checks the GET call to the /miner endpoint.
func TestIntegrationMinerGET(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	st, err := createServerTester("TestIntegrationMinerGET")
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.Close()

	// Get the API-returned fields of the miner.
	var mg MinerGET
	err = st.getAPI("/miner", &mg)
	if err != nil {
		t.Fatal(err)
	}

	// Verify the correctness of the results.
	blocksMined, staleBlocksMined := st.server.api.miner.BlocksMined()
	if mg.BlocksMined != blocksMined {
		t.Error("blocks mined did not succeed")
	}
	if mg.StaleBlocksMined != staleBlocksMined {
		t.Error("stale blocks mined is incorrect")
	}
	if mg.CPUHashrate != st.server.api.miner.CPUHashrate() {
		t.Error("mismatched cpu hashrate")
	}
	if mg.CPUMining != st.server.api.miner.CPUMining() {
		t.Error("mismatched cpu miner status")
	}
}
Example #11
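// TestArchiveSpaceAPI checks client construction, Login, and Logout against a
// live instance configured through environment variables.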
func TestArchiveSpaceAPI(t *testing.T) {
	// Get the environment variables needed for testing.
	isSetup := checkConfig(t)
	if !isSetup {
		t.Error("Environment variables needed to run tests are not configured")
		t.SkipNow()
	}

	cait := New(caitURL, caitUsername, caitPassword)
	if cait.BaseURL == nil {
		t.Errorf("cait.BaseURL is nil, expected %s", caitURL)
	} else if cait.BaseURL.String() != caitURL {
		t.Errorf("%s != %s\n", cait.BaseURL.String(), caitURL)
	}

	if cait.IsAuth() {
		t.Error("cait.IsAuth() returning true before authentication")
	}
	err := cait.Login()
	if err != nil {
		t.Errorf("%s\t%s", err, cait.BaseURL.String())
		t.FailNow()
	}
	if !cait.IsAuth() {
		t.Error("cait.IsAuth() returned false after authentication")
	}

	err = cait.Logout()
	if err != nil {
		t.Errorf("Logout() %s", err)
	}
}
Example #12
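// TestFluentdHandlerOutput checks handler output against a live fluentd
// endpoint; it is skipped unless fluentdAddr is set.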
func TestFluentdHandlerOutput(t *testing.T) {
	if fluentdAddr == "" {
		t.SkipNow()
	}

	testHandlerOutput(t, NewFluentdHandler(fluentdAddr, "rglog.test"))
}
Example #13
// TestFileSaveLoadASCII tests the ASCII saving/loading functions.
func TestFileSaveLoadASCII(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	rt, err := newRenterTester("TestRenterSaveLoad")
	if err != nil {
		t.Fatal(err)
	}
	defer rt.Close()

	// Create a file and add it to the renter.
	savedFile := newTestingFile()
	rt.renter.files[savedFile.name] = savedFile

	ascii, err := rt.renter.ShareFilesAscii([]string{savedFile.name})
	if err != nil {
		t.Fatal(err)
	}

	// Remove the file from the renter.
	delete(rt.renter.files, savedFile.name)

	names, err := rt.renter.LoadSharedFilesAscii(ascii)
	if err != nil {
		t.Fatal(err)
	}
	if len(names) != 1 || names[0] != savedFile.name {
		t.Fatal("nickname not loaded properly")
	}

	err = equalFiles(rt.renter.files[savedFile.name], savedFile)
	if err != nil {
		t.Fatal(err)
	}
}
Example #14
// dockerSetup does all of the basic setup you need to get a running docker
// process up and running for testing. Use like:
//
//	task := taskTemplate()
//	// do custom task configuration
//	client, handle, cleanup := dockerSetup(t, task)
//	defer cleanup()
//	// do test stuff
//
// If there is a problem during setup this function will abort or skip the test
// and indicate the reason.
func dockerSetup(t *testing.T, task *structs.Task) (*docker.Client, DriverHandle, func()) {
	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}

	client, err := docker.NewClientFromEnv()
	if err != nil {
		t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack())
	}

	driverCtx, execCtx := testDriverContexts(task)
	driver := NewDockerDriver(driverCtx)

	handle, err := driver.Start(execCtx, task)
	if err != nil {
		execCtx.AllocDir.Destroy()
		t.Fatalf("Failed to start driver: %s\nStack\n%s", err, debug.Stack())
	}
	if handle == nil {
		execCtx.AllocDir.Destroy()
		t.Fatalf("handle is nil\nStack\n%s", debug.Stack())
	}

	cleanup := func() {
		handle.Kill()
		execCtx.AllocDir.Destroy()
	}

	return client, handle, cleanup
}
Example #15
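// TestGetclient is a manual smoke test that creates a client and pushes a
// JSON message; it is skipped unconditionally via t.SkipNow.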
func TestGetclient(t *testing.T) {
	t.SkipNow()
	server := "http://127.0.0.1:9200/"
	client, err := Getclient(server)
	if err != nil {
		fmt.Println("fail to create the client:", err)
		return
	}
	fmt.Println(client)

	// Create an index
	//_, err = client.CreateIndex("testagent").Do()
	//if err != nil {
	//	fmt.Println(err)
	//	return
	//}

	message := &Test{Testa: "Testing messages a", Testb: "Testing message b", Testc: 100}
	//client.Push(message, "Test")
	jsonmessage, err := json.Marshal(message)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("json message:", string(jsonmessage))
	err = client.Push(jsonmessage, "Testindex", "Testtype")
	if err != nil {
		fmt.Println(err)
	}
}
Example #16
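// TestAPI_UnixSocket checks that the client can reach an agent listening on a
// unix domain socket; it is skipped on Windows.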
func TestAPI_UnixSocket(t *testing.T) {
	t.Parallel()
	if runtime.GOOS == "windows" {
		t.SkipNow()
	}

	tempDir, err := ioutil.TempDir("", "consul")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.RemoveAll(tempDir)
	socket := filepath.Join(tempDir, "test.sock")

	c, s := makeClientWithConfig(t, func(c *Config) {
		c.Address = "unix://" + socket
	}, func(c *testutil.TestServerConfig) {
		c.Addresses = &testutil.TestAddressConfig{
			HTTP: "unix://" + socket,
		}
	})
	defer s.Stop()

	agent := c.Agent()

	info, err := agent.Self()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if info["Config"]["NodeName"] == "" {
		t.Fatalf("bad: %v", info)
	}
}
Example #17
// TestTryValidTransactionSet submits a valid transaction set to the
// TryTransactionSet method.
func TestTryValidTransactionSet(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	cst, err := createConsensusSetTester("TestValidTransaction")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()
	initialHash := cst.cs.dbConsensusChecksum()

	// Try a valid transaction.
	_, err = cst.wallet.SendSiacoins(types.NewCurrency64(1), types.UnlockHash{})
	if err != nil {
		t.Fatal(err)
	}
	txns := cst.tpool.TransactionList()
	cc, err := cst.cs.TryTransactionSet(txns)
	if err != nil {
		t.Error(err)
	}
	if cst.cs.dbConsensusChecksum() != initialHash {
		t.Error("TryTransactionSet did not resotre order")
	}
	if len(cc.SiacoinOutputDiffs) == 0 {
		t.Error("consensus change is missing diffs after verifying a transction clump")
	}
}
Example #18
// TestCommitDelayedSiacoinOutputDiffBadMaturity commits a delayed siacoin
// output that has a bad maturity height and triggers a panic.
func TestCommitDelayedSiacoinOutputDiffBadMaturity(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	cst, err := createConsensusSetTester("TestCommitDelayedSiacoinOutputDiffBadMaturity")
	if err != nil {
		t.Fatal(err)
	}

	// Trigger an inconsistency check.
	defer func() {
		r := recover()
		if r == nil {
			t.Error("expecting error after corrupting database")
		}
	}()

	// Commit a delayed siacoin output with an invalid maturity height of cs.height()-1.
	maturityHeight := cst.cs.height() - 1
	id := types.SiacoinOutputID{'1'}
	dsco := types.SiacoinOutput{Value: types.NewCurrency64(1)}
	dscod := modules.DelayedSiacoinOutputDiff{
		Direction:      modules.DiffApply,
		ID:             id,
		SiacoinOutput:  dsco,
		MaturityHeight: maturityHeight,
	}
	cst.cs.commitDelayedSiacoinOutputDiff(dscod, modules.DiffApply)
}
Example #19
// TestIntegrationBlankEncryption probes the encryption process when the user
// supplies a blank encryption key during the encryption process.
func TestIntegrationBlankEncryption(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// Create the wallet.
	wt, err := createBlankWalletTester("TestIntegrationBlankEncryption")
	if err != nil {
		t.Fatal(err)
	}
	defer wt.closeWt()
	// Encrypt the wallet using a blank key.
	seed, err := wt.wallet.Encrypt(crypto.TwofishKey{})
	if err != nil {
		t.Error(err)
	}

	// Try unlocking the wallet using a blank key.
	err = wt.wallet.Unlock(crypto.TwofishKey{})
	if err != modules.ErrBadEncryptionKey {
		t.Fatal(err)
	}
	// Try unlocking the wallet using the correct key.
	err = wt.wallet.Unlock(crypto.TwofishKey(crypto.HashObject(seed)))
	if err != nil {
		t.Fatal(err)
	}
	err = wt.wallet.Lock()
	if err != nil {
		t.Fatal(err)
	}
	postEncryptionTesting(wt.miner, wt.wallet, crypto.TwofishKey(crypto.HashObject(seed)))
}
Example #20
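// TestAddNode checks that addNode accepts a new node and rejects duplicate,
// unroutable, non-IP, unspecified, and local addresses.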
func TestAddNode(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	g := newTestingGateway("TestAddNode", t)
	defer g.Close()
	g.mu.Lock()
	defer g.mu.Unlock()
	if err := g.addNode(dummyNode); err != nil {
		t.Fatal("addNode failed:", err)
	}
	if err := g.addNode(dummyNode); err != errNodeExists {
		t.Error("addNode added duplicate node")
	}
	if err := g.addNode("foo"); err == nil {
		t.Error("addNode added unroutable address")
	}
	if err := g.addNode("foo:9981"); err == nil {
		t.Error("addNode added a non-IP address")
	}
	if err := g.addNode("[::]:9981"); err == nil {
		t.Error("addNode added unspecified address")
	}
	if err := g.addNode(g.myAddr); err != errOurAddress {
		t.Error("addNode added our own address")
	}
}
Example #21
// TestThreadGroupConcurrentAdd tests that Add can be called concurrently with Stop.
func TestThreadGroupConcurrentAdd(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	var tg ThreadGroup
	for i := 0; i < 10; i++ {
		go func() {
			err := tg.Add()
			if err != nil {
				return
			}
			defer tg.Done()

			select {
			case <-time.After(1 * time.Second):
			case <-tg.StopChan():
			}
		}()
	}
	time.Sleep(10 * time.Millisecond) // wait for at least one Add
	err := tg.Stop()
	if err != nil {
		t.Fatal(err)
	}
}
Example #22
// TestNodesAreSharedOnConnect tests that nodes that a gateway has never seen
// before are added to the node list when connecting to another gateway that
// has seen said nodes.
func TestNodesAreSharedOnConnect(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	g1 := newTestingGateway("TestNodesAreSharedOnConnect1", t)
	defer g1.Close()
	g2 := newTestingGateway("TestNodesAreSharedOnConnect2", t)
	defer g2.Close()
	g3 := newTestingGateway("TestNodesAreSharedOnConnect3", t)
	defer g3.Close()

	// connect g2 to g1
	err := g2.Connect(g1.Address())
	if err != nil {
		t.Fatal("couldn't connect:", err)
	}

	// connect g3 to g1
	err = g3.Connect(g1.Address())
	if err != nil {
		t.Fatal("couldn't connect:", err)
	}

	// g3 should have received g2's address from g1
	time.Sleep(200 * time.Millisecond)
	g3.mu.Lock()
	defer g3.mu.Unlock()
	if _, ok := g3.nodes[g2.Address()]; !ok {
		t.Fatal("node was not relayed:", g3.nodes)
	}
}
Example #23
// TestThreadGroupWait tests that a thread group will correctly wait for
// existing processes to halt.
func TestThreadGroupWait(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	var tg ThreadGroup
	for i := 0; i < 10; i++ {
		err := tg.Add()
		if err != nil {
			t.Fatal(err)
		}

		go func() {
			defer tg.Done()
			time.Sleep(time.Second)
		}()
	}
	start := time.Now()
	err := tg.Stop()
	elapsed := time.Since(start)
	if err != nil {
		t.Fatal(err)
	} else if elapsed < time.Second {
		t.Fatal("Stop did not wait for goroutines")
	}
}
Example #24
// TestMissedTarget submits a block that does not meet the required target.
func TestMissedTarget(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	cst, err := createConsensusSetTester("TestMissedTarget")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.closeCst()

	// Mine a block that doesn't meet the target.
	block, target, err := cst.miner.BlockForWork()
	if err != nil {
		t.Fatal(err)
	}
	for checkTarget(block, target) && block.Nonce[0] != 255 {
		block.Nonce[0]++
	}
	if checkTarget(block, target) {
		t.Fatal("unable to find a failing target")
	}
	err = cst.cs.AcceptBlock(block)
	if err != modules.ErrBlockUnsolved {
		t.Fatalf("expected %v, got %v", modules.ErrBlockUnsolved, err)
	}
}
Example #25
// TestGatewayPeerConnect checks that /gateway/connect is adding a peer to the
// gateway's peerlist.
func TestGatewayPeerConnect(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	st, err := createServerTester("TestGatewayPeerConnect1")
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.Close()
	peer, err := gateway.New("localhost:0", build.TempDir("api", "TestGatewayPeerConnect2", "gateway"))
	if err != nil {
		t.Fatal(err)
	}
	err = st.stdPostAPI("/gateway/connect/"+string(peer.Address()), nil)
	if err != nil {
		t.Fatal(err)
	}

	var info GatewayInfo
	err = st.getAPI("/gateway", &info)
	if err != nil {
		t.Fatal(err)
	}
	if len(info.Peers) != 1 || info.Peers[0].NetAddress != peer.Address() {
		t.Fatal("/gateway/connect did not connect to peer", peer.Address())
	}
}
Example #26
// TestFutureTimestampHandling checks that blocks in the future (but not
// extreme future) are handled correctly.
func TestFutureTimestampHandling(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	cst, err := createConsensusSetTester("TestFutureTimestampHandling")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.closeCst()

	// Submit a block with a timestamp in the future, but not the extreme
	// future.
	block, target, err := cst.miner.BlockForWork()
	if err != nil {
		t.Fatal(err)
	}
	block.Timestamp = types.CurrentTimestamp() + 2 + types.FutureThreshold
	solvedBlock, _ := cst.miner.SolveBlock(block, target)
	err = cst.cs.AcceptBlock(solvedBlock)
	if err != errFutureTimestamp {
		t.Fatalf("expected %v, got %v", errFutureTimestamp, err)
	}

	// Check that after waiting until the block is no longer too far in the
	// future, the block gets added to the consensus set.
	time.Sleep(time.Second * 3) // 3 seconds, as the block was originally 2 seconds too far into the future.
	_, err = cst.cs.dbGetBlockMap(solvedBlock.ID())
	if err == errNilItem {
		t.Fatalf("future block was not added to the consensus set after waiting the appropriate amount of time")
	}
}
Example #27
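// TestFindPeer connects several DHT nodes and checks that FindPeer can locate
// a peer that is not directly connected.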
func TestFindPeer(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	ctxT, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	p, err := dhts[0].FindPeer(ctxT, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	if p.ID == "" {
		t.Fatal("Failed to find peer.")
	}

	if p.ID != peers[2] {
		t.Fatal("Didnt find expected peer.")
	}
}
Example #28
// TestInconsistentCheck puts the consensus set into an inconsistent state
// and makes sure that the sanity checks trigger panics.
func TestInconsistentCheck(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	cst, err := createConsensusSetTester("TestInconsistentCheck")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.closeCst()

	// Corrupt the consensus set by adding a new siafund output.
	sfo := types.SiafundOutput{
		Value: types.NewCurrency64(1),
	}
	cst.cs.dbAddSiafundOutput(types.SiafundOutputID{}, sfo)

	// Catch a panic that should be caused by the inconsistency check after a
	// block is mined.
	defer func() {
		r := recover()
		if r == nil {
			t.Fatalf("inconsistency panic not triggered by corrupted database")
		}
	}()
	cst.miner.AddBlock()
}
Example #29
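// TestParsing fetches an overview report over HTTP and checks that each known
// address appears in it; the test is skipped if the request fails.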
func TestParsing(t *testing.T) {
	addresses := []string{
		"1DgxRTofdbau7kpf3pQeRydcoTPG2L5NUX",
		"17Nt7rWiRZKDgcNp421zZ1FHGPWSnnT1bk",
	}
	response, err := http.Get(url)
	if err != nil {
		t.SkipNow()
		// log.Fatal(err)
	}
	defer response.Body.Close()
	decoder := json.NewDecoder(response.Body)

	r := new(OverviewReport)
	err = decoder.Decode(&r)
	if err != nil {
		t.Fail()
		// log.Fatal(err)
	}
	total := new(AddressReport)
	for _, address := range addresses {
		report, ok := r.Report[address]
		if !ok {
			t.Fail()
		}
		total.Add(report)
	}
}
Example #30
// TestIntegrationFormContract tests that the contractor can form contracts
// with the host module.
func TestIntegrationFormContract(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	h, c, _, err := newTestingTrio("TestIntegrationFormContract")
	if err != nil {
		t.Fatal(err)
	}

	// get the host's entry from the db
	hostEntry, ok := c.hdb.Host(h.ExternalSettings().NetAddress)
	if !ok {
		t.Fatal("no entry for host in db")
	}

	// form a contract with the host
	contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100)
	if err != nil {
		t.Fatal(err)
	}

	if contract.NetAddress != h.ExternalSettings().NetAddress {
		t.Fatal("bad contract")
	}
}