Example No. 1
func Test_RpcServiceServesRequests(t *testing.T) {
	f := NewRpcFixture(t)
	defer f.Close()

	serviceWasCalled := false
	svc := MockRpcService{
		callback: func(text string, result *string) error {
			serviceWasCalled = true
			*result = strings.ToUpper(text)
			return nil
		},
	}

	err := f.rpcServer.Register(&svc)
	require.NoError(t, err)

	conn, err := f.rpcServer.Connect()
	require.NoError(t, err)
	defer conn.Close()

	client := rpc.NewClient(conn)
	defer client.Close()

	var reply string
	err = client.Call("MockRpcService.Test", "lowercase", &reply)

	assert.NoError(t, err)
	assert.Equal(t, "LOWERCASE", reply)
	assert.True(t, serviceWasCalled)
}
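
The test above assumes a MockRpcService type whose exported Test method is what net/rpc serves and what the client reaches as "MockRpcService.Test". A minimal sketch of such a mock, assuming only the callback field used in the test (the real helper may differ):

type MockRpcService struct {
	callback func(text string, result *string) error
}

// Test has the method shape net/rpc requires (two arguments, pointer reply,
// error return) and simply delegates to the test-supplied callback.
func (s *MockRpcService) Test(text string, result *string) error {
	return s.callback(text, result)
}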
Example No. 2
// We read from a piece which is marked completed, but is missing data.
func TestCompletedPieceWrongSize(t *testing.T) {
	cfg := TestingConfig
	cfg.DefaultStorage = badStorage{}
	cl, err := NewClient(&cfg)
	require.NoError(t, err)
	defer cl.Close()
	ie := metainfo.InfoEx{
		Info: metainfo.Info{
			PieceLength: 15,
			Pieces:      make([]byte, 20),
			Files: []metainfo.FileInfo{
				{Path: []string{"greeting"}, Length: 13},
			},
		},
	}
	ie.UpdateBytes()
	tt, new, err := cl.AddTorrentSpec(&TorrentSpec{
		Info:     &ie,
		InfoHash: ie.Hash(),
	})
	require.NoError(t, err)
	defer tt.Drop()
	assert.True(t, new)
	r := tt.NewReader()
	defer r.Close()
	b, err := ioutil.ReadAll(r)
	assert.Len(t, b, 13)
	assert.NoError(t, err)
}
Example No. 3
// NewTestServer creates a new initialised Laika httptest.Server. The server
// root credentials are "root" as username and password. It contains an
// environment named "test" with an enabled feature named "test_feature",
// and a user whose username is "user" and password is "password".
func NewTestServer(t *testing.T) *httptest.Server {
	s, err := store.NewMySQLStore(
		os.Getenv("LAIKA_MYSQL_USERNAME"),
		os.Getenv("LAIKA_MYSQL_PASSWORD"),
		os.Getenv("LAIKA_MYSQL_HOST"),
		os.Getenv("LAIKA_MYSQL_PORT"),
		os.Getenv("LAIKA_MYSQL_DBNAME"),
	)
	require.NoError(t, err)

	err = s.Ping()
	require.NoError(t, err)

	err = s.Migrate()
	require.NoError(t, err)

	user := models.User{
		Username:     "******" + store.Token(),
		PasswordHash: "awesome_password",
	}
	err = s.CreateUser(&user)
	require.NoError(t, err)

	server, err := NewServer(ServerConfig{
		Store:        s,
		RootUsername: "******",
		RootPassword: "******",
	})
	require.NoError(t, err)

	return httptest.NewServer(server)
}
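
A test would typically close the returned server with defer and authenticate using the root credentials described above. A hypothetical usage sketch (the /api/features route is an assumption for illustration, not part of this example):

func TestListFeatures(t *testing.T) {
	server := NewTestServer(t)
	defer server.Close()

	// Illustrative route only; substitute a real Laika endpoint.
	req, err := http.NewRequest("GET", server.URL+"/api/features", nil)
	require.NoError(t, err)
	req.SetBasicAuth("root", "root")

	resp, err := http.DefaultClient.Do(req)
	require.NoError(t, err)
	defer resp.Body.Close()
	require.Equal(t, http.StatusOK, resp.StatusCode)
}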
Example No. 4
// Test sync with a MaxSize exclude filter: the excluded file should be neither copied nor deleted.
func TestSyncWithExclude(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
	file2 := r.WriteBoth("empty space", "", t2)
	file3 := r.WriteFile("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
	fstest.CheckItems(t, r.fremote, file1, file2)
	fstest.CheckItems(t, r.flocal, file1, file2, file3)

	fs.Config.Filter.MaxSize = 40
	defer func() {
		fs.Config.Filter.MaxSize = -1
	}()

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)
	fstest.CheckItems(t, r.fremote, file2, file1)

	// Now sync the other way round and check enormous doesn't get
	// deleted as it is excluded from the sync
	fs.Stats.ResetCounters()
	err = fs.Sync(r.flocal, r.fremote)
	require.NoError(t, err)
	fstest.CheckItems(t, r.flocal, file2, file1, file3)
}
Example No. 5
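// TestZfsPoolMetrics checks that zfs_pool measurements are gathered only when
// PoolMetrics is enabled, and that they carry the expected pool and health tags.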
func TestZfsPoolMetrics(t *testing.T) {
	var acc testutil.Accumulator

	z := &Zfs{
		KstatMetrics: []string{"vdev_cache_stats"},
		sysctl:       mock_sysctl,
		zpool:        mock_zpool,
	}
	err := z.Gather(&acc)
	require.NoError(t, err)

	require.False(t, acc.HasMeasurement("zfs_pool"))
	acc.Metrics = nil

	z = &Zfs{
		KstatMetrics: []string{"vdev_cache_stats"},
		PoolMetrics:  true,
		sysctl:       mock_sysctl,
		zpool:        mock_zpool,
	}
	err = z.Gather(&acc)
	require.NoError(t, err)

	//one pool, all metrics
	tags := map[string]string{
		"pool":   "freenas-boot",
		"health": "ONLINE",
	}

	poolMetrics := getFreeNasBootPoolMetrics()

	acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags)
}
Example No. 6
// Create a file and sync it. Change the last modified date and resync.
// If we're only doing sync by size and checksum, we expect nothing
// to be transferred on the second sync.
func TestSyncBasedOnCheckSum(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	fs.Config.CheckSum = true
	defer func() { fs.Config.CheckSum = false }()

	file1 := r.WriteFile("check sum", "", t1)
	fstest.CheckItems(t, r.flocal, file1)

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred exactly one file.
	assert.Equal(t, int64(1), fs.Stats.GetTransfers())
	fstest.CheckItems(t, r.fremote, file1)

	// Change last modified date only
	file2 := r.WriteFile("check sum", "", t2)
	fstest.CheckItems(t, r.flocal, file2)

	fs.Stats.ResetCounters()
	err = fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred no files
	assert.Equal(t, int64(0), fs.Stats.GetTransfers())
	fstest.CheckItems(t, r.flocal, file2)
	fstest.CheckItems(t, r.fremote, file1)
}
Example No. 7
// Create a file and sync it. Keep the last modified date but change
// the size. With --ignore-size we expect nothing to be
// transferred on the second sync.
func TestSyncIgnoreSize(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	fs.Config.IgnoreSize = true
	defer func() { fs.Config.IgnoreSize = false }()

	file1 := r.WriteFile("ignore-size", "contents", t1)
	fstest.CheckItems(t, r.flocal, file1)

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred exactly one file.
	assert.Equal(t, int64(1), fs.Stats.GetTransfers())
	fstest.CheckItems(t, r.fremote, file1)

	// Update size but not date of file
	file2 := r.WriteFile("ignore-size", "longer contents but same date", t1)
	fstest.CheckItems(t, r.flocal, file2)

	fs.Stats.ResetCounters()
	err = fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred no files
	assert.Equal(t, int64(0), fs.Stats.GetTransfers())
	fstest.CheckItems(t, r.flocal, file2)
	fstest.CheckItems(t, r.fremote, file1)
}
Example No. 8
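// TestUploadLogChunksAndCloseSuccessfully appends two log chunks to a chunked
// artifact and closes it, expecting every request to the server to succeed.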
func TestUploadLogChunksAndCloseSuccessfully(t *testing.T) {
	ts := testserver.NewTestServer(t)
	defer ts.CloseAndAssertExpectations()

	client := NewArtifactStoreClient(ts.URL)

	ts.ExpectAndRespond("POST", "/buckets/", http.StatusOK, `{"Id": "foo"}`)
	ts.ExpectAndRespond("POST", "/buckets/foo/artifacts", http.StatusOK, `{"Name": "artifact"}`)

	b, _ := client.NewBucket("foo", "bar", 32)
	sa, err := b.NewChunkedArtifact("artifact")
	require.NotNil(t, sa)
	require.NoError(t, err)

	{
		// Content request might come later, even as late as Flush()
		ts.ExpectAndRespond("POST", "/buckets/foo/artifacts/artifact", 200, `{}`)
		err := sa.AppendLog("console contents")
		require.NoError(t, err)
	}

	{
		// Content request might come later, even as late as Flush()
		ts.ExpectAndRespond("POST", "/buckets/foo/artifacts/artifact", 200, `{}`)
		err := sa.AppendLog("more console contents")
		require.NoError(t, err)
	}

	{
		ts.ExpectAndRespond("POST", "/buckets/foo/artifacts/artifact/close", 200, `{}`)
		err := sa.Close()
		require.NoError(t, err)
	}
}
Example No. 9
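// TestPushLogChunkServerSucceedOnRetry verifies that a chunk upload which
// first fails with a retriable 500 is retried and then succeeds.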
func TestPushLogChunkServerSucceedOnRetry(t *testing.T) {
	ts := testserver.NewTestServer(t)
	defer ts.CloseAndAssertExpectations()

	client := NewArtifactStoreClient(ts.URL)

	ts.ExpectAndRespond("POST", "/buckets/", http.StatusOK, `{"Id": "foo"}`)
	ts.ExpectAndRespond("POST", "/buckets/foo/artifacts", http.StatusOK, `{"Name": "artifact"}`)

	b, _ := client.NewBucket("foo", "bar", 32)
	sa, err := b.NewChunkedArtifact("artifact")
	require.NotNil(t, sa)
	require.NoError(t, err)

	{
		// Fail with a retriable error first
		ts.ExpectAndRespond("POST", "/buckets/foo/artifacts/artifact", 500, `{}`)
		// Then succeed on retry
		ts.ExpectAndRespond("POST", "/buckets/foo/artifacts/artifact", 200, `{}`)
		err := sa.AppendLog("console contents")
		require.NoError(t, err)
	}

	{
		ts.ExpectAndRespond("POST", "/buckets/foo/artifacts/artifact/close", 200, `{}`)
		err := sa.Close()
		require.NoError(t, err)
	}
}
Example No. 10
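// BenchmarkInboundParallel starts one client per parallel runner, counts the
// successful requests and reports the overall requests per second.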
func BenchmarkInboundParallel(b *testing.B) {
	var reqCount int32
	serverAddr, err := setupBenchServer()
	require.NoError(b, err, "setupBenchServer failed")

	started := time.Now()

	b.RunParallel(func(pb *testing.PB) {
		// Start a client for each runner
		client, err := startClient(serverAddr)
		require.NoError(b, err, "startClient failed")
		defer client.Close()

		for pb.Next() {
			client.CallAndWait()
			atomic.AddInt32(&reqCount, int32(1))
		}
		fmt.Println("Successful requests", client.numTimes, "Mean", client.mean)
		for err, count := range client.errors {
			fmt.Printf("%v: %v\n", count, err)
		}
	})

	duration := time.Since(started)
	fmt.Println("Requests", reqCount, "RPS: ", float64(reqCount)/duration.Seconds())
}
Example No. 11
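// Test_StepTemplate runs the bitrise "test" workflow against the plain, bash
// toolkit and go toolkit step templates.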
func Test_StepTemplate(t *testing.T) {
	configPth := "bitrise.yml"

	t.Log("step template test")
	{
		cmd := cmdex.NewCommand(binPath(), "run", "test", "--config", configPth)
		cmd.SetDir("step_template")
		out, err := cmd.RunAndReturnTrimmedCombinedOutput()
		require.NoError(t, err, out)
	}

	t.Log("bash toolkit step template test")
	{
		cmd := cmdex.NewCommand(binPath(), "run", "test", "--config", configPth)
		cmd.SetDir("bash_toolkit_step_template")
		out, err := cmd.RunAndReturnTrimmedCombinedOutput()
		require.NoError(t, err, out)
	}

	t.Log("go toolkit step template test")
	{
		cmd := cmdex.NewCommand(binPath(), "run", "test", "--config", configPth)
		cmd.SetDir("go_toolkit_step_template")
		out, err := cmd.RunAndReturnTrimmedCombinedOutput()
		require.NoError(t, err, out)
	}
}
Example No. 12
// If applying a change fails due to a prefix error, changeTargetMeta fails outright
func TestChangeTargetMetaFailsIfPrefixError(t *testing.T) {
	repo, cs, err := testutils.EmptyRepo("docker.com/notary")
	require.NoError(t, err)

	newKey, err := cs.Create("targets/level1", "docker.com/notary", data.ED25519Key)
	require.NoError(t, err)

	err = repo.UpdateDelegationKeys("targets/level1", []data.PublicKey{newKey}, []string{}, 1)
	require.NoError(t, err)
	err = repo.UpdateDelegationPaths("targets/level1", []string{"pathprefix"}, []string{}, false)
	require.NoError(t, err)

	hash := sha256.Sum256([]byte{})
	f := &data.FileMeta{
		Length: 1,
		Hashes: map[string][]byte{
			"sha256": hash[:],
		},
	}
	fjson, err := json.Marshal(f)
	require.NoError(t, err)

	err = changeTargetMeta(repo, &changelist.TUFChange{
		Actn:       changelist.ActionCreate,
		Role:       "targets/level1",
		ChangeType: "target",
		ChangePath: "notPathPrefix",
		Data:       fjson,
	})
	require.Error(t, err)

	// no target in targets or targets/level1
	require.Empty(t, repo.Targets[data.CanonicalTargetsRole].Signed.Targets)
	require.Empty(t, repo.Targets["targets/level1"].Signed.Targets)
}
Example No. 13
// Applying a delegation whose parent doesn't exist fails.
func TestApplyTargetsDelegationParentDoesntExist(t *testing.T) {
	repo, cs, err := testutils.EmptyRepo("docker.com/notary")
	require.NoError(t, err)

	// make sure a key exists for the previous level, so it's not a missing
	// key error, but we don't care about this key
	_, err = cs.Create("targets/level1", "docker.com/notary", data.ED25519Key)
	require.NoError(t, err)

	newKey, err := cs.Create("targets/level1/level2", "docker.com/notary", data.ED25519Key)
	require.NoError(t, err)

	// create delegation
	kl := data.KeyList{newKey}
	td := &changelist.TUFDelegation{
		NewThreshold: 1,
		AddKeys:      kl,
	}

	tdJSON, err := json.Marshal(td)
	require.NoError(t, err)

	ch := changelist.NewTUFChange(
		changelist.ActionCreate,
		"targets/level1/level2",
		changelist.TypeTargetsDelegation,
		"",
		tdJSON,
	)

	err = applyTargetsChange(repo, nil, ch)
	require.Error(t, err)
	require.IsType(t, data.ErrInvalidRole{}, err)
}
Example No. 14
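// Applying a delegation change whose payload is not valid JSON fails.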
func TestApplyTargetsDelegationInvalidJSONContent(t *testing.T) {
	repo, cs, err := testutils.EmptyRepo("docker.com/notary")
	require.NoError(t, err)

	newKey, err := cs.Create("targets/level1", "docker.com/notary", data.ED25519Key)
	require.NoError(t, err)

	// create delegation
	kl := data.KeyList{newKey}
	td := &changelist.TUFDelegation{
		NewThreshold: 1,
		AddKeys:      kl,
		AddPaths:     []string{"level1"},
	}

	tdJSON, err := json.Marshal(td)
	require.NoError(t, err)

	ch := changelist.NewTUFChange(
		changelist.ActionCreate,
		"targets/level1",
		changelist.TypeTargetsDelegation,
		"",
		tdJSON[1:],
	)

	err = applyTargetsChange(repo, nil, ch)
	require.Error(t, err)
}
Example No. 15
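// TestDiskMailboxPush pushes a message into a disk-backed mailbox and checks
// that polling returns the same message.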
func TestDiskMailboxPush(t *testing.T) {
	dir, err := ioutil.TempDir("", "mailbox")
	if err != nil {
		panic(err)
	}

	defer os.RemoveAll(dir)

	r, err := NewDiskStorage(dir)
	if err != nil {
		panic(err)
	}

	defer r.Close()

	m := r.Mailbox("a")

	msg := vega.Msg([]byte("hello"))

	err = m.Push(msg)
	require.NoError(t, err)

	out, err := m.Poll()
	require.NoError(t, err)

	assert.True(t, out.Equal(msg), "wrong value")
}
Example No. 16
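// TestPushLogChunkCancelledContext verifies that once the client's context is
// cancelled, closing the artifact fails with a non-retriable error.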
func TestPushLogChunkCancelledContext(t *testing.T) {
	ts := testserver.NewTestServer(t)
	defer ts.CloseAndAssertExpectations()

	ctx, cancel := context.WithCancel(context.Background())
	client := NewArtifactStoreClientWithContext(ts.URL, 100*time.Millisecond, ctx)

	ts.ExpectAndRespond("POST", "/buckets/", http.StatusOK, `{"Id": "foo"}`)
	ts.ExpectAndRespond("POST", "/buckets/foo/artifacts", http.StatusOK, `{"Name": "artifact"}`)

	b, _ := client.NewBucket("foo", "bar", 32)
	sa, err := b.NewChunkedArtifact("artifact")
	require.NotNil(t, sa)
	require.NoError(t, err)

	// Cancel the context to prevent any further requests
	cancel()

	{
		err := sa.AppendLog("console contents")
		require.NoError(t, err)
		err = sa.Close()
		require.Error(t, err)
		require.False(t, err.IsRetriable())
	}
}
Example No. 17
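// TestDeleteRecords adds two records for an ident and checks that a wildcard
// delete removes both of them.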
func TestDeleteRecords(t *testing.T) {
	var (
		id    = "foobar"
		name  = "foo.weave."
		addr1 = "10.2.2.3/24"
		addr2 = "10.2.7.8/24"
	)

	EnableDebugLogging(testing.Verbose())

	zone, err := NewZoneDb(ZoneConfig{})
	require.NoError(t, err)
	err = zone.Start()
	require.NoError(t, err)
	defer zone.Stop()

	for _, addr := range []string{addr1, addr2} {
		ip, _, _ := net.ParseCIDR(addr)
		err := zone.AddRecord(id, name, ip)
		require.NoError(t, err)
	}

	_, err = zone.LookupName(name)
	require.NoError(t, err)

	count := zone.DeleteRecords(id, "", net.IP{})
	require.Equal(t, 2, count, "wildcard delete failed")
	_, err = zone.LookupName(name)
	wt.AssertErrorType(t, (*LookupError)(nil), err, "after deleting records for ident")
}
Example No. 18
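// TestKey checks that deriving keys from a password and optional salt produces
// the expected dataKey, nameKey and nameTweak, and that empty credentials
// leave the keys zeroed.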
func TestKey(t *testing.T) {
	c, err := newCipher(NameEncryptionStandard, "", "")
	assert.NoError(t, err)

	// Check zero keys OK
	assert.Equal(t, [32]byte{}, c.dataKey)
	assert.Equal(t, [32]byte{}, c.nameKey)
	assert.Equal(t, [16]byte{}, c.nameTweak)

	require.NoError(t, c.Key("potato", ""))
	assert.Equal(t, [32]byte{0x74, 0x55, 0xC7, 0x1A, 0xB1, 0x7C, 0x86, 0x5B, 0x84, 0x71, 0xF4, 0x7B, 0x79, 0xAC, 0xB0, 0x7E, 0xB3, 0x1D, 0x56, 0x78, 0xB8, 0x0C, 0x7E, 0x2E, 0xAF, 0x4F, 0xC8, 0x06, 0x6A, 0x9E, 0xE4, 0x68}, c.dataKey)
	assert.Equal(t, [32]byte{0x76, 0x5D, 0xA2, 0x7A, 0xB1, 0x5D, 0x77, 0xF9, 0x57, 0x96, 0x71, 0x1F, 0x7B, 0x93, 0xAD, 0x63, 0xBB, 0xB4, 0x84, 0x07, 0x2E, 0x71, 0x80, 0xA8, 0xD1, 0x7A, 0x9B, 0xBE, 0xC1, 0x42, 0x70, 0xD0}, c.nameKey)
	assert.Equal(t, [16]byte{0xC1, 0x8D, 0x59, 0x32, 0xF5, 0x5B, 0x28, 0x28, 0xC5, 0xE1, 0xE8, 0x72, 0x15, 0x52, 0x03, 0x10}, c.nameTweak)

	require.NoError(t, c.Key("Potato", ""))
	assert.Equal(t, [32]byte{0xAE, 0xEA, 0x6A, 0xD3, 0x47, 0xDF, 0x75, 0xB9, 0x63, 0xCE, 0x12, 0xF5, 0x76, 0x23, 0xE9, 0x46, 0xD4, 0x2E, 0xD8, 0xBF, 0x3E, 0x92, 0x8B, 0x39, 0x24, 0x37, 0x94, 0x13, 0x3E, 0x5E, 0xF7, 0x5E}, c.dataKey)
	assert.Equal(t, [32]byte{0x54, 0xF7, 0x02, 0x6E, 0x8A, 0xFC, 0x56, 0x0A, 0x86, 0x63, 0x6A, 0xAB, 0x2C, 0x9C, 0x51, 0x62, 0xE5, 0x1A, 0x12, 0x23, 0x51, 0x83, 0x6E, 0xAF, 0x50, 0x42, 0x0F, 0x98, 0x1C, 0x86, 0x0A, 0x19}, c.nameKey)
	assert.Equal(t, [16]byte{0xF8, 0xC1, 0xB6, 0x27, 0x2D, 0x52, 0x9B, 0x4A, 0x8F, 0xDA, 0xEB, 0x42, 0x4A, 0x28, 0xDD, 0xF3}, c.nameTweak)

	require.NoError(t, c.Key("potato", "sausage"))
	assert.Equal(t, [32]uint8{0x8e, 0x9b, 0x6b, 0x99, 0xf8, 0x69, 0x4, 0x67, 0xa0, 0x71, 0xf9, 0xcb, 0x92, 0xd0, 0xaa, 0x78, 0x7f, 0x8f, 0xf1, 0x78, 0xbe, 0xc9, 0x6f, 0x99, 0x9f, 0xd5, 0x20, 0x6e, 0x64, 0x4a, 0x1b, 0x50}, c.dataKey)
	assert.Equal(t, [32]uint8{0x3e, 0xa9, 0x5e, 0xf6, 0x81, 0x78, 0x2d, 0xc9, 0xd9, 0x95, 0x5d, 0x22, 0x5b, 0xfd, 0x44, 0x2c, 0x6f, 0x5d, 0x68, 0x97, 0xb0, 0x29, 0x1, 0x5c, 0x6f, 0x46, 0x2e, 0x2a, 0x9d, 0xae, 0x2c, 0xe3}, c.nameKey)
	assert.Equal(t, [16]uint8{0xf1, 0x7f, 0xd7, 0x14, 0x1d, 0x65, 0x27, 0x4f, 0x36, 0x3f, 0xc2, 0xa0, 0x4d, 0xd2, 0x14, 0x8a}, c.nameTweak)

	require.NoError(t, c.Key("potato", "Sausage"))
	assert.Equal(t, [32]uint8{0xda, 0x81, 0x8c, 0x67, 0xef, 0x11, 0xf, 0xc8, 0xd5, 0xc8, 0x62, 0x4b, 0x7f, 0xe2, 0x9e, 0x35, 0x35, 0xb0, 0x8d, 0x79, 0x84, 0x89, 0xac, 0xcb, 0xa0, 0xff, 0x2, 0x72, 0x3, 0x1a, 0x5e, 0x64}, c.dataKey)
	assert.Equal(t, [32]uint8{0x2, 0x81, 0x7e, 0x7b, 0xea, 0x99, 0x81, 0x5a, 0xd0, 0x2d, 0xb9, 0x64, 0x48, 0xb0, 0x28, 0x27, 0x7c, 0x20, 0xb4, 0xd4, 0xa4, 0x68, 0xad, 0x4e, 0x5c, 0x29, 0xf, 0x79, 0xef, 0xee, 0xdb, 0x3b}, c.nameKey)
	assert.Equal(t, [16]uint8{0x9a, 0xb5, 0xb, 0x3d, 0xcb, 0x60, 0x59, 0x55, 0xa5, 0x4d, 0xe6, 0xb6, 0x47, 0x3, 0x23, 0xe2}, c.nameTweak)

	require.NoError(t, c.Key("", ""))
	assert.Equal(t, [32]byte{}, c.dataKey)
	assert.Equal(t, [32]byte{}, c.nameKey)
	assert.Equal(t, [16]byte{}, c.nameTweak)
}
Example No. 19
// Create a file and sync it. Change the last modified date and the
// file contents but not the size. If we're doing sync by size
// only, we expect nothing to be transferred on the second sync.
func TestSyncSizeOnly(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	fs.Config.SizeOnly = true
	defer func() { fs.Config.SizeOnly = false }()

	file1 := r.WriteFile("sizeonly", "potato", t1)
	fstest.CheckItems(t, r.flocal, file1)

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred exactly one file.
	assert.Equal(t, int64(1), fs.Stats.GetTransfers())
	fstest.CheckItems(t, r.fremote, file1)

	// Update mtime, md5sum but not length of file
	file2 := r.WriteFile("sizeonly", "POTATO", t2)
	fstest.CheckItems(t, r.flocal, file2)

	fs.Stats.ResetCounters()
	err = fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred no files
	assert.Equal(t, int64(0), fs.Stats.GetTransfers())
	fstest.CheckItems(t, r.flocal, file2)
	fstest.CheckItems(t, r.fremote, file1)
}
Example No. 20
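// doTestShortcodeCrossrefs builds a site containing a ref or relref shortcode
// and checks that the rendered page contains the expected URL.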
func doTestShortcodeCrossrefs(t *testing.T, relative bool) {
	testCommonResetState()
	viper.Set("baseURL", baseURL)

	var refShortcode string
	var expectedBase string

	if relative {
		refShortcode = "relref"
		expectedBase = "/bar"
	} else {
		refShortcode = "ref"
		expectedBase = baseURL
	}

	path := filepath.FromSlash("blog/post.md")
	in := fmt.Sprintf(`{{< %s "%s" >}}`, refShortcode, path)

	writeSource(t, "content/"+path, simplePageWithURL+": "+in)

	expected := fmt.Sprintf(`%s/simple/url/`, expectedBase)

	sites, err := newHugoSitesDefaultLanguage()
	require.NoError(t, err)

	require.NoError(t, sites.Build(BuildCfg{}))
	require.Len(t, sites.Sites[0].Pages, 1)

	output := string(sites.Sites[0].Pages[0].Content)

	if !strings.Contains(output, expected) {
		t.Errorf("Got\n%q\nExpected\n%q", output, expected)
	}
}
Example No. 21
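// TestSyncIgnoreTimes checks that a file which is identical on both sides is
// transferred again once IgnoreTimes is set.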
func TestSyncIgnoreTimes(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteBoth("existing", "potato", t1)
	fstest.CheckItems(t, r.fremote, file1)

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred exactly 0 files because the
	// files were identical.
	assert.Equal(t, int64(0), fs.Stats.GetTransfers())

	fs.Config.IgnoreTimes = true
	defer func() { fs.Config.IgnoreTimes = false }()

	fs.Stats.ResetCounters()
	err = fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred exactly one file even though the
	// files were identical.
	assert.Equal(t, int64(1), fs.Stats.GetTransfers())

	fstest.CheckItems(t, r.flocal, file1)
	fstest.CheckItems(t, r.fremote, file1)
}
Example No. 22
// TestFsCopy tests Copy
func TestFsCopy(t *testing.T) {
	skipIfNotOk(t)

	// Check have Copy
	_, ok := remote.(fs.Copier)
	if !ok {
		t.Skip("FS has no Copier interface")
	}

	var file1Copy = file1
	file1Copy.Path += "-copy"

	// do the copy
	src := findObject(t, file1.Path)
	dst, err := remote.(fs.Copier).Copy(src, file1Copy.Path)
	require.NoError(t, err)

	// check file exists in new listing
	fstest.CheckListing(t, remote, []fstest.Item{file1, file2, file1Copy})

	// Check dst lightly - list above has checked ModTime/Hashes
	assert.Equal(t, file1Copy.Path, dst.Remote())

	// Delete copy
	err = dst.Remove()
	require.NoError(t, err)
}
Example No. 23
// Test with exclude and delete excluded
func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) // 60 bytes
	file2 := r.WriteBoth("empty space", "", t2)
	file3 := r.WriteBoth("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
	fstest.CheckItems(t, r.fremote, file1, file2, file3)
	fstest.CheckItems(t, r.flocal, file1, file2, file3)

	fs.Config.Filter.MaxSize = 40
	fs.Config.Filter.DeleteExcluded = true
	defer func() {
		fs.Config.Filter.MaxSize = -1
		fs.Config.Filter.DeleteExcluded = false
	}()

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)
	fstest.CheckItems(t, r.fremote, file2)

	// Check sync the other way round to make sure enormous gets
	// deleted even though it is excluded
	fs.Stats.ResetCounters()
	err = fs.Sync(r.flocal, r.fremote)
	require.NoError(t, err)
	fstest.CheckItems(t, r.flocal, file2)
}
Example No. 24
// TestFsMove tests Move
func TestFsMove(t *testing.T) {
	skipIfNotOk(t)

	// Check have Move
	_, ok := remote.(fs.Mover)
	if !ok {
		t.Skip("FS has no Mover interface")
	}

	var file1Move = file1
	file1Move.Path += "-move"

	// do the move
	src := findObject(t, file1.Path)
	dst, err := remote.(fs.Mover).Move(src, file1Move.Path)
	require.NoError(t, err)

	// check file exists in new listing
	fstest.CheckListing(t, remote, []fstest.Item{file2, file1Move})

	// Check dst lightly - list above has checked ModTime/Hashes
	assert.Equal(t, file1Move.Path, dst.Remote())

	// move it back
	src = findObject(t, file1Move.Path)
	_, err = remote.(fs.Mover).Move(src, file1.Path)
	require.NoError(t, err)

	// check file exists in new listing
	fstest.CheckListing(t, remote, []fstest.Item{file2, file1})
}
Example No. 25
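// TestZfsGeneratesMetrics checks that kstat metrics are gathered and tagged
// with the pool list for each configured set of KstatMetrics.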
func TestZfsGeneratesMetrics(t *testing.T) {
	var acc testutil.Accumulator

	z := &Zfs{
		KstatMetrics: []string{"vdev_cache_stats"},
		sysctl:       mock_sysctl,
		zpool:        mock_zpool,
	}
	err := z.Gather(&acc)
	require.NoError(t, err)

	// four pools, vdev_cache_stats metrics
	tags := map[string]string{
		"pools": "freenas-boot::red1::temp1::temp2",
	}
	intMetrics := getKstatMetricsVdevOnly()

	acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags)

	acc.Metrics = nil

	z = &Zfs{
		KstatMetrics: []string{"zfetchstats", "vdev_cache_stats"},
		sysctl:       mock_sysctl,
		zpool:        mock_zpool,
	}
	err = z.Gather(&acc)
	require.NoError(t, err)

	// four pools, vdev_cache_stats and zfetchstats metrics
	intMetrics = getKstatMetricsVdevAndZfetch()

	acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags)
}
Example No. 26
// TestFsDirMove tests DirMove
func TestFsDirMove(t *testing.T) {
	skipIfNotOk(t)

	// Check have DirMove
	_, ok := remote.(fs.DirMover)
	if !ok {
		t.Skip("FS has no DirMover interface")
	}

	// Check it can't move onto itself
	err := remote.(fs.DirMover).DirMove(remote)
	require.Equal(t, fs.ErrorDirExists, err)

	// new remote
	newRemote, _, removeNewRemote, err := fstest.RandomRemote(RemoteName, false)
	require.NoError(t, err)
	defer removeNewRemote()

	// try the move
	err = newRemote.(fs.DirMover).DirMove(remote)
	require.NoError(t, err)

	// check remotes
	// FIXME: Prints errors.
	fstest.CheckListing(t, remote, []fstest.Item{})
	fstest.CheckListing(t, newRemote, []fstest.Item{file2, file1})

	// move it back
	err = remote.(fs.DirMover).DirMove(newRemote)
	require.NoError(t, err)

	// check remotes
	fstest.CheckListing(t, remote, []fstest.Item{file2, file1})
	fstest.CheckListing(t, newRemote, []fstest.Item{})
}
Example No. 27
// Ensure that it's an error for a peer to send an invalid have message.
func TestPeerInvalidHave(t *testing.T) {
	cl, err := NewClient(&TestingConfig)
	require.NoError(t, err)
	defer cl.Close()
	ie := metainfo.InfoEx{
		Info: metainfo.Info{
			PieceLength: 1,
			Pieces:      make([]byte, 20),
			Files:       []metainfo.FileInfo{{Length: 1}},
		},
	}
	ie.UpdateBytes()
	tt, _new, err := cl.AddTorrentSpec(&TorrentSpec{
		Info:     &ie,
		InfoHash: ie.Hash(),
	})
	require.NoError(t, err)
	assert.True(t, _new)
	defer tt.Drop()
	cn := &connection{
		t: tt,
	}
	assert.NoError(t, cn.peerSentHave(0))
	assert.Error(t, cn.peerSentHave(1))
}
Example No. 28
// TestInit tests basic initialisation
func TestInit(t *testing.T) {
	var err error

	// Never ask for passwords, fail instead.
	// If your local config is encrypted set environment variable
	// "RCLONE_CONFIG_PASS=hunter2" (or your password)
	*fs.AskPassword = false
	fs.LoadConfig()
	fs.Config.Verbose = *verbose
	fs.Config.Quiet = !*verbose
	fs.Config.DumpHeaders = *dumpHeaders
	fs.Config.DumpBodies = *dumpBodies
	t.Logf("Using remote %q", RemoteName)
	if RemoteName == "" {
		RemoteName, err = fstest.LocalRemote()
		require.NoError(t, err)
	}
	subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(RemoteName)
	require.NoError(t, err)

	remote, err = fs.NewFs(subRemoteName)
	if err == fs.ErrorNotFoundInConfigFile {
		t.Logf("Didn't find %q in config file - skipping tests", RemoteName)
		return
	}
	require.NoError(t, err)
	fstest.TestMkdir(t, remote)
}
Example No. 29
// CreateTestFeature creates a random feature to be used during testing.
func CreateTestFeature(t *testing.T) *models.Feature {
	s, err := store.NewMySQLStore(
		os.Getenv("LAIKA_MYSQL_USERNAME"),
		os.Getenv("LAIKA_MYSQL_PASSWORD"),
		os.Getenv("LAIKA_MYSQL_HOST"),
		os.Getenv("LAIKA_MYSQL_PORT"),
		os.Getenv("LAIKA_MYSQL_DBNAME"),
	)
	require.NoError(t, err)

	env, err := s.GetEnvironmentByName("test")
	if err != nil {
		env = &models.Environment{
			Name: "test",
		}
		err = s.CreateEnvironment(env)
		require.NoError(t, err)
	}

	feature := &models.Feature{
		Name: "test_feature" + store.Token(),
		Status: map[string]bool{
			"test": true,
		},
	}

	err = s.CreateFeature(feature)
	require.NoError(t, err)

	return feature
}
Example No. 30
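// TestInboundExistingMethods verifies that a method registered on the channel
// before the inbound starts can still be called once the inbound is running.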
func TestInboundExistingMethods(t *testing.T) {
	// Create a channel with an existing "echo" method.
	ch, err := tchannel.NewChannel("foo", nil)
	require.NoError(t, err)
	json.Register(ch, json.Handlers{
		"echo": func(ctx json.Context, req map[string]string) (map[string]string, error) {
			return req, nil
		},
	}, nil)

	i := NewInbound(ch)
	service := transport.ServiceDetail{Name: "derp", Registry: new(transporttest.MockRegistry)}
	require.NoError(t, i.Start(service, transport.NoDeps))
	defer i.Stop()

	// Make a call to the "echo" method which should call our pre-registered method.
	ctx, cancel := json.NewContext(time.Second)
	defer cancel()

	var resp map[string]string
	arg := map[string]string{"k": "v"}

	svc := ch.ServiceName()
	peer := ch.Peers().GetOrAdd(ch.PeerInfo().HostPort)
	err = json.CallPeer(ctx, peer, svc, "echo", arg, &resp)
	require.NoError(t, err, "Call failed")
	assert.Equal(t, arg, resp, "Response mismatch")
}