Code Example #1
// TestItemsAreInLongTermStorage - make sure that each tar file is
// stored in our S3 test storage bucket, with correct metadata.
func TestItemsAreInLongTermStorage(t *testing.T) {
	if !apt_testutil.ShouldRunIntegrationTests() {
		t.Skip("Skipping integration test. Set ENV var RUN_EXCHANGE_INTEGRATION=true if you want to run them.")
	}
	_context, err := apt_testutil.GetContext("integration.json")
	require.Nil(t, err, "Could not create context")

	localClient, err := network.NewDPNRestClient(
		_context.Config.DPN.RestClient.LocalServiceURL,
		_context.Config.DPN.RestClient.LocalAPIRoot,
		_context.Config.DPN.RestClient.LocalAuthToken,
		_context.Config.DPN.LocalNode,
		_context.Config.DPN)
	require.Nil(t, err)

	// s3List lists bucket contents.
	s3List := apt_network.NewS3ObjectList(
		constants.AWSVirginia,
		_context.Config.DPN.DPNPreservationBucket,
		int64(100),
	)
	// s3Head gets metadata about specific objects in S3/Glacier.
	s3Head := apt_network.NewS3Head(_context.Config.APTrustS3Region,
		_context.Config.DPN.DPNPreservationBucket)

	for _, identifier := range dpn_testutil.BAG_IDS {
		resp := localClient.DPNBagGet(identifier)
		dpnBag := resp.Bag()
		require.NotNil(t, dpnBag)
		if dpnBag.IngestNode == _context.Config.DPN.LocalNode {
			continue // we would not have replicated our own bag
		}
		tarFileName := fmt.Sprintf("%s.tar", identifier)
		s3List.GetList(tarFileName)
		require.NotEmpty(t, s3List.Response.Contents, "%s not found in S3", tarFileName)
		object := s3List.Response.Contents[0]
		fiveMinutesAgo := time.Now().UTC().Add(-5 * time.Minute)
		require.NotNil(t, object.LastModified)
		assert.True(t, object.LastModified.After(fiveMinutesAgo))

		// Make sure each item has the expected metadata.
		// s3Head.Response.Metadata is map[string]*string.
		s3Head.Head(tarFileName)
		require.Empty(t, s3Head.ErrorMessage)
		metadata := s3Head.Response.Metadata
		require.NotNil(t, metadata)
		// The Amazon library capitalizes the first letter of each metadata key.
		require.NotNil(t, metadata["From_node"])
		require.NotNil(t, metadata["Transfer_id"])
		require.NotNil(t, metadata["Member"])
		require.NotNil(t, metadata["Local_id"])
		require.NotNil(t, metadata["Version"])

		assert.NotEmpty(t, *metadata["From_node"])
		assert.NotEmpty(t, *metadata["Transfer_id"])
		assert.NotEmpty(t, *metadata["Member"])
		assert.NotEmpty(t, *metadata["Local_id"])
		assert.NotEmpty(t, *metadata["Version"])
	}
}
Code Example #2
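// TestS3ObjectGetList checks that listing the test bucket with an empty
// prefix succeeds and returns at least one object.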
func TestS3ObjectGetList(t *testing.T) {
	if !canTestS3() {
		return
	}
	s3ObjectList := network.NewS3ObjectList(
		constants.AWSVirginia,
		testBucket,
		int64(100),
	)
	s3ObjectList.GetList("")
	assert.Equal(t, "", s3ObjectList.ErrorMessage)
	assert.NotEmpty(t, s3ObjectList.Response.Contents)
}
Code Example #3
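// TestNewS3ObjectList verifies that the constructor stores the bucket,
// region, and max-keys values it was given.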
func TestNewS3ObjectList(t *testing.T) {
	if !canTestS3() {
		return
	}
	s3ObjectList := network.NewS3ObjectList(
		constants.AWSVirginia,
		testBucket,
		int64(100),
	)
	assert.Equal(t, testBucket, *s3ObjectList.ListObjectsInput.Bucket)
	assert.Equal(t, constants.AWSVirginia, s3ObjectList.AWSRegion)
	assert.Equal(t, int64(100), *s3ObjectList.ListObjectsInput.MaxKeys)
}
Code Example #4
// TestIngestStoreItemsAreInStorage makes sure that the items we sent off
// to long-term storage in AWS actually made it there.
func TestIngestStoreItemsAreInStorage(t *testing.T) {
	if !apt_testutil.ShouldRunIntegrationTests() {
		t.Skip("Skipping integration test. Set ENV var RUN_EXCHANGE_INTEGRATION=true if you want to run them.")
	}
	_context, err := apt_testutil.GetContext("integration.json")
	require.Nil(t, err, "Could not create context")
	maxItemsToList := int64(1)
	// s3List lists bucket contents.
	s3List := network.NewS3ObjectList(
		constants.AWSVirginia,
		_context.Config.DPN.DPNPreservationBucket,
		maxItemsToList)
	// s3Head gets metadata about specific objects in S3/Glacier.
	s3Head := network.NewS3Head(_context.Config.APTrustS3Region,
		_context.Config.DPN.DPNPreservationBucket)

	pathToLogFile := filepath.Join(_context.Config.LogDirectory, "dpn_ingest_store.json")
	for _, s3Key := range apt_testutil.INTEGRATION_GOOD_BAGS[0:7] {
		parts := strings.Split(s3Key, "/")
		localTarFileName := parts[1] // APTrust bag name. E.g. "test.edu.test_123.tar"
		manifest, err := apt_testutil.FindDPNIngestManifestInLog(pathToLogFile, localTarFileName)
		require.Nil(t, err, "Could not find JSON record for %s", localTarFileName)
		parts = strings.Split(manifest.StorageURL, "/")
		dpnTarFileName := parts[len(parts)-1] // DPN bag name: <uuid>.tar
		s3List.GetList(dpnTarFileName)
		require.Empty(t, s3List.ErrorMessage)
		require.EqualValues(t, 1, len(s3List.Response.Contents), "Nothing in S3 for %s", dpnTarFileName)
		obj := s3List.Response.Contents[0]
		assert.Equal(t, dpnTarFileName, *obj.Key)

		// Make sure each item has the expected metadata.
		// s3Head.Response.Metadata is map[string]*string.
		s3Head.Head(dpnTarFileName)
		require.Empty(t, s3Head.ErrorMessage)
		metadata := s3Head.Response.Metadata
		require.NotNil(t, metadata, dpnTarFileName)
		// Note that the Amazon library transforms the first letter of
		// each metadata key to upper case.
		require.NotNil(t, metadata["From_node"], dpnTarFileName)
		require.NotNil(t, metadata["Transfer_id"], dpnTarFileName)
		require.NotNil(t, metadata["Member"], dpnTarFileName)
		require.NotNil(t, metadata["Local_id"], dpnTarFileName)
		require.NotNil(t, metadata["Version"], dpnTarFileName)

		assert.NotEmpty(t, *metadata["From_node"], dpnTarFileName)
		assert.NotEmpty(t, *metadata["Transfer_id"], dpnTarFileName)
		assert.NotEmpty(t, *metadata["Member"], dpnTarFileName)
		assert.NotEmpty(t, *metadata["Local_id"], dpnTarFileName)
		assert.NotEmpty(t, *metadata["Version"], dpnTarFileName)
	}
}
Code Example #5
File: apt_bucket_reader.go  Project: APTrust/exchange
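// processBucket pages through the named S3 bucket, recording each key in
// the reader's stats and passing each object on to processS3Object.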
func (reader *APTBucketReader) processBucket(bucketName string) {
	reader.Context.MessageLog.Debug("Checking bucket %s", bucketName)
	s3ObjList := network.NewS3ObjectList(reader.Context.Config.APTrustS3Region,
		bucketName, MAX_KEYS)
	keepFetching := true
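	// Keep fetching pages until S3 reports that the listing is no longer truncated.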
	for keepFetching {
		s3ObjList.GetList("")
		if s3ObjList.ErrorMessage != "" {
			if reader.stats != nil {
				reader.stats.AddError(s3ObjList.ErrorMessage)
			}
			reader.Context.MessageLog.Error(s3ObjList.ErrorMessage)
			break
		}
		for _, s3Object := range s3ObjList.Response.Contents {
			reader.stats.AddS3Item(fmt.Sprintf("%s/%s", bucketName, *s3Object.Key))
			reader.processS3Object(s3Object, bucketName)
		}
		keepFetching = *s3ObjList.Response.IsTruncated
	}
}
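
The examples above all follow the same pattern: construct an S3ObjectList with a region, bucket, and page size, call GetList, check ErrorMessage, then walk Response.Contents, paging until Response.IsTruncated is false. The following is a minimal, self-contained sketch of that pattern, not code from the project; the bucket name is hypothetical and the github.com/APTrust/exchange import paths are assumed from the project name, so adjust both to your setup.

package main

import (
	"fmt"

	"github.com/APTrust/exchange/constants" // assumed import path
	"github.com/APTrust/exchange/network"   // assumed import path
)

func main() {
	// Hypothetical bucket name; replace with a bucket you can read.
	bucket := "aptrust.test.preservation"
	s3List := network.NewS3ObjectList(constants.AWSVirginia, bucket, int64(100))

	keepFetching := true
	for keepFetching {
		// An empty prefix lists everything in the bucket.
		s3List.GetList("")
		if s3List.ErrorMessage != "" {
			fmt.Println("S3 list error:", s3List.ErrorMessage)
			return
		}
		for _, obj := range s3List.Response.Contents {
			fmt.Println(*obj.Key)
		}
		// As in the bucket reader above, keep looping while the
		// response is truncated.
		keepFetching = *s3List.Response.IsTruncated
	}
}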