Example No. 1
func (s3 *s3driver) CheckDataAndGetSize(dpconn, itemlocation, fileName string) (exist bool, size int64, err error) {
	bucket := getAwsInfoFromDpconn(dpconn)

	destFullPathFileName := bucket + "/" + itemlocation + "/" + fileName
	log.Info(destFullPathFileName)

	AWS_REGION = Env("AWS_REGION", false)

	svc := s3aws.New(session.New(&aws.Config{Region: aws.String(AWS_REGION)}))
	result, err := svc.ListObjects(&s3aws.ListObjectsInput{Bucket: aws.String(bucket),
		Prefix: aws.String(itemlocation + "/" + fileName)})
	if err != nil {
		log.Error("Failed to list objects", err)
		return exist, size, err
	}

	exist = false
	for _, v := range result.Contents {
		log.Infof("Tag:%s, key:%s, size:%v\n", aws.StringValue(v.ETag), aws.StringValue(v.Key), aws.Int64Value(v.Size))
		// Keys returned by a prefixed ListObjects are fully qualified,
		// so match against the full key, not the bare file name.
		if aws.StringValue(v.Key) == itemlocation+"/"+fileName {
			size = aws.Int64Value(v.Size)
			exist = true
		}
	}

	return
}
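A standalone sketch of the ListObjects behavior the function above depends on: returned keys are fully qualified, so an existence match must compare against itemlocation + "/" + fileName. The region, bucket, and prefix below are placeholders, and running it needs real AWS credentials.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
	out, err := svc.ListObjects(&s3.ListObjectsInput{
		Bucket: aws.String("my-bucket"),      // placeholder bucket
		Prefix: aws.String("items/data.csv"), // itemlocation + "/" + fileName
	})
	if err != nil {
		panic(err)
	}
	for _, obj := range out.Contents {
		// Each Key is the full object key, prefix included, e.g. "items/data.csv".
		fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
	}
}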
Example No. 2
func (hdfs *hdfsdriver) StoreFile(status, filename, dpconn, dp, itemlocation, destfile string) string {

	log.Infof("Begin to upload %v to %v\n", filename, dp)

	client, err := getClient(dpconn)
	if err != nil {
		log.Error("Failed to get a client", err)
		status = "put to hdfs err"
		return status
	}
	defer client.Close()

	err = client.MkdirAll("/"+itemlocation, 0777) // the mode must be an octal literal
	if err != nil {
		log.Error("Failed to mkdirall in hdfs", err)
		status = "put to hdfs err"
		return status
	}

	hdfsfile := "/" + itemlocation + "/" + destfile
	err = client.CopyToRemote(filename, hdfsfile)
	if err != nil {
		log.Error("Failed to CopyToRemote", err)
		status = "put to hdfs err"
		return status
	}

	status = "put to hdfs ok"
	log.Info("Successfully uploaded to", itemlocation, "in hdfs")
	return status
}
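A minimal connection sketch, assuming getClient wraps github.com/colinmarc/hdfs, whose *hdfs.Client provides the MkdirAll and CopyToRemote calls used above; the namenode address and paths are placeholders.

package main

import (
	"log"

	"github.com/colinmarc/hdfs"
)

func main() {
	client, err := hdfs.New("namenode:8020") // placeholder namenode address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	if err := client.MkdirAll("/items", 0755); err != nil {
		log.Fatal(err)
	}
	if err := client.CopyToRemote("local.dat", "/items/remote.dat"); err != nil {
		log.Fatal(err)
	}
}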
Example No. 3
func deleteItemsAccordingToHeartbeat(body []byte) {
	log.Debug("deleteItemsAccordingToHeartbeat() BEGIN:", string(body))
	// Pre-point the nested interface{} Data fields at concrete values so
	// json.Unmarshal can fill itemsdelete directly.
	result := ds.Result{}
	itemEvent := &Event{}
	result.Data = itemEvent
	itemsdelete := []ItemDel{}
	itemEvent.Data = &itemsdelete

	if err := json.Unmarshal(body, &result); err == nil {
		log.Debug("items delete:", itemsdelete)
		for _, v := range itemsdelete {
			log.Debugf("delete item according to heartbeat: %v/%v\n", v.Repname, v.Itemname)
			err := delTagsForDelItem(v.Repname, v.Itemname)
			if err != nil {
				log.Error(err)
				return
			}

			err = delItem(v.Repname, v.Itemname)
			if err != nil {
				log.Error(err)
				return
			}
			log.Infof("Delete data item %v/%v according to heartbeat successfully.\n", v.Repname, v.Itemname)
		}
	} else {
		log.Warn("Unmarshal error:", err)
	}
}
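A self-contained sketch of that decode pattern, with hypothetical stand-ins for the project's ds.Result, Event, and ItemDel types: because each interface{} Data field already holds a non-nil pointer, json.Unmarshal writes through it into the innermost slice.

package main

import (
	"encoding/json"
	"fmt"
)

type Result struct {
	Code int         `json:"code"`
	Data interface{} `json:"data"`
}

type Event struct {
	Type string      `json:"type"`
	Data interface{} `json:"data"`
}

type ItemDel struct {
	Repname  string `json:"repname"`
	Itemname string `json:"itemname"`
}

func main() {
	body := []byte(`{"code":0,"data":{"type":"del","data":[{"repname":"repo","itemname":"item"}]}}`)

	result := Result{}
	event := &Event{}
	result.Data = event
	items := []ItemDel{}
	event.Data = &items

	if err := json.Unmarshal(body, &result); err != nil {
		panic(err)
	}
	fmt.Println(items) // [{repo item}]
}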
Example No. 4
func Env(name string, required bool) string {
	s := os.Getenv(name)
	if required && s == "" {
		panic("env variable required, " + name)
	}
	log.Infof("[env][%s] %s\n", name, s)
	return s
}
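A runnable sketch of the same required-environment-variable pattern using only the standard library, since Env above depends on the project's log package; mustEnv mirrors Env(name, true).

package main

import (
	"fmt"
	"os"
)

func mustEnv(name string) string {
	s := os.Getenv(name)
	if s == "" {
		panic("env variable required, " + name)
	}
	return s
}

func main() {
	os.Setenv("AWS_REGION", "us-east-1")
	fmt.Println(mustEnv("AWS_REGION")) // prints: us-east-1
}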
Example No. 5
func (s3 *s3driver) CheckItemLocation(datapoolname, dpconn, itemlocation string) error {
	bucket := getAwsInfoFromDpconn(dpconn)

	AWS_REGION = Env("AWS_REGION", false)

	svc := s3aws.New(session.New(&aws.Config{Region: aws.String(AWS_REGION)}))
	//result, err := svc.ListBuckets(&s3aws.ListBucketsInput{})
	result, err := svc.ListObjects(&s3aws.ListObjectsInput{Bucket: aws.String(bucket),
		Prefix: aws.String(itemlocation)})
	if err != nil {
		log.Println("Failed to list buckets content", err)
		return err
	}

	if len(result.Contents) == 0 {
		return errors.New("DataItem does not exist in the bucket.")
	}

	// A non-empty, prefix-scoped listing is itself the existence check;
	// just log what was found.
	log.Println("Objects:")
	for _, objects := range result.Contents {
		log.Infof("object:%s, %s \n", aws.StringValue(objects.Key), aws.StringValue(objects.ETag))
	}

	log.Println(gDpPath + "/" + bucket + "/" + itemlocation)
	err = os.MkdirAll(gDpPath+"/"+bucket+"/"+itemlocation, 0777)
	if err != nil {
		log.Error(err)
	}
	return err
}
Example No. 6
func (s3 *s3driver) StoreFile(status, filename, dpconn, dp, itemlocation, destfile string) string {
	bucket := getAwsInfoFromDpconn(dpconn)

	//AWS_SECRET_ACCESS_KEY = Env("AWS_SECRET_ACCESS_KEY", false)
	//AWS_ACCESS_KEY_ID = Env("AWS_ACCESS_KEY_ID", false)
	AWS_REGION = Env("AWS_REGION", false)

	file, err := os.Open(filename)
	if err != nil {
		l := log.Error("Failed to open file", err)
		logq.LogPutqueue(l)
		status = "put to s3 err"
		return status
	}

	log.Infof("Begin to upload %v to %v\n", filename, dp)

	// Not required, but the file can be gzipped before uploading, using an
	// io.Pipe so the compressed stream is produced while the uploader reads it.
	reader, writer := io.Pipe()
	go func() {
		gw := gzip.NewWriter(writer)
		_, err := io.Copy(gw, file)

		file.Close()
		gw.Close()
		// Propagate any copy error to the uploader instead of dropping it;
		// CloseWithError(nil) is equivalent to Close.
		writer.CloseWithError(err)

		//updateJobQueueStatus(jobid, "puttos3ok")
	}()
	uploader := s3manager.NewUploader(session.New(&aws.Config{Region: aws.String(AWS_REGION)}))
	//uploader := s3manager.NewUploader(session.New(aws.NewConfig()))
	result, err := uploader.Upload(&s3manager.UploadInput{
		Body:   reader,
		Bucket: aws.String(bucket),
		Key:    aws.String( /*dp + "/" + */ itemlocation + "/" + destfile + ".gz"),
	})
	if err != nil {
		log.Error("Failed to upload", err)
		status = "put to s3 err"
		return status
	}
	status = "put to s3 ok"
	log.Info("Successfully uploaded to", result.Location)
	return status
}
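A self-contained sketch of that io.Pipe streaming pattern, decoupled from S3 so it runs anywhere: a goroutine gzips data into the write end while the consumer drains the read end (in the real function, the consumer is the s3manager uploader).

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

func main() {
	reader, writer := io.Pipe()

	// Producer: compress into the pipe's write end.
	go func() {
		gw := gzip.NewWriter(writer)
		_, err := io.Copy(gw, strings.NewReader("hello, s3"))
		gw.Close()
		writer.CloseWithError(err) // CloseWithError(nil) behaves like Close
	}()

	// Consumer: drain the read end.
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, reader); err != nil {
		panic(err)
	}
	fmt.Printf("compressed to %d bytes\n", buf.Len())
}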
Example No. 7
func getJsonBuildingErrorJson() []byte {
	// The message value must be quoted for the output to be valid JSON.
	return []byte(log.Infof(`{"code": %d, "msg": "%s"}`, cmd.ErrorMarshal, "Json building error"))
}
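An alternative sketch that builds the same payload with encoding/json instead of string formatting, so quoting and escaping are always correct; the numeric code here is a placeholder for cmd.ErrorMarshal.

package main

import (
	"encoding/json"
	"fmt"
)

func errorJSON(code int, msg string) []byte {
	// Marshal cannot fail for a map of plain string and int values.
	b, _ := json.Marshal(map[string]interface{}{"code": code, "msg": msg})
	return b
}

func main() {
	fmt.Println(string(errorJSON(7, "Json building error")))
	// Output: {"code":7,"msg":"Json building error"}
}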