Example #1
// Put the specified resource to s3.
func (self *S3PutCommand) Put() error {

	fi, err := os.Stat(self.LocalFile)
	if err != nil {
		return err
	}

	fileReader, err := os.Open(self.LocalFile)
	if err != nil {
		return err
	}
	defer fileReader.Close()

	// get the appropriate session and bucket
	auth := &aws.Auth{
		AccessKey: self.AwsKey,
		SecretKey: self.AwsSecret,
	}
	session := s3.New(*auth, aws.USEast)
	bucket := session.Bucket(self.Bucket)

	// put the data
	return bucket.PutReader(
		self.RemoteFile,
		fileReader,
		fi.Size(),
		self.ContentType,
		s3.ACL(self.Permissions),
	)

}
Example #2
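// uploadToS3 reads AWS credentials from the environment and uploads the named
// local file to the given bucket as a private object.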
func uploadToS3(file string, s3_bucket string, awsRegion aws.Region) error {
	log.Println("Uploading to S3...")
	auth, err := aws.EnvAuth()
	if err != nil {
		return err
	}
	client := s3.New(auth, awsRegion)
	bucket := client.Bucket(s3_bucket)

	r, errF := os.Open(file)
	if errF != nil {
		return errF
	}
	defer r.Close()

	fi, err := r.Stat()
	if err != nil {
		return err
	}
	length := fi.Size()

	path := file
	perm := s3.ACL("private")
	contType := "application/x-compressed"

	err = bucket.PutReader(path, r, length, contType, perm)
	if err != nil {
		return err
	}

	log.Println("Upload successful")
	return nil
}
Example #3
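// resource_aws_s3_bucket_create creates the S3 bucket described by the merged
// resource diff and records the bucket name as the resource ID.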
func resource_aws_s3_bucket_create(
	s *terraform.ResourceState,
	d *terraform.ResourceDiff,
	meta interface{}) (*terraform.ResourceState, error) {
	p := meta.(*ResourceProvider)
	s3conn := p.s3conn

	// Merge the diff into the state so that we have all the attributes
	// properly.
	rs := s.MergeDiff(d)

	// Get the bucket and optional acl
	bucket := rs.Attributes["bucket"]
	acl := "private"
	if other, ok := rs.Attributes["acl"]; ok {
		acl = other
	}

	log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl)
	s3Bucket := s3conn.Bucket(bucket)
	err := s3Bucket.PutBucket(s3.ACL(acl))
	if err != nil {
		return nil, fmt.Errorf("Error creating S3 bucket: %s", err)
	}

	// Assign the bucket name as the resource ID
	rs.ID = bucket
	return rs, nil
}
Example #4
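// UploadToS3 buffers the uploaded multipart file in memory, sniffs its content
// type, and stores it under s3Key with a public-read ACL.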
func UploadToS3(file multipart.File, s3Key string) (err error) {

	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatal("Incorrect AWS Auth Details ", err)
	}

	connection := s3.New(auth, region)
	bucket := connection.Bucket(os.Getenv("S3_BUCKET"))

	buffer := new(bytes.Buffer)
	_, err = io.Copy(buffer, file)
	if err != nil {
		log.Fatal("Cannot create file ", err)
	}

	filetype := http.DetectContentType(buffer.Bytes())
	err = bucket.Put(
		s3Key,
		buffer.Bytes(),
		filetype,
		s3.ACL("public-read"),
	)
	return err
}
Example #5
// NewPublishedStorageRaw creates published storage from raw aws credentials
func NewPublishedStorageRaw(auth aws.Auth, region aws.Region, bucket, defaultACL, prefix,
	storageClass, encryptionMethod string, plusWorkaround, disabledMultiDel bool) (*PublishedStorage, error) {
	if defaultACL == "" {
		defaultACL = "private"
	}

	if storageClass == "STANDARD" {
		storageClass = ""
	}

	result := &PublishedStorage{
		s3:               s3.New(auth, region),
		acl:              s3.ACL(defaultACL),
		prefix:           prefix,
		storageClass:     storageClass,
		encryptionMethod: encryptionMethod,
		plusWorkaround:   plusWorkaround,
		disableMultiDel:  disabledMultiDel,
	}

	result.s3.HTTPClient = func() *http.Client {
		return RetryingClient
	}
	result.bucket = result.s3.Bucket(bucket)

	return result, nil
}
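Example #6
// init sets up the s3srv test fixture and creates a public-read bucket named "bucket" on it.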
func init() {
	s3srv.SetUp()
	testS3 = s3.New(s3srv.Auth, s3srv.Region)
	err := testS3.Bucket("bucket").PutBucket(s3.ACL("public-read"))
	if err != nil {
		panic(err)
	}
}
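Example #7
// putManifest JSON-encodes the manifest and uploads it to the configured
// manifest path using the configured ACL.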
func (p *PostProcessor) putManifest(manifest *Manifest) error {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(manifest); err != nil {
		return err
	}
	if err := p.s3.Put(p.config.ManifestPath, buf.Bytes(), "application/json", s3.ACL(p.config.ACL)); err != nil {
		return err
	}
	return nil
}
Example #8
// NewPublishedStorageRaw creates published storage from raw aws credentials
func NewPublishedStorageRaw(auth aws.Auth, region aws.Region, bucket, defaultACL, prefix string) (*PublishedStorage, error) {
	if defaultACL == "" {
		defaultACL = "private"
	}

	result := &PublishedStorage{s3: s3.New(auth, region), acl: s3.ACL(defaultACL), prefix: prefix}
	result.bucket = result.s3.Bucket(bucket)

	return result, nil
}
Example #9
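// validS3Permissions reports whether perm matches one of the canned ACLs
// defined by the s3 package.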
func validS3Permissions(perm string) bool {
	return util.SliceContains(
		[]s3.ACL{
			s3.Private,
			s3.PublicRead,
			s3.PublicReadWrite,
			s3.AuthenticatedRead,
			s3.BucketOwnerRead,
			s3.BucketOwnerFull,
		},
		s3.ACL(perm),
	)
}
Example #10
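// Bucket returns a handle to the configured bucket, creating the bucket with
// the configured ACL if it does not already exist.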
func (s *S3Config) Bucket(client *s3.S3) (*s3.Bucket, error) {
	b := client.Bucket(s.BucketName)
	resp, err := client.ListBuckets()
	if err != nil {
		return b, err
	}
	if bucketOfName(resp.Buckets, s.BucketName) == nil {
		err = b.PutBucket(s3.ACL(s.Acl))
		if err != nil {
			return b, err
		}
	}
	return b, nil
}
Example #11
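// writeLocalFileToS3 uploads a local file to the given key in the bucket,
// guessing the content type from the file extension.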
func writeLocalFileToS3(bucket *s3.Bucket, path string, file string) error {
	contType := mime.TypeByExtension(filepath.Ext(file))
	Perms := s3.ACL("private")

	data, err := ioutil.ReadFile(file)
	if err != nil {
		return err
	}

	if err := bucket.Put(path, data, contType, Perms); err != nil {
		return err
	}

	return nil
}
Example #12
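// writeS3FileToS3 copies an object from the source bucket to the target
// bucket, guessing the content type from the source key's extension.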
func writeS3FileToS3(sourceBucket, targetBucket *s3.Bucket, sourceKeyPath, targetKeyPath string) error {
	data, err := sourceBucket.Get(sourceKeyPath)
	if err != nil {
		return err
	}

	contType := mime.TypeByExtension(filepath.Ext(sourceKeyPath))
	Perms := s3.ACL("private")

	if err := targetBucket.Put(targetKeyPath, data, contType, Perms); err != nil {
		return err
	}

	return nil
}
Example #13
// NewOptions makes some *Options with defaults!
func NewOptions() *Options {
	cwd, _ := os.Getwd()
	cwd = env.Get("TRAVIS_BUILD_DIR", cwd)

	targetPaths := env.ExpandSlice(env.Slice("ARTIFACTS_TARGET_PATHS", ":", []string{}))
	if len(targetPaths) == 0 {
		targetPaths = DefaultTargetPaths
	}

	return &Options{
		AccessKey: env.Cascade([]string{
			"ARTIFACTS_KEY",
			"ARTIFACTS_AWS_ACCESS_KEY",
			"AWS_ACCESS_KEY_ID",
			"AWS_ACCESS_KEY",
		}, ""),
		SecretKey: env.Cascade([]string{
			"ARTIFACTS_SECRET",
			"ARTIFACTS_AWS_SECRET_KEY",
			"AWS_SECRET_ACCESS_KEY",
			"AWS_SECRET_KEY",
		}, ""),
		BucketName: strings.TrimSpace(env.Cascade([]string{
			"ARTIFACTS_BUCKET",
			"ARTIFACTS_S3_BUCKET",
		}, "")),
		CacheControl: strings.TrimSpace(env.Get("ARTIFACTS_CACHE_CONTROL", DefaultCacheControl)),
		Perm:         s3.ACL(env.Get("ARTIFACTS_PERMISSIONS", DefaultPerm)),

		RepoSlug:    DefaultRepoSlug,
		BuildNumber: DefaultBuildNumber,
		BuildID:     DefaultBuildID,
		JobNumber:   DefaultJobNumber,
		JobID:       DefaultJobID,

		Concurrency: env.Uint("ARTIFACTS_CONCURRENCY", DefaultConcurrency),
		MaxSize:     env.UintSize("ARTIFACTS_MAX_SIZE", DefaultMaxSize),
		Paths:       env.ExpandSlice(env.Slice("ARTIFACTS_PATHS", ":", DefaultPaths)),
		Provider:    env.Get("ARTIFACTS_UPLOAD_PROVIDER", DefaultUploadProvider),
		Retries:     env.Uint("ARTIFACTS_RETRIES", DefaultRetries),
		TargetPaths: targetPaths,
		WorkingDir:  cwd,

		ArtifactsSaveHost:  env.Get("ARTIFACTS_SAVE_HOST", ""),
		ArtifactsAuthToken: env.Get("ARTIFACTS_AUTH_TOKEN", ""),
	}
}
Example #14
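// Put uploads the file at u.Path with a public-read ACL, retrying up to
// MaxAttempts times with a linearly growing delay between attempts.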
func (u *Upload) Put() {
	auth, _ := aws.EnvAuth()
	client := s3.New(auth, aws.USEast)
	b := client.Bucket(u.Bucket)

	file, err := os.Open(u.Path)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	stat, err := file.Stat()
	if err != nil {
		log.Fatal(err)
	}

	headers := map[string][]string{
		"Content-Length": {strconv.FormatInt(stat.Size(), 10)},
		"Content-Type":   {u.FileType()},
		"Cache-Control":  {"max-age=31104000"},
	}

	relativePath := u.RelativePath()

	attempt := 1
	for {
		fmt.Printf("[%d] Path: %s\n", attempt, relativePath)

		err = b.PutReaderHeader(relativePath, file, stat.Size(), headers, s3.ACL("public-read"))
		if err == nil || attempt >= MaxAttempts {
			break
		}

		log.Print(err)
		time.Sleep(time.Duration(attempt) * time.Second)
		attempt += 1
		file.Seek(0, 0)
	}

	if err != nil {
		log.Fatal(err)
	}

}
Example #15
// Make the bucket if it does not exist
func (s *AwsContentStore) makeBucket() error {
	buckets, err := s.bucket.ListBuckets()
	if err != nil {
		logger.Log(kv{"fn": "AwsContentStore.makeBucket", "err": ": " + err.Error()})
		return err
	}
	exists := false
	for _, b := range buckets.Buckets {
		if b.Name == s.bucket.Name {
			exists = true
		}
	}
	if !exists {
		err := s.bucket.PutBucket(s3.ACL(Config.Aws.BucketAcl))
		return err
	}
	return nil
}
Example #16
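// resourceAwsS3BucketCreate creates the S3 bucket named by the resource data
// and stores the bucket name as the resource ID.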
func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	// Get the bucket and acl
	bucket := d.Get("bucket").(string)
	acl := d.Get("acl").(string)

	log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl)
	s3Bucket := s3conn.Bucket(bucket)
	err := s3Bucket.PutBucket(s3.ACL(acl))
	if err != nil {
		return fmt.Errorf("Error creating S3 bucket: %s", err)
	}

	// Assign the bucket name as the resource ID
	d.SetId(bucket)

	return nil
}
Example #17
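// Upload streams the file at fileDir/s.File to S3 through a progress-reporting
// reader, using the ACL configured on S3Config (public-read-write by default).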
func (s *S3Config) Upload(fileDir string, progressFunc reader.ProgressReaderCallbackFunc) error {
	if len(s.Acl) == 0 {
		s.Acl = string(s3.PublicReadWrite)
	}
	auth, err := aws.GetAuth(s.AccessKey, s.SecretKey)
	if err != nil {
		return err
	}
	s3client := s3.New(auth, aws.Region{Name: "us-east-1", S3Endpoint: s.S3Endpoint})

	filename := filepath.Join(fileDir, s.File)
	fmt.Println("start s3upload:", filename)
	b, err := s.Bucket(s3client)
	if err != nil {
		return err
	}

	f, err := os.Stat(filename)
	if err != nil {
		return err
	}

	file, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer file.Close()
	progressR := &reader.Reader{
		Reader:   file,
		Size:     f.Size(),
		DrawFunc: progressFunc,
	}

	err = b.PutReader(s.File, progressR, f.Size(), "application/octet-stream", s3.ACL(s.Acl))
	//err = b.Put("zoujtw2015-12-16.mkv", file, "content-type", s3.PublicReadWrite)
	if err != nil {
		return err
	}
	fmt.Println("s3 upload file succeed!!!", file.Name())

	return nil
}
Example #18
// PUT on a bucket creates the bucket.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
func (r bucketResource) put(a *action) interface{} {
	var created bool
	if r.bucket == nil {
		if !validBucketName(r.name) {
			fatalf(400, "InvalidBucketName", "The specified bucket is not valid")
		}
		if loc := locationConstraint(a); loc == "" {
			fatalf(400, "InvalidRequets", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.")
		}
		// TODO validate acl
		r.bucket = &bucket{
			name: r.name,
			// TODO default acl
			objects: make(map[string]*object),
		}
		a.srv.buckets[r.name] = r.bucket
		created = true
	}
	if !created && a.srv.config.send409Conflict() {
		fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.")
	}
	r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl"))
	return nil
}
Example #19
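// artifactFeederLoop walks the given path and queues each regular file as an
// artifact on the channel, enforcing the configured maximum total size.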
func (u *uploader) artifactFeederLoop(path *path.Path, artifacts chan *artifact.Artifact) error {
	to, from, root := path.To, path.From, path.Root
	u.log.WithField("path", path).Debug("incoming path")

	if path.IsDir() {
		root = filepath.Join(root, from)
		u.log.WithField("root", root).Debug("path is dir, so setting root to root+from")
	}

	artifactOpts := &artifact.Options{
		Perm:        s3.ACL(u.Opts.Perm),
		RepoSlug:    u.Opts.RepoSlug,
		BuildNumber: u.Opts.BuildNumber,
		BuildID:     u.Opts.BuildID,
		JobNumber:   u.Opts.JobNumber,
		JobID:       u.Opts.JobID,
	}

	filepath.Walk(path.Fullpath(), func(source string, info os.FileInfo, err error) error {
		if info != nil && info.IsDir() {
			u.log.WithField("path", source).Debug("skipping directory")
			return nil
		}

		relPath := strings.Replace(strings.Replace(source, root, "", -1), root+"/", "", -1)
		dest := relPath
		if len(to) > 0 {
			if path.IsDir() {
				dest = filepath.Join(to, relPath)
			} else {
				dest = to
			}
		}

		for _, targetPath := range u.Opts.TargetPaths {
			err := func() error {
				u.curSize.Lock()
				defer u.curSize.Unlock()

				a := artifact.New(targetPath, source, dest, artifactOpts)

				size, err := a.Size()
				if err != nil {
					return err
				}

				u.curSize.Current += size
				logFields := logrus.Fields{
					"current_size":     humanize.Bytes(u.curSize.Current),
					"max_size":         humanize.Bytes(u.Opts.MaxSize),
					"percent_max_size": pctMax(size, u.Opts.MaxSize),
					"artifact":         relPath,
					"artifact_size":    humanize.Bytes(size),
				}

				if u.curSize.Current > u.Opts.MaxSize {
					msg := "max-size would be exceeded"
					u.log.WithFields(logFields).Error(msg)
					return fmt.Errorf(msg)
				}

				u.log.WithFields(logFields).Debug("queueing artifact")
				artifacts <- a
				return nil
			}()
			if err != nil {
				return err
			}
		}
		return nil
	})

	return nil
}
Example #20
// Create a song, add details into db and upload file to aws
func (s SongController) Create() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {

		// retrieve user session
		session, err := store.Get(r, "user")
		if err != nil {
			log.Println(err)
			http.Redirect(w, r, "/upload", http.StatusFound)
			return
		}
		user := session.Values["User"].(*models.UserModel)

		// check if user agreed to terms and copyright
		if r.FormValue("copyright") != "on" || r.FormValue("terms") != "on" {
			// redirect to upload page
			http.Redirect(w, r, "/upload", http.StatusFound)
			return
		}

		// get file from form
		file, _, err := r.FormFile("file")
		if err != nil {
			log.Println(err)

			// redirect to upload page
			http.Redirect(w, r, "/upload", http.StatusFound)
			return
		}

		// read file into memory
		data, err := ioutil.ReadAll(file)
		if err != nil {
			log.Println(err)

			// redirect to upload page
			http.Redirect(w, r, "/upload", http.StatusFound)
			return
		}

		// insert song data into database
		id, err := models.SongFactory{}.Create(map[string]interface{}{
			"title":     r.FormValue("title"),
			"artist_id": user.Id,
		})

		if err != nil {
			log.Println(err)

			// redirect to upload page
			http.Redirect(w, r, "/upload", http.StatusFound)
			return
		}

		// upload to Amazon S3
		err = bucket.Put(
			"songs/"+fmt.Sprint(id)+".mp3", data,
			"audio/mpeg", s3.ACL("authenticated-read"))

		if err != nil {
			// [todo] - Remove song from database here
			log.Println(err)
			http.Redirect(w, r, "/upload", http.StatusFound)
			return
		}

		// redirect to /songs
		http.Redirect(w, r, "/songs/"+string(id), http.StatusFound)
	}
}
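Example #21
// PostProcess uploads the single .box file produced by the builder to S3,
// using a multipart upload for boxes larger than 100MB, and then updates and
// re-uploads the version manifest.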
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	if len(artifact.Files()) == 0 {
		return nil, false, fmt.Errorf("No files in artifact from %s post-processor", artifact.BuilderId())
	}

	if len(artifact.Files()) > 1 {
		return nil, false, fmt.Errorf("More than one file in artifact from %s post-processor: %s", artifact.BuilderId(), artifact.Files())
	}

	box := artifact.Files()[0]
	if !strings.HasSuffix(box, ".box") {
		return nil, false, fmt.Errorf("Unknown file in artifact from %s post-processor: %s", artifact.BuilderId(), artifact.Files())
	}

	provider := providerFromBuilderName(artifact.Id())
	ui.Say(fmt.Sprintf("Preparing to upload box for '%s' provider to S3 bucket '%s'", provider, p.config.Bucket))

	// open the box so we can upload to S3 and calculate checksum for the manifest
	file, err := os.Open(box)
	if err != nil {
		return nil, false, err
	}
	defer file.Close()

	// get the file's size
	info, err := file.Stat()
	if err != nil {
		return nil, false, err
	}
	size := info.Size()
	ui.Message(fmt.Sprintf("Box to upload: %s (%d bytes)", box, size))

	// generate the path to store the box in S3
	boxPath := fmt.Sprintf("%s/%s/%s", p.config.BoxDir, p.config.Version, path.Base(box))

	// upload the box to S3
	if size > 100*1024*1024 {
		ui.Message("File size > 100MB. Initiating multipart upload")

		multi, err := p.s3.InitMulti(boxPath, "application/octet-stream", s3.ACL(p.config.ACL))
		if err != nil {
			return nil, false, err
		}

		ui.Message("Uploading...")

		const chunkSize = 5 * 1024 * 1024

		totalParts := int(math.Ceil(float64(size) / float64(chunkSize)))
		totalUploadSize := int64(0)

		parts := make([]s3.Part, totalParts)

		errorCount := 0

		for partNum := int(1); partNum <= totalParts; partNum++ {

			filePos, err := file.Seek(0, 1)

			partSize := int64(math.Min(chunkSize, float64(size-filePos)))
			partBuffer := make([]byte, partSize)

			ui.Message(fmt.Sprintf("Upload: Uploading part %d of %d, %d (of max %d) bytes", partNum, int(totalParts), int(partSize), int(chunkSize)))

			readBytes, err := file.Read(partBuffer)
			ui.Message(fmt.Sprintf("Upload: Read %d bytes from box file on disk", readBytes))

			bufferReader := bytes.NewReader(partBuffer)
			part, err := multi.PutPart(partNum, bufferReader)

			parts[partNum-1] = part

			if err != nil {

				if errorCount < 10 {
					errorCount++
					ui.Message(fmt.Sprintf("Error encountered! %s. Retry %d.", err, errorCount))
					time.Sleep(5 * time.Second)
					//reset seek position to the beginning of this block
					file.Seek(filePos, 0)
					partNum--
				} else {
					ui.Message(fmt.Sprintf("Too many errors encountered! Aborting.", err, errorCount))
					return nil, false, err
				}
			} else {

				totalUploadSize += part.Size
				ui.Message(fmt.Sprintf("Upload: Finished part %d of %d, upload total is %d bytes. This part was %d bytes.", partNum, totalParts, int(totalUploadSize), int(part.Size)))
			}
		}

		ui.Message("Parts uploaded, completing upload...")
		if err := multi.Complete(parts); err != nil {
			return nil, false, err
		}
	} else {
		if err := p.s3.PutReader(boxPath, file, size, "application/octet-stream", s3.ACL(p.config.ACL)); err != nil {
			return nil, false, err
		}
	}

	ui.Message(fmt.Sprintf("Box uploaded to S3: %s", boxPath))

	// Rewind the file so the checksum below covers the entire box
	if _, err := file.Seek(0, 0); err != nil {
		return nil, false, err
	}

	ui.Message("Generating checksum")
	checksum, err := sum256(file)
	if err != nil {
		return nil, false, err
	}
	ui.Message(fmt.Sprintf("Checksum is %s", checksum))

	// get the latest manifest so we can add to it
	ui.Message("Fetching latest manifest")
	manifest, err := p.getManifest()
	if err != nil {
		return nil, false, err
	}

	ui.Message(fmt.Sprintf("Adding %s %s box to manifest", provider, p.config.Version))
	if err := manifest.add(p.config.Version, &Provider{
		Name:         provider,
		Url:          p.s3.URL(boxPath),
		ChecksumType: "sha256",
		Checksum:     checksum,
	}); err != nil {
		return nil, false, err
	}

	ui.Message(fmt.Sprintf("Uploading the manifest: %s", p.config.ManifestPath))
	if err := p.putManifest(manifest); err != nil {
		return nil, false, err
	}

	return &Artifact{p.s3.URL(p.config.ManifestPath)}, true, nil
}
Example #22
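// Create stores a feedback row in the database and, when a photo is supplied,
// uploads it to S3 with a public-read ACL and saves its path alongside the row.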
func (c Feedbacks) Create(created string, rating string, comment string, photo []byte, pointId string) revel.Result {

	ratingConv, _ := strconv.Atoi(rating)

	pointIdConv, _ := strconv.Atoi(pointId)

	if len(photo) == 0 {

		insertQ, err := app.DB.Prepare("INSERT INTO feedback(Point_idPoint, created, rating, notes) VALUES(?, ?, ?, ?)")

		if err != nil {
			log.Fatal(err)
		}

		_, err = insertQ.Exec(pointIdConv, created, ratingConv, comment)
		if err != nil {
			log.Fatal(err)
		}

	} else {

		AWSAuth := aws.Auth{
			AccessKey: os.Getenv("S3_KEYID"), // change this to yours
			SecretKey: os.Getenv("S3_SECRETKEY"),
		}

		region := aws.USEast
		// change this to your AWS region
		// click on the bucketname in AWS control panel and click Properties
		// the region for your bucket should be under "Static Website Hosting" tab

		connection := s3.New(AWSAuth, region)

		bucket := connection.Bucket("staritapi")

		t := time.Now()

		fileName := t.Format("2006-01-02T15:04:05")
		path := "photos/" + fileName + ".jpg"

		err := bucket.Put(path, photo, "image/jpeg", s3.ACL("public-read"))

		if err != nil {
			log.Println(err)
		}

		log.Printf("Uploaded to %s with to S3.\n\n", path)

		insertQ, err := app.DB.Prepare("INSERT INTO feedback(Point_idPoint, created, rating, notes, photourl) VALUES(?, ?, ?, ?, ?)")

		if err != nil {
			log.Fatal(err)
		}

		_, err = insertQ.Exec(pointIdConv, created, ratingConv, comment, path)
		if err != nil {
			log.Fatal(err)
		}

	}
	//insertQ, err := app.DB.Prepare("INSERT INTO feedback(created) VALUES(?)")

	// img, _, _ := image.Decode(bytes.NewReader(photo))
	//
	// out, err := os.Create("./qwerty.jpeg")
	//
	// if err != nil {
	// 	log.Fatal(err)
	// }
	//
	// err = jpeg.Encode(out, img, nil)
	//
	// if err != nil {
	// 	log.Fatal(err)
	// }

	c.Response.Status = http.StatusCreated

	return c.RenderJson("success")
}