Example #1
func ExampleV4Signer() {
	// Get auth from env vars
	auth, err := aws.EnvAuth()
	if err != nil {
		fmt.Println(err)
	}

	// Create a signer with the auth, name of the service, and aws region
	signer := aws.NewV4Signer(auth, "dynamodb", aws.USEast)

	// Create a request
	req, err := http.NewRequest("POST", aws.USEast.DynamoDBEndpoint, strings.NewReader("sample_request"))
	if err != nil {
		fmt.Println(err)
	}

	// A Date or x-amz-date header is required to sign a request
	req.Header.Add("Date", time.Now().UTC().Format(http.TimeFormat))

	// Sign the request
	signer.Sign(req)

	// Issue signed request
	http.DefaultClient.Do(req)
}
Example #2
func TestGetItemRange(t *testing.T) {
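	// Fetch a single item addressed by both its hash key and its range key.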
	if !*amazon {
		t.Log("Amazon tests not enabled")
		return
	}

	auth, err := aws.EnvAuth()

	if err != nil {
		t.Log(err)
		t.FailNow()
	}

	server := dynamodb.Server{auth, aws.USEast}
	primary := dynamodb.NewStringAttribute("uuid_type", "")
	rangeK := dynamodb.NewNumericAttribute("time", "")
	key := dynamodb.PrimaryKey{primary, rangeK}
	table := server.NewTable("production_storyarc-accelerator-analytics",
		key)

	item, err := table.GetItem(&dynamodb.Key{HashKey: "aee5df14-6961-4baa-bad1-a1150576594f_MISSES", RangeKey: "1348187524"})

	if err != nil {
		t.Log(err)
		t.FailNow()
	}

	fmt.Printf("Item : %s\n", item)

}
Example #3
func (s *DynamoDBCommonSuite) SetupDB() {
	if !*integration {
		s.t.Skip("Integration tests are disabled")
	}

	s.t.Logf("Performing Integration tests on %s...", *provider)

	var auth aws.Auth
	if *provider == "amazon" {
		s.t.Log("Using REAL AMAZON SERVER")
		awsAuth, err := aws.EnvAuth()
		if err != nil {
			log.Fatal(err)
		}
		auth = awsAuth
	} else {
		auth = dummyAuth
	}
	s.c = &dynamodb.Client{
		Auth:   auth,
		Region: dummyRegion[*provider],
	}
	// Ensure that the table does not exist
	s.DeleteTable()

	if s.CreateNewTable {
		s.CreateTable()
	}
}
Example #4
func TestBasicGroupRequest(t *testing.T) {
	var as *AutoScaling
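	// Fall back to the local mock server when AWS credentials are not set
	// in the environment; otherwise run against the real AutoScaling API.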
	awsAuth, err := aws.EnvAuth()
	if err != nil {
		mockTest = true
		t.Log("Running mock tests as AWS environment variables are not set")
		awsAuth := aws.Auth{AccessKey: "abc", SecretKey: "123"}
		as = New(awsAuth, aws.Region{AutoScalingEndpoint: testServer.URL})
		testServer.Start()
		go testServer.WaitRequest()
		testServer.Response(200, nil, astest.BasicGroupResponse)
	} else {
		as = New(awsAuth, aws.USWest2)
	}

	groupResp, err := as.DescribeAutoScalingGroups(nil)

	if err != nil {
		t.Fatal(err)
	}
	if len(groupResp.AutoScalingGroups) > 0 {
		firstGroup := groupResp.AutoScalingGroups[0]
		if len(firstGroup.AutoScalingGroupName) > 0 {
			t.Logf("Found AutoScaling group %s\n",
				firstGroup.AutoScalingGroupName)
		}
	}
	testServer.Flush()
}
Example #5
func TestGetItem(t *testing.T) {
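	// Fetch a single item addressed by its hash key alone.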
	if !*amazon {
		t.Log("Amazon tests not enabled")
		return
	}

	auth, err := aws.EnvAuth()

	if err != nil {
		t.Log(err)
		t.FailNow()
	}

	server := dynamodb.Server{auth, aws.USEast}
	primary := dynamodb.NewStringAttribute("domain", "")
	key := dynamodb.PrimaryKey{primary, nil}
	table := server.NewTable("production_storyarc-accelerator-sites",
		key)

	item, err := table.GetItem(&dynamodb.Key{HashKey: "ac-news.speedup.storytellerhq.com"})

	if err != nil {
		t.Log(err)
		t.FailNow()
	}

	fmt.Printf("Item : %s\n", item)

}
Example #6
func TestListTables(t *testing.T) {
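	// Listing tables should return at least one table for the account.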
	if !*amazon {
		t.Log("Amazon tests not enabled")
		return
	}

	auth, err := aws.EnvAuth()

	if err != nil {
		t.Log(err)
		t.FailNow()
	}

	server := dynamodb.Server{auth, aws.USEast}

	tables, err := server.ListTables()

	if err != nil {
		t.Error(err.Error())
	}

	if len(tables) == 0 {
		t.Log("Expected table to be returned")
		t.FailNow()
	}

	fmt.Printf("tables %s\n", tables)

}
Example #7
func (s *AmazonServer) SetUp(c *C) {
	auth, err := aws.EnvAuth()
	if err != nil {
		c.Fatal(err)
	}
	s.auth = auth
}
Example #8
func (s *AmazonServer) SetUp(c *check.C) {
	auth, err := aws.EnvAuth()
	if err != nil {
		c.Fatal(err.Error())
	}
	s.auth = auth
}
Example #9
func (s *S) TestEnvAuthAlt(c *check.C) {
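	// The alternate AWS_ACCESS_KEY / AWS_SECRET_KEY variable names should also be accepted.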
	os.Clearenv()
	os.Setenv("AWS_SECRET_KEY", "secret")
	os.Setenv("AWS_ACCESS_KEY", "access")
	auth, err := aws.EnvAuth()
	c.Assert(err, check.IsNil)
	c.Assert(auth, check.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
Example #10
func GetBucket() *s3.Bucket {
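	// Authenticate from the environment and return a handle to the application's S3 bucket.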
	auth, err := aws.EnvAuth()
	if err != nil {
		panic(err.Error())
	}

	s := s3.New(auth, aws.USEast)
	return s.Bucket("filmapp-development")
}
Example #11
func main() {
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatal(err)
	}
	r53, _ := route53.NewRoute53(auth)

	// The hosted zone ID is expected as the first command-line argument
	// (os.Args[0] is the program name, not an argument).
	res, _ := r53.GetHostedZone(os.Args[1])
	fmt.Printf("%#v\n", res)

	res, _ = r53.ListHostedZones()
	fmt.Printf("%#v\n", res)
}
Example #12
func (s *LiveSuite) SetUpSuite(c *check.C) {
	if !Amazon {
		c.Skip("amazon tests not enabled (-amazon flag)")
	}
	auth, err := aws.EnvAuth()
	if err != nil {
		c.Fatal(err.Error())
	}
	s.auth = auth
}
Example #13
func (s *SuiteI) SetUpSuite(c *check.C) {
	if !*integration {
		c.Skip("Integration tests not enabled (-i flag)")
	}
	auth, err := aws.EnvAuth()
	if err != nil {
		c.Fatal(err.Error())
	}
	s.auth = auth
}
Example #14
func main() {
	flag.Parse()
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatalln("Failed to recieve auth from env")
	}
	awsConnection := s3.New(
		auth,
		aws.USWest2,
	)
	bucket := awsConnection.Bucket(targetBucket)
	info := gen.BuildInstanceInfo(&localInstanceFetcher{}, "basic_example", ".")
	rotateCoordinator := gologging.NewRotateCoordinator(adjustedMaxLines, time.Hour*1)
	logger, err := gologging.StartS3Logger(
		rotateCoordinator,
		info,
		&stdoutNotifier{},
		&uploader.S3UploaderBuilder{
			Bucket:           bucket,
			KeyNameGenerator: &gen.EdgeKeyNameGenerator{info},
		},
		&stderrNotifier{},
		5,
	)
	if err != nil {
		log.Fatalf("Error building uploader: %s\n ", err)
	}

	i := 0
	now := time.Now()
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)
	go func() {
		<-sigc
		logger.Close()
		fmt.Printf("Produced %f rps\n", float64(i)/(float64(time.Now().Sub(now))/float64(time.Second)))
		os.Exit(0)
	}()
	x := int(time.Second) / *rps
	for ; i < MAX_LINES_PER_LOG*4; i++ {
		// throttle - there is a better solution to this
		defer func() {
			if x := recover(); x != nil { // means we caught a signal
				time.Sleep(120 * time.Second)
			}
		}()
		time.Sleep(time.Duration(int(0.8 * float64(x))))
		logger.Log("MOAR! %d", i)
	}
	logger.Close()
}
Example #15
func setUpAuth(c *check.C) {
	if !*amazon {
		c.Skip("Test against amazon not enabled.")
	}
	if *local {
		c.Log("Using local server")
		dynamodb_region = aws.Region{DynamoDBEndpoint: "http://127.0.0.1:8000"}
		dynamodb_auth = aws.Auth{AccessKey: "DUMMY_KEY", SecretKey: "DUMMY_SECRET"}
	} else {
		c.Log("Using REAL AMAZON SERVER")
		dynamodb_region = aws.USWest2
		auth, err := aws.EnvAuth()
		if err != nil {
			c.Fatal(err)
		}
		dynamodb_auth = auth
	}
}
Example #16
func GetAWSAuth() (aws.Auth, error) {

	// First, check whether the AWS environment variables are set and authenticate with those.
	auth, err := aws.EnvAuth()
	if err == nil {
		// If that worked, use these credentials
		return auth, nil
	}

	// If the environment variables didn't work, try to get credentials from the IAM role
	auth, err = aws.GetAuth("", "", "", time.Now().AddDate(1, 1, 1))
	if err != nil {
		return aws.Auth{}, err
	}

	return auth, nil

}
Example #17
func main() {
	// This assumes you have ENV vars: AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatal(err.Error())
	}
	// aws.USEast.DynamoDBEndpoint = "http://localhost:3300"
	log.Printf("%+v", aws.USEast.DynamoDBEndpoint)
	server := dynamodb.Server{auth, aws.USEast}
	tables, err := server.ListTables()

	if err != nil {
		panic(err.Error())
	}

	if len(tables) == 0 {
		panic("Expected table to be returned")
	}

	fmt.Printf("tables %+v\n", tables)

	primary := dynamodb.NewStringAttribute("v", "")
	key := dynamodb.PrimaryKey{primary, nil}
	table := server.NewTable(tables[0], key)

	fmt.Printf("tables %+v\n", table)
	desc, err := table.DescribeTable()

	if err != nil {
		panic(err.Error())
	}

	if desc.TableSizeBytes > 0 {
		log.Println("TableSizeBytes > 0", desc.TableSizeBytes)
	}

	if desc.ItemCount > 0 {
		log.Println("ItemCount > 0", desc.ItemCount)
	}

	fmt.Printf("tables %+v\n", desc)
}
Example #18
func init() {
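	// Parse the command-line flags, authenticate from the environment, and resolve the target SQS queue.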
	var msgBodyTmplFile string
	flag.StringVar(&queueName, "q", "", "Name of queue to fill")
	flag.StringVar(&region, "r", "", `Queue region (e.g., "us-east-1", "usw01")`)
	flag.StringVar(&msgBodyTmpl, "b", defaultMsgTmpl, "Message body template")
	flag.StringVar(&msgBodyTmplFile, "f", "", "Read message body template from file")
	flag.IntVar(&count, "c", defaultCount, "Number of messages to insert")
	flag.BoolVar(&serialMode, "serial", false, "Fill queue non-concurrently")
	flag.Parse()
	auth, err := aws.EnvAuth()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if queueName == "" || region == "" {
		flag.Usage()
		os.Exit(1)
	}
	if msgBodyTmpl == "" && msgBodyTmplFile == "" {
		flag.Usage()
		os.Exit(1)
	}
	if msgBodyTmplFile != "" {
		body, err := ioutil.ReadFile(msgBodyTmplFile)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		msgBodyTmpl = string(body)
	}
	region = normalizeRegion(region)
	svc, err := sqs.NewFrom(auth.AccessKey, auth.SecretKey, region)
	if err != nil {
		fmt.Println("Error accessing SQS:", err)
		os.Exit(1)
	}
	queue, err = svc.GetQueue(queueName)
	if err != nil {
		fmt.Printf("Error getting queue %s: %s\n", queueName, err)
		os.Exit(1)
	}
}
Example #19
func TestCreateTable(t *testing.T) {
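	// Create a test table with a string hash key and a numeric range key.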
	if !*amazon {
		t.Log("Amazon tests not enabled")
		return
	}

	auth, err := aws.EnvAuth()

	if err != nil {
		t.Log(err)
		t.FailNow()
	}

	server := dynamodb.Server{auth, aws.USEast}

	attr1 := dynamodb.AttributeDefinitionT{"TestHashKey", "S"}
	attr2 := dynamodb.AttributeDefinitionT{"TestRangeKey", "N"}

	tableName := "MyTestTable"

	keySch1 := dynamodb.KeySchemaT{"TestHashKey", "HASH"}
	keySch2 := dynamodb.KeySchemaT{"TestRangeKey", "RANGE"}

	provTPut := dynamodb.ProvisionedThroughputT{ReadCapacityUnits: 1, WriteCapacityUnits: 1}

	tdesc := dynamodb.TableDescriptionT{
		AttributeDefinitions:  []dynamodb.AttributeDefinitionT{attr1, attr2},
		TableName:             tableName,
		KeySchema:             []dynamodb.KeySchemaT{keySch1, keySch2},
		ProvisionedThroughput: provTPut,
	}

	status, err := server.CreateTable(tdesc)

	if err != nil {
		t.Error(err.Error())
		t.FailNow()
	}

	fmt.Println(status)

}
Example #20
func (locator Locator) LookupQueue() (*sqs.Queue, error) {
	// login to sqs, reading our credentials from the environment
	auth, err := aws.EnvAuth()
	if err != nil {
		return nil, err
	}

	// connect to our sqs q
	region, found := aws.Regions[locator.RegionName]
	if !found {
		return nil, errors.New(fmt.Sprintf("no such region, '%s'", locator.RegionName))
	}
	log.Printf("looking up sqs queue by name, '%s'\n", locator.QueueName)
	sqsService := sqs.New(auth, region)
	q, err := sqsService.GetQueue(locator.QueueName)
	if err != nil {
		return nil, err
	}
	log.Printf("%s: ok\n", locator.QueueName)

	return q, nil
}
Example #21
func NewCloudWatchHandler(region string) Handler {
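	// Build a handler that publishes each host's up/down status as a CloudWatch metric.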
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatalln(err)
	}
	c, err := cloudwatch.NewCloudWatch(auth, aws.Regions[region].CloudWatchServicepoint)
	if err != nil {
		log.Fatalln(err)
	}
	return func(host Host, status bool, t time.Time) error {
		value := CloudWatchFailValue
		if status == true {
			value = CloudWatchOkValue
		}
		metric := cloudwatch.MetricDatum{
			MetricName: host.Address(),
			Value:      value,
		}
		_, err := c.PutMetricDataNamespace([]cloudwatch.MetricDatum{metric}, CloudWatchNamespace)
		return err
	}
}
Example #22
func (u *S3Uploader) Setup(destination string) error {
	u.Destination = destination

	// Setup the AWS authentication
	auth, err := aws.EnvAuth()
	if err != nil {
		return errors.New("Error loading AWS credentials: " + err.Error())
	}

	// Decide what region to use
	// https://github.com/crowdmob/goamz/blob/master/aws/regions.go
	// I think S3 defaults to us-east-1
	regionName := "us-east-1"
	if os.Getenv("AWS_DEFAULT_REGION") != "" {
		regionName = os.Getenv("AWS_DEFAULT_REGION")
	}

	// Check to make sure the region exists
	region, ok := aws.Regions[regionName]
	if ok == false {
		return errors.New("Unknown AWS Region `" + regionName + "`")
	}

	// Find the bucket
	s3 := s3.New(auth, region)
	bucket := s3.Bucket(u.bucketName())

	// If the list doesn't return an error, then we've got our
	// bucket
	_, err = bucket.List("", "", "", 0)
	if err != nil {
		return errors.New("Could not find bucket `" + u.bucketName() + " in region `" + region.Name + "` (" + err.Error() + ")")
	}

	u.Bucket = bucket

	return nil
}
Example #23
func TestAutoScalingGroup(t *testing.T) {
	var as *AutoScaling
	// Launch configuration test config
	var lc LaunchConfiguration
	lc.LaunchConfigurationName = "LConf1"
	lc.ImageId = "ami-03e47533" // Octave debian ami
	lc.KernelId = "aki-98e26fa8"
	lc.KeyName = "testAWS" // Replace with valid key for your account
	lc.InstanceType = "m1.small"

	// AutoScalingGroup test config
	var asg AutoScalingGroup
	asg.AutoScalingGroupName = "ASGTest1"
	asg.LaunchConfigurationName = lc.LaunchConfigurationName
	asg.DefaultCooldown = 300
	asg.HealthCheckGracePeriod = 300
	asg.DesiredCapacity = 1
	asg.MinSize = 1
	asg.MaxSize = 5
	asg.AvailabilityZones = []string{"us-west-2a"}

	// Parameters for setting desired capacity to 1
	var sp1 SetDesiredCapacityRequestParams
	sp1.AutoScalingGroupName = asg.AutoScalingGroupName
	sp1.DesiredCapacity = 1

	// Parameters for setting desired capacity to 2
	var sp2 SetDesiredCapacityRequestParams
	sp2.AutoScalingGroupName = asg.AutoScalingGroupName
	sp2.DesiredCapacity = 2

	awsAuth, err := aws.EnvAuth()
	if err != nil {
		mockTest = true
		t.Log("Running mock tests as AWS environment variables are not set")
		awsAuth := aws.Auth{AccessKey: "abc", SecretKey: "123"}
		as = New(awsAuth, aws.Region{AutoScalingEndpoint: testServer.URL})
	} else {
		as = New(awsAuth, aws.USWest2)
	}

	// Create the launch configuration
	if mockTest {
		testServer.Response(200, nil, astest.CreateLaunchConfigurationResponse)
	}
	_, err = as.CreateLaunchConfiguration(lc)
	if err != nil {
		t.Fatal(err)
	}

	// Check that we can get the launch configuration details
	if mockTest {
		testServer.Response(200, nil, astest.DescribeLaunchConfigurationResponse)
	}
	_, err = as.DescribeLaunchConfigurations([]string{lc.LaunchConfigurationName})
	if err != nil {
		t.Fatal(err)
	}

	// Create the AutoScalingGroup
	if mockTest {
		testServer.Response(200, nil, astest.CreateAutoScalingGroupResponse)
	}
	_, err = as.CreateAutoScalingGroup(asg)
	if err != nil {
		t.Fatal(err)
	}

	// Check that we can get the autoscaling group details
	if mockTest {
		testServer.Response(200, nil, astest.DescribeAutoScalingGroupResponse)
	}
	_, err = as.DescribeAutoScalingGroups(nil)
	if err != nil {
		t.Fatal(err)
	}

	// Suspend the scaling processes for the test AutoScalingGroup
	if mockTest {
		testServer.Response(200, nil, astest.SuspendProcessesResponse)
	}
	_, err = as.SuspendProcesses(asg, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Resume scaling processes for the test AutoScalingGroup
	if mockTest {
		testServer.Response(200, nil, astest.ResumeProcessesResponse)
	}
	_, err = as.ResumeProcesses(asg, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Change the desired capacity from 1 to 2. This will launch a second instance
	if mockTest {
		testServer.Response(200, nil, astest.SetDesiredCapacityResponse)
	}
	_, err = as.SetDesiredCapacity(sp2)
	if err != nil {
		t.Fatal(err)
	}

	// Change the desired capacity from 2 to 1. This will terminate one of the instances
	if mockTest {
		testServer.Response(200, nil, astest.SetDesiredCapacityResponse)
	}
	_, err = as.SetDesiredCapacity(sp1)
	if err != nil {
		t.Fatal(err)
	}

	// Update the max capacity for the scaling group
	if mockTest {
		testServer.Response(200, nil, astest.UpdateAutoScalingGroupResponse)
	}
	asg.MaxSize = 6
	_, err = as.UpdateAutoScalingGroup(asg)
	if err != nil {
		t.Fatal(err)
	}

	// Add a scheduled action to the group
	var psar PutScheduledActionRequestParams
	psar.AutoScalingGroupName = asg.AutoScalingGroupName
	psar.MaxSize = 4
	psar.ScheduledActionName = "SATest1"
	psar.Recurrence = "30 0 1 1,6,12 *"
	if mockTest {
		testServer.Response(200, nil, astest.PutScheduledUpdateGroupActionResponse)
	}
	_, err = as.PutScheduledUpdateGroupAction(psar)
	if err != nil {
		t.Fatal(err)
	}

	// List the scheduled actions for the group
	var sar ScheduledActionsRequestParams
	sar.AutoScalingGroupName = asg.AutoScalingGroupName
	if mockTest {
		testServer.Response(200, nil, astest.DescribeScheduledActionsResponse)
	}
	_, err = as.DescribeScheduledActions(sar)
	if err != nil {
		t.Fatal(err)
	}

	// Delete the test scheduled action from the group
	var dsar DeleteScheduledActionRequestParams
	dsar.AutoScalingGroupName = asg.AutoScalingGroupName
	dsar.ScheduledActionName = psar.ScheduledActionName
	if mockTest {
		testServer.Response(200, nil, astest.DeleteScheduledActionResponse)
	}
	_, err = as.DeleteScheduledAction(dsar)
	if err != nil {
		t.Fatal(err)
	}
	testServer.Flush()
}
Example #24
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	flag.Parse()

	if AWS_ACCESS_KEY_ID == "" || AWS_SECRET_ACCESS_KEY == "" {
		log.Fatal("AWS Credentials Required")
	}

	os.Setenv("AWS_ACCESS_KEY_ID", AWS_ACCESS_KEY_ID)
	os.Setenv("AWS_SECRET_ACCESS_KEY", AWS_SECRET_ACCESS_KEY)

	// Since we're not messing with cacheAge, it's just easier to treat it as a string.
	if cacheAge == "" {
		cacheAge = "0"
	}

	if maxObjs != 0 || stopMarker != "" {
		// Set the flag so the loop checks whether to stop early
		doStop = true
	}

	log.Println("Starting Cache Alterations:")

	//  Connect to AWS using goamz
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Panic(err.Error())
	}

	// Instantiate S3 Object
	s := s3.New(auth, aws.USEast)

	// Set the Bucket
	Bucket := s.Bucket(bucketName)

	// Initial Request - Outside Loop
	Response, err := Bucket.List("", "", lastMarker, 1000)
	if err != nil {
		log.Panic(err.Error())
	}

	// Set up the header for iterating.
	opts := s3.CopyOptions{}
	opts.CacheControl = "max-age=" + cacheAge
	opts.MetadataDirective = "REPLACE"

	log.Println("-> 0 START")

	// Loop Results
	for _, v := range Response.Contents {
		fmt.Printf(".") // Indicator that something is happening
		_, err := Bucket.PutCopy(v.Key, s3.PublicRead, opts, bucketName+"/"+v.Key)
		if err != nil {
			log.Panic(err.Error())
		}
		// We generate our own lastMarker.  This allows us to perform our own resume.
		lastMarker = v.Key
		results++

		if doStop == true {
			if results == maxObjs || lastMarker == stopMarker {
				break // End here.
			}
		}
	}

	fmt.Printf("\n")
	log.Println("->", results, " ", lastMarker)

	// Did Amazon say there was more?  If so, keep going.
	if Response.IsTruncated == true {
		for {
			// Issue List Command
			Response, err := Bucket.List("", "", lastMarker, 1000)
			if err != nil {
				panic(err.Error())
			}

			// Loop through the Response contents and re-copy each object with the new headers.
			for _, v := range Response.Contents {
				fmt.Printf(".") // Indicator that something is happening
				_, err := Bucket.PutCopy(v.Key, s3.PublicRead, opts, bucketName+"/"+v.Key)
				if err != nil {
					log.Panic(err.Error())
				}
				lastMarker = v.Key
				results++

				if doStop == true {
					if results == maxObjs || lastMarker == stopMarker {
						break // End here.
					}
				}
			}

			if Response.IsTruncated == false {
				break // End loop
			} else {
				fmt.Printf("\n")
				log.Println("->", results, " ", lastMarker)
			}
		}
	}
	log.Println("Wrote to", results, " S3 Objects. Last object was:", lastMarker)
}
Example #25
// HandleMessage reads the nsq message body and parses it as a github webhook,
// checks out the source for the repository & builds/uploads the binaries.
func (h *Handler) HandleMessage(m *nsq.Message) error {
	hook, err := github.ParseHook(m.Body)
	if err != nil {
		// Errors will most likely occur because not all GitHub
		// hooks have the same format; we only care about pushes
		// to master.
		logrus.Debugf("Error parsing hook: %v", err)
		return nil
	}

	shortSha := hook.After[0:7]
	// checkout the code in a temp dir
	temp, err := ioutil.TempDir("", fmt.Sprintf("commit-%s", shortSha))
	if err != nil {
		return err
	}
	defer os.RemoveAll(temp)

	if err := checkout(temp, hook.Repo.Url, hook.After); err != nil {
		logrus.Warn(err)
		return err
	}
	logrus.Debugf("Checked out %s for %s", hook.After, hook.Repo.Url)

	var (
		image     = fmt.Sprintf("docker:commit-%s", shortSha)
		container = fmt.Sprintf("build-%s", shortSha)
	)
	logrus.Infof("image=%s container=%s\n", image, container)

	// build the image
	if err := build(temp, image); err != nil {
		logrus.Warn(err)
		return err
	}
	logrus.Debugf("Successfully built image %s", image)

	// make the binary
	defer removeContainer(container)
	if err = makeBinary(temp, image, container, 20*time.Minute); err != nil {
		logrus.Warn(err)
		return err
	}
	logrus.Debugf("Successfully built binaries for %s", hook.After)

	// read the version
	version, err := getBinaryVersion(temp)
	if err != nil {
		logrus.Warnf("Getting binary version failed: %v", err)
		return err
	}

	bundlesPath := path.Join(temp, "bundles", version, "cross")

	// create commit file
	if err := ioutil.WriteFile(path.Join(bundlesPath, "commit"), []byte(hook.After), 0755); err != nil {
		return err
	}

	// create version file
	if err := ioutil.WriteFile(path.Join(bundlesPath, "version"), []byte(version), 0755); err != nil {
		return err
	}

	// use env variables to connect to s3
	auth, err := aws.EnvAuth()
	if err != nil {
		return fmt.Errorf("AWS Auth failed: %v", err)
	}

	// connect to s3 bucket
	s := s3.New(auth, aws.GetRegion(region))
	bucketname, bucketpath := bucketParts(bucket)
	bucket := s.Bucket(bucketname)

	// push to s3
	if err = pushToS3(bucket, bucketpath, bundlesPath); err != nil {
		logrus.Warn(err)
		return err
	}

	// add html to template
	if err := createIndexFile(bucket, bucketpath); err != nil {
		logrus.Warn(err)
		return err
	}

	return nil
}
Example #26
func pushToS3(bundlesPath string) error {
	if _, err := os.Stat(bundlesPath); os.IsNotExist(err) {
		return fmt.Errorf("This is awkward, the bundles path DNE: %s", bundlesPath)
	}

	// use env variables to connect to s3
	auth, err := aws.EnvAuth()
	if err != nil {
		return fmt.Errorf("AWS Auth failed: %v", err)
	}

	// connect to s3 bucket
	s := s3.New(auth, aws.GetRegion(region))
	bucketname, bucketpath := bucketParts(bucket)
	bucket := s.Bucket(bucketname)

	// walk the bundles directory
	var html string
	walkFn := func(fpath string, info os.FileInfo, err error) error {
		stat, err := os.Stat(fpath)
		if err != nil {
			return err
		}

		relFilePath, err := filepath.Rel(bundlesPath, fpath)
		if err != nil || (fpath == bundlesPath && stat.IsDir()) {
			// Error getting relative path OR we are looking
			// at the root path. Skip in both situations.
			return nil
		}

		if stat.IsDir() {
			return nil
		}

		if err = uploadFileToS3(bucket, fpath, path.Join(bucketpath, relFilePath)); err != nil {
			log.Warnf("Uploading %s to s3 failed: %v", fpath, err)
			return err
		}

		// add to html
		image := "default"
		if strings.HasSuffix(relFilePath, ".sha256") || strings.HasSuffix(relFilePath, ".md5") {
			image = "text"
		}
		html += fmt.Sprintf(`<tr>
		<td valign="top"><a href="%s"><img src="/static/%s.png" alt="[ICO]"/></a></td>
		<td><a href="%s">%s</a></td>
		<td>%s</td>
		<td>%s</td>
</tr>`, relFilePath, image, relFilePath, relFilePath, humanSize(stat.Size()), stat.ModTime().Format(time.RFC3339))

		return nil
	}

	// walk the filepath
	if err := filepath.Walk(bundlesPath, walkFn); err != nil {
		return err
	}

	// add html to template
	if err := createIndexFile(bucket, bucketpath, html); err != nil {
		return err
	}

	return nil
}
Example #27
func (s *S) TestEnvAuthNoSecret(c *check.C) {
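	// With an empty environment, EnvAuth should fail with a missing-secret-key error.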
	os.Clearenv()
	_, err := aws.EnvAuth()
	c.Assert(err, check.ErrorMatches, "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
}
Example #28
func (s *S) TestEnvAuthNoAccess(c *check.C) {
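	// With only the secret key set, EnvAuth should fail with a missing-access-key error.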
	os.Clearenv()
	os.Setenv("AWS_SECRET_ACCESS_KEY", "foo")
	_, err := aws.EnvAuth()
	c.Assert(err, check.ErrorMatches, "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
}