// TestSecurityGroups exercises the full security-group lifecycle against a
// live EC2 endpoint: create, duplicate-create rejection, rule authorization,
// read-back verification, and deletion.
func (s *ClientTests) TestSecurityGroups(c *C) {
	name := "goamz-test"
	descr := "goamz security group for tests"

	// Clean it up, if a previous test left it around and avoid leaving it around.
	s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name})
	defer s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name})

	resp1, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: name, Description: descr})
	c.Assert(err, IsNil)
	c.Assert(resp1.RequestId, Matches, ".+")
	c.Assert(resp1.Name, Equals, name)
	c.Assert(resp1.Id, Matches, ".+")

	// A second create with the same name must be rejected with a typed
	// duplicate-group error rather than succeeding silently.
	resp1, err = s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: name, Description: descr})
	ec2err, _ := err.(*ec2.Error)
	c.Assert(resp1, IsNil)
	c.Assert(ec2err, NotNil)
	c.Assert(ec2err.Code, Equals, "InvalidGroup.Duplicate")

	// Authorize a single TCP ingress rule on the new group.
	perms := []ec2.IPPerm{{
		Protocol:  "tcp",
		FromPort:  0,
		ToPort:    1024,
		SourceIPs: []string{"127.0.0.1/24"},
	}}

	resp2, err := s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: name}, perms)
	c.Assert(err, IsNil)
	c.Assert(resp2.RequestId, Matches, ".+")

	// Fetch the group back by name and verify the authorized rule
	// round-tripped exactly.
	resp3, err := s.ec2.SecurityGroups(ec2.SecurityGroupNames(name), nil)
	c.Assert(err, IsNil)
	c.Assert(resp3.RequestId, Matches, ".+")
	c.Assert(resp3.Groups, HasLen, 1)

	g0 := resp3.Groups[0]
	c.Assert(g0.Name, Equals, name)
	c.Assert(g0.Description, Equals, descr)
	c.Assert(g0.IPPerms, HasLen, 1)
	c.Assert(g0.IPPerms[0].Protocol, Equals, "tcp")
	c.Assert(g0.IPPerms[0].FromPort, Equals, 0)
	c.Assert(g0.IPPerms[0].ToPort, Equals, 1024)
	c.Assert(g0.IPPerms[0].SourceIPs, DeepEquals, []string{"127.0.0.1/24"})

	resp2, err = s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name})
	c.Assert(err, IsNil)
	c.Assert(resp2.RequestId, Matches, ".+")
}
func (cloudManager *EC2SpotManager) SpawnInstance(d *distro.Distro, owner string, userHost bool) (*host.Host, error) { if d.Provider != SpotProviderName { return nil, fmt.Errorf("Can't spawn instance of %v for distro %v: provider is %v", SpotProviderName, d.Id, d.Provider) } ec2Handle := getUSEast(*cloudManager.awsCredentials) //Decode and validate the ProviderSettings into the ec2-specific ones. ec2Settings := &EC2SpotSettings{} if err := mapstructure.Decode(d.ProviderSettings, ec2Settings); err != nil { return nil, fmt.Errorf("Error decoding params for distro %v: %v", d.Id, err) } if err := ec2Settings.Validate(); err != nil { return nil, fmt.Errorf("Invalid EC2 spot settings in distro %v: %v", d.Id, err) } blockDevices, err := makeBlockDeviceMappings(ec2Settings.MountPoints) if err != nil { return nil, err } instanceName := generateName(d.Id) intentHost := &host.Host{ Id: instanceName, User: d.User, Distro: *d, Tag: instanceName, CreationTime: time.Now(), Status: evergreen.HostUninitialized, TerminationTime: model.ZeroTime, TaskDispatchTime: model.ZeroTime, Provider: SpotProviderName, InstanceType: ec2Settings.InstanceType, RunningTask: "", Provisioned: false, StartedBy: owner, UserHost: userHost, } // record this 'intent host' if err := intentHost.Insert(); err != nil { return nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not insert intent "+ "host “%v”: %v", intentHost.Id, err) } evergreen.Logger.Logf(slogger.DEBUG, "Successfully inserted intent host “%v” "+ "for distro “%v” to signal cloud instance spawn intent", instanceName, d.Id) spotRequest := &ec2.RequestSpotInstances{ SpotPrice: fmt.Sprintf("%v", ec2Settings.BidPrice), InstanceCount: 1, ImageId: ec2Settings.AMI, KeyName: ec2Settings.KeyName, InstanceType: ec2Settings.InstanceType, SecurityGroups: ec2.SecurityGroupNames(ec2Settings.SecurityGroup), BlockDevices: blockDevices, } spotResp, err := ec2Handle.RequestSpotInstances(spotRequest) if err != nil { //Remove the intent host if the API call failed 
if err := intentHost.Remove(); err != nil { evergreen.Logger.Logf(slogger.ERROR, "Failed to remove intent host %v: %v", intentHost.Id, err) } return nil, evergreen.Logger.Errorf(slogger.ERROR, "Failed starting spot instance "+ " for distro '%v' on intent host %v: %v", d.Id, intentHost.Id, err) } spotReqRes := spotResp.SpotRequestResults[0] if spotReqRes.State != SpotStatusOpen && spotReqRes.State != SpotStatusActive { return nil, evergreen.Logger.Errorf(slogger.ERROR, "Spot request %v was found in "+ " state %v on intent host %v", spotReqRes.SpotRequestId, spotReqRes.State, intentHost.Id) } intentHost.Id = spotReqRes.SpotRequestId err = intentHost.Insert() if err != nil { return nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not insert updated host info with id %v"+ " for intent host %v: %v", intentHost.Id, instanceName, err) } //find the old intent host and remove it, since we now have the real //host doc successfully stored. oldIntenthost, err := host.FindOne(host.ById(instanceName)) if err != nil { return nil, evergreen.Logger.Errorf(slogger.ERROR, "Can't locate "+ "record inserted for intended host '%v' due to error: %v", instanceName, err) } if oldIntenthost == nil { return nil, evergreen.Logger.Errorf(slogger.ERROR, "Can't locate "+ "record inserted for intended host '%v'", instanceName) } err = oldIntenthost.Remove() if err != nil { evergreen.Logger.Logf(slogger.ERROR, "Could not remove intent host "+ "“%v”: %v", oldIntenthost.Id, err) return nil, err } // create some tags based on user, hostname, owner, time, etc. tags := makeTags(intentHost) // attach the tags to this instance err = attachTags(ec2Handle, tags, intentHost.Id) if err != nil { evergreen.Logger.Errorf(slogger.ERROR, "Unable to attach tags for %v: %v", intentHost.Id, err) } else { evergreen.Logger.Logf(slogger.DEBUG, "Attached tag name “%v” for “%v”", instanceName, intentHost.Id) } return intentHost, nil }
func (cloudManager *EC2Manager) SpawnInstance(d *distro.Distro, owner string, userHost bool) (*host.Host, error) { if d.Provider != OnDemandProviderName { return nil, fmt.Errorf("Can't spawn instance of %v for distro %v: provider is %v", OnDemandProviderName, d.Id, d.Provider) } ec2Handle := getUSEast(*cloudManager.awsCredentials) //Decode and validate the ProviderSettings into the ec2-specific ones. ec2Settings := &EC2ProviderSettings{} if err := mapstructure.Decode(d.ProviderSettings, ec2Settings); err != nil { return nil, fmt.Errorf("Error decoding params for distro %v: %v", d.Id, err) } if err := ec2Settings.Validate(); err != nil { return nil, fmt.Errorf("Invalid EC2 settings in distro %#v: %v and %#v", d, err, ec2Settings) } blockDevices, err := makeBlockDeviceMappings(ec2Settings.MountPoints) if err != nil { return nil, err } instanceName := generateName(d.Id) // proactively write all possible information pertaining // to the host we want to create. this way, if we are unable // to start it or record its instance id, we have a way of knowing // something went wrong - and what intentHost := &host.Host{ Id: instanceName, User: d.User, Distro: *d, Tag: instanceName, CreationTime: time.Now(), Status: evergreen.HostUninitialized, TerminationTime: model.ZeroTime, TaskDispatchTime: model.ZeroTime, Provider: evergreen.HostTypeEC2, InstanceType: ec2Settings.InstanceType, StartedBy: owner, UserHost: userHost, } // record this 'intent host' if err := intentHost.Insert(); err != nil { return nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not insert intent "+ "host “%v”: %v", intentHost.Id, err) } evergreen.Logger.Logf(slogger.DEBUG, "Successfully inserted intent host “%v” "+ "for distro “%v” to signal cloud instance spawn intent", instanceName, d.Id) options := ec2.RunInstances{ MinCount: 1, MaxCount: 1, ImageId: ec2Settings.AMI, KeyName: ec2Settings.KeyName, InstanceType: ec2Settings.InstanceType, SecurityGroups: ec2.SecurityGroupNames(ec2Settings.SecurityGroup), 
BlockDevices: blockDevices, } // start the instance - starting an instance does not mean you can connect // to it immediately you have to use GetInstanceStatus below to ensure that // it's actually running newHost, resp, err := startEC2Instance(ec2Handle, &options, intentHost) if err != nil { return nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not start new "+ "instance for distro “%v”. Accompanying host record is “%v”: %v", d.Id, intentHost.Id, err) } instance := resp.Instances[0] // create some tags based on user, hostname, owner, time, etc. tags := makeTags(intentHost) // attach the tags to this instance err = attachTags(ec2Handle, tags, instance.InstanceId) if err != nil { evergreen.Logger.Errorf(slogger.ERROR, "Unable to attach tags for %v: %v", instance.InstanceId, err) } else { evergreen.Logger.Logf(slogger.DEBUG, "Attached tag name “%v” for “%v”", instanceName, instance.InstanceId) } return newHost, nil }