Example #1
// groups lists the account's security groups at a verbosity controlled by groupsFlags.
func groups(c cmd, conn *ec2.EC2, _ []string) {
	resp, err := conn.SecurityGroups(nil, nil)
	check(err, "list groups")
	var b bytes.Buffer
	printf := func(f string, a ...interface{}) {
		fmt.Fprintf(&b, f, a...)
	}
	for _, g := range resp.Groups {
		switch {
		case groupsFlags.vv:
			printf("%s:%s %s %q\n", g.OwnerId, g.Name, g.Id, g.Description)
			for _, p := range g.IPPerms {
				printf("\t")
				printf("\t-proto %s -from %d -to %d", p.Protocol, p.FromPort, p.ToPort)
				for _, g := range p.SourceGroups {
					printf(" %s", g.Id)
				}
				for _, ip := range p.SourceIPs {
					printf(" %s", ip)
				}
				printf("\n")
			}
		case groupsFlags.v:
			printf("%s %s %q\n", g.Name, g.Id, g.Description)
		case groupsFlags.ids:
			printf("%s\n", g.Id)
		default:
			printf("%s\n", g.Name)
		}
	}
	os.Stdout.Write(b.Bytes())
}
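All of these examples receive an already-constructed *ec2.EC2 client. How that client is built depends on which fork of the library a project uses (goamz, amz.v3, ...); as a rough sketch, assuming the github.com/goamz/goamz fork and credentials read from the environment, it could look like this:

package main

import (
	"log"

	"github.com/goamz/goamz/aws"
	"github.com/goamz/goamz/ec2"
)

// newEC2Client builds an EC2 client from credentials in the environment.
func newEC2Client() *ec2.EC2 {
	// EnvAuth reads AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatalf("cannot read AWS credentials: %v", err)
	}
	// The region chosen here is arbitrary; use the one your resources live in.
	return ec2.New(auth, aws.USEast)
}

func main() {
	client := newEC2Client()
	_ = client // pass this to any of the helpers on this page
}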
Example #2
File: ebs.go Project: bac/juju
func listVolumes(client *ec2.EC2, filter *ec2.Filter) ([]string, error) {
	resp, err := client.Volumes(nil, filter)
	if err != nil {
		return nil, err
	}
	volumeIds := make([]string, 0, len(resp.Volumes))
	for _, vol := range resp.Volumes {
		var isRootDisk bool
		for _, att := range vol.Attachments {
			if att.Device == rootDiskDeviceName {
				isRootDisk = true
				break
			}
		}
		if isRootDisk {
			// We don't want to list root disks in the output.
			// These are managed by the instance provisioning
			// code; they will be created and destroyed with
			// instances.
			continue
		}
		volumeIds = append(volumeIds, vol.Id)
	}
	return volumeIds, nil
}
Example #3
func revoke(c cmd, conn *ec2.EC2, args []string) {
	if len(args) < 1 {
		c.usage()
	}
	_, err := conn.RevokeSecurityGroup(parseGroup(args[0]), ipPerms(args[1:]))
	check(err, "revokeSecurityGroup")
}
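The parseGroup and ipPerms helpers used here (and by auth in Example #6) are defined elsewhere in the same tool and are not shown. Judging from the id/name handling in Example #20, parseGroup presumably maps an argument to an ec2.SecurityGroup by id or by name; a hypothetical reconstruction, not the original code, might be:

// Hypothetical reconstruction: treat anything that looks like a
// security-group id as an Id, and everything else as a group name.
var secGroupPat = regexp.MustCompile(`^sg-[0-9a-f]+$`)

func parseGroup(s string) ec2.SecurityGroup {
	if secGroupPat.MatchString(s) {
		return ec2.SecurityGroup{Id: s}
	}
	return ec2.SecurityGroup{Name: s}
}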
Example #4
func mkgroup(c cmd, conn *ec2.EC2, args []string) {
	if len(args) != 2 {
		c.usage()
	}
	_, err := conn.CreateSecurityGroup("", args[0], args[1])
	check(err, "create security group")
}
Example #5
// Create new volume
func Create(ec2Ref *ec2.EC2, volume *Volume) (ec2.Volume, error) {
	options := ec2.CreateVolume{
		VolumeType: volume.Type,
		VolumeSize: volume.Size,
		AvailZone:  volume.AvailableZone,
	}

	// Fall back to the default zone when no availability zone was given.
	if volume.AvailableZone == "" {
		options.AvailZone = DefaultAvailableZone
	}

	if volume.Type == "io1" {
		options.IOPS = volume.IOPS
	}

	resp, err := ec2Ref.CreateVolume(options)
	if err != nil {
		return ec2.Volume{}, err
	}

	volumeRef := resp.Volume
	_, err = ec2Ref.CreateTags([]string{volumeRef.Id}, []ec2.Tag{{"Name", volume.Name}})
	if err != nil {
		return ec2.Volume{}, err
	}

	mergeVolumes(volume, &volumeRef)

	err = WaitUntilState(ec2Ref, volume, "available")
	if err != nil {
		return ec2.Volume{}, err
	}

	return volumeRef, nil
}
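A hypothetical call site for this helper, assuming a Volume struct with the fields referenced above (Name, Type, Size, AvailableZone):

vol := &Volume{
	Name:          "data-01",
	Type:          "gp2",
	Size:          20,
	AvailableZone: "us-east-1a",
}
created, err := Create(ec2Client, vol) // ec2Client is a *ec2.EC2
if err != nil {
	log.Fatalf("cannot create volume: %v", err)
}
log.Println("created volume", created.Id)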
Example #6
func auth(c cmd, conn *ec2.EC2, args []string) {
	if len(args) < 1 {
		c.usage()
	}
	_, err := conn.AuthorizeSecurityGroup(parseGroup(args[0]), ipPerms(args[1:]))
	check(err, "authorizeSecurityGroup")
}
Example #7
// instances lists EC2 instances, optionally adding state and address columns (see instancesFlags).
func instances(c cmd, conn *ec2.EC2, args []string) {
	resp, err := conn.Instances(nil, nil)
	if err != nil {
		fatalf("cannot get instances: %v", err)
	}
	var line []string
	for _, r := range resp.Reservations {
		for _, inst := range r.Instances {
			if !instancesFlags.all && inst.State.Name == "terminated" {
				continue
			}
			line = append(line[:0], inst.InstanceId)
			if instancesFlags.state {
				line = append(line, inst.State.Name)
			}
			if instancesFlags.addr {
				if inst.DNSName == "" {
					inst.DNSName = "none"
				}
				line = append(line, inst.DNSName)
			}
			fmt.Printf("%s\n", strings.Join(line, " "))
		}
	}
}
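instancesFlags (like groupsFlags in Example #1) is package-level state wired up elsewhere in the tool; from the fields used here, it is presumably something like:

// Presumed flag set, inferred from the usage above; the real tool binds
// these fields to command-line flags.
var instancesFlags struct {
	all   bool
	state bool
	addr  bool
}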
Example #8
func Terminate(ec2Ref *ec2.EC2, instance Instance) error {
	logger.Println("Terminating instance", instance.Id)
	_, err := ec2Ref.TerminateInstances([]string{instance.Id})
	if err == nil {
		logger.Printf("Instance <%s> was destroyed!\n", instance.Id)
	}

	return err
}
Example #9
func terminate(c cmd, conn *ec2.EC2, args []string) {
	if len(args) == 0 {
		return
	}
	_, err := conn.TerminateInstances(args)
	if err != nil {
		fatalf("cannot terminate instances: %v", err)
	}
}
Example #10
// Create new instance
func Create(ec2Ref *ec2.EC2, instance *Instance) (ec2.Instance, error) {
	options := ec2.RunInstances{
		ImageId:               instance.ImageID,
		InstanceType:          instance.Type,
		KeyName:               instance.KeyName,
		SecurityGroups:        make([]ec2.SecurityGroup, len(instance.SecurityGroups)),
		SubnetId:              instance.SubnetID,
		EBSOptimized:          instance.EBSOptimized,
		DisableAPITermination: !instance.EnableAPITermination,
		AvailZone:             instance.AvailableZone,
	}

	if instance.CloudConfig != "" {
		userdata, err := ioutil.ReadFile(instance.CloudConfig)
		if err != nil {
			panic(err.Error())
		}

		options.UserData = userdata
	}

	if instance.ShutdownBehavior != "" {
		options.ShutdownBehavior = instance.ShutdownBehavior
	}

	if instance.PlacementGroupName != "" {
		options.PlacementGroupName = instance.PlacementGroupName
	}

	for i, securityGroup := range instance.SecurityGroups {
		options.SecurityGroups[i] = ec2.SecurityGroup{Id: securityGroup}
	}

	resp, err := ec2Ref.RunInstances(&options)
	if err != nil {
		return ec2.Instance{}, err
	} else if len(resp.Instances) == 0 {
		return ec2.Instance{}, errors.New("no instance was created")
	}

	ec2Instance := resp.Instances[0]
	tags := append(instance.Tags, ec2.Tag{"Name", instance.Name})
	_, err = ec2Ref.CreateTags([]string{ec2Instance.InstanceId}, tags)
	if err != nil {
		return ec2.Instance{}, err
	}

	mergeInstances(instance, &ec2Instance)

	err = WaitUntilState(ec2Ref, instance, "running")
	if err != nil {
		return ec2.Instance{}, err
	}

	return ec2Instance, nil
}
Example #11
File: ebs.go Project: bac/juju
func describeVolume(client *ec2.EC2, volumeId string) (*ec2.Volume, error) {
	resp, err := client.Volumes([]string{volumeId}, nil)
	if err != nil {
		return nil, errors.Annotate(err, "querying volume")
	}
	if len(resp.Volumes) == 0 {
		return nil, errors.NotFoundf("%v", volumeId)
	} else if len(resp.Volumes) != 1 {
		return nil, errors.Errorf("expected one volume, got %d", len(resp.Volumes))
	}
	return &resp.Volumes[0], nil
}
Example #12
func AttachVolumes(ec2Ref *ec2.EC2, InstanceId string, volumes []volume.Volume) error {
	for _, myVolume := range volumes {
		_, err := ec2Ref.AttachVolume(myVolume.Id, InstanceId, myVolume.Device)
		if err != nil {
			// Anything other than a "VolumeInUse" EC2 error is fatal;
			// VolumeInUse just means the volume is already attached.
			reqError, ok := err.(*ec2.Error)
			if !ok || reqError.Code != "VolumeInUse" {
				return err
			}
		}
	}

	return nil
}
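A hypothetical call, assuming a volume.Volume type that carries at least the Id and Device fields used above:

vols := []volume.Volume{
	{Id: "vol-0123abcd", Device: "/dev/sdf"},
}
if err := AttachVolumes(ec2Client, "i-0456cdef", vols); err != nil {
	log.Fatalf("cannot attach volumes: %v", err)
}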
Example #13
// AttachVolumes ...
func AttachVolumes(ec2Ref *ec2.EC2, InstanceID string, volumes []volume.Volume) error {
	for _, volumeConfig := range volumes {
		_, err := ec2Ref.AttachVolume(volumeConfig.ID, InstanceID, volumeConfig.Device)
		if err != nil {
			// Anything other than a "VolumeInUse" EC2 error is fatal;
			// VolumeInUse just means the volume is already attached.
			reqError, ok := err.(*ec2.Error)
			if !ok || reqError.Code != "VolumeInUse" {
				return err
			}
		}
	}

	return nil
}
Example #14
// Load an instance by passing its Id
func Load(ec2Ref *ec2.EC2, instance *Instance) (ec2.Instance, error) {
	if instance.Id == "" {
		return ec2.Instance{}, errors.New("To load an instance you need to pass its Id")
	}
	}

	resp, err := ec2Ref.Instances([]string{instance.Id}, nil)
	if err != nil {
		return ec2.Instance{}, err
	} else if len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 {
		return ec2.Instance{}, fmt.Errorf("no instance was found with Id <%s>", instance.Id)
	}

	instanceRef := resp.Reservations[0].Instances[0]
	mergeInstances(instance, &instanceRef)

	return instanceRef, nil
}
Example #15
// Load a volume by passing its Id
func Load(ec2Ref *ec2.EC2, volume *Volume) (ec2.Volume, error) {
	if volume.Id == "" {
		return ec2.Volume{}, errors.New("To load a volume you need to pass its Id")
	}

	resp, err := ec2Ref.Volumes([]string{volume.Id}, nil)
	if err != nil {
		return ec2.Volume{}, err
	} else if len(resp.Volumes) == 0 {
		return ec2.Volume{}, fmt.Errorf("no volume was found with Id <%s>", volume.Id)
	}

	volumeRef := resp.Volumes[0]
	mergeVolumes(volume, &volumeRef)

	return volumeRef, nil
}
Example #16
// Load a volume passing its Id
func Load(ec2Ref *ec2.EC2, volume *Volume) (ec2.Volume, error) {
	if volume.ID == "" {
		return ec2.Volume{}, errors.New("To load a volume you need to pass its Id")
	}

	resp, err := ec2Ref.Volumes([]string{volume.ID}, nil)
	if err != nil {
		return ec2.Volume{}, err
	} else if len(resp.Volumes) == 0 {
		return ec2.Volume{}, fmt.Errorf("no volume was found with Id <%s>", volume.ID)
	}

	ec2Volume := resp.Volumes[0]
	mergeVolumes(volume, &ec2Volume)

	return ec2Volume, nil
}
Example #17
func (c instanceCache) update(ec2client *ec2.EC2, ids ...string) error {
	if len(ids) == 1 {
		if _, ok := c[ids[0]]; ok {
			return nil
		}
	}
	filter := ec2.NewFilter()
	filter.Add("instance-state-name", "running")
	resp, err := ec2client.Instances(ids, filter)
	if err != nil {
		return errors.Annotate(err, "querying instance details")
	}
	for j := range resp.Reservations {
		r := &resp.Reservations[j]
		for _, inst := range r.Instances {
			c[inst.InstanceId] = inst
		}
	}
	return nil
}
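The instanceCache type itself is not part of this snippet. Given that update indexes instances by InstanceId, it is presumably a plain map:

// Presumed definition, inferred from the usage above.
type instanceCache map[string]ec2.Instance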
Example #18
// Create new volume
func Create(ec2Ref *ec2.EC2, volume *Volume) (ec2.Volume, error) {
	options := ec2.CreateVolume{
		VolumeType: volume.Type,
		AvailZone:  volume.AvailableZone,
	}

	if volume.Size > 0 {
		options.VolumeSize = volume.Size
	}

	if volume.SnapshotID != "" {
		options.SnapshotId = volume.SnapshotID
	}

	if volume.Type == "io1" {
		options.IOPS = volume.IOPS
	}

	resp, err := ec2Ref.CreateVolume(options)
	if err != nil {
		return ec2.Volume{}, err
	}

	ec2Volume := resp.Volume
	tags := append(volume.Tags, ec2.Tag{"Name", volume.Name})
	_, err = ec2Ref.CreateTags([]string{ec2Volume.Id}, tags)
	if err != nil {
		return ec2.Volume{}, err
	}

	mergeVolumes(volume, &ec2Volume)

	err = WaitUntilState(ec2Ref, volume, "available")
	if err != nil {
		return ec2.Volume{}, err
	}

	return ec2Volume, nil
}
Example #19
File: ebs.go Project: bac/juju
func detachVolumes(client *ec2.EC2, attachParams []storage.VolumeAttachmentParams) ([]error, error) {
	results := make([]error, len(attachParams))
	for i, params := range attachParams {
		_, err := client.DetachVolume(params.VolumeId, string(params.InstanceId), "", false)
		// Process aws specific error information.
		if err != nil {
			if ec2Err, ok := err.(*ec2.Error); ok {
				switch ec2Err.Code {
				// attachment not found means this volume is already detached.
				case attachmentNotFound:
					err = nil
				}
			}
		}
		if err != nil {
			results[i] = errors.Annotatef(
				err, "detaching %v from %v", params.Volume, params.Machine,
			)
		}
	}
	return results, nil
}
Example #20
func delgroup(c cmd, conn *ec2.EC2, args []string) {
	run := parallel.NewRun(40)
	for _, g := range args {
		g := g
		run.Do(func() error {
			var ec2g ec2.SecurityGroup
			if secGroupPat.MatchString(g) {
				ec2g.Id = g
			} else {
				ec2g.Name = g
			}
			_, err := conn.DeleteSecurityGroup(ec2g)
			if err != nil {
				errorf("cannot delete %q: %v", g, err)
				return errgo.Newf("error")
			}
			return nil
		})
	}
	if run.Wait() != nil {
		os.Exit(1)
	}
}
Example #21
// createGroup creates a new EC2 group and returns it. If it already exists,
// it revokes all its permissions and returns the existing group.
func createGroup(c *gc.C, ec2conn *amzec2.EC2, name, descr string) amzec2.SecurityGroup {
	resp, err := ec2conn.CreateSecurityGroup("", name, descr)
	if err == nil {
		return resp.SecurityGroup
	}
	if err.(*amzec2.Error).Code != "InvalidGroup.Duplicate" {
		c.Fatalf("cannot make group %q: %v", name, err)
	}

	// Found duplicate group, so revoke its permissions and return it.
	gresp, err := ec2conn.SecurityGroups(amzec2.SecurityGroupNames(name), nil)
	c.Assert(err, jc.ErrorIsNil)

	gi := gresp.Groups[0]
	if len(gi.IPPerms) > 0 {
		_, err = ec2conn.RevokeSecurityGroup(gi.SecurityGroup, gi.IPPerms)
		c.Assert(err, jc.ErrorIsNil)
	}
	return gi.SecurityGroup
}
Example #22
func Reboot(ec2Ref *ec2.EC2, instance Instance) error {
	logger.Println("Rebooting instance", instance.Id)
	_, err := ec2Ref.RebootInstances(instance.Id)
	return err
}
Example #23
File: ebs.go Project: bac/juju
func destroyVolume(client *ec2.EC2, volumeId string) (err error) {
	defer func() {
		if err != nil {
			if ec2ErrCode(err) == volumeNotFound || errors.IsNotFound(err) {
				// Either the volume isn't found, or we queried the
				// instance corresponding to a DeleteOnTermination
				// attachment; in either case, the volume is or will
				// be destroyed.
				logger.Tracef("Ignoring error destroying volume %q: %v", volumeId, err)
				err = nil
			}
		}
	}()

	logger.Debugf("destroying %q", volumeId)
	// Volumes must not be in-use when destroying. A volume may
	// still be in-use when the instance it is attached to is
	// in the process of being terminated.
	volume, err := waitVolume(client, volumeId, destroyVolumeAttempt, func(volume *ec2.Volume) (bool, error) {
		if volume.Status != volumeStatusInUse {
			// Volume is not in use, it should be OK to destroy now.
			return true, nil
		}
		if len(volume.Attachments) == 0 {
			// There are no attachments remaining now; keep querying
			// until volume transitions out of in-use.
			return false, nil
		}
		var deleteOnTermination []string
		var args []storage.VolumeAttachmentParams
		for _, a := range volume.Attachments {
			switch a.Status {
			case attachmentStatusAttaching, attachmentStatusAttached:
				// The volume is attaching or attached to an
				// instance; we need it to be detached
				// before we can destroy it.
				args = append(args, storage.VolumeAttachmentParams{
					AttachmentParams: storage.AttachmentParams{
						InstanceId: instance.Id(a.InstanceId),
					},
					VolumeId: volumeId,
				})
				if a.DeleteOnTermination {
					// The volume is still attached, and the
					// attachment is "delete on termination";
					// check if the related instance is being
					// terminated, in which case we can stop
					// waiting and skip destroying the volume.
					//
					// Note: we still accrue in "args" above
					// in case the instance is not terminating;
					// in that case we detach and destroy as
					// usual.
					deleteOnTermination = append(
						deleteOnTermination, a.InstanceId,
					)
				}
			}
		}
		if len(deleteOnTermination) > 0 {
			result, err := client.Instances(deleteOnTermination, nil)
			if err != nil {
				return false, errors.Trace(err)
			}
			for _, reservation := range result.Reservations {
				for _, instance := range reservation.Instances {
					switch instance.State.Name {
					case instanceStateShuttingDown, instanceStateTerminated:
						// The instance is or will be terminated,
						// and so the volume will be deleted by
						// virtue of delete-on-termination.
						return true, nil
					}
				}
			}
		}
		if len(args) == 0 {
			return false, nil
		}
		results, err := detachVolumes(client, args)
		if err != nil {
			return false, errors.Trace(err)
		}
		for _, err := range results {
			if err != nil {
				return false, errors.Trace(err)
			}
		}
		return false, nil
	})
	if err != nil {
		if err == errWaitVolumeTimeout {
			return errors.Errorf("timed out waiting for volume %v to not be in-use", volumeId)
		}
		return errors.Trace(err)
	}
	if volume.Status == volumeStatusInUse {
		// If the volume is in-use, that means it will be
		// handled by delete-on-termination and we have
		// nothing more to do.
		return nil
	}
	if _, err := client.DeleteVolume(volumeId); err != nil {
		return errors.Annotatef(err, "destroying %q", volumeId)
	}
	return nil
}
Example #24
// Create new instance
func Create(ec2Ref *ec2.EC2, instance *Instance) (ec2.Instance, error) {
	options := ec2.RunInstances{
		ImageId:               instance.ImageId,
		InstanceType:          instance.Type,
		KeyName:               instance.KeyName,
		SecurityGroups:        make([]ec2.SecurityGroup, len(instance.SecurityGroups)),
		SubnetId:              instance.SubnetId,
		EBSOptimized:          instance.EBSOptimized,
		DisableAPITermination: !instance.EnableAPITermination,
	}

	if instance.CloudConfig != "" {
		cloudConfigTemplate, err := ioutil.ReadFile(instance.CloudConfig)
		if err != nil {
			panic(err.Error())
		}

		tpl := template.Must(template.New("cloudConfig").Parse(string(cloudConfigTemplate)))

		cloudConfig := new(bytes.Buffer)
		if err = tpl.Execute(cloudConfig, instance); err != nil {
			panic(err.Error())
		}

		options.UserData = cloudConfig.Bytes()
	}

	if instance.ShutdownBehavior != "" {
		options.ShutdownBehavior = instance.ShutdownBehavior
	}

	if instance.PlacementGroupName != "" {
		options.PlacementGroupName = instance.PlacementGroupName
	}

	for i, securityGroup := range instance.SecurityGroups {
		options.SecurityGroups[i] = ec2.SecurityGroup{Id: securityGroup}
	}

	resp, err := ec2Ref.RunInstances(&options)
	if err != nil {
		return ec2.Instance{}, err
	} else if len(resp.Instances) == 0 {
		return ec2.Instance{}, errors.New("no instance was created")
	}

	instanceRef := resp.Instances[0]
	_, err = ec2Ref.CreateTags([]string{instanceRef.InstanceId}, []ec2.Tag{{"Name", instance.Name}})
	if err != nil {
		return ec2.Instance{}, err
	}

	mergeInstances(instance, &instanceRef)

	err = WaitUntilState(ec2Ref, instance, "running")
	if err != nil {
		return ec2.Instance{}, err
	}

	return instanceRef, nil
}