// instances prints one line per EC2 instance, optionally including its
// state and public DNS name, and skips terminated instances unless the
// "all" flag is set.
func instances(c cmd, conn *ec2.EC2, args []string) {
	resp, err := conn.Instances(nil, nil)
	if err != nil {
		fatalf("cannot get instances: %v", err)
	}
	var line []string
	for _, r := range resp.Reservations {
		for _, inst := range r.Instances {
			if !instancesFlags.all && inst.State.Name == "terminated" {
				continue
			}
			line = append(line[:0], inst.InstanceId)
			if instancesFlags.state {
				line = append(line, inst.State.Name)
			}
			if instancesFlags.addr {
				if inst.DNSName == "" {
					inst.DNSName = "none"
				}
				line = append(line, inst.DNSName)
			}
			fmt.Printf("%s\n", strings.Join(line, " "))
		}
	}
}
// Load loads an instance by its Id.
func Load(ec2Ref *ec2.EC2, instance *Instance) (ec2.Instance, error) {
	if instance.Id == "" {
		return ec2.Instance{}, errors.New("to load an instance you need to pass its Id")
	}
	resp, err := ec2Ref.Instances([]string{instance.Id}, nil)
	if err != nil {
		return ec2.Instance{}, err
	} else if len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 {
		return ec2.Instance{}, fmt.Errorf("no instance was found with instance Id <%s>", instance.Id)
	}
	instanceRef := resp.Reservations[0].Instances[0]
	mergeInstances(instance, &instanceRef)
	return instanceRef, nil
}
// update fetches details of the given running instances into the cache.
// When a single id is requested and already cached, the query is skipped.
func (c instanceCache) update(ec2client *ec2.EC2, ids ...string) error {
	if len(ids) == 1 {
		if _, ok := c[ids[0]]; ok {
			return nil
		}
	}
	filter := ec2.NewFilter()
	filter.Add("instance-state-name", "running")
	resp, err := ec2client.Instances(ids, filter)
	if err != nil {
		return errors.Annotate(err, "querying instance details")
	}
	for j := range resp.Reservations {
		r := &resp.Reservations[j]
		for _, inst := range r.Instances {
			c[inst.InstanceId] = inst
		}
	}
	return nil
}
func destroyVolume(client *ec2.EC2, volumeId string) (err error) {
	defer func() {
		if err != nil {
			if ec2ErrCode(err) == volumeNotFound || errors.IsNotFound(err) {
				// Either the volume isn't found, or we queried the
				// instance corresponding to a DeleteOnTermination
				// attachment; in either case, the volume is or will
				// be destroyed.
				logger.Tracef("Ignoring error destroying volume %q: %v", volumeId, err)
				err = nil
			}
		}
	}()

	logger.Debugf("destroying %q", volumeId)
	// Volumes must not be in-use when destroying. A volume may
	// still be in-use when the instance it is attached to is
	// in the process of being terminated.
	volume, err := waitVolume(client, volumeId, destroyVolumeAttempt, func(volume *ec2.Volume) (bool, error) {
		if volume.Status != volumeStatusInUse {
			// Volume is not in use, it should be OK to destroy now.
			return true, nil
		}
		if len(volume.Attachments) == 0 {
			// There are no attachments remaining now; keep querying
			// until volume transitions out of in-use.
			return false, nil
		}
		var deleteOnTermination []string
		var args []storage.VolumeAttachmentParams
		for _, a := range volume.Attachments {
			switch a.Status {
			case attachmentStatusAttaching, attachmentStatusAttached:
				// The volume is attaching or attached to an
				// instance, we need for it to be detached
				// before we can destroy it.
				args = append(args, storage.VolumeAttachmentParams{
					AttachmentParams: storage.AttachmentParams{
						InstanceId: instance.Id(a.InstanceId),
					},
					VolumeId: volumeId,
				})
				if a.DeleteOnTermination {
					// The volume is still attached, and the
					// attachment is "delete on termination";
					// check if the related instance is being
					// terminated, in which case we can stop
					// waiting and skip destroying the volume.
					//
					// Note: we still accrue in "args" above
					// in case the instance is not terminating;
					// in that case we detach and destroy as
					// usual.
					deleteOnTermination = append(
						deleteOnTermination, a.InstanceId,
					)
				}
			}
		}
		if len(deleteOnTermination) > 0 {
			result, err := client.Instances(deleteOnTermination, nil)
			if err != nil {
				return false, errors.Trace(err)
			}
			for _, reservation := range result.Reservations {
				for _, instance := range reservation.Instances {
					switch instance.State.Name {
					case instanceStateShuttingDown, instanceStateTerminated:
						// The instance is or will be terminated,
						// and so the volume will be deleted by
						// virtue of delete-on-termination.
						return true, nil
					}
				}
			}
		}
		if len(args) == 0 {
			return false, nil
		}
		results, err := detachVolumes(client, args)
		if err != nil {
			return false, errors.Trace(err)
		}
		for _, err := range results {
			if err != nil {
				return false, errors.Trace(err)
			}
		}
		return false, nil
	})
	if err != nil {
		if err == errWaitVolumeTimeout {
			return errors.Errorf("timed out waiting for volume %v to not be in-use", volumeId)
		}
		return errors.Trace(err)
	}
	if volume.Status == volumeStatusInUse {
		// If the volume is in-use, that means it will be
		// handled by delete-on-termination and we have
		// nothing more to do.
		return nil
	}
	if _, err := client.DeleteVolume(volumeId); err != nil {
		return errors.Annotatef(err, "destroying %q", volumeId)
	}
	return nil
}
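All four functions share the same query shape: call Instances with an optional list of instance ids and an optional filter, then walk Reservations and their Instances. The following is a minimal, self-contained sketch of that pattern; the import path, region choice, and environment-variable credentials are assumptions and are not taken from the snippets above.

package main

import (
	"fmt"
	"log"

	"github.com/goamz/goamz/aws" // assumed fork/import path; the snippets do not show their imports
	"github.com/goamz/goamz/ec2"
)

func main() {
	// Credentials from AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY (assumption).
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.New(auth, aws.USEast) // region is an arbitrary choice

	// ids may be nil (all instances), and a filter can restrict the
	// results, e.g. to running instances as in the cache update above.
	filter := ec2.NewFilter()
	filter.Add("instance-state-name", "running")
	resp, err := client.Instances(nil, filter)
	if err != nil {
		log.Fatalf("cannot get instances: %v", err)
	}
	for _, r := range resp.Reservations {
		for _, inst := range r.Instances {
			fmt.Println(inst.InstanceId, inst.State.Name, inst.DNSName)
		}
	}
}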