func (provisioner *Boot2DockerProvisioner) upgradeIso() error { // TODO: Ideally, we should not read from mcndirs directory at all. // The driver should be able to communicate how and where to place the // relevant files. b2dutils := mcnutils.NewB2dUtils(mcndirs.GetBaseDir()) // Check if the driver has specified a custom b2d url jsonDriver, err := json.Marshal(provisioner.GetDriver()) if err != nil { return err } var d struct { Boot2DockerURL string } json.Unmarshal(jsonDriver, &d) log.Info("Downloading latest boot2docker iso...") // Usually we call this implicitly, but call it here explicitly to get // the latest default boot2docker ISO. if d.Boot2DockerURL == "" { if err := b2dutils.DownloadLatestBoot2Docker(d.Boot2DockerURL); err != nil { return err } } log.Info("Stopping machine to do the upgrade...") if err := provisioner.Driver.Stop(); err != nil { return err } if err := mcnutils.WaitFor(drivers.MachineInState(provisioner.Driver, state.Stopped)); err != nil { return err } machineName := provisioner.GetDriver().GetMachineName() log.Infof("Upgrading machine %q...", machineName) // Either download the latest version of the b2d url that was explicitly // specified when creating the VM or copy the (updated) default ISO if err := b2dutils.CopyIsoToMachineDir(d.Boot2DockerURL, machineName); err != nil { return err } log.Infof("Starting machine back up...") if err := provisioner.Driver.Start(); err != nil { return err } return mcnutils.WaitFor(drivers.MachineInState(provisioner.Driver, state.Running)) }
func WaitForSSH(d Driver) error { // Try to dial SSH for 30 seconds before timing out. if err := mcnutils.WaitFor(sshAvailableFunc(d)); err != nil { return fmt.Errorf("Too many retries waiting for SSH to be available. Last error: %s", err) } return nil }
func (provisioner *DebianProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { provisioner.SwarmOptions = swarmOptions provisioner.AuthOptions = authOptions provisioner.EngineOptions = engineOptions swarmOptions.Env = engineOptions.Env storageDriver, err := decideStorageDriver(provisioner, "aufs", engineOptions.StorageDriver) if err != nil { return err } provisioner.EngineOptions.StorageDriver = storageDriver // HACK: since debian does not come with sudo by default we install log.Debug("installing sudo") if _, err := provisioner.SSHCommand("if ! type sudo; then apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y sudo; fi"); err != nil { return err } log.Debug("setting hostname") if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { return err } log.Debug("installing base packages") for _, pkg := range provisioner.Packages { if err := provisioner.Package(pkg, pkgaction.Install); err != nil { return err } } log.Debug("installing docker") if err := installDockerGeneric(provisioner, engineOptions.InstallURL); err != nil { return err } log.Debug("waiting for docker daemon") if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil { return err } provisioner.AuthOptions = setRemoteAuthOptions(provisioner) log.Debug("configuring auth") if err := ConfigureAuth(provisioner); err != nil { return err } log.Debug("configuring swarm") if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil { return err } // enable in systemd log.Debug("enabling docker in systemd") if err := provisioner.Service("docker", serviceaction.Enable); err != nil { return err } return nil }
func (d *Driver) waitForInstance() error { if err := mcnutils.WaitFor(d.instanceIsRunning); err != nil { return err } return nil }
// Provision configures a SUSE host: sets the hostname, installs base
// packages, refreshes and updates the OS, installs Docker, bounces the
// daemon once, opens the firewall port, then configures TLS auth and Swarm.
func (provisioner *SUSEProvisioner) Provision(swarmOptions swarm.SwarmOptions, authOptions auth.AuthOptions, engineOptions engine.EngineOptions) error {
	provisioner.SwarmOptions = swarmOptions
	provisioner.AuthOptions = authOptions
	provisioner.EngineOptions = engineOptions

	if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil {
		return err
	}

	for _, pkg := range provisioner.Packages {
		if err := provisioner.Package(pkg, pkgaction.Install); err != nil {
			return err
		}
	}

	// update OS -- this is needed for libdevicemapper and the docker install
	if _, err := provisioner.SSHCommand("sudo zypper ref"); err != nil {
		return err
	}
	if _, err := provisioner.SSHCommand("sudo zypper -n update"); err != nil {
		return err
	}

	if err := installDockerGeneric(provisioner, engineOptions.InstallURL); err != nil {
		return err
	}

	// Start docker once, wait until the daemon answers, then stop it again
	// before it gets reconfigured below.
	if _, err := provisioner.SSHCommand("sudo systemctl start docker"); err != nil {
		return err
	}
	if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil {
		return err
	}
	if _, err := provisioner.SSHCommand("sudo systemctl stop docker"); err != nil {
		return err
	}

	// open firewall port required by docker
	if _, err := provisioner.SSHCommand("sudo /sbin/yast2 firewall services add ipprotocol=tcp tcpport=2376 zone=EXT"); err != nil {
		return err
	}

	if err := makeDockerOptionsDir(provisioner); err != nil {
		return err
	}

	provisioner.AuthOptions = setRemoteAuthOptions(provisioner)

	if err := ConfigureAuth(provisioner); err != nil {
		return err
	}

	if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil {
		return err
	}

	return nil
}
// upgradeIso stops the machine, downloads the latest RancherOS ISO into the
// machine's directory, and boots the machine back up.
func (provisioner *RancherProvisioner) upgradeIso() error {
	// Largely copied from Boot2Docker provisioner, we should find a way to share this code
	log.Info("Stopping machine to do the upgrade...")

	if err := provisioner.Driver.Stop(); err != nil {
		return err
	}

	if err := mcnutils.WaitFor(drivers.MachineInState(provisioner.Driver, state.Stopped)); err != nil {
		return err
	}

	machineName := provisioner.GetDriver().GetMachineName()

	log.Infof("Upgrading machine %s...", machineName)

	// TODO: Ideally, we should not read from mcndirs directory at all.
	// The driver should be able to communicate how and where to place the
	// relevant files.
	b2dutils := mcnutils.NewB2dUtils(mcndirs.GetBaseDir())

	url, err := provisioner.getLatestISOURL()
	if err != nil {
		return err
	}

	if err := b2dutils.DownloadISOFromURL(url); err != nil {
		return err
	}

	// Copy the latest version of boot2docker ISO to the machine's directory
	if err := b2dutils.CopyIsoToMachineDir("", machineName); err != nil {
		return err
	}

	log.Infof("Starting machine back up...")

	if err := provisioner.Driver.Start(); err != nil {
		return err
	}

	return mcnutils.WaitFor(drivers.MachineInState(provisioner.Driver, state.Running))
}
// Create is the wrapper method which covers all of the boilerplate around
// actually creating, provisioning, and persisting an instance in the store.
func create(store persist.Store, h *host.Host, callback func(*host.Host)) error {
	if err := cert.BootstrapCertificates(h.HostOptions.AuthOptions); err != nil {
		return fmt.Errorf("Error generating certificates: %s", err)
	}

	log.Info("Running pre-create checks...")

	if err := h.Driver.PreCreateCheck(); err != nil {
		return fmt.Errorf("Error with pre-create check: %s", err)
	}

	// Persist before creation so a failed create still leaves a store record
	// that can be inspected or removed.
	if err := store.Save(h); err != nil {
		return fmt.Errorf("Error saving host to store before attempting creation: %s", err)
	}

	log.Info("Creating machine...")

	if err := h.Driver.Create(); err != nil {
		return fmt.Errorf("Error in driver during machine creation: %s", err)
	}

	if err := store.Save(h); err != nil {
		return fmt.Errorf("Error saving host to store after attempting creation: %s", err)
	}

	// TODO: Not really a fan of just checking "none" here.
	if h.Driver.DriverName() != "none" {
		log.Info("Waiting for machine to be running, this may take a few minutes...")
		if err := mcnutils.WaitFor(drivers.MachineInState(h.Driver, state.Running)); err != nil {
			return fmt.Errorf("Error waiting for machine to be running: %s", err)
		}

		log.Info("Machine is running, waiting for SSH to be available...")
		if err := drivers.WaitForSSH(h.Driver); err != nil {
			return fmt.Errorf("Error waiting for SSH: %s", err)
		}

		log.Info("Detecting operating system of created instance...")
		provisioner, err := provision.DetectProvisioner(h.Driver)
		if err != nil {
			return fmt.Errorf("Error detecting OS: %s", err)
		}

		// Give the caller a chance to mutate the host before provisioning.
		callback(h)

		log.Info("Provisioning created instance...")
		if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil {
			return fmt.Errorf("Error running provisioning: %s", err)
		}
	}

	log.Debug("Reticulating splines...")

	return nil
}
func (provisioner *RedHatProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { provisioner.SwarmOptions = swarmOptions provisioner.AuthOptions = authOptions provisioner.EngineOptions = engineOptions swarmOptions.Env = engineOptions.Env // set default storage driver for redhat storageDriver, err := decideStorageDriver(provisioner, "devicemapper", engineOptions.StorageDriver) if err != nil { return err } provisioner.EngineOptions.StorageDriver = storageDriver if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { return err } for _, pkg := range provisioner.Packages { log.Debugf("installing base package: name=%s", pkg) if err := provisioner.Package(pkg, pkgaction.Install); err != nil { return err } } // update OS -- this is needed for libdevicemapper and the docker install if _, err := provisioner.SSHCommand("sudo yum -y update"); err != nil { return err } // install docker if err := installDocker(provisioner); err != nil { return err } if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil { return err } if err := makeDockerOptionsDir(provisioner); err != nil { return err } provisioner.AuthOptions = setRemoteAuthOptions(provisioner) if err := ConfigureAuth(provisioner); err != nil { return err } if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil { return err } return nil }
func (provisioner *UbuntuSystemdProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { provisioner.SwarmOptions = swarmOptions provisioner.AuthOptions = authOptions provisioner.EngineOptions = engineOptions swarmOptions.Env = engineOptions.Env storageDriver, err := decideStorageDriver(provisioner, "aufs", engineOptions.StorageDriver) if err != nil { return err } provisioner.EngineOptions.StorageDriver = storageDriver log.Debug("setting hostname") if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { return err } log.Debug("installing base packages") for _, pkg := range provisioner.Packages { if err := provisioner.Package(pkg, pkgaction.Install); err != nil { return err } } log.Info("Installing Docker...") if err := installDockerGeneric(provisioner, engineOptions.InstallURL); err != nil { return err } log.Debug("waiting for docker daemon") if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil { return err } provisioner.AuthOptions = setRemoteAuthOptions(provisioner) log.Debug("configuring auth") if err := ConfigureAuth(provisioner); err != nil { return err } log.Debug("configuring swarm") if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil { return err } // enable in systemd log.Debug("enabling docker in systemd") if err := provisioner.Service("docker", serviceaction.Enable); err != nil { return err } return nil }
func (h *Host) runActionForState(action func() error, desiredState state.State) error { if drivers.MachineInState(h.Driver, desiredState)() { return fmt.Errorf("Machine %q is already %s.", h.Name, strings.ToLower(desiredState.String())) } if err := action(); err != nil { return err } return mcnutils.WaitFor(drivers.MachineInState(h.Driver, desiredState)) }
func (h *Host) Restart() error { if drivers.MachineInState(h.Driver, state.Running)() { if err := h.Stop(); err != nil { return err } if err := mcnutils.WaitFor(drivers.MachineInState(h.Driver, state.Stopped)); err != nil { return err } } if err := h.Start(); err != nil { return err } if err := mcnutils.WaitFor(drivers.MachineInState(h.Driver, state.Running)); err != nil { return err } return nil }
func (client NativeClient) session(command string) (*ssh.Session, error) { if err := mcnutils.WaitFor(client.dialSuccess); err != nil { return nil, fmt.Errorf("Error attempting SSH client dial: %s", err) } conn, err := ssh.Dial("tcp", fmt.Sprintf("%s:%d", client.Hostname, client.Port), &client.Config) if err != nil { return nil, fmt.Errorf("Mysterious error dialing TCP for SSH (we already succeeded at least once) : %s", err) } return conn.NewSession() }
func (client *NativeClient) session(command string) (*ssh.Client, *ssh.Session, error) { if err := mcnutils.WaitFor(client.dialSuccess); err != nil { return nil, nil, fmt.Errorf("Error attempting SSH client dial: %s", err) } conn, err := ssh.Dial("tcp", net.JoinHostPort(client.Hostname, strconv.Itoa(client.Port)), &client.Config) if err != nil { return nil, nil, fmt.Errorf("Mysterious error dialing TCP for SSH (we already succeeded at least once) : %s", err) } session, err := conn.NewSession() return conn, session, err }
// upgradeIso stops the machine, downloads the latest default boot2docker ISO
// into the machine's directory, and boots the machine back up.
func (provisioner *Boot2DockerProvisioner) upgradeIso() error {
	log.Info("Stopping machine to do the upgrade...")
	if err := provisioner.Driver.Stop(); err != nil {
		return err
	}
	if err := mcnutils.WaitFor(drivers.MachineInState(provisioner.Driver, state.Stopped)); err != nil {
		return err
	}

	machineName := provisioner.GetDriver().GetMachineName()

	log.Infof("Upgrading machine %s...", machineName)

	// TODO: Ideally, we should not read from mcndirs directory at all.
	// The driver should be able to communicate how and where to place the
	// relevant files.
	b2dutils := mcnutils.NewB2dUtils("", "", mcndirs.GetBaseDir())

	// Usually we call this implicitly, but call it here explicitly to get
	// the latest boot2docker ISO.
	if err := b2dutils.DownloadLatestBoot2Docker(); err != nil {
		return err
	}

	// Copy the latest version of boot2docker ISO to the machine's directory
	if err := b2dutils.CopyIsoToMachineDir("", machineName); err != nil {
		return err
	}

	log.Infof("Starting machine back up...")
	if err := provisioner.Driver.Start(); err != nil {
		return err
	}

	return mcnutils.WaitFor(drivers.MachineInState(provisioner.Driver, state.Running))
}
func (d *Driver) configureSecurityGroup() error { var groupId int groupId, err := d.getSecurityGroup(d.SecurityGroupName) if err != nil { log.Debugf("get security group error:%s", err) } log.Debugf("groupId:%d", groupId) if groupId == 0 { log.Infof("security group is not found, create a new one") rule := []string{"TCP|22|0.0.0.0/0|ACCEPT|50", "TCP|3389|0.0.0.0/0|ACCEPT|50", "TCP|2376|0.0.0.0/0|ACCEPT|50", } if d.SwarmMaster && validPort(swarmPort) { swarmRule := fmt.Sprintf("TCP|%d|0.0.0.0/0|ACCEPT|50", swarmPort) rule = append(rule, swarmRule) } securityGroupParams := unet.CreateSecurityGroupParams{ Region: d.Region, GroupName: "docker-machine", Description: "docker machine to open 2379 and 22 port of tcp", Rule: rule, } _, err := d.getUNetService().CreateSecurityGroup(&securityGroupParams) if err != nil { return fmt.Errorf("create security group failed:%s", err) } log.Debug("waiting for security group to become avaliable") if err := mcnutils.WaitFor(d.securityGroupAvailableFunc(d.SecurityGroupName)); err != nil { return err } groupId, err = d.getSecurityGroup(d.SecurityGroupName) } d.SecurityGroupId = groupId grantSecurityGroupParams := unet.GrantSecurityGroupParams{ Region: d.Region, GroupId: groupId, ResourceType: "uhost", ResourceId: d.UhostID, } log.Debugf("grant security group(%d) to uhost(%s)", groupId, d.UhostID) _, err = d.getUNetService().GrantSecurityGroup(&grantSecurityGroupParams) if err != nil { return fmt.Errorf("grant security group failed:%s", err) } return nil }
func (h *Host) runActionForState(action func() error, desiredState state.State) error { if drivers.MachineInState(h.Driver, desiredState)() { return mcnerror.ErrHostAlreadyInState{ Name: h.Name, State: desiredState, } } if err := action(); err != nil { return err } return mcnutils.WaitFor(drivers.MachineInState(h.Driver, desiredState)) }
func (h *Host) Restart() error { log.Infof("Restarting %q...", h.Name) if drivers.MachineInState(h.Driver, state.Stopped)() { return h.Start() } if drivers.MachineInState(h.Driver, state.Running)() { if err := h.Driver.Restart(); err != nil { return err } return mcnutils.WaitFor(drivers.MachineInState(h.Driver, state.Running)) } return nil }
func (d *Driver) configureSecurityGroup(groupName string) error { log.Debugf("configuring security group in %s", d.VpcId) var securityGroup *amz.SecurityGroup groups, err := d.getClient().GetSecurityGroups() if err != nil { return err } for _, grp := range groups { if grp.GroupName == groupName { log.Debugf("found existing security group (%s) in %s", groupName, d.VpcId) securityGroup = &grp break } } // if not found, create if securityGroup == nil { log.Debugf("creating security group (%s) in %s", groupName, d.VpcId) group, err := d.getClient().CreateSecurityGroup(groupName, "Docker Machine", d.VpcId) if err != nil { return err } securityGroup = group // wait until created (dat eventual consistency) log.Debugf("waiting for group (%s) to become available", group.GroupId) if err := mcnutils.WaitFor(d.securityGroupAvailableFunc(group.GroupId)); err != nil { return err } } d.SecurityGroupId = securityGroup.GroupId perms := d.configureSecurityGroupPermissions(securityGroup) if len(perms) != 0 { log.Debugf("authorizing group %s with permissions: %v", securityGroup.GroupName, perms) if err := d.getClient().AuthorizeSecurityGroup(d.SecurityGroupId, perms); err != nil { return err } } return nil }
func (provisioner *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { provisioner.SwarmOptions = swarmOptions provisioner.AuthOptions = authOptions provisioner.EngineOptions = engineOptions swarmOptions.Env = engineOptions.Env storageDriver, err := decideStorageDriver(provisioner, "aufs", engineOptions.StorageDriver) if err != nil { return err } provisioner.EngineOptions.StorageDriver = storageDriver if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { return err } for _, pkg := range provisioner.Packages { if err := provisioner.Package(pkg, pkgaction.Install); err != nil { return err } } log.Info("Installing Docker...") if err := installDockerGeneric(provisioner, engineOptions.InstallURL); err != nil { return err } if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil { return err } if err := makeDockerOptionsDir(provisioner); err != nil { return err } provisioner.AuthOptions = setRemoteAuthOptions(provisioner) if err := ConfigureAuth(provisioner); err != nil { return err } if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil { return err } return nil }
func (d *Driver) Create() error { svc := cloudformation.New(session.New()) params := &cloudformation.CreateStackInput{ StackName: aws.String(d.MachineName), //TemplateURL: aws.String("https://s3.amazonaws.com/com.tamr.fe.users/jellin/docker.json"), TemplateURL: aws.String(d.CloudFormationURL), Parameters: []*cloudformation.Parameter{ { // Required ParameterKey: aws.String("KeyName"), ParameterValue: aws.String(d.KeyPairName), }, // More values... }, } resp, err := svc.CreateStack(params) if err != nil { fmt.Println("Houston we have a problem") // Print the error, cast err to awserr.Error to get the Code and // Message from an error. fmt.Println(err.Error()) } else { // Pretty-print the response data. fmt.Println(resp) stackId := *resp.StackId fmt.Println(stackId) mcnutils.WaitFor(stackAvailable) } d.getInstanceInfo() log.Debugf("created instance ID %s, IP address %s, Private IP address %s", d.InstanceId, d.IPAddress, d.PrivateIPAddress, ) return nil }
// performCreate drives the machine through driver creation, persists it,
// waits for it to be reachable, provisions it, and finally verifies the
// Docker connection.
func (api *Client) performCreate(h *host.Host) error {
	if err := h.Driver.Create(); err != nil {
		return fmt.Errorf("Error in driver during machine creation: %s", err)
	}

	if err := api.Save(h); err != nil {
		return fmt.Errorf("Error saving host to store after attempting creation: %s", err)
	}

	// TODO: Not really a fan of just checking "none" here.
	if h.Driver.DriverName() != "none" {
		log.Info("Waiting for machine to be running, this may take a few minutes...")
		if err := mcnutils.WaitFor(drivers.MachineInState(h.Driver, state.Running)); err != nil {
			return fmt.Errorf("Error waiting for machine to be running: %s", err)
		}

		log.Info("Machine is running, waiting for SSH to be available...")
		if err := drivers.WaitForSSH(h.Driver); err != nil {
			return fmt.Errorf("Error waiting for SSH: %s", err)
		}

		log.Info("Detecting operating system of created instance...")
		provisioner, err := provision.DetectProvisioner(h.Driver)
		if err != nil {
			return fmt.Errorf("Error detecting OS: %s", err)
		}

		log.Infof("Provisioning with %s...", provisioner.String())
		if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil {
			return fmt.Errorf("Error running provisioning: %s", err)
		}

		// We should check the connection to docker here
		log.Info("Checking connection to Docker...")
		if _, _, err = check.DefaultConnChecker.Check(h, false); err != nil {
			return fmt.Errorf("Error checking the host: %s", err)
		}

		log.Info("Docker is up and running!")
	}
	return nil
}
func waitForLockAptGetUpdate(ssh SSHCommander) error { var sshErr error err := mcnutils.WaitFor(func() bool { _, sshErr = ssh.SSHCommand("sudo apt-get update") if sshErr != nil { if strings.Contains(sshErr.Error(), "Could not get lock") { sshErr = nil return false } return true } return true }) if sshErr != nil { return fmt.Errorf("Error running apt-get update: %s", sshErr) } if err != nil { return fmt.Errorf("Failed to obtain apt-get update lock: %s", err) } return nil }
// configureSecurityGroup looks up groupName in the machine's VPC, creating
// it (and waiting for it to become visible) if missing, then authorizes any
// ingress permissions the group still lacks.
func (d *Driver) configureSecurityGroup(groupName string) error {
	log.Debugf("configuring security group in %s", d.VpcId)

	var group *ec2.SecurityGroup

	// Filter by both name and VPC so identically-named groups in other VPCs
	// are not matched.
	filters := []*ec2.Filter{
		{
			Name:   aws.String("group-name"),
			Values: []*string{&groupName},
		},
		{
			Name:   aws.String("vpc-id"),
			Values: []*string{&d.VpcId},
		},
	}
	groups, err := d.getClient().DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{
		Filters: filters,
	})
	if err != nil {
		return err
	}

	if len(groups.SecurityGroups) > 0 {
		log.Debugf("found existing security group (%s) in %s", groupName, d.VpcId)
		group = groups.SecurityGroups[0]
	}

	// if not found, create
	if group == nil {
		log.Debugf("creating security group (%s) in %s", groupName, d.VpcId)
		groupResp, err := d.getClient().CreateSecurityGroup(&ec2.CreateSecurityGroupInput{
			GroupName:   &groupName,
			Description: aws.String("Docker Machine"),
			VpcId:       &d.VpcId,
		})
		if err != nil {
			return err
		}
		// Manually translate into the security group construct
		group = &ec2.SecurityGroup{
			GroupId:   groupResp.GroupId,
			VpcId:     aws.String(d.VpcId),
			GroupName: aws.String(groupName),
		}
		// wait until created (dat eventual consistency)
		log.Debugf("waiting for group (%s) to become available", *group.GroupId)
		if err := mcnutils.WaitFor(d.securityGroupAvailableFunc(*group.GroupId)); err != nil {
			return err
		}
	}

	d.SecurityGroupId = *group.GroupId

	perms := d.configureSecurityGroupPermissions(group)

	if len(perms) != 0 {
		log.Debugf("authorizing group %s with permissions: %v", groupName, perms)
		_, err := d.getClient().AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
			GroupId:       group.GroupId,
			IpPermissions: perms,
		})
		if err != nil {
			return err
		}
	}

	return nil
}
func WaitForSSH(d Driver) error { if err := mcnutils.WaitFor(sshAvailableFunc(d)); err != nil { return fmt.Errorf("Too many retries waiting for SSH to be available. Last error: %s", err) } return nil }
func (d *Driver) Create() error { if err := d.checkPrereqs(); err != nil { return err } log.Infof("Launching instance...") if err := d.createKeyPair(); err != nil { return fmt.Errorf("unable to create key pair: %s", err) } if err := d.configureSecurityGroup(d.SecurityGroupName); err != nil { return err } bdm := &ec2.BlockDeviceMapping{ DeviceName: aws.String(d.DeviceName), Ebs: &ec2.EbsBlockDevice{ VolumeSize: aws.Int64(d.RootSize), VolumeType: aws.String(d.VolumeType), DeleteOnTermination: aws.Bool(true), }, } netSpecs := []*ec2.InstanceNetworkInterfaceSpecification{{ DeviceIndex: aws.Int64(0), // eth0 Groups: []*string{&d.SecurityGroupId}, SubnetId: &d.SubnetId, AssociatePublicIpAddress: aws.Bool(!d.PrivateIPOnly), }} regionZone := d.Region + d.Zone log.Debugf("launching instance in subnet %s", d.SubnetId) var instance *ec2.Instance if d.RequestSpotInstance { spotInstanceRequest, err := d.getClient().RequestSpotInstances(&ec2.RequestSpotInstancesInput{ LaunchSpecification: &ec2.RequestSpotLaunchSpecification{ ImageId: &d.AMI, Placement: &ec2.SpotPlacement{ AvailabilityZone: ®ionZone, }, KeyName: &d.KeyName, InstanceType: &d.InstanceType, NetworkInterfaces: netSpecs, Monitoring: &ec2.RunInstancesMonitoringEnabled{Enabled: aws.Bool(d.Monitoring)}, IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ Name: &d.IamInstanceProfile, }, EbsOptimized: &d.UseEbsOptimizedInstance, BlockDeviceMappings: []*ec2.BlockDeviceMapping{bdm}, }, InstanceCount: aws.Int64(1), SpotPrice: &d.SpotPrice, }) if err != nil { return fmt.Errorf("Error request spot instance: %s", err) } log.Info("Waiting for spot instance...") err = d.getClient().WaitUntilSpotInstanceRequestFulfilled(&ec2.DescribeSpotInstanceRequestsInput{ SpotInstanceRequestIds: []*string{spotInstanceRequest.SpotInstanceRequests[0].SpotInstanceRequestId}, }) if err != nil { return fmt.Errorf("Error fulfilling spot request: %v", err) } log.Info("Created spot instance request %v", 
*spotInstanceRequest.SpotInstanceRequests[0].SpotInstanceRequestId) // resolve instance id for i := 0; i < 3; i++ { // Even though the waiter succeeded, eventual consistency means we could // get a describe output that does not include this information. Try a // few times just in case var resolvedSpotInstance *ec2.DescribeSpotInstanceRequestsOutput resolvedSpotInstance, err = d.getClient().DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{ SpotInstanceRequestIds: []*string{spotInstanceRequest.SpotInstanceRequests[0].SpotInstanceRequestId}, }) if err != nil { // Unexpected; no need to retry return fmt.Errorf("Error describing previously made spot instance request: %v", err) } maybeInstanceId := resolvedSpotInstance.SpotInstanceRequests[0].InstanceId if maybeInstanceId != nil { var instances *ec2.DescribeInstancesOutput instances, err = d.getClient().DescribeInstances(&ec2.DescribeInstancesInput{ InstanceIds: []*string{maybeInstanceId}, }) if err != nil { // Retry if we get an id from spot instance but EC2 doesn't recognize it yet; see above, eventual consistency possible continue } instance = instances.Reservations[0].Instances[0] err = nil break } time.Sleep(5 * time.Second) } if err != nil { return fmt.Errorf("Error resolving spot instance to real instance: %v", err) } } else { inst, err := d.getClient().RunInstances(&ec2.RunInstancesInput{ ImageId: &d.AMI, MinCount: aws.Int64(1), MaxCount: aws.Int64(1), Placement: &ec2.Placement{ AvailabilityZone: ®ionZone, }, KeyName: &d.KeyName, InstanceType: &d.InstanceType, NetworkInterfaces: netSpecs, Monitoring: &ec2.RunInstancesMonitoringEnabled{Enabled: aws.Bool(d.Monitoring)}, IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ Name: &d.IamInstanceProfile, }, EbsOptimized: &d.UseEbsOptimizedInstance, BlockDeviceMappings: []*ec2.BlockDeviceMapping{bdm}, }) if err != nil { return fmt.Errorf("Error launching instance: %s", err) } instance = inst.Instances[0] } d.InstanceId = *instance.InstanceId 
log.Debug("waiting for ip address to become available") if err := mcnutils.WaitFor(d.instanceIpAvailable); err != nil { return err } if instance.PrivateIpAddress != nil { d.PrivateIPAddress = *instance.PrivateIpAddress } d.waitForInstance() log.Debugf("created instance ID %s, IP address %s, Private IP address %s", d.InstanceId, d.IPAddress, d.PrivateIPAddress, ) log.Debug("Settings tags for instance") err := d.configureTags(d.Tags) if err != nil { return fmt.Errorf("Unable to tag instance %s: %s", d.InstanceId, err) } return nil }
// configureSecurityGroup finds the named security group in the given VPC
// (paging through API results), creating it if missing and waiting for a new
// group to become visible, then authorizes any permissions it still needs.
func (d *Driver) configureSecurityGroup(vpcId string, groupName string) error {
	log.Debugf("%s | Configuring security group in %s", d.MachineName, d.VpcId)

	var securityGroup *ecs.DescribeSecurityGroupAttributeResponse

	args := ecs.DescribeSecurityGroupsArgs{
		RegionId: d.Region,
		VpcId:    vpcId,
	}

	// Scan all result pages until the group is found or pages run out.
	for {
		groups, pagination, err := d.getClient().DescribeSecurityGroups(&args)
		if err != nil {
			return err
		}
		//log.Debugf("DescribeSecurityGroups: %++v\n", groups)
		for _, grp := range groups {
			if grp.SecurityGroupName == groupName && grp.VpcId == d.VpcId {
				log.Debugf("%s | Found existing security group (%s) in %s", d.MachineName, groupName, d.VpcId)
				// NOTE(review): the lookup error is deliberately discarded;
				// on failure securityGroup stays nil and the code falls
				// through to creating a new group — confirm this is intended.
				securityGroup, _ = d.getSecurityGroup(grp.SecurityGroupId)
				break
			}
		}

		if securityGroup != nil {
			break
		}

		nextPage := pagination.NextPage()
		if nextPage == nil {
			break
		}
		args.Pagination = *nextPage
	}

	// if not found, create
	if securityGroup == nil {
		log.Debugf("%s | Creating security group (%s) in %s", d.MachineName, groupName, d.VpcId)
		creationArgs := ecs.CreateSecurityGroupArgs{
			RegionId:          d.Region,
			SecurityGroupName: groupName,
			Description:       "Docker Machine",
			VpcId:             vpcId,
			ClientToken:       d.getClient().GenerateClientToken(),
		}

		groupId, err := d.getClient().CreateSecurityGroup(&creationArgs)
		if err != nil {
			return err
		}

		// wait until created (dat eventual consistency)
		log.Debugf("%s | Waiting for group (%s) to become available", d.MachineName, groupId)
		if err := mcnutils.WaitFor(d.securityGroupAvailableFunc(groupId)); err != nil {
			return err
		}
		securityGroup, err = d.getSecurityGroup(groupId)
		if err != nil {
			return err
		}
	}

	d.SecurityGroupId = securityGroup.SecurityGroupId

	perms := d.configureSecurityGroupPermissions(securityGroup)
	for _, permission := range perms {
		log.Debugf("%s | Authorizing group %s with permission: %v", d.MachineName, securityGroup.SecurityGroupName, permission)
		args := permission.createAuthorizeSecurityGroupArgs(d.Region, d.SecurityGroupId)
		if err := d.getClient().AuthorizeSecurityGroup(args); err != nil {
			return err
		}
	}

	return nil
}
func (d *Driver) configureSecurityGroups(groupNames []string) error { if len(groupNames) == 0 { log.Debugf("no security groups to configure in %s", d.VpcId) return nil } log.Debugf("configuring security groups in %s", d.VpcId) filters := []*ec2.Filter{ { Name: aws.String("group-name"), Values: makePointerSlice(groupNames), }, { Name: aws.String("vpc-id"), Values: []*string{&d.VpcId}, }, } groups, err := d.getClient().DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{ Filters: filters, }) if err != nil { return err } var groupsByName = make(map[string]*ec2.SecurityGroup) for _, securityGroup := range groups.SecurityGroups { groupsByName[*securityGroup.GroupName] = securityGroup } for _, groupName := range groupNames { var group *ec2.SecurityGroup securityGroup, ok := groupsByName[groupName] if ok { log.Debugf("found existing security group (%s) in %s", groupName, d.VpcId) group = securityGroup } else { log.Debugf("creating security group (%s) in %s", groupName, d.VpcId) groupResp, err := d.getClient().CreateSecurityGroup(&ec2.CreateSecurityGroupInput{ GroupName: aws.String(groupName), Description: aws.String("Docker Machine"), VpcId: aws.String(d.VpcId), }) if err != nil { return err } // Manually translate into the security group construct group = &ec2.SecurityGroup{ GroupId: groupResp.GroupId, VpcId: aws.String(d.VpcId), GroupName: aws.String(groupName), } // wait until created (dat eventual consistency) log.Debugf("waiting for group (%s) to become available", *group.GroupId) if err := mcnutils.WaitFor(d.securityGroupAvailableFunc(*group.GroupId)); err != nil { return err } } d.SecurityGroupIds = append(d.SecurityGroupIds, *group.GroupId) perms, err := d.configureSecurityGroupPermissions(group) if err != nil { return err } if len(perms) != 0 { log.Debugf("authorizing group %s with permissions: %v", groupNames, perms) _, err := d.getClient().AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{ GroupId: group.GroupId, IpPermissions: perms, }) 
if err != nil { return err } } } return nil }
func (provisioner *ArchProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { provisioner.SwarmOptions = swarmOptions provisioner.AuthOptions = authOptions provisioner.EngineOptions = engineOptions swarmOptions.Env = engineOptions.Env if provisioner.EngineOptions.StorageDriver == "" { provisioner.EngineOptions.StorageDriver = "overlay" } // HACK: since Arch does not come with sudo by default we install log.Debug("Installing sudo") if _, err := provisioner.SSHCommand("if ! type sudo; then pacman -Sy --noconfirm --noprogressbar sudo; fi"); err != nil { return err } log.Debug("Setting hostname") if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { return err } log.Debug("Installing base packages") for _, pkg := range provisioner.Packages { if err := provisioner.Package(pkg, pkgaction.Install); err != nil { return err } } log.Debug("Installing docker") if err := provisioner.Package("docker", pkgaction.Install); err != nil { return err } log.Debug("Starting systemd docker service") if err := provisioner.Service("docker", serviceaction.Start); err != nil { return err } log.Debug("Waiting for docker daemon") if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil { return err } provisioner.AuthOptions = setRemoteAuthOptions(provisioner) log.Debug("Configuring auth") if err := ConfigureAuth(provisioner); err != nil { return err } log.Debug("Configuring swarm") if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil { return err } // enable in systemd log.Debug("Enabling docker in systemd") if err := provisioner.Service("docker", serviceaction.Enable); err != nil { return err } return nil }
// Provision sets up Docker on an openSUSE/SLE machine: picks a storage
// driver based on the /var/lib filesystem, sets the hostname, registers the
// SLE containers module when needed, installs packages and docker, creates
// the docker-* binary symlinks, opens the firewall port, starts the docker
// service, waits for the daemon, configures TLS auth and swarm, and enables
// the docker unit in systemd.
func (provisioner *SUSEProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error {
	provisioner.SwarmOptions = swarmOptions
	provisioner.AuthOptions = authOptions
	provisioner.EngineOptions = engineOptions
	swarmOptions.Env = engineOptions.Env

	// Figure out the filesystem used by /var/lib: btrfs hosts prefer the
	// btrfs graph driver, everything else defaults to overlay.
	fs, err := provisioner.SSHCommand("stat -f -c %T /var/lib/")
	if err != nil {
		return err
	}
	graphDriver := "overlay"
	if strings.Contains(fs, "btrfs") {
		graphDriver = "btrfs"
	}

	// decideStorageDriver honors an explicitly requested driver over the
	// detected default.
	storageDriver, err := decideStorageDriver(provisioner, graphDriver, engineOptions.StorageDriver)
	if err != nil {
		return err
	}
	provisioner.EngineOptions.StorageDriver = storageDriver

	log.Debug("Setting hostname")
	if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil {
		return err
	}

	if strings.ToLower(provisioner.OsReleaseInfo.ID) != "opensuse" {
		// This is a SLE machine, enable the containers module to have access
		// to the docker packages
		if _, err := provisioner.SSHCommand("sudo -E SUSEConnect -p sle-module-containers/12/x86_64 -r ''"); err != nil {
			return fmt.Errorf(
				"Error while adding the 'containers' module, make sure this machine is registered either against SUSE Customer Center (SCC) or to a local Subscription Management Tool (SMT): %v",
				err)
		}
	}

	log.Debug("Installing base packages")
	for _, pkg := range provisioner.Packages {
		if err := provisioner.Package(pkg, pkgaction.Install); err != nil {
			return err
		}
	}

	log.Debug("Installing docker")
	if err := provisioner.Package("docker", pkgaction.Install); err != nil {
		return err
	}

	// create symlinks for containerd, containerd-shim and runc.
	// We have to do that because machine overrides the openSUSE systemd
	// unit of docker
	// NOTE(review): plain "ln -s" fails if the target link already exists, so
	// re-provisioning an existing machine may error here — confirm whether
	// "ln -sf" was intended.
	if _, err := provisioner.SSHCommand("sudo -E ln -s /usr/sbin/runc /usr/sbin/docker-runc"); err != nil {
		return err
	}
	if _, err := provisioner.SSHCommand("sudo -E ln -s /usr/sbin/containerd /usr/sbin/docker-containerd"); err != nil {
		return err
	}
	if _, err := provisioner.SSHCommand("sudo -E ln -s /usr/sbin/containerd-shim /usr/sbin/docker-containerd-shim"); err != nil {
		return err
	}

	// Is yast2 firewall installed? (rpm -q exits non-zero when the package
	// is absent, so a nil error means it is installed)
	if _, installed := provisioner.SSHCommand("rpm -q yast2-firewall"); installed == nil {
		// Open the firewall port required by docker
		if _, err := provisioner.SSHCommand("sudo -E /sbin/yast2 firewall services add ipprotocol=tcp tcpport=2376 zone=EXT"); err != nil {
			return err
		}
	}

	log.Debug("Starting systemd docker service")
	if err := provisioner.Service("docker", serviceaction.Start); err != nil {
		return err
	}

	log.Debug("Waiting for docker daemon")
	if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil {
		return err
	}

	provisioner.AuthOptions = setRemoteAuthOptions(provisioner)

	log.Debug("Configuring auth")
	if err := ConfigureAuth(provisioner); err != nil {
		return err
	}

	log.Debug("Configuring swarm")
	if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil {
		return err
	}

	// enable in systemd
	log.Debug("Enabling docker in systemd")
	if err := provisioner.Service("docker", serviceaction.Enable); err != nil {
		return err
	}

	return nil
}
func (d *Driver) Create() error { if err := d.checkPrereqs(); err != nil { return err } log.Infof("Launching instance...") if err := d.createKeyPair(); err != nil { return fmt.Errorf("unable to create key pair: %s", err) } if err := d.configureSecurityGroup(d.SecurityGroupName); err != nil { return err } bdm := &amz.BlockDeviceMapping{ DeviceName: "/dev/sda1", VolumeSize: d.RootSize, DeleteOnTermination: true, VolumeType: "gp2", } log.Debugf("launching instance in subnet %s", d.SubnetId) var instance amz.EC2Instance if d.RequestSpotInstance { spotInstanceRequestId, err := d.getClient().RequestSpotInstances(d.AMI, d.InstanceType, d.Zone, 1, d.SecurityGroupId, d.KeyName, d.SubnetId, bdm, d.IamInstanceProfile, d.SpotPrice, d.Monitoring) if err != nil { return fmt.Errorf("Error request spot instance: %s", err) } var instanceId string var spotInstanceRequestStatus string log.Info("Waiting for spot instance...") // check until fulfilled for instanceId == "" { time.Sleep(time.Second * 5) spotInstanceRequestStatus, instanceId, err = d.getClient().DescribeSpotInstanceRequests(spotInstanceRequestId) if err != nil { return fmt.Errorf("Error describe spot instance request: %s", err) } log.Debugf("spot instance request status: %s", spotInstanceRequestStatus) } instance, err = d.getClient().GetInstance(instanceId) if err != nil { return fmt.Errorf("Error get instance: %s", err) } } else { inst, err := d.getClient().RunInstance(d.AMI, d.InstanceType, d.Zone, 1, 1, d.SecurityGroupId, d.KeyName, d.SubnetId, bdm, d.IamInstanceProfile, d.PrivateIPOnly, d.Monitoring) if err != nil { return fmt.Errorf("Error launching instance: %s", err) } instance = inst } d.InstanceId = instance.InstanceId log.Debug("waiting for ip address to become available") if err := mcnutils.WaitFor(d.instanceIpAvailable); err != nil { return err } if len(instance.NetworkInterfaceSet) > 0 { d.PrivateIPAddress = instance.NetworkInterfaceSet[0].PrivateIpAddress } d.waitForInstance() log.Debugf("created instance ID 
%s, IP address %s, Private IP address %s", d.InstanceId, d.IPAddress, d.PrivateIPAddress, ) log.Debug("Settings tags for instance") tags := map[string]string{ "Name": d.MachineName, } if err := d.getClient().CreateTags(d.InstanceId, tags); err != nil { return err } return nil }