func (d *Driver) waitForSetupTransactions() { log.Infof("Waiting for host setup transactions to complete") // sometimes we'll hit a case where there's no active transaction, but if // we check again in a few seconds, it moves to the next transaction. We // don't want to get false-positives, so we check a few times in a row to make sure! noActiveCount, maxNoActiveCount := 0, 3 for { t, err := d.GetActiveTransaction() if err != nil { noActiveCount = 0 log.Debugf("Failed to GetActiveTransaction - %+v", err) continue } if t == "" { if noActiveCount == maxNoActiveCount { break } noActiveCount++ } else { noActiveCount = 0 log.Debugf("Still waiting - active transaction is %s...", t) } time.Sleep(2 * time.Second) } }
func sshAvailableFunc(d Driver) func() bool { return func() bool { log.Debug("Getting to WaitForSSH function...") hostname, err := d.GetSSHHostname() if err != nil { log.Debugf("Error getting IP address waiting for SSH: %s", err) return false } port, err := d.GetSSHPort() if err != nil { log.Debugf("Error getting SSH port: %s", err) return false } if err := ssh.WaitForTCP(fmt.Sprintf("%s:%d", hostname, port)); err != nil { log.Debugf("Error waiting for TCP waiting for SSH: %s", err) return false } if _, err := RunSSHCommandFromDriver(d, "exit 0"); err != nil { log.Debugf("Error getting ssh command 'exit 0' : %s", err) return false } return true } }
func (d *Driver) addDockerEndpoints(vmConfig *vmClient.Role) error { configSets := vmConfig.ConfigurationSets.ConfigurationSet if len(configSets) == 0 { return errors.New("no configuration set") } for i := 0; i < len(configSets); i++ { if configSets[i].ConfigurationSetType != "NetworkConfiguration" { continue } ep := vmClient.InputEndpoint{ Name: "docker", Protocol: "tcp", Port: d.DockerPort, LocalPort: d.DockerPort, } if d.SwarmMaster { swarm_ep := vmClient.InputEndpoint{ Name: "docker swarm", Protocol: "tcp", Port: d.DockerSwarmMasterPort, LocalPort: d.DockerSwarmMasterPort, } configSets[i].InputEndpoints.InputEndpoint = append(configSets[i].InputEndpoints.InputEndpoint, swarm_ep) log.Debugf("added Docker swarm master endpoint (port %d) to configuration", d.DockerSwarmMasterPort) } configSets[i].InputEndpoints.InputEndpoint = append(configSets[i].InputEndpoints.InputEndpoint, ep) log.Debugf("added Docker endpoint (port %d) to configuration", d.DockerPort) } return nil }
func vbmOutErr(args ...string) (string, string, error) { cmd := exec.Command(vboxManageCmd, args...) log.Debugf("executing: %v %v", vboxManageCmd, strings.Join(args, " ")) var stdout bytes.Buffer var stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr err := cmd.Run() stderrStr := stderr.String() if len(args) > 0 { log.Debugf("STDOUT:\n{\n%v}", stdout.String()) log.Debugf("STDERR:\n{\n%v}", stderrStr) } if err != nil { if ee, ok := err.(*exec.Error); ok && ee == exec.ErrNotFound { err = ErrVBMNotFound } } else { // VBoxManage will sometimes not set the return code, but has a fatal error // such as VBoxManage.exe: error: VT-x is not available. (VERR_VMX_NO_VMX) if strings.Contains(stderrStr, "error:") { err = fmt.Errorf("%v %v failed: %v", vboxManageCmd, strings.Join(args, " "), stderrStr) } } return stdout.String(), stderrStr, err }
func execute(args []string) (string, error) { cmd := exec.Command(powershell, args...) log.Debugf("[executing ==>] : %v %v", powershell, strings.Join(args, " ")) var stdout bytes.Buffer var stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr err := cmd.Run() log.Debugf("[stdout =====>] : %s", stdout.String()) log.Debugf("[stderr =====>] : %s", stderr.String()) return stdout.String(), err }
func (d *Driver) GetIP() (string, error) { // DHCP is used to get the IP, so virtualbox hosts don't have IPs unless // they are running s, err := d.GetState() if err != nil { return "", err } if s != state.Running { return "", drivers.ErrHostIsNotRunning } output, err := drivers.RunSSHCommandFromDriver(d, "ip addr show dev eth1") if err != nil { return "", err } log.Debugf("SSH returned: %s\nEND SSH\n", output) // parse to find: inet 192.168.59.103/24 brd 192.168.59.255 scope global eth1 lines := strings.Split(output, "\n") for _, line := range lines { vals := strings.Split(strings.TrimSpace(line), " ") if len(vals) >= 2 && vals[0] == "inet" { return vals[1][:strings.Index(vals[1], "/")], nil } } return "", fmt.Errorf("No IP address found %s", output) }
func (client NativeClient) dialSuccess() bool { if _, err := ssh.Dial("tcp", fmt.Sprintf("%s:%d", client.Hostname, client.Port), &client.Config); err != nil { log.Debugf("Error dialing TCP: %s", err) return false } return true }
func (d *Driver) buildHostSpec() *HostSpec { spec := &HostSpec{ Hostname: d.deviceConfig.Hostname, Domain: d.deviceConfig.Domain, Cpu: d.deviceConfig.Cpu, Memory: d.deviceConfig.Memory, Datacenter: Datacenter{Name: d.deviceConfig.Region}, Os: d.deviceConfig.Image, HourlyBilling: d.deviceConfig.HourlyBilling, PrivateNetOnly: d.deviceConfig.PrivateNet, LocalDisk: d.deviceConfig.LocalDisk, } if d.deviceConfig.DiskSize > 0 { spec.BlockDevices = []BlockDevice{{Device: "0", DiskImage: DiskImage{Capacity: d.deviceConfig.DiskSize}}} } if d.deviceConfig.PublicVLAN > 0 { spec.PrimaryNetworkComponent = &NetworkComponent{ NetworkVLAN: &NetworkVLAN{ Id: d.deviceConfig.PublicVLAN, }, } } if d.deviceConfig.PrivateVLAN > 0 { spec.PrimaryBackendNetworkComponent = &NetworkComponent{ NetworkVLAN: &NetworkVLAN{ Id: d.deviceConfig.PrivateVLAN, }, } } log.Debugf("Built host spec %#v", spec) return spec }
func (d *Driver) Restart() error { if err := d.Stop(); err != nil { return err } // Check for 120 seconds for the machine to stop for i := 1; i <= 60; i++ { machineState, err := d.GetState() if err != nil { return err } if machineState == state.Running { log.Debugf("Not there yet %d/%d", i, 60) time.Sleep(2 * time.Second) continue } if machineState == state.Stopped { break } } machineState, err := d.GetState() // If the VM is still running after 120 seconds just kill it. if machineState == state.Running { if err = d.Kill(); err != nil { return fmt.Errorf("can't stop VM: %s", err) } } return d.Start() }
func (d *Driver) configureSecurityGroup(groupName string) error { log.Debugf("configuring security group in %s", d.VpcId) var securityGroup *amz.SecurityGroup groups, err := d.getClient().GetSecurityGroups() if err != nil { return err } for _, grp := range groups { if grp.GroupName == groupName { log.Debugf("found existing security group (%s) in %s", groupName, d.VpcId) securityGroup = &grp break } } // if not found, create if securityGroup == nil { log.Debugf("creating security group (%s) in %s", groupName, d.VpcId) group, err := d.getClient().CreateSecurityGroup(groupName, "Docker Machine", d.VpcId) if err != nil { return err } securityGroup = group // wait until created (dat eventual consistency) log.Debugf("waiting for group (%s) to become available", group.GroupId) if err := mcnutils.WaitFor(d.securityGroupAvailableFunc(group.GroupId)); err != nil { return err } } d.SecurityGroupId = securityGroup.GroupId perms := d.configureSecurityGroupPermissions(securityGroup) if len(perms) != 0 { log.Debugf("authorizing group %s with permissions: %v", securityGroup.GroupName, perms) if err := d.getClient().AuthorizeSecurityGroup(d.SecurityGroupId, perms); err != nil { return err } } return nil }
func (d *Driver) waitForStart() { log.Infof("Waiting for host to become available") for { s, err := d.GetState() if err != nil { log.Debugf("Failed to GetState - %+v", err) continue } if s == state.Running { break } else { log.Debugf("Still waiting - state is %s...", s) } time.Sleep(2 * time.Second) } }
func (d *Driver) deleteKeyPair() error { log.Debugf("deleting key pair: %s", d.KeyName) if err := d.getClient().DeleteKeyPair(d.KeyName); err != nil { return err } return nil }
func (d *Driver) deleteSecurityGroup() error { log.Debugf("deleting security group %s", d.SecurityGroupId) if err := d.getClient().DeleteSecurityGroup(d.SecurityGroupId); err != nil { return err } return nil }
func (provisioner *DebianProvisioner) Package(name string, action pkgaction.PackageAction) error { var packageAction string updateMetadata := true switch action { case pkgaction.Install, pkgaction.Upgrade: packageAction = "install" case pkgaction.Remove: packageAction = "remove" updateMetadata = false } switch name { case "docker": name = "docker-engine" } if updateMetadata { if _, err := provisioner.SSHCommand("sudo apt-get update"); err != nil { return err } } // handle the new docker-engine package; we can probably remove this // after we have a few versions if action == pkgaction.Upgrade && name == "docker-engine" { // run the force remove on the existing lxc-docker package // and remove the existing apt source list // also re-run the get.docker.com script to properly setup // the system again commands := []string{ "rm /etc/apt/sources.list.d/docker.list || true", "apt-get remove -y lxc-docker || true", "curl -sSL https://get.docker.com | sh", } for _, cmd := range commands { command := fmt.Sprintf("sudo DEBIAN_FRONTEND=noninteractive %s", cmd) if _, err := provisioner.SSHCommand(command); err != nil { return err } } } command := fmt.Sprintf("DEBIAN_FRONTEND=noninteractive sudo -E apt-get %s -y %s", packageAction, name) log.Debugf("package: action=%s name=%s", action.String(), name) if _, err := provisioner.SSHCommand(command); err != nil { return err } return nil }
func (d *Driver) Create() error { log.Infof("Creating SSH key...") key, err := d.createSSHKey() if err != nil { return err } d.SSHKeyID = key.ID log.Infof("Creating Digital Ocean droplet...") client := d.getClient() createRequest := &godo.DropletCreateRequest{ Image: d.Image, Name: d.MachineName, Region: d.Region, Size: d.Size, IPv6: d.IPv6, PrivateNetworking: d.PrivateNetworking, Backups: d.Backups, SSHKeys: []interface{}{d.SSHKeyID}, } newDroplet, _, err := client.Droplets.Create(createRequest) if err != nil { return err } d.DropletID = newDroplet.Droplet.ID for { newDroplet, _, err = client.Droplets.Get(d.DropletID) if err != nil { return err } for _, network := range newDroplet.Droplet.Networks.V4 { if network.Type == "public" { d.IPAddress = network.IPAddress } } if d.IPAddress != "" { break } time.Sleep(1 * time.Second) } log.Debugf("Created droplet ID %d, IP address %s", newDroplet.Droplet.ID, d.IPAddress) return nil }
func RunSSHCommandFromDriver(d Driver, command string) (string, error) { client, err := GetSSHClientFromDriver(d) if err != nil { return "", err } log.Debugf("About to run SSH command:\n%s", command) output, err := client.Output(command) log.Debugf("SSH cmd err, output: %v: %s", err, output) if err != nil && !isErr255Exit(err) { log.Error("SSH cmd error!") log.Errorf("command: %s", command) log.Errorf("err : %v", err) log.Fatalf("output : %s", output) } return output, err }
// getLatestISOURL fetches the RancherOS versions file and returns the ISO
// URL built from the version named on the "current: " line, or an error if
// no such line is found.
func (provisioner *RancherProvisioner) getLatestISOURL() (string, error) {
	log.Debugf("Reading %s", versionsUrl)
	resp, err := http.Get(versionsUrl)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// Don't want to pull in yaml parser, we'll do this manually
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "current: ") {
			log.Debugf("Found %s", line)
			// NOTE(review): indexing Split(line, ":")[2] assumes the value
			// after "current:" itself contains a ':' (i.e. at least two
			// colons on the line); a plain "current: v1.2.3" line would make
			// this panic — confirm against the actual versions-file format.
			// Also note `err` is necessarily nil here.
			return fmt.Sprintf(isoUrl, strings.Split(line, ":")[2]), err
		}
	}
	return "", fmt.Errorf("Failed to find current version")
}
func WaitForDocker(ip string, daemonPort int) error { return WaitFor(func() bool { conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", ip, daemonPort)) if err != nil { log.Debugf("Daemon not responding yet: %s", err) return false } conn.Close() return true }) }
func MachineInState(d Driver, desiredState state.State) func() bool { return func() bool { currentState, err := d.GetState() if err != nil { log.Debugf("Error getting machine state: %s", err) } if currentState == desiredState { return true } return false } }
func (d *Driver) instanceIpAvailable() bool { ip, err := d.GetIP() if err != nil { log.Debug(err) } if ip != "" { d.IPAddress = ip log.Debugf("Got the IP Address, it's %q", d.IPAddress) return true } return false }
func (d *Driver) terminate() error { if d.InstanceId == "" { return fmt.Errorf("unknown instance") } log.Debugf("terminating instance: %s", d.InstanceId) if err := d.getClient().TerminateInstance(d.InstanceId); err != nil { return fmt.Errorf("unable to terminate instance: %s", err) } return nil }
// Make a boot2docker userdata.tar key bundle func (d *Driver) generateKeyBundle() error { log.Debugf("Creating Tar key bundle...") magicString := "boot2docker, this is vmware speaking" tf, err := os.Create(d.LocalArtifactPath("userdata.tar")) if err != nil { return err } defer tf.Close() var fileWriter = tf tw := tar.NewWriter(fileWriter) defer tw.Close() // magicString first so we can figure out who originally wrote the tar. file := &tar.Header{Name: magicString, Size: int64(len(magicString))} if err := tw.WriteHeader(file); err != nil { return err } if _, err := tw.Write([]byte(magicString)); err != nil { return err } // .ssh/key.pub => authorized_keys file = &tar.Header{Name: ".ssh", Typeflag: tar.TypeDir, Mode: 0700} if err := tw.WriteHeader(file); err != nil { return err } pubKey, err := ioutil.ReadFile(d.publicSSHKeyPath()) if err != nil { return err } file = &tar.Header{Name: ".ssh/authorized_keys", Size: int64(len(pubKey)), Mode: 0644} if err := tw.WriteHeader(file); err != nil { return err } if _, err := tw.Write([]byte(pubKey)); err != nil { return err } file = &tar.Header{Name: ".ssh/authorized_keys2", Size: int64(len(pubKey)), Mode: 0644} if err := tw.WriteHeader(file); err != nil { return err } if _, err := tw.Write([]byte(pubKey)); err != nil { return err } if err := tw.Close(); err != nil { return err } return nil }
func WaitForTCP(addr string) error { for { log.Debugf("Testing TCP connection to: %s", addr) conn, err := net.DialTimeout("tcp", addr, 2*time.Second) if err != nil { continue } defer conn.Close() return nil } }
func (d *Driver) hostOnlyIpAvailable() bool { ip, err := d.GetIP() if err != nil { log.Debug("ERROR getting IP: %s", err) return false } if ip != "" { log.Debugf("IP is %s", ip) return true } log.Debug("Strangely, there was no error attempting to get the IP, but it was still empty.") return false }
func token(storePath, authTokenPath string, config *oauth.Config) *oauth.Token { tokenPath := authTokenPath if authTokenPath == "" { tokenPath = filepath.Join(storePath, "gce_token") } log.Debugf("using auth token: %s", tokenPath) token, err := tokenFromCache(tokenPath) if err != nil { token = tokenFromWeb(config) saveToken(storePath, token) } return token }
// Provision configures a RedHat-family host end to end: it records the
// provisioning options, defaults the storage driver to devicemapper, sets
// the hostname, installs the base package list, updates the OS, installs
// Docker and waits for the daemon, then sets up certificates and swarm.
// The steps are strictly ordered; each failure aborts the whole run.
func (provisioner *RedHatProvisioner) Provision(swarmOptions swarm.SwarmOptions, authOptions auth.AuthOptions, engineOptions engine.EngineOptions) error {
	provisioner.SwarmOptions = swarmOptions
	provisioner.AuthOptions = authOptions
	provisioner.EngineOptions = engineOptions

	// set default storage driver for redhat
	if provisioner.EngineOptions.StorageDriver == "" {
		provisioner.EngineOptions.StorageDriver = "devicemapper"
	}

	if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil {
		return err
	}

	// Install the provisioner's base packages before touching Docker.
	for _, pkg := range provisioner.Packages {
		log.Debugf("installing base package: name=%s", pkg)
		if err := provisioner.Package(pkg, pkgaction.Install); err != nil {
			return err
		}
	}

	// update OS -- this is needed for libdevicemapper and the docker install
	if _, err := provisioner.SSHCommand("sudo yum -y update"); err != nil {
		return err
	}

	// install docker
	if err := installDocker(provisioner); err != nil {
		return err
	}

	// Block until the daemon answers before configuring it further.
	if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil {
		return err
	}

	if err := makeDockerOptionsDir(provisioner); err != nil {
		return err
	}

	provisioner.AuthOptions = setRemoteAuthOptions(provisioner)

	if err := ConfigureAuth(provisioner); err != nil {
		return err
	}

	if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil {
		return err
	}

	return nil
}
func (provisioner *RancherProvisioner) Provision(swarmOptions swarm.SwarmOptions, authOptions auth.AuthOptions, engineOptions engine.EngineOptions) error { provisioner.SwarmOptions = swarmOptions provisioner.AuthOptions = authOptions provisioner.EngineOptions = engineOptions if provisioner.EngineOptions.StorageDriver == "" { provisioner.EngineOptions.StorageDriver = "overlay" } else if provisioner.EngineOptions.StorageDriver != "overlay" { return fmt.Errorf("Unsupported storage driver: %s", provisioner.EngineOptions.StorageDriver) } log.Debugf("Setting hostname %s", provisioner.Driver.GetMachineName()) if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { return err } for _, pkg := range provisioner.Packages { log.Debugf("Installing package %s", pkg) if err := provisioner.Package(pkg, pkgaction.Install); err != nil { return err } } log.Debugf("Preparing certificates") provisioner.AuthOptions = setRemoteAuthOptions(provisioner) log.Debugf("Setting up certificates") if err := ConfigureAuth(provisioner); err != nil { return err } log.Debugf("Configuring swarm") if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil { return err } return nil }
func (d *Driver) Remove() error { if err := d.setUserSubscription(); err != nil { return err } if available, _, err := vmClient.CheckHostedServiceNameAvailability(d.MachineName); err != nil { return err } else if available { return nil } log.Debugf("removing %s", d.MachineName) return vmClient.DeleteHostedService(d.MachineName) }
// setupHostOnlyNetwork creates (or reuses) a VirtualBox host-only network
// for the driver's configured CIDR and attaches it as the VM's second NIC,
// with DHCP handing out addresses in the .100-.254 range of the subnet.
func (d *Driver) setupHostOnlyNetwork(machineName string) error {
	hostOnlyCIDR := d.HostOnlyCIDR
	// This is to assist in migrating from version 0.2 to 0.3 format
	// it should be removed in a later release
	if hostOnlyCIDR == "" {
		hostOnlyCIDR = defaultHostOnlyCIDR
	}
	ip, network, err := net.ParseCIDR(hostOnlyCIDR)
	if err != nil {
		return err
	}
	// NOTE(review): To4 returns nil for a non-IPv4 CIDR, which would panic
	// when indexed below — presumably the CIDR flag is validated as IPv4
	// elsewhere; confirm.
	nAddr := network.IP.To4()
	dhcpAddr, err := getRandomIPinSubnet(network.IP)
	if err != nil {
		return err
	}
	// DHCP lease pool: x.x.x.100 through x.x.x.254 inside the subnet.
	lowerDHCPIP := net.IPv4(nAddr[0], nAddr[1], nAddr[2], byte(100))
	upperDHCPIP := net.IPv4(nAddr[0], nAddr[1], nAddr[2], byte(254))
	log.Debugf("using %s for dhcp address", dhcpAddr)
	hostOnlyNetwork, err := getOrCreateHostOnlyNetwork(ip, network.Mask, dhcpAddr, lowerDHCPIP, upperDHCPIP)
	if err != nil {
		return err
	}
	// Attach the host-only adapter as NIC 2 of the VM via VBoxManage.
	if err := vbm("modifyvm", machineName,
		"--nic2", "hostonly",
		"--nictype2", d.HostOnlyNicType,
		"--nicpromisc2", d.HostOnlyPromiscMode,
		"--hostonlyadapter2", hostOnlyNetwork.Name,
		"--cableconnected2", "on"); err != nil {
		return err
	}
	return nil
}
func (d *Driver) Create() error { log.Infof("Importing SSH key...") if err := mcnutils.CopyFile(d.SSHKey, d.GetSSHKeyPath()); err != nil { return fmt.Errorf("unable to copy ssh key: %s", err) } if err := os.Chmod(d.GetSSHKeyPath(), 0600); err != nil { return err } log.Debugf("IP: %s", d.IPAddress) return nil }