func (lbp *LocalBinaryPlugin) execServer() error {
	outScanner, errScanner, err := lbp.Executor.Start()
	if err != nil {
		return err
	}

	// Scan just one line to get the address, then send it to the relevant
	// channel.
	outScanner.Scan()
	addr := outScanner.Text()
	if err := outScanner.Err(); err != nil {
		return fmt.Errorf("Reading plugin address failed: %s", err)
	}

	lbp.addrCh <- strings.TrimSpace(addr)

	stdOutCh, stopStdoutCh := lbp.AttachStream(outScanner)
	stdErrCh, stopStderrCh := lbp.AttachStream(errScanner)

	for {
		select {
		case out := <-stdOutCh:
			log.Debug(fmt.Sprintf(pluginOutPrefix, lbp.MachineName), out)
		case err := <-stdErrCh:
			log.Debug(fmt.Sprintf(pluginErrPrefix, lbp.MachineName), err)
		case <-lbp.stopCh:
			stopStdoutCh <- true
			stopStderrCh <- true
			if err := lbp.Executor.Close(); err != nil {
				return fmt.Errorf("Error closing local plugin binary: %s", err)
			}
			return nil
		}
	}
}
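// The following is a minimal, runnable sketch of the scanner-to-channel
// streaming pattern that execServer relies on. It uses only the standard
// library; the streamLines helper and its channel protocol are illustrative
// assumptions, not part of this package's API.
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// streamLines forwards each scanned line to the returned line channel until
// the input is drained or a value arrives on the stop channel.
func streamLines(scanner *bufio.Scanner) (chan string, chan bool) {
	lineCh := make(chan string)
	stopCh := make(chan bool)
	go func() {
		defer close(lineCh)
		for scanner.Scan() {
			select {
			case lineCh <- scanner.Text():
			case <-stopCh:
				return
			}
		}
	}()
	return lineCh, stopCh
}

func main() {
	scanner := bufio.NewScanner(strings.NewReader("127.0.0.1:52341\nplugin log line one\nplugin log line two\n"))

	// As in execServer, the first scanned line is the plugin address.
	scanner.Scan()
	fmt.Println("addr:", strings.TrimSpace(scanner.Text()))

	lineCh, stopCh := streamLines(scanner)
	for line := range lineCh {
		fmt.Println("out:", line)
	}
	close(stopCh)
}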
func (a AzureClient) findStorageAccount(resourceGroup, location, prefix string, storageType storage.AccountType) (*storage.AccountProperties, error) {
	f := logutil.Fields{
		"type":     storageType,
		"prefix":   prefix,
		"location": location}
	log.Debug("Querying existing storage accounts.", f)

	l, err := a.storageAccountsClient().ListByResourceGroup(resourceGroup)
	if err != nil {
		return nil, err
	}

	if l.Value != nil {
		for _, v := range *l.Value {
			log.Debug("Iterating...", logutil.Fields{
				"name":     to.String(v.Name),
				"type":     storageType,
				"location": to.String(v.Location),
			})
			if to.String(v.Location) == location && v.Properties.AccountType == storageType && strings.HasPrefix(to.String(v.Name), prefix) {
				log.Debug("Found eligible storage account.", logutil.Fields{"name": to.String(v.Name)})
				return v.Properties, nil
			}
		}
	}

	log.Debug("No account matching the pattern is found.", f)
	// err is guaranteed nil at this point; return nil explicitly for clarity.
	return nil, nil
}
func (provisioner *WindowsProvisioner) enableBasicAuthForWinRM() bool {
	log.Debug("Enabling basic auth for WinRM")
	ip, err := provisioner.Driver.GetIP()
	if err != nil {
		return false
	}

	d := provisioner.Driver
	stdout, stderr, exit, err := drivers.WinRMRunCmdWithNTLM(ip, d.GetSSHUsername(), d.GetSSHPassword(), enableBasicAuthCmd)
	if (err != nil) || (exit != 0) {
		log.Warn("Error getting WinRM command to enable basic auth:", err)
		log.Debug("Enable basic auth output:", stdout, ", err:", stderr, ", exit:", exit)
		return false
	}

	stdout, stderr, exit, err = drivers.WinRMRunCmdWithNTLM(ip, d.GetSSHUsername(), d.GetSSHPassword(), enableUnencryptedCmd)
	if (err != nil) || (exit != 0) {
		log.Warn("Error getting WinRM command to enable unencrypted comm:", err)
		log.Debug("Enable unencrypted comm output:", stdout, ", err:", stderr, ", exit:", exit)
		return false
	}

	// Both commands succeeded; basic auth is now enabled. Carry on.
	log.Debug("Successfully enabled basic auth for WinRM")
	return true
}
// TestSaveServer verifies that SaveServer persists server changes
func TestSaveServer(t *testing.T) {
	var (
		d *ICSPTest
		c *icsp.ICSPClient
	)
	if os.Getenv("ICSP_TEST_ACCEPTANCE") == "true" {
		log.Debug("implements acceptance test for TestSaveServer")
		d, c = getTestDriverA()
		if c == nil {
			t.Fatalf("Failed to execute getTestDriver() ")
		}
		// get a Server
		serialNumber := d.Tc.GetTestData(d.Env, "FreeBladeSerialNumber").(string)
		s, err := c.GetServerBySerialNumber(serialNumber)
		assert.NoError(t, err, "GetServerBySerialNumber threw error -> %s, %+v\n", err, s)
		// set a custom attribute
		s.SetCustomAttribute("docker_user", "server", "docker")
		// use test keys like from https://github.com/mitchellh/vagrant/tree/master/keys
		// private key from https://raw.githubusercontent.com/mitchellh/vagrant/master/keys/vagrant
		s.SetCustomAttribute("public_key", "server", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ==")
		// save a server
		news, err := c.SaveServer(s)
		assert.NoError(t, err, "SaveServer threw error -> %s, %+v\n", err, news)
		assert.Equal(t, s.UUID, news.UUID, "Should return a server with the same UUID")
		// verify the custom attribute is present on the server object
		_, testValue2 := s.GetValueItem("docker_user", "server")
		assert.Equal(t, "docker", testValue2.Value, "Should return the saved custom attribute")
	} else {
		log.Debug("implements unit test for TestSaveServer")
		var s icsp.Server
		_, c = getTestDriverU()
		s, err := c.SaveServer(s)
		assert.Error(t, err, "SaveServer should return an error -> %s, %+v\n", err, s)
	}
}
// func (s Server) GetPublicIPV4() (string, error)
// TestGetPublicIPV4 tests getting the public IPv4 address from the
// public_interface custom attribute.
func TestGetPublicIPV4(t *testing.T) {
	var (
		d            *ICSPTest
		c            *icsp.ICSPClient
		serialNumber string
	)
	if os.Getenv("ICSP_TEST_ACCEPTANCE") == "true" {
		log.Debug("implements acceptance test for TestGetPublicIPV4")
		d, c = getTestDriverA()
		if c == nil {
			t.Fatalf("Failed to execute getTestDriver() ")
		}
		if os.Getenv("ONEVIEW_TEST_PROVISION") == "true" {
			serialNumber = d.Tc.GetTestData(d.Env, "FreeICSPSerialNumber").(string)
		} else {
			serialNumber = d.Tc.GetTestData(d.Env, "SerialNumber").(string)
		}
		s, err := c.GetServerBySerialNumber(serialNumber)
		assert.NoError(t, err, "GetServerBySerialNumber threw error -> %s, %+v\n", err, s)
		testIP, err := s.GetPublicIPV4()
		assert.NoError(t, err, "Should GetPublicIPV4 without error -> %s, %+v\n", err, s)
		log.Debugf(" testIP -> %s", testIP)
		assert.True(t, (len(testIP) > 0), "Should return an ip address string")
	} else {
		// TODO: implement a unit test
		// need to simulate creating the public_interface custom attribute object
		// need to read the custom attribute object, see server_customattribute_test.go
		log.Debug("implements unit test for TestGetPublicIPV4")
	}
}
func (d *Driver) stackAvailable() (bool, error) {
	log.Debug("Checking if the stack is available...")
	svc := cloudformation.New(session.New())
	params := &cloudformation.DescribeStacksInput{
		StackName: aws.String(d.MachineName),
	}
	resp, err := svc.DescribeStacks(params)
	if err != nil {
		return false, err
	}
	log.Debug(resp)

	status := *resp.Stacks[0].StackStatus
	if status == cloudformation.StackStatusRollbackInProgress || status == cloudformation.StackStatusRollbackComplete {
		return false, errors.New("stack rollback occurred")
	}
	if status == cloudformation.StackStatusCreateComplete {
		return true, nil
	}
	log.Debug("Stack not available yet")
	return false, nil
}
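// A hedged sketch of how a caller might poll stackAvailable until the
// CloudFormation stack settles, reusing the mcnutils.WaitFor helper seen in
// the Debian provisioner below. waitForStack is an illustrative wrapper,
// not an existing method on this driver.
func (d *Driver) waitForStack() error {
	return mcnutils.WaitFor(func() bool {
		// WaitFor only understands booleans, so a rollback error surfaces
		// here as repeated false values until WaitFor gives up on its own.
		available, err := d.stackAvailable()
		if err != nil {
			log.Debugf("stack not ready: %s", err)
			return false
		}
		return available
	})
}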
func (d *Driver) GetState() (state.State, error) {
	log.Debug("Get status for OpenStack instance...", map[string]string{"MachineId": d.MachineId})
	if err := d.initCompute(); err != nil {
		return state.None, err
	}

	s, err := d.client.GetInstanceState(d)
	if err != nil {
		return state.None, err
	}

	log.Debug("State for OpenStack instance", map[string]string{
		"MachineId": d.MachineId,
		"State":     s,
	})

	switch s {
	case "ACTIVE":
		return state.Running, nil
	case "PAUSED":
		return state.Paused, nil
	case "SUSPENDED":
		return state.Saved, nil
	case "SHUTOFF":
		return state.Stopped, nil
	case "BUILDING":
		return state.Starting, nil
	case "ERROR":
		return state.Error, nil
	}
	return state.None, nil
}
func (d *Driver) Create() error {
	if err := d.setUserSubscription(); err != nil {
		return err
	}

	log.Info("Creating Azure machine...")
	vmConfig, err := vmClient.CreateAzureVMConfiguration(d.MachineName, d.Size, d.Image, d.Location)
	if err != nil {
		return err
	}

	log.Debug("Generating certificate for Azure...")
	if err := d.generateCertForAzure(); err != nil {
		return err
	}

	log.Debug("Adding Linux provisioning...")
	vmConfig, err = vmClient.AddAzureLinuxProvisioningConfig(vmConfig, d.GetSSHUsername(), d.UserPassword, d.azureCertPath(), d.SSHPort)
	if err != nil {
		return err
	}

	log.Debug("Authorizing ports...")
	if err := d.addDockerEndpoints(vmConfig); err != nil {
		return err
	}

	log.Debug("Creating VM...")
	return vmClient.CreateAzureVM(vmConfig, d.MachineName, d.Location)
}
// MakeDiskImage makes a boot2docker VM disk image.
// See https://github.com/boot2docker/boot2docker/blob/master/rootfs/rootfs/etc/rc.d/automount
func MakeDiskImage(publicSSHKeyPath string) (*bytes.Buffer, error) {
	magicString := "boot2docker, please format-me"
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)

	// magicString first so the automount script knows to format the disk
	file := &tar.Header{Name: magicString, Size: int64(len(magicString))}
	log.Debug("Writing magic tar header")
	if err := tw.WriteHeader(file); err != nil {
		return nil, err
	}
	if _, err := tw.Write([]byte(magicString)); err != nil {
		return nil, err
	}

	// .ssh/key.pub => authorized_keys
	file = &tar.Header{Name: ".ssh", Typeflag: tar.TypeDir, Mode: 0700}
	if err := tw.WriteHeader(file); err != nil {
		return nil, err
	}

	log.Debug("Writing SSH key tar header")
	pubKey, err := ioutil.ReadFile(publicSSHKeyPath)
	if err != nil {
		return nil, err
	}

	file = &tar.Header{Name: ".ssh/authorized_keys", Size: int64(len(pubKey)), Mode: 0644}
	if err := tw.WriteHeader(file); err != nil {
		return nil, err
	}
	if _, err := tw.Write(pubKey); err != nil {
		return nil, err
	}

	file = &tar.Header{Name: ".ssh/authorized_keys2", Size: int64(len(pubKey)), Mode: 0644}
	if err := tw.WriteHeader(file); err != nil {
		return nil, err
	}
	if _, err := tw.Write(pubKey); err != nil {
		return nil, err
	}

	if err := tw.Close(); err != nil {
		return nil, err
	}

	return buf, nil
}
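// A minimal sketch of consuming MakeDiskImage: the tar stream (magic string
// plus SSH keys) is written at the front of a fixed-size sparse file, which
// boot2docker's automount script then detects and formats on first boot.
// writeDiskImage, its arguments, and the sparse-file trick are illustrative
// assumptions, not this package's API (assumes imports: bytes, io, os).
func writeDiskImage(dest string, sizeMB int64, tarStream *bytes.Buffer) error {
	f, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer f.Close()

	// The tar stream goes at byte offset zero so the automount script finds
	// the magic string.
	if _, err := f.Write(tarStream.Bytes()); err != nil {
		return err
	}
	// Seek to the final byte and write it, producing a sparse file of the
	// requested size.
	if _, err := f.Seek(sizeMB*1024*1024-1, io.SeekStart); err != nil {
		return err
	}
	_, err = f.Write([]byte{0})
	return err
}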
// integrated acceptance test
// TestApplyDeploymentJobs applies OS build plan deployment jobs to a server
// TODO: a workaround to figuring out how to bubble up public ip address information from the os to icsp after os build plan provisioning
// @docker_user@ "@public_key@" @docker_hostname@ "@proxy_config@" "@proxy_enable@" "@interface@"
func TestApplyDeploymentJobs(t *testing.T) {
	var (
		d            *ICSPTest
		c            *ICSPClient
		serialNumber string
	)
	if os.Getenv("ICSP_TEST_ACCEPTANCE") == "true" {
		log.Debug("implements acceptance test for ApplyDeploymentJobs")
		d, c = getTestDriverA()
		if c == nil {
			t.Fatalf("Failed to execute getTestDriver() ")
		}
		// get a Server
		osBuildPlans := make([]string, 1)
		osBuildPlans[0] = d.Tc.GetTestData(d.Env, "OSBuildPlan").(string)
		if os.Getenv("ONEVIEW_TEST_PROVISION") != "true" {
			serialNumber = d.Tc.GetTestData(d.Env, "SerialNumber").(string)
		} else {
			serialNumber = d.Tc.GetTestData(d.Env, "FreeICSPSerialNumber").(string)
		}
		s, err := c.GetServerBySerialNumber(serialNumber)
		assert.NoError(t, err, "GetServerBySerialNumber threw error -> %s, %+v\n", err, s)
		// set a custom attribute
		s.SetCustomAttribute("docker_user", "server", "docker")
		s.SetCustomAttribute("docker_hostname", "server", d.Tc.GetTestData(d.Env, "HostName").(string))
		// use test keys like from https://github.com/mitchellh/vagrant/tree/master/keys
		// private key from https://raw.githubusercontent.com/mitchellh/vagrant/master/keys/vagrant
		s.SetCustomAttribute("public_key", "server", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ==")
		// save a server
		news, err := c.SaveServer(s)
		assert.NoError(t, err, "SaveServer threw error -> %s, %+v\n", err, news)
		assert.Equal(t, s.UUID, news.UUID, "Should return a server with the same UUID")
		// verify the custom attribute is present on the server object
		_, testValue2 := s.GetValueItem("docker_user", "server")
		assert.Equal(t, "docker", testValue2.Value, "Should return the saved custom attribute")
		if os.Getenv("ONEVIEW_TEST_PROVISION") != "true" {
			log.Info("env ONEVIEW_TEST_PROVISION != true for ApplyDeploymentJobs")
			log.Infof("Skipping OS build for : %s, %s", osBuildPlans, serialNumber)
			return
		}
		_, err = c.ApplyDeploymentJobs(osBuildPlans, nil, s)
		assert.NoError(t, err, "ApplyDeploymentJobs threw error -> %s, %+v\n", err, news)
	} else {
		var s Server
		_, c = getTestDriverU()
		log.Debug("implements unit test for ApplyDeploymentJobs")
		testPlans := make([]string, 1)
		testPlans[0] = "testbuildplan"
		_, err := c.ApplyDeploymentJobs(testPlans, nil, s)
		assert.Error(t, err, "ApplyDeploymentJobs should return an error -> %s, %+v\n", err, s)
	}
}
func (d *Driver) instanceIsRunning() bool {
	st, err := d.GetState()
	if err != nil {
		log.Debug(err)
	}
	if st == state.Running {
		return true
	}
	log.Debug("VPS not yet started")
	return false
}
func (c *RpcClientDriver) Close() error {
	log.Debug("Making call to close driver server")
	if err := c.Client.Call("RpcServerDriver.Close", struct{}{}, nil); err != nil {
		return err
	}
	log.Debug("Successfully made call to close driver server")
	return nil
}
// Make a boot2docker VM disk image.
func (d *Driver) generateDiskImage(size int) error {
	log.Debugf("Creating %d MB hard disk image...", size)

	magicString := "boot2docker, please format-me"
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)

	// magicString first so the automount script knows to format the disk
	file := &tar.Header{Name: magicString, Size: int64(len(magicString))}
	log.Debug("Writing magic tar header")
	if err := tw.WriteHeader(file); err != nil {
		return err
	}
	if _, err := tw.Write([]byte(magicString)); err != nil {
		return err
	}

	// .ssh/key.pub => authorized_keys
	file = &tar.Header{Name: ".ssh", Typeflag: tar.TypeDir, Mode: 0700}
	if err := tw.WriteHeader(file); err != nil {
		return err
	}

	log.Debug("Writing SSH key tar header")
	pubKey, err := ioutil.ReadFile(d.publicSSHKeyPath())
	if err != nil {
		return err
	}

	file = &tar.Header{Name: ".ssh/authorized_keys", Size: int64(len(pubKey)), Mode: 0644}
	if err := tw.WriteHeader(file); err != nil {
		return err
	}
	if _, err := tw.Write(pubKey); err != nil {
		return err
	}

	file = &tar.Header{Name: ".ssh/authorized_keys2", Size: int64(len(pubKey)), Mode: 0644}
	if err := tw.WriteHeader(file); err != nil {
		return err
	}
	if _, err := tw.Write(pubKey); err != nil {
		return err
	}

	if err := tw.Close(); err != nil {
		return err
	}

	raw := bytes.NewReader(buf.Bytes())
	log.Debug("Calling inner createDiskImage")
	return createDiskImage(d.diskPath(), size, raw)
}
func (d *Driver) getIPfromDHCPLease() (string, error) {
	var dhcpfh *os.File
	var dhcpcontent []byte
	var err error
	var lastipmatch string
	var currentip string

	// DHCP lease table for NAT vmnet interface
	var dhcpfile = "/var/db/dhcpd_leases"

	if dhcpfh, err = os.Open(dhcpfile); err != nil {
		return "", err
	}
	defer dhcpfh.Close()

	if dhcpcontent, err = ioutil.ReadAll(dhcpfh); err != nil {
		return "", err
	}

	// Get the IP from the lease table.
	leaseip := regexp.MustCompile(`^\s*ip_address=(.+?)$`)
	// TODO: also match the MAC address associated with the lease, e.g.:
	// leasemac := regexp.MustCompile(`^\s*hw_address=1,(.+?)$`)

	for _, line := range strings.Split(string(dhcpcontent), "\n") {
		if matches := leaseip.FindStringSubmatch(line); matches != nil {
			lastipmatch = matches[1]
			log.Debug(lastipmatch)
			break
		}
	}

	// Until MAC matching is implemented, use the first IP match directly;
	// previously currentip was never assigned and this always returned "".
	currentip = lastipmatch
	if currentip == "" {
		return "", fmt.Errorf("no IP address found in DHCP leases file %s", dhcpfile)
	}

	log.Debugf("IP found in DHCP lease table: %s", currentip)
	return currentip, nil
}
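// A tiny, self-contained check of the lease-parsing regex above against a
// sample /var/db/dhcpd_leases entry; the sample content is fabricated purely
// for illustration.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	sample := "{\n\tname=machine\n\tip_address=192.168.64.5\n\thw_address=1,aa:bb:cc:dd:ee:ff\n}\n"
	// Same pattern as getIPfromDHCPLease; ^ and $ anchor each split line.
	leaseip := regexp.MustCompile(`^\s*ip_address=(.+?)$`)
	for _, line := range strings.Split(sample, "\n") {
		if m := leaseip.FindStringSubmatch(line); m != nil {
			fmt.Println("found:", m[1]) // found: 192.168.64.5
		}
	}
}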
// TestGetInterfaces verifies that GetInterfaces works
func TestGetInterfaces(t *testing.T) {
	var (
		d            *ICSPTest
		c            *icsp.ICSPClient
		s            icsp.Server
		serialNumber string
		err          error
	)
	if os.Getenv("ICSP_TEST_ACCEPTANCE") == "true" {
		log.Debug("implements acceptance test for TestGetInterfaces")
		d, c = getTestDriverA()
		if c == nil {
			t.Fatalf("Failed to execute getTestDriver() ")
		}
		if os.Getenv("ONEVIEW_TEST_PROVISION") == "true" {
			serialNumber = d.Tc.GetTestData(d.Env, "FreeBladeSerialNumber").(string)
		} else {
			serialNumber = d.Tc.GetTestData(d.Env, "SerialNumber").(string)
		}
		s, err = c.GetServerBySerialNumber(serialNumber)
		data := s.GetInterfaces()
		assert.NoError(t, err, "GetInterfaces threw error -> %s, %+v\n", err, data)
		assert.True(t, len(data) > 0, "Failed to get a valid list of interfaces -> %+v", data)
		for _, inet := range data {
			log.Infof("inet -> %+v", inet)
			log.Infof("inet ip -> %+v", inet.IPV4Addr)
			log.Infof("inet slot -> %+v", inet.Slot)
			log.Infof("inet mac -> %+v", inet.MACAddr)
		}
	} else {
		log.Debug("implements unit test for TestGetInterfaces")
		d, c = getTestDriverU()
		jsonServerData := d.Tc.GetTestData(d.Env, "ServerJSONString").(string)
		log.Debugf("jsonServerData => %s", jsonServerData)
		err := json.Unmarshal([]byte(jsonServerData), &s)
		assert.NoError(t, err, "Unmarshal Server threw error -> %s, %+v\n", err, jsonServerData)
		log.Debugf("server -> %v", s)

		data := s.GetInterfaces()
		log.Debugf("Interfaces -> %+v", data)
		assert.True(t, len(data) > 0, "Failed to get a valid list of interfaces -> %+v", data)
		for _, inet := range data {
			log.Debugf("inet -> %+v", inet)
			log.Debugf("inet ip -> %+v", inet.IPV4Addr)
			log.Debugf("inet slot -> %+v", inet.Slot)
			log.Debugf("inet mac -> %+v", inet.MACAddr)
		}
	}
}
// waitVMPowerState polls the Virtual Machine instance view until it reaches the
// specified goal power state or times out. If checking for virtual machine
// state fails or waiting times out, an error is returned.
func (a AzureClient) waitVMPowerState(resourceGroup, name string, goalState VMPowerState, timeout time.Duration) error {
	// NOTE(ahmetalpbalkan): Azure APIs for Start and Stop are actually async
	// operations on which our SDK blocks and does polling until the operation
	// is complete.
	//
	// By the time the issued power cycle operation is complete, the VM will be
	// already in the goal PowerState. Hence, this method will return in the
	// first check, however there is no harm in being defensive.
	log.Debug("Waiting until VM reaches goal power state.", logutil.Fields{
		"vm":        name,
		"goalState": goalState,
		"timeout":   timeout,
	})

	// chErr is buffered and paired with a separate done channel so the polling
	// goroutine can never send on a closed channel after a timeout (closing
	// chErr itself would race with the sends below).
	chErr := make(chan error, 1)
	done := make(chan struct{})
	go func() {
		for {
			select {
			case <-done: // timed out
				return
			default:
				state, err := a.GetVirtualMachinePowerState(resourceGroup, name)
				if err != nil {
					chErr <- err
					return
				}
				if state != goalState {
					log.Debug(fmt.Sprintf("Waiting %v...", powerStatePollingInterval),
						logutil.Fields{
							"goalState": goalState,
							"state":     state,
						})
					time.Sleep(powerStatePollingInterval)
				} else {
					log.Debug("Reached goal power state.", logutil.Fields{"state": state})
					chErr <- nil
					return
				}
			}
		}
	}()

	select {
	case <-time.After(timeout):
		close(done)
		return fmt.Errorf("Waiting for goal state %q timed out after %v", goalState, timeout)
	case err := <-chErr:
		return err
	}
}
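// A hedged usage sketch for waitVMPowerState: power off a VM and wait until
// Azure reports it stopped. StopVirtualMachine, VMPowerStateStopped, and the
// five-minute timeout are all assumed names and values for illustration, not
// confirmed parts of this client.
func (a AzureClient) stopAndWait(resourceGroup, name string) error {
	if err := a.StopVirtualMachine(resourceGroup, name); err != nil {
		return err
	}
	// Per the NOTE above, the SDK usually blocks until the power operation
	// completes, so this should return on the first poll.
	return a.waitVMPowerState(resourceGroup, name, VMPowerStateStopped, 5*time.Minute)
}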
func addFile(path string, metaData *bugsnag.MetaData) {
	file, err := os.Open(path)
	if err != nil {
		log.Debug(err)
		return
	}
	defer file.Close()

	data, err := ioutil.ReadAll(file)
	if err != nil {
		log.Debug(err)
		return
	}

	metaData.Add("logfile", filepath.Base(path), string(data))
}
func (d *Driver) hostOnlyIpAvailable() bool {
	ip, err := d.GetIP()
	if err != nil {
		log.Debugf("ERROR getting IP: %s", err)
		return false
	}
	if ip != "" {
		log.Debugf("IP is %s", ip)
		return true
	}

	log.Debug("Strangely, there was no error attempting to get the IP, but it was still empty.")
	return false
}
// GetPublicIPAddress attempts to get public IP address from the Public IP
// resource. If IP address is not allocated yet, returns empty string.
func (a AzureClient) GetPublicIPAddress(resourceGroup, name string) (string, error) {
	f := logutil.Fields{"name": name}
	log.Debug("Querying public IP address.", f)
	ip, err := a.publicIPAddressClient().Get(resourceGroup, name, "")
	if err != nil {
		return "", err
	}
	if ip.Properties == nil {
		log.Debug("publicIP.Properties is nil. Could not determine IP address", f)
		return "", nil
	}
	return to.String(ip.Properties.IPAddress), nil
}
func (d *Driver) configureIPAddress() error {
	// create an EIP and bind it to the host
	if !d.PrivateIPOnly {
		createEIPParams := unet.AllocateEIPParams{
			Region:       d.Region,
			OperatorName: "Bgp",
			Bandwidth:    2,
			ChargeType:   "Dynamic",
			Quantity:     1,
		}
		resp, err := d.getUNetService().AllocateEIP(&createEIPParams)
		if err != nil {
			return fmt.Errorf("Allocate EIP failed: %s", err)
		}
		log.Debug(resp)

		if len(*resp.EIPSet) == 0 {
			return fmt.Errorf("EIP is empty")
		}
		eipId := (*resp.EIPSet)[0].EIPId
		if len(*(*resp.EIPSet)[0].EIPAddr) == 0 {
			return fmt.Errorf("IP Address is empty")
		}
		d.IPAddress = (*(*resp.EIPSet)[0].EIPAddr)[0].IP

		bindHostParams := unet.BindEIPParams{
			Region:       d.Region,
			EIPId:        eipId,
			ResourceType: "uhost",
			ResourceId:   d.UhostID,
		}
		bindEIPResp, err := d.getUNetService().BindEIP(&bindHostParams)
		if err != nil {
			return fmt.Errorf("Bind EIP failed: %s", err)
		}
		log.Debug(bindEIPResp)
	} else {
		hostDetails, err := d.getHostDescription()
		if err != nil {
			return fmt.Errorf("get host detail failed: %s", err)
		}
		d.IPAddress = hostDetails.publicIPAddress
		d.PrivateIPAddress = hostDetails.privateIPAddress
	}
	return nil
}
func (provisioner *DebianProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error {
	provisioner.SwarmOptions = swarmOptions
	provisioner.AuthOptions = authOptions
	provisioner.EngineOptions = engineOptions
	swarmOptions.Env = engineOptions.Env

	storageDriver, err := decideStorageDriver(provisioner, "aufs", engineOptions.StorageDriver)
	if err != nil {
		return err
	}
	provisioner.EngineOptions.StorageDriver = storageDriver

	// HACK: since debian does not come with sudo by default, we install it
	log.Debug("installing sudo")
	if _, err := provisioner.SSHCommand("if ! type sudo; then apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y sudo; fi"); err != nil {
		return err
	}

	log.Debug("setting hostname")
	if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil {
		return err
	}

	log.Debug("installing base packages")
	for _, pkg := range provisioner.Packages {
		if err := provisioner.Package(pkg, pkgaction.Install); err != nil {
			return err
		}
	}

	log.Debug("installing docker")
	if err := installDockerGeneric(provisioner, engineOptions.InstallURL); err != nil {
		return err
	}

	log.Debug("waiting for docker daemon")
	if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil {
		return err
	}

	provisioner.AuthOptions = setRemoteAuthOptions(provisioner)

	log.Debug("configuring auth")
	if err := ConfigureAuth(provisioner); err != nil {
		return err
	}

	log.Debug("configuring swarm")
	if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil {
		return err
	}

	// enable docker in systemd
	log.Debug("enabling docker in systemd")
	if err := provisioner.Service("docker", serviceaction.Enable); err != nil {
		return err
	}

	return nil
}
// GetPrivateIPAddress attempts to retrieve private IP address of the specified
// network interface name. If IP address is not allocated yet, returns empty
// string.
func (a AzureClient) GetPrivateIPAddress(resourceGroup, name string) (string, error) {
	f := logutil.Fields{"name": name}
	log.Debug("Querying network interface.", f)
	nic, err := a.networkInterfacesClient().Get(resourceGroup, name, "")
	if err != nil {
		return "", err
	}
	if nic.Properties == nil || nic.Properties.IPConfigurations == nil ||
		len(*nic.Properties.IPConfigurations) == 0 {
		log.Debug("No IPConfigurations found on NIC", f)
		return "", nil
	}
	return to.String((*nic.Properties.IPConfigurations)[0].Properties.PrivateIPAddress), nil
}
func (d *Driver) assignFloatingIP() error {
	var err error

	if d.ComputeNetwork {
		err = d.initCompute()
	} else {
		err = d.initNetwork()
	}
	if err != nil {
		return err
	}

	ips, err := d.client.GetFloatingIPs(d)
	if err != nil {
		return err
	}

	var floatingIP *FloatingIP

	log.Debug("Looking for an available floating IP", map[string]string{
		"MachineId": d.MachineId,
		"Pool":      d.FloatingIpPool,
	})

	for _, ip := range ips {
		if ip.PortId == "" {
			log.Debug("Available floating IP found", map[string]string{
				"MachineId": d.MachineId,
				"IP":        ip.Ip,
			})
			floatingIP = &ip
			break
		}
	}

	if floatingIP == nil {
		floatingIP = &FloatingIP{}
		log.Debug("No available floating IP found. Allocating a new one...", map[string]string{"MachineId": d.MachineId})
	} else {
		log.Debug("Assigning floating IP to the instance", map[string]string{"MachineId": d.MachineId})
	}

	if err := d.client.AssignFloatingIP(d, floatingIP); err != nil {
		return err
	}
	d.IPAddress = floatingIP.Ip
	return nil
}
func (c *RPCClientDriver) close() error {
	c.heartbeatDoneCh <- true
	close(c.heartbeatDoneCh)

	log.Debug("Making call to close driver server")
	if err := c.Client.Call(CloseMethod, struct{}{}, nil); err != nil {
		return err
	}
	log.Debug("Successfully made call to close driver server")

	log.Debug("Making call to close connection to plugin binary")
	return c.plugin.Close()
}
func (d *Driver) Remove() error {
	log.Debug("deleting instance...", map[string]string{"MachineId": d.MachineId})
	log.Info("Deleting OpenStack instance...")
	if err := d.initCompute(); err != nil {
		return err
	}
	if err := d.client.DeleteInstance(d); err != nil {
		return err
	}
	log.Debug("deleting key pair...", map[string]string{"Name": d.KeyPairName})
	if err := d.client.DeleteKeyPair(d, d.KeyPairName); err != nil {
		return err
	}
	return nil
}
func NewClient(user string, host string, port int, auth *Auth) (Client, error) {
	sshBinaryPath, err := exec.LookPath("ssh")
	if err != nil {
		log.Debug("SSH binary not found, using native Go implementation")
		return NewNativeClient(user, host, port, auth)
	}

	if defaultClientType == Native {
		log.Debug("Using SSH client type: native")
		return NewNativeClient(user, host, port, auth)
	}

	log.Debug("Using SSH client type: external")
	return NewExternalClient(sshBinaryPath, user, host, port, auth)
}
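// A brief usage sketch for NewClient, assuming the Client interface exposes
// an Output method that runs a command and returns its output; the user,
// host, port, and key path below are placeholder values.
func exampleUptime() (string, error) {
	auth := &Auth{Keys: []string{"/home/user/.ssh/id_rsa"}}
	client, err := NewClient("docker", "192.168.99.100", 22, auth)
	if err != nil {
		return "", err
	}
	return client.Output("uptime")
}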
// Authenticate creates a Rackspace-specific Gophercloud client.
func (c *Client) Authenticate(d *openstack.Driver) error {
	if c.Provider != nil {
		return nil
	}

	log.Debug("Authenticating to Rackspace.", map[string]string{
		"Username": d.Username,
	})

	apiKey := c.driver.APIKey
	opts := gophercloud.AuthOptions{
		Username: d.Username,
		APIKey:   apiKey,
	}

	provider, err := rackspace.NewClient(rackspace.RackspaceUSIdentity)
	if err != nil {
		return err
	}

	provider.UserAgent.Prepend(fmt.Sprintf("docker-machine/v%d", version.APIVersion))

	err = rackspace.Authenticate(provider, opts)
	if err != nil {
		return err
	}
	c.Provider = provider
	return nil
}
// Create is the wrapper method which covers all of the boilerplate around
// actually creating, provisioning, and persisting an instance in the store.
func (api *Client) Create(h *host.Host) error {
	if err := cert.BootstrapCertificates(h.AuthOptions()); err != nil {
		return fmt.Errorf("Error generating certificates: %s", err)
	}

	log.Info("Running pre-create checks...")
	if err := h.Driver.PreCreateCheck(); err != nil {
		return mcnerror.ErrDuringPreCreate{
			Cause: err,
		}
	}

	if err := api.Save(h); err != nil {
		return fmt.Errorf("Error saving host to store before attempting creation: %s", err)
	}

	log.Info("Creating machine...")
	if err := api.performCreate(h); err != nil {
		return fmt.Errorf("Error creating machine: %s", err)
	}

	log.Debug("Reticulating splines...")
	return nil
}
func cmdConfig(c CommandLine) error {
	// Ensure that log messages always go to stderr when this command is
	// being run (it is intended to be run in a subshell)
	log.SetOutWriter(os.Stderr)

	if len(c.Args()) != 1 {
		return ErrExpectedOneMachine
	}

	host, err := getFirstArgHost(c)
	if err != nil {
		return err
	}

	dockerHost, authOptions, err := runConnectionBoilerplate(host, c)
	if err != nil {
		return fmt.Errorf("Error running connection boilerplate: %s", err)
	}

	log.Debug(dockerHost)

	fmt.Printf("--tlsverify --tlscacert=%q --tlscert=%q --tlskey=%q -H=%s",
		authOptions.CaCertPath, authOptions.ClientCertPath, authOptions.ClientKeyPath, dockerHost)

	return nil
}
func (d *Driver) getBlade() (err error) {
	log.Debug("In getBlade()")

	d.Profile, err = d.ClientOV.GetProfileByName(d.MachineName)
	if err != nil {
		return err
	}

	log.Debug("***> check if we got a profile")
	if d.Profile.URI.IsNil() {
		return fmt.Errorf("Attempting to get machine profile information, unable to find machine in oneview: %s", d.MachineName)
	}

	// get the server hardware associated with that profile
	log.Debug("***> GetServerHardware")
	d.Hardware, err = d.ClientOV.GetServerHardware(d.Profile.ServerHardwareURI)
	if err != nil {
		return err
	}
	if d.Hardware.URI.IsNil() {
		return fmt.Errorf("Attempting to get machine blade information, unable to find machine: %s", d.MachineName)
	}

	// get an icsp server
	if d.Hardware.VirtualSerialNumber.IsNil() {
		// get the server profile with the SerialNumber
		d.Server, err = d.ClientICSP.GetServerBySerialNumber(d.Hardware.SerialNumber.String())
	} else {
		// get the server profile with the VirtualSerialNumber
		d.Server, err = d.ClientICSP.GetServerBySerialNumber(d.Hardware.VirtualSerialNumber.String())
	}
	return err
}