func (provisioner *WindowsProvisioner) enableBasicAuthForWinRM() bool {
	log.Debug("Enabling basic auth for WinRM")
	ip, err := provisioner.Driver.GetIP()
	if err != nil {
		return false
	}

	d := provisioner.Driver
	stdout, stderr, exit, err := drivers.WinRMRunCmdWithNTLM(ip, d.GetSSHUsername(), d.GetSSHPassword(), enableBasicAuthCmd)
	if (err != nil) || (exit != 0) {
		log.Warn("Error running WinRM command to enable basic auth:", err)
		log.Debug("Enable basic auth output:", stdout, ", err:", stderr, ", exit:", exit)
		return false
	}

	stdout, stderr, exit, err = drivers.WinRMRunCmdWithNTLM(ip, d.GetSSHUsername(), d.GetSSHPassword(), enableUnencryptedCmd)
	if (err != nil) || (exit != 0) {
		log.Warn("Error running WinRM command to enable unencrypted comm:", err)
		log.Debug("Enable unencrypted comm output:", stdout, ", err:", stderr, ", exit:", exit)
		return false
	}

	log.Debug("Successfully enabled basic auth for WinRM")
	return true
}
func MigrateHostV2ToHostV3(hostV2 *HostV2, data []byte, storePath string) *Host {
	// Migrate to include RawDriver so that driver plugin will work
	// smoothly.
	rawHost := &RawHost{}
	if err := json.Unmarshal(data, &rawHost); err != nil {
		log.Warnf("Could not unmarshal raw host for RawDriver information: %s", err)
	}

	m := make(map[string]interface{})

	// Must migrate to include the store path in the driver since it was not
	// previously stored in drivers directly.
	if err := json.Unmarshal(*rawHost.Driver, &m); err != nil {
		log.Warnf("Could not unmarshal raw host into map[string]interface{}: %s", err)
	}

	m["StorePath"] = storePath

	// Now back to []byte.
	rawDriver, err := json.Marshal(m)
	if err != nil {
		log.Warnf("Could not re-marshal raw driver: %s", err)
	}

	h := &Host{
		ConfigVersion: 2,
		DriverName:    hostV2.DriverName,
		Name:          hostV2.Name,
		HostOptions:   hostV2.HostOptions,
		RawDriver:     rawDriver,
	}

	return h
}
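// A minimal, self-contained sketch of the technique used above: unmarshal raw driver
// JSON into a map, inject a key the older format lacked, and marshal it back. The
// function name, driver JSON, and StorePath value here are hypothetical illustrations,
// not part of this package; only encoding/json is assumed.
func exampleInjectStorePath() ([]byte, error) {
	rawDriver := []byte(`{"MachineName":"default","MemSize":1024}`)

	var m map[string]interface{}
	if err := json.Unmarshal(rawDriver, &m); err != nil {
		return nil, err
	}

	// Add the field that older configs are missing.
	m["StorePath"] = "/home/user/.docker/machine"

	// Re-encode; the result now carries StorePath alongside the original fields.
	return json.Marshal(m)
}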
// Wait - wait on job task to complete
func (jt *JobTask) Wait() error {
	var currenttime int

	log.Debugf("task : %+v", jt)
	if err := jt.GetCurrentStatus(); err != nil {
		jt.IsDone = true
		return err
	}

	for JOB_RUNNING_YES.Equal(jt.Running) && (currenttime < jt.Timeout) {
		log.Debugf("jt => %+v", jt)
		if jt.JobURI.URI.String() != "" {
			log.Debugf("Waiting for job to complete, %s ", jt.Description)
			lastjobstep := len(jt.JobProgress)
			if lastjobstep > 0 {
				statusmessage := jt.GetLastStatusUpdate()
				if statusmessage == "" {
					log.Infof("Waiting on, %s, %0.0f%%", jt.Description, jt.GetPercentProgress())
				} else {
					log.Infof("Waiting on, %s, %0.0f%%, %s", jt.Description, jt.GetPercentProgress(), statusmessage)
				}
			}
		} else {
			log.Info("Waiting on job creation.")
		}

		// Wait WaitTime seconds before checking the status again.
		time.Sleep(time.Millisecond * time.Duration(1000*jt.WaitTime))
		currenttime++

		// Get the current status.
		if err := jt.GetCurrentStatus(); err != nil {
			jt.IsDone = true
			return err
		}
	}

	if !(currenttime < jt.Timeout) {
		log.Warn("Task timed out.")
	}

	if JOB_RUNNING_NO.Equal(jt.Running) {
		log.Infof("Job, %s, completed", jt.GetComplettedStatus())
	} else {
		log.Warn("Job is still running; this is unexpected.")
	}

	jt.IsDone = true
	return nil
}
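// The loop above is a poll-with-timeout pattern: check, sleep, count attempts, and
// give up after Timeout checks. A minimal, self-contained sketch of the same idea;
// pollOnce, maxChecks, and interval are illustrative names, not part of this package,
// and only the errors and time standard-library packages are assumed.
func waitForDone(pollOnce func() (bool, error), maxChecks int, interval time.Duration) error {
	for i := 0; i < maxChecks; i++ {
		done, err := pollOnce()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for job to complete")
}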
// isLatest checks the latest release tag and
// reports whether the local ISO cache is the latest version.
//
// It returns false if it fails to get the local ISO version
// and true if it fails to fetch the latest release tag.
func (b *B2dUtils) isLatest() bool {
	localVer, err := b.version()
	if err != nil {
		log.Warn("Unable to get the local Boot2Docker ISO version: ", err)
		return false
	}

	latestVer, err := b.getReleaseTag("")
	if err != nil {
		log.Warn("Unable to get the latest Boot2Docker ISO release version: ", err)
		return true
	}

	return localVer == latestVer
}
func (c *I3SClient) DeleteOSBuildPlan(name string) error {
	var (
		osBuildPlan OSBuildPlan
		err         error
		uri         string
	)

	osBuildPlan, err = c.GetOSBuildPlanByName(name)
	if err != nil {
		return err
	}
	if osBuildPlan.Name != "" {
		log.Debugf("REST : %s \n %+v\n", osBuildPlan.URI, osBuildPlan)
		uri = osBuildPlan.URI.String()
		if uri == "" {
			log.Warn("Unable to post delete, no uri found.")
			return err
		}
		_, err := c.RestAPICall(rest.DELETE, uri, nil)
		if err != nil {
			log.Errorf("Error submitting delete os build plan request: %s", err)
			return err
		}
		return nil
	}
	log.Infof("OS Build Plan could not be found to delete, %s, skipping delete ...", name)
	return nil
}
func getOrCreateHostOnlyNetwork(hostIP net.IP, netmask net.IPMask, nets map[string]*hostOnlyNetwork, vbox VBoxManager) (*hostOnlyNetwork, error) {
	// Search for an existing host-only adapter.
	hostOnlyAdapter := getHostOnlyAdapter(nets, hostIP, netmask)
	if hostOnlyAdapter != nil {
		return hostOnlyAdapter, nil
	}

	// No existing host-only adapter found. Create a new one.
	_, err := createHostonlyAdapter(vbox)
	if err != nil {
		// Sometimes the host-only adapter fails to create. See https://www.virtualbox.org/ticket/14040
		// BUT, it is created in fact! So let's wait until it appears last in the list.
		log.Warnf("Creating a new host-only adapter produced an error: %s", err)
		log.Warn("This is a known VirtualBox bug. Let's try to recover anyway...")
	}

	// It can take some time for an adapter to appear. Let's poll.
	hostOnlyAdapter, err = waitForNewHostOnlyNetwork(nets, vbox)
	if err != nil {
		// Sometimes, Vbox says it created it but then it cannot be found...
		return nil, errNewHostOnlyAdapterNotVisible
	}

	log.Warnf("Found a new host-only adapter: %q", hostOnlyAdapter.Name)

	hostOnlyAdapter.IPv4.IP = hostIP
	hostOnlyAdapter.IPv4.Mask = netmask
	if err := hostOnlyAdapter.Save(vbox); err != nil {
		return nil, err
	}

	return hostOnlyAdapter, nil
}
func (provisioner *WindowsProvisioner) checkDockerVersion() bool {
	log.Debug("Checking docker version (secure)")
	ip, err := provisioner.Driver.GetIP()
	if err != nil {
		return false
	}

	dockerVersionSecureCmd := "docker -H tcp://127.0.0.1:2376 --tlsverify=false " +
		" --tlscacert=" + provisioner.AuthOptions.CaCertRemotePath +
		" --tlscert=" + provisioner.AuthOptions.ServerCertRemotePath +
		" --tlskey=" + provisioner.AuthOptions.ServerKeyRemotePath +
		" version"

	d := provisioner.Driver
	out, err, exit := drivers.WinRMRunCmd(ip, d.GetSSHUsername(), d.GetSSHPassword(), dockerVersionSecureCmd)
	if (err != nil) || (exit != 0) {
		log.Warn("Error running WinRM command to get docker version (secure),",
			"out:", out, ", err:", err, ", exit:", exit)
		return false
	}

	return strings.Contains(out, expectedDockerBuiltDate)
}
// SubmitDeleteProfile - submit a delete request for a server profile
func (c *OVClient) SubmitDeleteProfile(p ServerProfile) (t *Task, err error) {
	var (
		uri = p.URI.String()
		// task = rest_api(:oneview, :post, '/rest/server-profiles', { 'body' => new_template_profile })
	)

	t = t.NewProfileTask(c)
	t.ResetTask()
	log.Debugf("REST : %s \n %+v\n", uri, p)
	log.Debugf("task -> %+v", t)
	if uri == "" {
		log.Warn("Unable to post delete, no uri found.")
		t.TaskIsDone = true
		return t, err
	}

	data, err := c.RestAPICall(rest.DELETE, uri, nil)
	if err != nil {
		log.Errorf("Error submitting delete profile request: %s", err)
		t.TaskIsDone = true
		return t, err
	}

	log.Debugf("Response delete profile %s", data)
	if err := json.Unmarshal([]byte(data), &t); err != nil {
		t.TaskIsDone = true
		log.Errorf("Error with task un-marshal: %s", err)
		return t, err
	}

	return t, err
}
func (c *I3SClient) DeletePlanScript(name string) error {
	var (
		planScript PlanScript
		err        error
		uri        string
	)

	planScript, err = c.GetPlanScriptByName(name)
	if err != nil {
		return err
	}
	if planScript.Name != "" {
		log.Debugf("REST : %s \n %+v\n", planScript.URI, planScript)
		uri = planScript.URI.String()
		if uri == "" {
			log.Warn("Unable to post delete, no uri found.")
			return err
		}
		_, err := c.RestAPICall(rest.DELETE, uri, nil)
		if err != nil {
			log.Errorf("Error submitting delete plan script request: %s", err)
			return err
		}
	} else {
		log.Infof("Plan script could not be found to delete, %s, skipping delete ...", name)
	}
	return nil
}
func DumpVal(vals ...interface{}) {
	for _, val := range vals {
		prettyJSON, err := json.MarshalIndent(val, "", " ")
		if err != nil {
			log.Warn(err)
		}
		log.Debug(string(prettyJSON))
	}
}
func (provisioner *RedHatProvisioner) dockerDaemonResponding() bool {
	if _, err := provisioner.SSHCommand("sudo docker version"); err != nil {
		log.Warnf("Error getting SSH command to check if the daemon is up: %s", err)
		return false
	}

	// The daemon is up if the command worked. Carry on.
	return true
}
func generateId() string {
	rb := make([]byte, 10)
	_, err := rand.Read(rb)
	if err != nil {
		log.Warnf("Unable to generate id: %s", err)
	}

	h := md5.New()
	io.WriteString(h, string(rb))
	return fmt.Sprintf("%x", h.Sum(nil))
}
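// A tiny usage sketch for generateId above: the result is the hex-encoded MD5 of ten
// random bytes, i.e. a 32-character string. The function name and printed value below
// are illustrative only.
func exampleGenerateId() {
	id := generateId()
	fmt.Println(len(id), id) // 32 and something like "9b2d1c7f0a4e5b6c8d3f2a1b0c9d8e7f"
}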
// IsTestEnabled - is the test case enabled? defaults are always true
func (tc *TestConfig) IsTestEnabled(tcName string) bool {
	if t := tc.GetTestCases(tcName); t.Name == tcName {
		return t.Enabled
	}
	if d := tc.GetTestCases("default"); d.Name != "" {
		return d.Enabled
	}
	log.Infof("tc no name -> %+v", tc.GetTestCases("foo"))
	log.Warn("Test config is using default true for enablement.")
	return true
}
func (provisioner *WindowsProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error {
	provisioner.AuthOptions = authOptions

	log.Debug("Waiting to enable basic auth for WinRM")
	if err := mcnutils.WaitForSpecific(provisioner.enableBasicAuthForWinRM, 60, 3*time.Second); err != nil {
		log.Warn("failed to enable basic auth: ", err)
		return err
	}

	log.Debug("Waiting for docker daemon")
	if err := mcnutils.WaitForSpecific(provisioner.dockerDaemonResponding, 60, 3*time.Second); err != nil {
		log.Warn("failed to get docker daemon status: ", err)
		return err
	}

	log.Debug("Updating docker on host")
	if err := mcnutils.WaitForSpecific(provisioner.updateDocker, 60, 3*time.Second); err != nil {
		log.Warn("failed to update docker daemon: ", err)
		return err
	}

	provisioner.AuthOptions = setRemoteAuthOptions(provisioner)
	if err := ConfigureAuth(provisioner); err != nil {
		return err
	}

	log.Debug("Opening docker port on host")
	if err := mcnutils.WaitForSpecific(provisioner.openDockerPortOnHost, 60, 3*time.Second); err != nil {
		log.Warn("failed to open docker port on host: ", err)
		return err
	}

	log.Debug("Checking that docker version is as expected")
	if err := mcnutils.WaitForSpecific(provisioner.checkDockerVersion, 60, 3*time.Second); err != nil {
		log.Warn("failed to check docker version: ", err)
		return err
	}

	return nil
}
// SaveIPv4 changes the IPv4 configuration of the host-only network.
func (n *hostOnlyNetwork) SaveIPv4(vbox VBoxManager) error {
	if n.IPv4.IP != nil && n.IPv4.Mask != nil {
		if runtime.GOOS == "windows" {
			log.Warn("Windows might ask for the permission to configure a network adapter. Sometimes, such confirmation window is minimized in the taskbar.")
		}

		if err := vbox.vbm("hostonlyif", "ipconfig", n.Name, "--ip", n.IPv4.IP.String(), "--netmask", net.IP(n.IPv4.Mask).String()); err != nil {
			return err
		}
	}

	return nil
}
// extractStorageAccountFromVHDURL parses a blob URL and extracts the Azure
// Storage account name (the first subdomain of the hostname) and the Azure
// Storage service base URL (e.g. core.windows.net). If the URL cannot be
// parsed, it returns empty strings.
func extractStorageAccountFromVHDURL(vhdURL string) (string, string) {
	u, err := url.Parse(vhdURL)
	if err != nil {
		log.Warn(fmt.Sprintf("URL parse error: %v", err), logutil.Fields{"url": vhdURL})
		return "", ""
	}

	parts := strings.SplitN(u.Host, ".", 2)
	if len(parts) != 2 {
		log.Warnf("Could not split account name and storage base URL: %s", vhdURL)
		return "", ""
	}

	return parts[0], strings.TrimPrefix(parts[1], "blob.") // the "blob." prefix will be added back by the Azure storage SDK
}
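// A minimal usage sketch of the function above; the function name, VHD URL, and
// expected results are hypothetical illustrations.
func exampleExtractStorageAccount() {
	account, base := extractStorageAccountFromVHDURL("https://examplestorage.blob.core.windows.net/vhds/osdisk.vhd")
	fmt.Println(account) // "examplestorage"
	fmt.Println(base)    // "core.windows.net" (the "blob." prefix is stripped)
}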
func matchesState(host *host.Host, states []string) bool {
	if len(states) == 0 {
		return true
	}
	for _, n := range states {
		s, err := host.Driver.GetState()
		if err != nil {
			log.Warn(err)
		}
		if strings.EqualFold(n, s.String()) {
			return true
		}
	}
	return false
}
func (osr *OsRelease) ParseOsRelease(osReleaseContents []byte) error {
	r := bytes.NewReader(osReleaseContents)
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		key, val, err := parseLine(scanner.Text())
		if err != nil {
			log.Warnf("Got an invalid line error parsing /etc/os-release: %s", err)
			continue
		}
		if err := osr.setIfPossible(key, val); err != nil {
			log.Debug(err)
		}
	}
	return nil
}
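// parseLine is not shown here. For illustration, parsing a single /etc/os-release line
// typically means splitting on the first "=" and unquoting the value; parseExampleLine
// below is a hypothetical sketch of that idea, not the real parseLine, and assumes only
// the strings, strconv, and fmt standard-library packages.
func parseExampleLine(line string) (key string, val string, err error) {
	parts := strings.SplitN(line, "=", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("expected KEY=value, got %q", line)
	}
	key = parts[0]
	val = parts[1]

	// Values are often quoted, e.g. PRETTY_NAME="Ubuntu 22.04 LTS".
	if unquoted, uerr := strconv.Unquote(val); uerr == nil {
		val = unquoted
	}
	return key, val, nil
}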
func convertMcnFlagsToCliFlags(mcnFlags []mcnflag.Flag) ([]cli.Flag, error) {
	cliFlags := []cli.Flag{}

	for _, f := range mcnFlags {
		switch t := f.(type) {
		// TODO: It seems pretty wrong to just default "nil" to this,
		// but cli.BoolFlag doesn't have a "Value" field (false is
		// always the default).
		case *mcnflag.BoolFlag:
			cliFlags = append(cliFlags, cli.BoolFlag{
				Name:   t.Name,
				EnvVar: t.EnvVar,
				Usage:  t.Usage,
			})
		case *mcnflag.IntFlag:
			cliFlags = append(cliFlags, cli.IntFlag{
				Name:   t.Name,
				EnvVar: t.EnvVar,
				Usage:  t.Usage,
				Value:  t.Value,
			})
		case *mcnflag.StringFlag:
			cliFlags = append(cliFlags, cli.StringFlag{
				Name:   t.Name,
				EnvVar: t.EnvVar,
				Usage:  t.Usage,
				Value:  t.Value,
			})
		case *mcnflag.StringSliceFlag:
			cliFlags = append(cliFlags, cli.StringSliceFlag{
				Name:   t.Name,
				EnvVar: t.EnvVar,
				Usage:  t.Usage,

				// TODO: Is this used with defaults? Can we convert the
				// literal []string to cli.StringSlice properly?
				Value: &cli.StringSlice{},
			})
		default:
			log.Warn("Flag is ", f)
			return nil, fmt.Errorf("Flag is unrecognized flag type: %T", t)
		}
	}

	return cliFlags, nil
}
// PreCreateCheck checks that VBoxManage exists and works.
func (d *Driver) PreCreateCheck() error {
	// Check that VBoxManage exists and works.
	if err := d.vbm(); err != nil {
		return err
	}

	if d.IsVTXDisabled() {
		// Log a warning to inform the user. When the VM is started, the logs
		// will be checked for an error anyway.
		// We could fail right here, but the detection method hasn't proven
		// to be bulletproof.
		log.Warn("This computer doesn't have VT-X/AMD-v enabled. Enabling it in the BIOS is mandatory.")
	}

	return nil
}
func (c *I3SClient) DeleteGoldenImage(name string) error {
	var (
		goldenImage GoldenImage
		err         error
		t           *Task
		uri         string
	)

	goldenImage, err = c.GetGoldenImageByName(name)
	if err != nil {
		return err
	}
	if goldenImage.Name != "" {
		t = t.NewTask(c)
		t.ResetTask()
		log.Debugf("REST : %s \n %+v\n", goldenImage.URI, goldenImage)
		log.Debugf("task -> %+v", t)
		uri = goldenImage.URI.String()
		if uri == "" {
			log.Warn("Unable to post delete, no uri found.")
			t.TaskIsDone = true
			return err
		}
		data, err := c.RestAPICall(rest.DELETE, uri, nil)
		if err != nil {
			log.Errorf("Error submitting delete golden image request: %s", err)
			t.TaskIsDone = true
			return err
		}
		log.Debugf("Response delete golden image %s", data)
		if err := json.Unmarshal([]byte(data), &t); err != nil {
			t.TaskIsDone = true
			log.Errorf("Error with task un-marshal: %s", err)
			return err
		}
		return t.Wait()
	}
	log.Infof("GoldenImage could not be found to delete, %s, skipping delete ...", name)
	return nil
}
func (c *OVClient) DeleteStorageVolume(name string) error {
	var (
		sVol StorageVolumeV3
		err  error
		t    *Task
		uri  string
	)

	sVol, err = c.GetStorageVolumeByName(name)
	if err != nil {
		return err
	}
	if sVol.Name != "" {
		t = t.NewProfileTask(c)
		t.ResetTask()
		log.Debugf("REST : %s \n %+v\n", sVol.URI, sVol)
		log.Debugf("task -> %+v", t)
		uri = sVol.URI.String()
		if uri == "" {
			log.Warn("Unable to post delete, no uri found.")
			t.TaskIsDone = true
			return err
		}
		data, err := c.RestAPICall(rest.DELETE, uri, nil)
		if err != nil {
			log.Errorf("Error submitting delete storage volume request: %s", err)
			t.TaskIsDone = true
			return err
		}
		log.Debugf("Response delete storage volume %s", data)
		if err := json.Unmarshal([]byte(data), &t); err != nil {
			t.TaskIsDone = true
			log.Errorf("Error with task un-marshal: %s", err)
			return err
		}
		return t.Wait()
	}
	log.Infof("StorageVolume could not be found to delete, %s, skipping delete ...", name)
	return nil
}
func (provisioner *WindowsProvisioner) openDockerPortOnHost() bool {
	ip, err := provisioner.Driver.GetIP()
	if err != nil {
		return false
	}

	d := provisioner.Driver
	out, err, exit := drivers.WinRMRunCmd(ip, d.GetSSHUsername(), d.GetSSHPassword(), openPortCmd)
	if (err != nil) || (exit != 0) {
		log.Warn("failed to enable firewall rule for docker:", out, ", err:", err, ", exit:", exit)
		return false
	}

	return true
}
func (c *OVClient) DeleteFCoENetwork(name string) error {
	var (
		fcoeNet FCoENetwork
		err     error
		t       *Task
		uri     string
	)

	fcoeNet, err = c.GetFCoENetworkByName(name)
	if err != nil {
		return err
	}
	if fcoeNet.Name != "" {
		t = t.NewProfileTask(c)
		t.ResetTask()
		log.Debugf("REST : %s \n %+v\n", fcoeNet.URI, fcoeNet)
		log.Debugf("task -> %+v", t)
		uri = fcoeNet.URI.String()
		if uri == "" {
			log.Warn("Unable to post delete, no uri found.")
			t.TaskIsDone = true
			return err
		}
		data, err := c.RestAPICall(rest.DELETE, uri, nil)
		if err != nil {
			log.Errorf("Error submitting delete fcoe network request: %s", err)
			t.TaskIsDone = true
			return err
		}
		log.Debugf("Response delete fcoe network %s", data)
		if err := json.Unmarshal([]byte(data), &t); err != nil {
			t.TaskIsDone = true
			log.Errorf("Error with task un-marshal: %s", err)
			return err
		}
		return t.Wait()
	}
	log.Infof("fcoeNetwork could not be found to delete, %s, skipping delete ...", name)
	return nil
}
// createHostonlyAdapter creates a new host-only network.
func createHostonlyAdapter(vbox VBoxManager) (*hostOnlyNetwork, error) {
	if runtime.GOOS == "windows" {
		log.Warn("Windows might ask for the permission to create a network adapter. Sometimes, such confirmation window is minimized in the taskbar.")
	}

	out, err := vbox.vbmOut("hostonlyif", "create")
	if err != nil {
		return nil, err
	}

	res := reHostOnlyAdapterCreated.FindStringSubmatch(string(out))
	if res == nil {
		return nil, errors.New("Failed to create host-only adapter")
	}

	return &hostOnlyNetwork{Name: res[1]}, nil
}
func (c *OVClient) DeleteLogicalSwitchGroup(name string) error {
	var (
		logicalSwitchGroup LogicalSwitchGroup
		err                error
		t                  *Task
		uri                string
	)

	logicalSwitchGroup, err = c.GetLogicalSwitchGroupByName(name)
	if err != nil {
		return err
	}
	if logicalSwitchGroup.Name != "" {
		t = t.NewProfileTask(c)
		t.ResetTask()
		log.Debugf("REST : %s \n %+v\n", logicalSwitchGroup.URI, logicalSwitchGroup)
		log.Debugf("task -> %+v", t)
		uri = logicalSwitchGroup.URI.String()
		if uri == "" {
			log.Warn("Unable to post delete, no uri found.")
			t.TaskIsDone = true
			return err
		}
		data, err := c.RestAPICall(rest.DELETE, uri, nil)
		if err != nil {
			log.Errorf("Error submitting delete logicalSwitchGroup request: %s", err)
			t.TaskIsDone = true
			return err
		}
		log.Debugf("Response delete logicalSwitchGroup %s", data)
		if err := json.Unmarshal([]byte(data), &t); err != nil {
			t.TaskIsDone = true
			log.Errorf("Error with task un-marshal: %s", err)
			return err
		}
		return t.Wait()
	}
	log.Infof("LogicalSwitchGroup could not be found to delete, %s, skipping delete ...", name)
	return nil
}
func (a AzureClient) removeOSDiskBlob(resourceGroup, vmName, vhdURL string) error {
	// NOTE(ahmetalpbalkan) Currently Azure APIs do not offer a Delete Virtual
	// Machine functionality which deletes the attached disks along with the VM
	// as well. Therefore we find out the storage account from the OS disk URL and
	// fetch the storage account keys to delete the container containing the disk.
	log.Debug("Attempting to remove OS disk.", logutil.Fields{"vm": vmName})
	log.Debugf("OS Disk vhd URL: %q", vhdURL)

	vhdContainer := osDiskStorageContainerName(vmName)

	storageAccount, blobServiceBaseURL := extractStorageAccountFromVHDURL(vhdURL)
	if storageAccount == "" {
		log.Warn("Could not extract the storage account name from URL. Please clean up the disk yourself.")
		return nil
	}

	log.Debug("Fetching storage account keys.", logutil.Fields{
		"account":     storageAccount,
		"storageBase": blobServiceBaseURL,
	})
	resp, err := a.storageAccountsClient().ListKeys(resourceGroup, storageAccount)
	if err != nil {
		return err
	}
	if resp.Keys == nil || len(*resp.Keys) < 1 {
		return errors.New("Returned storage keys list response does not contain any keys")
	}

	storageAccountKey := to.String(((*resp.Keys)[0]).Value)
	bs, err := blobstorage.NewClient(storageAccount, storageAccountKey, blobServiceBaseURL, defaultStorageAPIVersion, true)
	if err != nil {
		return fmt.Errorf("Error constructing blob storage client: %v", err)
	}

	f := logutil.Fields{
		"account":   storageAccount,
		"container": vhdContainer}
	log.Debug("Removing container of disk blobs.", f)
	ok, err := bs.GetBlobService().DeleteContainerIfExists(vhdContainer) // HTTP round-trip will not be inspected
	if err != nil {
		log.Debugf("Container remove happened: %v", ok)
	}
	return err
}
// powerStateFromInstanceView reads the instance view response and extracts the
// power state status (if it exists) from there. If no status is found or an
// unknown status has occurred, it returns Unknown.
func powerStateFromInstanceView(instanceView *compute.VirtualMachineInstanceView) VMPowerState {
	if instanceView == nil {
		log.Debug("Retrieved nil instance view.")
		return Unknown
	} else if instanceView.Statuses == nil || len(*instanceView.Statuses) == 0 {
		log.Debug("Retrieved nil or empty instanceView.statuses.")
		return Unknown
	}
	statuses := *instanceView.Statuses

	// Filter statuses whose "code" starts with "PowerState/".
	var s *compute.InstanceViewStatus
	for _, v := range statuses {
		log.Debugf("Matching pattern for code=%q", to.String(v.Code))
		if strings.HasPrefix(to.String(v.Code), powerStateCodePrefix) {
			log.Debug("Power state found.")
			s = &v
			break
		}
	}
	if s == nil {
		log.Debug("No PowerState found in the instance view statuses.")
		return Unknown
	}

	code := strings.TrimPrefix(to.String(s.Code), powerStateCodePrefix)
	switch code {
	case "stopped":
		return Stopped
	case "stopping":
		return Stopping
	case "starting":
		return Starting
	case "running":
		return Running
	case "deallocated":
		return Deallocated
	case "deallocating":
		return Deallocating
	default:
		log.Warnf("Encountered unknown PowerState for virtual machine: %q", code)
		return Unknown
	}
}
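// A minimal, self-contained sketch of the code-to-state mapping above; the function
// name, constant, and status code value are illustrative (the constant mirrors
// powerStateCodePrefix), and only the strings and fmt packages are assumed.
func examplePowerStateCode() {
	const powerStatePrefix = "PowerState/" // illustrative stand-in for powerStateCodePrefix
	code := "PowerState/running"

	if strings.HasPrefix(code, powerStatePrefix) {
		state := strings.TrimPrefix(code, powerStatePrefix)
		fmt.Println(state) // "running", which maps to the Running VMPowerState
	}
}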
// addHostOnlyDHCPServer adds a DHCP server to a host-only network.
func addHostOnlyDHCPServer(ifname string, d dhcpServer, vbox VBoxManager) error {
	name := dhcpPrefix + ifname

	dhcps, err := listDHCPServers(vbox)
	if err != nil {
		return err
	}

	// On some platforms (OSX), creating a host-only adapter adds a default dhcpserver,
	// while on others (Windows?) it does not.
	command := "add"
	if dhcp, ok := dhcps[name]; ok {
		command = "modify"
		if (dhcp.IPv4.IP.Equal(d.IPv4.IP)) &&
			(dhcp.IPv4.Mask.String() == d.IPv4.Mask.String()) &&
			(dhcp.LowerIP.Equal(d.LowerIP)) &&
			(dhcp.UpperIP.Equal(d.UpperIP)) &&
			(dhcp.Enabled == d.Enabled) {
			// The DHCP server is already up to date.
			return nil
		}
	}

	args := []string{"dhcpserver", command,
		"--netname", name,
		"--ip", d.IPv4.IP.String(),
		"--netmask", net.IP(d.IPv4.Mask).String(),
		"--lowerip", d.LowerIP.String(),
		"--upperip", d.UpperIP.String(),
	}
	if d.Enabled {
		args = append(args, "--enable")
	} else {
		args = append(args, "--disable")
	}

	if runtime.GOOS == "windows" {
		log.Warn("Windows might ask for the permission to configure a dhcp server. Sometimes, such confirmation window is minimized in the taskbar.")
	}

	return vbox.vbm(args...)
}
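// For illustration only: assuming a host-only interface "vboxnet0", a dhcpPrefix of
// "HostInterfaceNetworking-" (an assumption; use whatever dhcpPrefix is defined as in
// this package), and typical addresses, the args built above correspond roughly to:
//
//	VBoxManage dhcpserver add --netname HostInterfaceNetworking-vboxnet0 \
//		--ip 192.168.99.1 --netmask 255.255.255.0 \
//		--lowerip 192.168.99.100 --upperip 192.168.99.254 --enable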
func (provisioner *WindowsProvisioner) dockerDaemonResponding() bool {
	log.Debug("Checking docker daemon")
	ip, err := provisioner.Driver.GetIP()
	if err != nil {
		return false
	}

	d := provisioner.Driver
	out, err, exit := drivers.WinRMRunCmd(ip, d.GetSSHUsername(), d.GetSSHPassword(), dockerVersionCmd)
	if (err != nil) || (exit != 0) {
		log.Warn("Error running WinRM command to check if the daemon is up,",
			"out:", out, ", err:", err, ", exit:", exit)
		return false
	}

	// The daemon is up if the command worked. Carry on.
	return true
}