// windowsInstallChefClient uploads a generated PowerShell script to the new
// instance and runs it to install Chef Client.
func (p *Provisioner) windowsInstallChefClient(
	o terraform.UIOutput,
	comm communicator.Communicator) error {
	script := path.Join(path.Dir(comm.ScriptPath()), "ChefClient.ps1")
	content := fmt.Sprintf(installScript, p.Version, p.HTTPProxy, strings.Join(p.NOProxy, ","))

	// Copy the script to the new instance
	if err := comm.UploadScript(script, strings.NewReader(content)); err != nil {
		return fmt.Errorf("Uploading client.rb failed: %v", err)
	}

	// Execute the script to install Chef Client
	installCmd := fmt.Sprintf("powershell -NoProfile -ExecutionPolicy Bypass -File %s", script)
	return p.runCommand(o, comm, installCmd)
}
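
// installScript is referenced above but defined elsewhere in the package and
// is not shown in this excerpt. The fmt.Sprintf call above only requires a
// PowerShell template with three %s verbs, filled in order with the Chef
// version, the HTTP proxy, and the comma-separated no_proxy list. A
// hypothetical fragment of that shape (illustrative only):
//
//	const installScript = `
//	$chefVersion    = "%s"
//	$env:http_proxy = "%s"
//	$env:no_proxy   = "%s"
//	# ...download and run the Chef Client installer for $chefVersion...
//	`
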
// runCommand is used to run already prepared commands
func (p *Provisioner) runCommand(
	o terraform.UIOutput,
	comm communicator.Communicator,
	command string) error {
	var err error

	// Unless prevented, prefix the command with sudo
	if p.useSudo {
		command = "sudo " + command
	}

	outR, outW := io.Pipe()
	errR, errW := io.Pipe()
	outDoneCh := make(chan struct{})
	errDoneCh := make(chan struct{})
	go p.copyOutput(o, outR, outDoneCh)
	go p.copyOutput(o, errR, errDoneCh)

	cmd := &remote.Cmd{
		Command: command,
		Stdout:  outW,
		Stderr:  errW,
	}

	if err := comm.Start(cmd); err != nil {
		return fmt.Errorf("Error executing command %q: %v", cmd.Command, err)
	}

	cmd.Wait()
	if cmd.ExitStatus != 0 {
		err = fmt.Errorf(
			"Command %q exited with non-zero exit status: %d", cmd.Command, cmd.ExitStatus)
	}

	// Wait for output to clean up
	outW.Close()
	errW.Close()
	<-outDoneCh
	<-errDoneCh

	// If we have an error, return it out now that we've cleaned up
	if err != nil {
		return err
	}

	return nil
}
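
// copyOutput is called by runCommand above but is not included in this
// excerpt. A minimal sketch, assuming it streams each line read from r to the
// UI output and closes doneCh once the writer side of the pipe is closed
// (requires "bufio" in the import block; the real implementation may differ):
func (p *Provisioner) copyOutput(o terraform.UIOutput, r io.Reader, doneCh chan<- struct{}) {
	// Signal the caller that all output has been consumed
	defer close(doneCh)

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		o.Output(scanner.Text())
	}
}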

// deployOhaiHints uploads the configured Ohai hint files to hintDir on the
// new instance.
func (p *Provisioner) deployOhaiHints(
	o terraform.UIOutput,
	comm communicator.Communicator,
	hintDir string) error {
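	// Ohai hints are small JSON files (often just "{}") that tell Ohai which
	// optional plugins to run on the node; p.OhaiHints holds local paths to
	// such files, which are uploaded verbatim into hintDir.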
	for _, hint := range p.OhaiHints {
		// Open the hint file
		f, err := os.Open(hint)
		if err != nil {
			return err
		}
		defer f.Close()

		// Copy the hint to the new instance
		if err := comm.Upload(path.Join(hintDir, path.Base(hint)), f); err != nil {
			return fmt.Errorf("Uploading %s failed: %v", path.Base(hint), err)
		}
	}

	return nil
}

// runCommand is used to run already prepared commands
func (p *Provisioner) runCommand(
	o terraform.UIOutput,
	comm communicator.Communicator,
	command string) error {

	var err error

	// Unless prevented, prefix the command with sudo
	if p.useSudo {
		command = "sudo " + command
	}

	outR, outW := io.Pipe()
	errR, errW := io.Pipe()
	outDoneCh := make(chan struct{})
	errDoneCh := make(chan struct{})

	go p.copyOutput(o, outR, outDoneCh)
	go p.copyOutput(o, errR, errDoneCh)

	cmd := &remote.Cmd{
		Command: command,
		Stdout:  outW,
		Stderr:  errW,
	}

	if err := comm.Start(cmd); err != nil {
		return fmt.Errorf("Error executing command %q: %v", cmd.Command, err)
	}
	cmd.Wait()
	if cmd.ExitStatus != 0 {
		err = fmt.Errorf(
			"Command %q exited with non-zero exit status: %d", cmd.Command, cmd.ExitStatus)
	}

	outW.Close()
	errW.Close()
	<-outDoneCh
	<-errDoneCh

	return err
}
// copyFiles is used to copy the files from a source to a destination
func (p *ResourceProvisioner) copyFiles(comm communicator.Communicator, src, dst string) error {
	// Wait and retry until we establish the connection
	err := retryFunc(comm.Timeout(), func() error {
		err := comm.Connect(nil)
		return err
	})
	if err != nil {
		return err
	}
	defer comm.Disconnect()

	info, err := os.Stat(src)
	if err != nil {
		return err
	}

	// If we're uploading a directory, short circuit and do that
	if info.IsDir() {
		if err := comm.UploadDir(dst, src); err != nil {
			return fmt.Errorf("Upload failed: %v", err)
		}
		return nil
	}

	// We're uploading a file...
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()

	if err := comm.Upload(dst, f); err != nil {
		return fmt.Errorf("Upload failed: %v", err)
	}
	return nil
}
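
// retryFunc is used by copyFiles above (and runScripts below) but is not
// included in this excerpt. A minimal sketch, assuming it simply retries f
// every few seconds until it succeeds or the timeout elapses (requires "time"
// in the import block; the real implementation may differ):
func retryFunc(timeout time.Duration, f func() error) error {
	finish := time.After(timeout)
	for {
		err := f()
		if err == nil {
			return nil
		}
		log.Printf("Retryable error: %v", err)

		select {
		case <-finish:
			// Give up and surface the last error once the timeout elapses
			return err
		case <-time.After(3 * time.Second):
			// Retry after a short pause
		}
	}
}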

// deployConfigFiles uploads the validation key, the optional encrypted data
// bag secret, the rendered client.rb, and first-boot.json to confDir on the
// new instance.
func (p *Provisioner) deployConfigFiles(
	o terraform.UIOutput,
	comm communicator.Communicator,
	confDir string) error {
	// Open the validation key file
	f, err := os.Open(p.ValidationKeyPath)
	if err != nil {
		return err
	}
	defer f.Close()

	// Copy the validation key to the new instance
	if err := comm.Upload(path.Join(confDir, validationKey), f); err != nil {
		return fmt.Errorf("Uploading %s failed: %v", validationKey, err)
	}

	if p.SecretKeyPath != "" {
		// Open the secret key file
		s, err := os.Open(p.SecretKeyPath)
		if err != nil {
			return err
		}
		defer s.Close()

		// Copy the secret key to the new instance
		if err := comm.Upload(path.Join(confDir, secretKey), s); err != nil {
			return fmt.Errorf("Uploading %s failed: %v", secretKey, err)
		}
	}

	// Make strings.Join available for use within the template
	funcMap := template.FuncMap{
		"join": strings.Join,
	}

	// Create a new template and parse the client config into it
	t := template.Must(template.New(clienrb).Funcs(funcMap).Parse(clientConf))

	var buf bytes.Buffer
	err = t.Execute(&buf, p)
	if err != nil {
		return fmt.Errorf("Error executing %s template: %s", clienrb, err)
	}

	// Copy the client config to the new instance
	if err := comm.Upload(path.Join(confDir, clienrb), &buf); err != nil {
		return fmt.Errorf("Uploading %s failed: %v", clienrb, err)
	}

	// Create a map with first boot settings
	fb := make(map[string]interface{})
	if p.Attributes != nil {
		fb = p.Attributes.(map[string]interface{})
	}

	// Check if the run_list was also in the attributes and if so log a warning
	// that it will be overwritten with the value of the run_list argument.
	if _, found := fb["run_list"]; found {
		log.Printf("[WARNING] Found a 'run_list' specified in the configured attributes! " +
			"This value will be overwritten by the value of the `run_list` argument!")
	}

	// Add the initial run_list to the first boot settings
	fb["run_list"] = p.RunList

	// Marshal the first boot settings to JSON
	d, err := json.Marshal(fb)
	if err != nil {
		return fmt.Errorf("Failed to create %s data: %s", firstBoot, err)
	}

	// Copy the first-boot.json to the new instance
	if err := comm.Upload(path.Join(confDir, firstBoot), bytes.NewReader(d)); err != nil {
		return fmt.Errorf("Uploading %s failed: %v", firstBoot, err)
	}

	return nil
}

// deployConfigFiles uploads the validation key, the optional encrypted data
// bag secret, the rendered client.rb, and first-boot.json to confDir on the
// new instance. In this variant the key material may be given either inline
// or as a path to a local file (resolved via pathorcontents).
func (p *Provisioner) deployConfigFiles(
	o terraform.UIOutput,
	comm communicator.Communicator,
	confDir string) error {
	contents, _, err := pathorcontents.Read(p.ValidationKey)
	if err != nil {
		return err
	}
	f := strings.NewReader(contents)

	// Copy the validation key to the new instance
	if err := comm.Upload(path.Join(confDir, validationKey), f); err != nil {
		return fmt.Errorf("Uploading %s failed: %v", validationKey, err)
	}

	if p.SecretKey != "" {
		contents, _, err := pathorcontents.Read(p.SecretKey)
		if err != nil {
			return err
		}
		s := strings.NewReader(contents)
		// Copy the secret key to the new instance
		if err := comm.Upload(path.Join(confDir, secretKey), s); err != nil {
			return fmt.Errorf("Uploading %s failed: %v", secretKey, err)
		}
	}

	// Make sure the SSLVerifyMode value is written as a symbol
	if p.SSLVerifyMode != "" && !strings.HasPrefix(p.SSLVerifyMode, ":") {
		p.SSLVerifyMode = fmt.Sprintf(":%s", p.SSLVerifyMode)
	}

	// Make strings.Join available for use within the template
	funcMap := template.FuncMap{
		"join": strings.Join,
	}

	// Create a new template and parse the client config into it
	t := template.Must(template.New(clienrb).Funcs(funcMap).Parse(clientConf))

	var buf bytes.Buffer
	err = t.Execute(&buf, p)
	if err != nil {
		return fmt.Errorf("Error executing %s template: %s", clienrb, err)
	}

	// Copy the client config to the new instance
	if err := comm.Upload(path.Join(confDir, clienrb), &buf); err != nil {
		return fmt.Errorf("Uploading %s failed: %v", clienrb, err)
	}

	// Create a map with first boot settings
	fb := make(map[string]interface{})
	if p.Attributes != nil {
		fb = p.Attributes.(map[string]interface{})
	}

	// Check if the run_list was also in the attributes and if so log a warning
	// that it will be overwritten with the value of the run_list argument.
	if _, found := fb["run_list"]; found {
		log.Printf("[WARNING] Found a 'run_list' specified in the configured attributes! " +
			"This value will be overwritten by the value of the `run_list` argument!")
	}

	// Add the initial runlist to the first boot settings
	if !p.UsePolicyfile {
		fb["run_list"] = p.RunList
	}

	// Marshal the first boot settings to JSON
	d, err := json.Marshal(fb)
	if err != nil {
		return fmt.Errorf("Failed to create %s data: %s", firstBoot, err)
	}

	// Copy the first-boot.json to the new instance
	if err := comm.Upload(path.Join(confDir, firstBoot), bytes.NewReader(d)); err != nil {
		return fmt.Errorf("Uploading %s failed: %v", firstBoot, err)
	}

	return nil
}
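
// The clientConf template parsed by both deployConfigFiles variants is defined
// elsewhere in the package. A hypothetical fragment, shown only to illustrate
// why strings.Join is exposed to the template as "join" (it renders
// list-valued fields as a single comma-separated string):
//
//	const clientConf = `
//	chef_server_url  "{{ .ServerURL }}"
//	node_name        "{{ .NodeName }}"
//	{{ if .NOProxy }}no_proxy "{{ join .NOProxy "," }}"{{ end }}
//	`
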
// runScripts is used to copy and execute a set of scripts
func (p *ResourceProvisioner) runScripts(
	o terraform.UIOutput,
	comm communicator.Communicator,
	scripts []io.ReadCloser) error {
	// Wait and retry until we establish the connection
	err := retryFunc(comm.Timeout(), func() error {
		err := comm.Connect(o)
		return err
	})
	if err != nil {
		return err
	}
	defer comm.Disconnect()

	for _, script := range scripts {
		var cmd *remote.Cmd
		outR, outW := io.Pipe()
		errR, errW := io.Pipe()
		outDoneCh := make(chan struct{})
		errDoneCh := make(chan struct{})
		go p.copyOutput(o, outR, outDoneCh)
		go p.copyOutput(o, errR, errDoneCh)

		err = retryFunc(comm.Timeout(), func() error {
			remotePath := comm.ScriptPath()

			if err := comm.UploadScript(remotePath, script); err != nil {
				return fmt.Errorf("Failed to upload script: %v", err)
			}

			cmd = &remote.Cmd{
				Command: remotePath,
				Stdout:  outW,
				Stderr:  errW,
			}
			if err := comm.Start(cmd); err != nil {
				return fmt.Errorf("Error starting script: %v", err)
			}

			// Upload a blank follow up file in the same path to prevent residual
			// script contents from remaining on remote machine
			empty := bytes.NewReader([]byte(""))
			if err := comm.Upload(remotePath, empty); err != nil {
				return fmt.Errorf("Failed to upload empty follow up script: %v", err)
			}

			return nil
		})
		if err == nil {
			cmd.Wait()
			if cmd.ExitStatus != 0 {
				err = fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus)
			}
		}

		// Wait for output to clean up
		outW.Close()
		errW.Close()
		<-outDoneCh
		<-errDoneCh

		// If we have an error, return it out now that we've cleaned up
		if err != nil {
			return err
		}
	}

	return nil
}

// Run bootstraps Python and Ansible on the target host, uploads the playbook
// directory, and executes the playbook through the remote ansible-local
// helper script.
func (p *Provisioner) Run(o terraform.UIOutput, comm communicator.Communicator) error {
	// resolve the playbook path and ensure that it is valid before doing
	// anything else. This is done in validate but is repeated here, just
	// in case.
	playbookPath, err := p.resolvePath(p.Playbook)
	if err != nil {
		return err
	}

	// commands that are needed to set up a basic environment to run the `ansible-local.py` script
	// TODO pivot based upon different platforms and allow optional python provision steps
	// TODO this should be configurable for folks who want to customize this
	provisionAnsibleCommands := []string{
		// https://github.com/hashicorp/terraform/issues/1025
		// cloud-init runs on freshly booted instances and can interfere with apt-get update commands, causing intermittent failures
		"/bin/bash -c 'until [[ -f /var/lib/cloud/instance/boot-finished ]]; do sleep 1; done'",
		"apt-get update",
		"apt-get install -y build-essential python-dev",
		"curl https://bootstrap.pypa.io/get-pip.py | sudo python",
		"pip install ansible",
	}

	for _, command := range provisionAnsibleCommands {
		o.Output(fmt.Sprintf("running command: %s", command))
		err := p.runCommand(o, comm, command)
		if err != nil {
			return err
		}
	}

	// ansible projects are structured such that the playbook file is in
	// the top level of the module path. As such, we parse the playbook
	// path's directory and upload the entire thing
	playbookDir := filepath.Dir(playbookPath)

	// the host playbook path is the path on the host where the playbook
	// will be uploaded
	remotePlaybookPath := filepath.Join("/tmp/ansible", filepath.Base(playbookPath))

	// upload ansible source and playbook to the host
	if err := comm.UploadDir("/tmp/ansible", playbookDir); err != nil {
		return err
	}

	extraVars, err := json.Marshal(p.ExtraVars)
	if err != nil {
		return err
	}

	// build a command to run ansible on the host machine
	command := fmt.Sprintf("curl %s | python - --playbook=%s --hosts=%s --plays=%s --groups=%s --extra-vars=%s",
		p.ansibleLocalScript,
		remotePlaybookPath,
		strings.Join(p.Hosts, ","),
		strings.Join(p.Plays, ","),
		strings.Join(p.Groups, ","),
		string(extraVars))

	o.Output(fmt.Sprintf("running command: %s", command))
	if err := p.runCommand(o, comm, command); err != nil {
		return err
	}

	return nil
}
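
// resolvePath is called at the top of Run but is not included in this excerpt.
// A minimal sketch, assuming it expands a leading "~" and verifies that the
// playbook exists (hypothetical implementation using
// github.com/mitchellh/go-homedir):
func (p *Provisioner) resolvePath(playbook string) (string, error) {
	expanded, err := homedir.Expand(playbook)
	if err != nil {
		return "", err
	}
	if _, err := os.Stat(expanded); err != nil {
		return "", fmt.Errorf("playbook path %q is invalid: %v", expanded, err)
	}
	return expanded, nil
}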