Example #1
func ensureSCCExists(ns string, serviceAccountName string) error {
	binary, err := exec.LookPath("oc")
	if err != nil {
		// no openshift so ignore
		return nil
	}

	text, err := getCommandOutputString(binary, []string{"export", "scc", serviceAccountName}, os.Stdin)
	if err != nil {
		log.Debug("Failed to get SecurityContextConstraints %s. %s", serviceAccountName, err)
	}
	if err != nil || len(text) == 0 {
		text = `
apiVersion: v1
kind: SecurityContextConstraints
groups:
- system:cluster-admins
- system:nodes
metadata:
  creationTimestamp: null
  name: ` + serviceAccountName + `
runAsUser:
  type: RunAsAny
seLinuxContext:
  type: RunAsAny
supplementalGroups:
  type: RunAsAny
users:
`
	}
	// lets ensure there's a users section
	if !strings.Contains(text, "\nusers:") {
		text = text + "\nusers:\n"
	}

	line := "system:serviceaccount:" + ns + ":" + serviceAccountName

	if strings.Contains(text, line) {
		log.Info("No need to modify SecurityContextConstraints as it already contains line for namespace %s and service account %s", ns, serviceAccountName)
		return nil
	}

	text = text + "\n- " + line + "\n"
	log.Debug("created SecurityContextConstraints YAML: %s", text)

	log.Info("Applying changes for SecurityContextConstraints %s for namespace %s and ServiceAccount %s", serviceAccountName, ns, serviceAccountName)
	reader := bytes.NewReader([]byte(text))
	err = runCommand(binary, []string{"apply", "-f", "-"}, reader)
	if err != nil {
		log.Err("Failed to update OpenShift SecurityContextConstraints named %s. %s", serviceAccountName, err)
	}
	return err
}
Example #2
func applyOtherKubernetesResource(f *cmdutil.Factory, c *client.Client, ns string, file string, variables map[string]string) error {
	log.Info("applying kubernetes resource: %s", file)
	data, err := LoadFileAndReplaceVariables(file, variables)
	if err != nil {
		return err
	}
	// TODO the following should work ideally but something's wrong with the loading of versioned schemas...
	//return k8s.ApplyResource(f, c, ns, data, file)

	// lets use the `oc` binary instead
	isOc := true
	binary, err := exec.LookPath("oc")
	if err != nil {
		isOc = false
		var err2 error
		binary, err2 = exec.LookPath("kubectl")
		if err2 != nil {
			return err
		}
	}
	reader := bytes.NewReader(data)
	err = runCommand(binary, []string{"apply", "-f", "-"}, reader)
	if err != nil {
		return err
	}
	if isOc {
		// if the resource is a Service, lets try to figure out the service name
		service := api.Service{}
		if err := yaml.Unmarshal(data, &service); err != nil {
			log.Info("Probably not a service! %s", err)
			return nil
		}
		name := service.ObjectMeta.Name
		serviceType := service.Spec.Type
		if service.Kind == "Service" && len(name) > 0 && serviceType == "LoadBalancer" {
			log.Info("Checking the service %s is exposed in OpenShift", name)
			runCommand(binary, []string{"expose", "service", name}, os.Stdin)
			return nil
		}
	}
	return nil
}
Example #3
func forwardPortLoop(name string, address string, forwardAddress string) error {
	log.Info("forwarding port %s %s => %s", name, address, forwardAddress)
	listener, err := net.Listen("tcp", address)
	if err != nil {
		return err
	}

	log.Info("About to start the acceptor goroutine!")
	go func() {
		for {
			conn, err := listener.Accept()
			if err != nil {
				log.Err("Failed to accept connection: %v", err)
				continue
			}
			log.Info("Accepted connection %v\n", conn)
			go forwardPort(conn, forwardAddress)
		}
	}()
	return nil
}
Example #4
File: winrm.go Project: fabric8io/kansible
// CloseShell closes the given WinRM Shell terminating any processes created within it
func CloseShell(user string, password string, host string, port string, shellID string) error {
	portNumber, err := parsePortNumber(port)
	if err != nil {
		return err
	}
	client, err := winrm.NewClient(&winrm.Endpoint{Host: host, Port: portNumber, HTTPS: false, Insecure: false}, user, password)
	if err != nil {
		return fmt.Errorf("Could not create WinRM client: %s", err)
	}

	log.Info("Closing shell %s", shellID)
	shell := client.NewShell(shellID)
	return shell.Close()
}
Example #5
func deletePodsForOldHosts(c *client.Client, ns string, annotations map[string]string, pods *api.PodList, hostEntries []*HostEntry) {
	for annKey, podName := range annotations {
		if strings.HasPrefix(annKey, AnsibleHostPodAnnotationPrefix) {
			hostName := annKey[len(AnsibleHostPodAnnotationPrefix):]
			if k8s.PodIsRunning(pods, podName) {
				hostEntry := GetHostEntryByName(hostEntries, hostName)
				if hostEntry == nil {
					log.Info("Deleting pod %s as there is no longer an Ansible inventory host called %s", podName, hostName)
					c.Pods(ns).Delete(podName, nil)
				}
			}
		}
	}
}
Example #6
func forwardPort(conn net.Conn, address string) {
	client, err := net.Dial("tcp", address)
	if err != nil {
		log.Err("Dial failed: %v", err)
		conn.Close()
		return
	}
	log.Info("Connected to %s for %v\n", address, conn)
	go func() {
		defer client.Close()
		defer conn.Close()
		io.Copy(client, conn)
	}()
	go func() {
		defer client.Close()
		defer conn.Close()
		io.Copy(conn, client)
	}()
}
Example #7
File: k8s.go Project: fabric8io/kansible
// EnsureServiceAccountExists ensures that there is a service account created for the given name
func EnsureServiceAccountExists(c *client.Client, ns string, serviceAccountName string) (bool, error) {
	saClient := c.ServiceAccounts(ns)
	sa, err := saClient.Get(serviceAccountName)
	created := false
	if err != nil || sa == nil {
		// lets try create the SA
		sa = &api.ServiceAccount{
			ObjectMeta: api.ObjectMeta{
				Name: serviceAccountName,
			},
		}
		log.Info("Creating ServiceAccount %s", serviceAccountName)
		_, err = saClient.Create(sa)
		if err == nil {
			created = true
		}
	}
	return created, err
}
Example #8
File: rc.go Project: rhuss/kansible
// RC creates or updates the kansible ReplicationController for some hosts in an Ansible inventory
func RC(c *cli.Context) {
	args := c.Args()
	if len(args) < 1 {
		log.Die("Expected argument [hosts] for the name of the hosts in the ansible inventory file")
	}
	hosts := args[0]

	f := cmdutil.NewFactory(nil)
	if f == nil {
		log.Die("Failed to create Kuberentes client factory!")
	}
	kubeclient, _ := f.Client()
	if kubeclient == nil {
		log.Die("Failed to create Kuberentes client!")
	}
	ns, _, _ := f.DefaultNamespace()
	if len(ns) == 0 {
		ns = "default"
	}

	inventory, err := osExpandAndVerify(c, "inventory")
	if err != nil {
		fail(err)
	}

	hostEntries, err := ansible.LoadHostEntries(inventory, hosts)
	if err != nil {
		fail(err)
	}
	log.Info("Found %d host entries in the Ansible inventory for %s", len(hostEntries), hosts)

	rcFile := "kubernetes/" + hosts + "/rc.yml"

	_, err = ansible.UpdateKansibleRC(hostEntries, hosts, kubeclient, ns, rcFile)
	if err != nil {
		fail(err)
	}
}
Example #9
// UpdateKansibleRC reads the Ansible inventory and the RC YAML for the hosts and updates it in Kubernetes
// along with removing any remaining pods which are running against old hosts that have been removed from the inventory
func UpdateKansibleRC(hostEntries []*HostEntry, hosts string, f *cmdutil.Factory, c *client.Client, ns string, rcFile string, replicas int) (*api.ReplicationController, error) {
	variables, err := LoadAnsibleVariables(hosts)
	if err != nil {
		return nil, err
	}
	data, err := LoadFileAndReplaceVariables(rcFile, variables)
	if err != nil {
		return nil, err
	}
	rcConfig, err := k8s.ReadReplicationController(data)
	if err != nil {
		return nil, err
	}
	rcName := rcConfig.ObjectMeta.Name
	podSpec := k8s.GetOrCreatePodSpec(rcConfig)

	// lets default labels and selectors if they are missing
	rcLabels := rcConfig.ObjectMeta.Labels
	if len(rcLabels) > 0 {
		rcSpec := &rcConfig.Spec // take a pointer so the defaulted selector is applied to the loaded spec
		if len(rcSpec.Selector) == 0 {
			rcSpec.Selector = rcLabels
		}
		template := rcSpec.Template
		if template != nil {
			if len(template.ObjectMeta.Labels) == 0 {
				template.ObjectMeta.Labels = rcLabels
			}
		}
	}

	container := k8s.GetFirstContainerOrCreate(rcConfig)
	if len(container.Image) == 0 {
		container.Image = "fabric8/kansible"
	}
	if len(container.Name) == 0 {
		container.Name = "kansible"
	}
	if len(container.ImagePullPolicy) == 0 {
		container.ImagePullPolicy = "IfNotPresent"
	}
	preStopCommands := []string{"kansible", "kill"}
	if len(podSpec.ServiceAccountName) == 0 {
		podSpec.ServiceAccountName = rcName
	}
	serviceAccountName := podSpec.ServiceAccountName
	k8s.EnsureContainerHasPreStopCommand(container, preStopCommands)
	k8s.EnsureContainerHasEnvVar(container, EnvHosts, hosts)
	k8s.EnsureContainerHasEnvVar(container, EnvRC, rcName)
	k8s.EnsureContainerHasEnvVar(container, EnvBash, "/usr/local/bin/bash")
	k8s.EnsureContainerHasEnvVarFromField(container, EnvNamespace, "metadata.namespace")
	command := k8s.GetContainerEnvVar(container, EnvCommand)
	if len(command) == 0 {
		return nil, fmt.Errorf("No environemnt variable value defined for %s in ReplicationController YAML file %s", EnvCommand, rcFile)
	}

	if len(serviceAccountName) > 0 {
		created, err := k8s.EnsureServiceAccountExists(c, ns, serviceAccountName)
		if err != nil {
			return nil, err
		}
		if created {
			err = ensureSCCExists(ns, serviceAccountName)
			if err != nil {
				return nil, err
			}
		}
	}

	isUpdate := true
	rc, err := c.ReplicationControllers(ns).Get(rcName)
	if err != nil {
		isUpdate = false
		rc = &api.ReplicationController{
			ObjectMeta: api.ObjectMeta{
				Namespace: ns,
				Name:      rcName,
			},
		}
	}
	pods, err := c.Pods(ns).List(api.ListOptions{})
	if err != nil {
		return nil, err
	}

	// merge the loaded RC configuration into the existing RC
	originalReplicas := rc.Spec.Replicas
	rc.Spec = rcConfig.Spec

	metadata := &rc.ObjectMeta
	resourceVersion := metadata.ResourceVersion
	rcSpec := &rc.Spec
	if replicas < 0 {
		replicas = originalReplicas
	}
	rcSpec.Replicas = replicas

	err = generatePrivateKeySecrets(c, ns, hostEntries, rc, podSpec, container)
	if err != nil {
		return rc, err
	}

	text := HostEntriesToString(hostEntries)
	if metadata.Annotations == nil {
		metadata.Annotations = make(map[string]string)
	}
	metadata.Annotations[HostInventoryAnnotation] = text
	metadata.Annotations[IconAnnotation] = IconURL

	log.Info("found RC with name %s and version %s and replicas %d", rcName, resourceVersion, rcSpec.Replicas)

	deletePodsForOldHosts(c, ns, metadata.Annotations, pods, hostEntries)

	replicationController := c.ReplicationControllers(ns)
	if isUpdate {
		_, err = replicationController.Update(rc)
	} else {
		_, err = replicationController.Create(rc)
	}
	if err != nil {
		log.Info("Failed to update the RC, could be concurrent update failure: %s", err)
		return nil, err
	}

	err = applyOtherKubernetesResources(f, c, ns, rcFile, variables)
	return rc, err
}
Example #10
// ChooseHostAndPrivateKey parses the given Ansible inventory file for the hosts
// and chooses a single host inside it, returning the host name and the private key
func ChooseHostAndPrivateKey(thisPodName string, hosts string, c *client.Client, ns string, rcName string) (*HostEntry, *api.ReplicationController, map[string]string, error) {
	retryAttempts := 20

	for i := 0; i < retryAttempts; i++ {
		if i > 0 {
			// lets sleep before retrying
			time.Sleep(time.Duration(random(1000, 20000)) * time.Millisecond)
		}
		if c == nil {
			return nil, nil, nil, fmt.Errorf("No Kubernetes Client specified!")
		}
		rc, err := c.ReplicationControllers(ns).Get(rcName)
		if err != nil {
			return nil, nil, nil, err
		}
		if rc == nil {
			return nil, nil, nil, fmt.Errorf("No ReplicationController found for name %s", rcName)
		}

		pods, err := c.Pods(ns).List(api.ListOptions{})
		if err != nil {
			return nil, nil, nil, err
		}

		metadata := &rc.ObjectMeta
		resourceVersion := metadata.ResourceVersion
		if metadata.Annotations == nil {
			metadata.Annotations = make(map[string]string)
		}
		annotations := metadata.Annotations
		log.Info("Using ReplicationController with namespace %s name %s and version %s", ns, rcName, resourceVersion)

		hostsText := annotations[HostInventoryAnnotation]
		if len(hostsText) == 0 {
			return nil, nil, nil, fmt.Errorf("Could not find annotation %s on ReplicationController %s", HostInventoryAnnotation, rcName)
		}
		hostEntries, err := LoadHostEntriesFromText(hostsText)
		if err != nil {
			return nil, nil, nil, err
		}
		log.Info("Found %d host entries", len(hostEntries))

		// lets pick a random entry
		if len(hostEntries) > 0 {
			filteredHostEntries := hostEntries
			for annKey, podName := range annotations {
				if strings.HasPrefix(annKey, AnsibleHostPodAnnotationPrefix) {
					hostName := annKey[len(AnsibleHostPodAnnotationPrefix):]
					if k8s.PodIsRunning(pods, podName) {
						if podName != thisPodName {
							log.Info("Pod %s podName has already claimed host %s", podName, hostName)
							filteredHostEntries = removeHostEntry(filteredHostEntries, hostName)
						}
					} else {
						// lets remove this annotation as the pod is no longer valid
						log.Info("Pod %s is no longer running so removing the annotation %s", podName, annKey)
						delete(metadata.Annotations, annKey)
					}
				}
			}

			count := len(filteredHostEntries)

			if count == 0 {
				log.Info("There are no more hosts available to be supervised by this pod!")
				return nil, nil, nil, fmt.Errorf("No more hosts available to be supervised!")
			}
			log.Info("After filtering out hosts owned by other pods we have %v host entries left", count)

			pickedEntry := filteredHostEntries[random(0, count)]
			hostName := pickedEntry.Name
			if len(pickedEntry.Host) == 0 {
				return nil, nil, nil, fmt.Errorf("Could not find host name for entry %s", pickedEntry.Name)
			}
			if len(pickedEntry.User) == 0 {
				return nil, nil, nil, fmt.Errorf("Could not find User for entry %s", pickedEntry.Name)
			}

			// lets try pick this pod
			annotations[AnsibleHostPodAnnotationPrefix+hostName] = thisPodName

			rc, err = c.ReplicationControllers(ns).Update(rc)
			if err != nil {
				log.Info("Failed to update the RC, could be concurrent update failure: %s", err)
			} else {
				log.Info("Picked host " + pickedEntry.Host)

				// lets update the Pod with the host name label
				podClient := c.Pods(ns)
				pod, err := podClient.Get(thisPodName)
				if err != nil {
					return pickedEntry, nil, nil, err
				}
				metadata := &pod.ObjectMeta
				if metadata.Annotations == nil {
					metadata.Annotations = make(map[string]string)
				}
				metadata.Annotations[HostNameAnnotation] = pickedEntry.Name
				metadata.Annotations[HostAddressAnnotation] = pickedEntry.Host
				//pod.Status = api.PodStatus{}
				pod, err = podClient.UpdateStatus(pod)
				if err != nil {
					return pickedEntry, nil, nil, err
				}

				// lets export required environment variables
				exportEnvVars := os.Getenv(EnvExportEnvVars)
				envVars := make(map[string]string)
				if len(exportEnvVars) > 0 {
					names := strings.Split(exportEnvVars, " ")
					for _, name := range names {
						name = strings.TrimSpace(name)
						if len(name) > 0 {
							value := os.Getenv(name)
							if len(value) > 0 {
								envVars[name] = value
								log.Debug("Exporting environment variable %s = %s", name, value)
							}
						}
					}
				}

				err = forwardPorts(pod, pickedEntry)
				return pickedEntry, rc, envVars, err
			}
		}
	}
	return nil, nil, nil, fmt.Errorf("Could not find any available hosts on the ReplicationController %s and hosts %s", rcName, hosts)
}
Example #11
File: k8s.go Project: fabric8io/kansible
// ApplyResource applies the given data as a kubernetes resource
func ApplyResource(f *cmdutil.Factory, c *client.Client, ns string, data []byte, name string) error {
	schemaCacheDir := "/tmp/kubectl.schema"
	validate := true
	schema, err := f.Validator(validate, schemaCacheDir)
	if err != nil {
		log.Info("Failed to load kubernetes schema: %s", err)
		return err
	}

	mapper, typer := f.Object()
	r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).
		Schema(schema).
		ContinueOnError().
		NamespaceParam(ns).DefaultNamespace().
		Stream(bytes.NewReader(data), name).
		Flatten().
		Do()
	err = r.Err()
	if err != nil {
		log.Info("Failed to load mapper!")
		return err
	}

	count := 0
	err = r.Visit(func(info *resource.Info, err error) error {
		// In this method, info.Object contains the object retrieved from the server
		// and info.VersionedObject contains the object decoded from the input source.
		if err != nil {
			return err
		}

		// Get the modified configuration of the object. Embed the result
		// as an annotation in the modified configuration, so that it will appear
		// in the patch sent to the server.
		modified, err := kubectl.GetModifiedConfiguration(info, true, f.JSONEncoder())
		if err != nil {
			return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving modified configuration from:\n%v\nfor:", info), info.Source, err)
		}

		if err := info.Get(); err != nil {
			return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%v\nfrom server for:", info), info.Source, err)
		}

		// Serialize the current configuration of the object from the server.
		current, err := runtime.Encode(f.JSONEncoder(), info.Object)
		if err != nil {
			return cmdutil.AddSourceToErr(fmt.Sprintf("serializing current configuration from:\n%v\nfor:", info), info.Source, err)
		}

		// Retrieve the original configuration of the object from the annotation.
		original, err := kubectl.GetOriginalConfiguration(info)
		if err != nil {
			return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving original configuration from:\n%v\nfor:", info), info.Source, err)
		}

		// Compute a three way strategic merge patch to send to server.
		patch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, info.VersionedObject, false)
		if err != nil {
			format := "creating patch with:\noriginal:\n%s\nmodified:\n%s\ncurrent:\n%s\nfrom:\n%v\nfor:"
			return cmdutil.AddSourceToErr(fmt.Sprintf(format, original, modified, current, info), info.Source, err)
		}

		helper := resource.NewHelper(info.Client, info.Mapping)
		_, err = helper.Patch(info.Namespace, info.Name, api.StrategicMergePatchType, patch)
		if err != nil {
			return cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patch, info), info.Source, err)
		}

		count++
		cmdutil.PrintSuccess(mapper, false, os.Stdout, info.Mapping.Resource, info.Name, "configured")
		return nil
	})

	if err != nil {
		return err
	}

	if count == 0 {
		return fmt.Errorf("no objects passed to apply")
	}
	return nil
}
Example #12
File: ssh.go Project: fabric8io/kansible
// RemoteSSHCommand invokes the given command on a host and port
func RemoteSSHCommand(user string, privateKey string, host string, port string, cmd string, envVars map[string]string) error {
	if len(privateKey) == 0 {
		return fmt.Errorf("Could not find PrivateKey for entry %s", host)
	}
	log.Info("Connecting to host over SSH on host %s and port %s with user %s with command `%s`", host, port, user, cmd)

	hostPort := net.JoinHostPort(host, port)

	sshConfig := &ssh.ClientConfig{
		User: user,
		Auth: []ssh.AuthMethod{
			PublicKeyFile(privateKey),
		},
	}
	if sshConfig == nil {
		log.Warn("No sshConfig could be created!")
	}
	connection, err := ssh.Dial("tcp", hostPort, sshConfig)
	if err != nil {
		return fmt.Errorf("Failed to dial: %s", err)
	}
	session, err := connection.NewSession()
	if err != nil {
		return fmt.Errorf("Failed to create session: %s", err)
	}
	defer session.Close()

	modes := ssh.TerminalModes{
		// ssh.ECHO:          0,     // disable echoing
		ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
		ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
	}

	if err := session.RequestPty("xterm", 80, 40, modes); err != nil {
		return fmt.Errorf("Request for pseudo terminal failed: %s", err)
	}

	stdin, err := session.StdinPipe()
	if err != nil {
		return fmt.Errorf("Unable to setup stdin for session: %v", err)
	}
	go io.Copy(stdin, os.Stdin)

	stdout, err := session.StdoutPipe()
	if err != nil {
		return fmt.Errorf("Unable to setup stdout for session: %v", err)
	}
	go io.Copy(os.Stdout, stdout)

	stderr, err := session.StderrPipe()
	if err != nil {
		return fmt.Errorf("Unable to setup stderr for session: %v", err)
	}
	go io.Copy(os.Stderr, stderr)

	for envName, envValue := range envVars {
		log.Info("Setting environment value %s = %s", envName, envValue)
		if err := session.Setenv(envName, envValue); err != nil {
			return fmt.Errorf("Could not set environment variable %s = %s over SSH. This could be disabled by the sshd configuration. See the `AcceptEnv` setting in your /etc/ssh/sshd_config more info: http://linux.die.net/man/5/sshd_config . Error: %s", envName, envValue, err)
		}
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	signaled := false
	go func() {
		<-signals
		log.Info("Shutting down SSH session.")
		signaled = true
		session.Close()
	}()

	log.Info("Running command %s", cmd)
	err = session.Run(cmd)
	if !signaled && err != nil {
		return fmt.Errorf("Failed to run command: "+cmd+": %v", err)
	}
	return nil
}
Example #13
File: winrm.go Project: fabric8io/kansible
// RemoteWinRmCommand runs the remote command on a windows machine
func RemoteWinRmCommand(user string, password string, host string, port string, commandText string, c *client.Client, rc *api.ReplicationController, hostName string) error {
	portNumber, err := parsePortNumber(port)
	if err != nil {
		return err
	}
	log.Info("Connecting to windows host over WinRM on host %s and port %d with user %s with command `%s`", host, portNumber, user, commandText)
	client, err := winrm.NewClient(&winrm.Endpoint{Host: host, Port: portNumber, HTTPS: false, Insecure: false}, user, password)
	if err != nil {
		return fmt.Errorf("Could not create WinRM client: %s", err)
	}

	isBash := false
	isBashShellText := os.Getenv(ansible.EnvIsBashShell)
	if len(isBashShellText) > 0 && strings.ToLower(isBashShellText) == "true" {
		isBash = true
	}
	if rc.ObjectMeta.Annotations != nil && !isBash {
		oldShellID := rc.ObjectMeta.Annotations[ansible.WinRMShellAnnotationPrefix+hostName]
		if len(oldShellID) > 0 {
			// lets close the previously running shell on this machine
			log.Info("Closing the old WinRM Shell %s", oldShellID)
			shell := client.NewShell(oldShellID)
			err = shell.Close()
			if err != nil {
				log.Warn("Failed to close shell %s. Error: %s", oldShellID, err)
			}
		}
	}

	shell, err := client.CreateShell()
	if err != nil {
		return fmt.Errorf("Impossible to create WinRM shell: %s", err)
	}
	defer shell.Close()
	shellID := shell.ShellId
	log.Info("Created WinRM Shell %s", shellID)

	if rc != nil && c != nil && !isBash {
		rc.ObjectMeta.Annotations[ansible.WinRMShellAnnotationPrefix+hostName] = shellID
		_, err = c.ReplicationControllers(rc.ObjectMeta.Namespace).UpdateStatus(rc)
		if err != nil {
			return err
		}
	}

	var cmd *winrm.Command
	cmd, err = shell.Execute(commandText)
	if err != nil {
		return fmt.Errorf("Impossible to create Command %s\n", err)
	}

	go io.Copy(cmd.Stdin, os.Stdin)
	go io.Copy(os.Stdout, cmd.Stdout)
	go io.Copy(os.Stderr, cmd.Stderr)

	cmd.Wait()

	exitCode := cmd.ExitCode()
	if exitCode > 0 {
		return fmt.Errorf("Failed to run command '%s' got exit code %d", commandText, exitCode)
	}

	// TODO
	// return cmd.Error()
	return nil
}
Example #14
File: pod.go Project: rhuss/kansible
// Pod runs the kansible pod for a given group of hosts in an Ansible playbook
// this grabs a specific host (using annotations on the RC) then runs a remote command
// on that host binding stdin, stdout, stderr to the remote process
func Pod(c *cli.Context) {
	args := c.Args()
	if len(args) < 1 {
		log.Die("Expected arguments [hosts] [command]")
	}
	hosts := os.ExpandEnv(args[0])
	command := ""
	if len(args) > 1 {
		command = os.ExpandEnv(strings.Join(args[1:], " "))
	}

	f := cmdutil.NewFactory(nil)
	if f == nil {
		log.Die("Failed to create Kubernetes client factory!")
	}
	kubeclient, _ := f.Client()
	if kubeclient == nil {
		log.Die("Failed to create Kubernetes client!")
	}
	ns, _, _ := f.DefaultNamespace()
	if len(ns) == 0 {
		ns = "default"
	}

	inventory, err := osExpandAndVerify(c, "inventory")
	if err != nil {
		fail(err)
	}
	rcName, err := osExpandAndVerify(c, "rc")
	if err != nil {
		fail(err)
	}
	envVars := make(map[string]string)
	hostEntry, err := ansible.ChooseHostAndPrivateKey(inventory, hosts, kubeclient, ns, rcName, envVars)
	if err != nil {
		fail(err)
	}
	host := hostEntry.Host
	user := hostEntry.User
	port := hostEntry.Port
	if len(port) == 0 {
		port, err = osExpandAndVerifyGlobal(c, "port")
	}
	if err != nil {
		fail(err)
	}

	connection := hostEntry.Connection
	if len(connection) == 0 {
		connection = osExpand(c, "connection")
	}

	runCommand := hostEntry.RunCommand
	if len(runCommand) != 0 {
		command = runCommand
	}

	commandEnvVars := []string{}
	if len(command) == 0 {
		if len(connection) > 0 {
			envVarName := ansible.EnvCommand + "_" + strings.ToUpper(connection)
			commandEnvVars = append(commandEnvVars, envVarName)
			command = os.Getenv(envVarName)
		}
	}
	commandEnvVars = append(commandEnvVars, ansible.EnvCommand)
	if len(command) == 0 {
		command = os.Getenv(ansible.EnvCommand)
	}
	if len(command) == 0 {
		plural := ""
		if len(commandEnvVars) > 1 {
			plural = "s"
		}
		fail(fmt.Errorf("Could not find a command to execute from the environment variable%s: %s", plural, strings.Join(commandEnvVars, ", ")))
	}

	log.Info("running command on a host from %s and command `%s`", hosts, command)
	bash := osExpand(c, "bash")
	if len(bash) > 0 {
		err = generateBashScript(bash, connection)
		if err != nil {
			log.Err("Failed to generate bash script at %s due to: %v", bash, err)
		}
	}

	log.Info("using connection %s", connection)
	if connection == ansible.ConnectionWinRM {
		log.Info("Using WinRM to connect to the hosts %s", hosts)
		password := hostEntry.Password
		if len(password) == 0 {
			password, err = osExpandAndVerify(c, "password")
			if err != nil {
				fail(err)
			}
		}
		err = winrm.RemoteWinRmCommand(user, password, host, port, command)
	} else {
		privatekey := hostEntry.PrivateKey

		err = ssh.RemoteSSHCommand(user, privatekey, host, port, command, envVars)
	}
	if err != nil {
		log.Err("Failed: %v", err)
	}
}
Example #15
File: rc.go Project: fabric8io/kansible
			log.Die("Failed to create Kubernetes client factory!")
		}
		kubeclient, err := f.Client()
		if err != nil || kubeclient == nil {
			log.Die(MessageFailedToCreateKubernetesClient, err)
		}
		ns, _, _ := f.DefaultNamespace()
		if len(ns) == 0 {
			ns = "default"
		}

		inventory = os.ExpandEnv(inventory)
		if inventory == "" {
			log.Die("Value for inventory flag is empty")
		}

		hostEntries, err := ansible.LoadHostEntries(inventory, hosts)
		if err != nil {
			log.Die("Cannot load host entries: %s", err)
		}
		log.Info("Found %d host entries in the Ansible inventory for %s", len(hostEntries), hosts)

		rcFile := "kubernetes/" + hosts + "/rc.yml"

		_, err = ansible.UpdateKansibleRC(hostEntries, hosts, f, kubeclient, ns, rcFile, replicas)
		if err != nil {
			log.Die("Failed to update Kansible RC: %s", err)
		}
	},
}
Example #16
File: ssh.go Project: rhuss/kansible
// RemoteSSHCommand invokes the given command on a host and port
func RemoteSSHCommand(user string, privateKey string, host string, port string, cmd string, envVars map[string]string) error {
	if len(privateKey) == 0 {
		return fmt.Errorf("Could not find PrivateKey for entry %s", host)
	}
	hostPort := host + ":" + port

	sshConfig := &ssh.ClientConfig{
		User: user,
		Auth: []ssh.AuthMethod{
			PublicKeyFile(privateKey),
		},
	}
	if sshConfig == nil {
		log.Info("Whoah!")
	}
	connection, err := ssh.Dial("tcp", hostPort, sshConfig)
	if err != nil {
		return fmt.Errorf("Failed to dial: %s", err)
	}
	session, err := connection.NewSession()
	if err != nil {
		return fmt.Errorf("Failed to create session: %s", err)
	}
	defer session.Close()

	modes := ssh.TerminalModes{
		// ssh.ECHO:          0,     // disable echoing
		ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
		ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
	}

	if err := session.RequestPty("xterm", 80, 40, modes); err != nil {
		return fmt.Errorf("Request for pseudo terminal failed: %s", err)
	}

	stdin, err := session.StdinPipe()
	if err != nil {
		return fmt.Errorf("Unable to setup stdin for session: %v", err)
	}
	go io.Copy(stdin, os.Stdin)

	stdout, err := session.StdoutPipe()
	if err != nil {
		return fmt.Errorf("Unable to setup stdout for session: %v", err)
	}
	go io.Copy(os.Stdout, stdout)

	stderr, err := session.StderrPipe()
	if err != nil {
		return fmt.Errorf("Unable to setup stderr for session: %v", err)
	}
	go io.Copy(os.Stderr, stderr)

	for envName, envValue := range envVars {
		log.Info("Setting environment value %s = %s", envName, envValue)
		if err := session.Setenv(envName, envValue); err != nil {
			return fmt.Errorf("Could not set environment variable %s = %s over SSH. This could be disabled by the sshd configuration. See the `AcceptEnv` setting in your /etc/ssh/sshd_config more info: http://linux.die.net/man/5/sshd_config . Error: %s", envName, envValue, err)
		}
	}

	log.Info("Running command %s", cmd)
	err = session.Run(cmd)
	if err != nil {
		return fmt.Errorf("Failed to run command: "+cmd+": %v", err)
	}
	return nil
}
Example #17
File: kill.go Project: fabric8io/kansible
		if err != nil {
			log.Die("Failed to get this pod name: %s", err)
		}

		pod, err := kubeclient.Pods(ns).Get(thisPodName)
		if err != nil {
			log.Die("Failed to get pod from API server: %s", err)
		}

		annotations := pod.ObjectMeta.Annotations
		if annotations == nil {
			log.Die("No annotations available on pod %s", thisPodName)
		}
		hostName := annotations[ansible.HostNameAnnotation]
		if len(hostName) == 0 {
			log.Info("No annotation `%s` available on pod %s", ansible.HostNameAnnotation, thisPodName)
			return
		}

		// now lets load the connection details from the RC annotations
		rcName = os.ExpandEnv(rcName)
		if rcName == "" {
			log.Die("Replication controller name is required")
		}
		rc, err := kubeclient.ReplicationControllers(ns).Get(rcName)
		if err != nil {
			log.Die("Failed to get replication controller from API server: %s", err)
		}
		if rc == nil {
			log.Die("No ReplicationController found for name %s", rcName)
		}