Example #1
func main() {
	cc, err := restclient.InClusterConfig()
	if err != nil {
		log.Fatalf("Failed to create client: %v", err)
	}

	kubeClient, err := clientset.NewForConfig(cc)
	if err != nil {
		log.Fatalf("Failed to create client: %v", err)
	}
	listAll := api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()}
	nodes, err := kubeClient.Core().Nodes().List(listAll)
	if err != nil {
		log.Fatalf("Failed to list nodes: %v", err)
	}
	log.Printf("Nodes:")
	for _, node := range nodes.Items {
		log.Printf("\t%v", node.Name)
	}
	services, err := kubeClient.Core().Services(api.NamespaceDefault).List(listAll)
	if err != nil {
		log.Fatalf("Failed to list services: %v", err)
	}
	log.Printf("Services:")
	for _, svc := range services.Items {
		log.Printf("\t%v", svc.Name)
	}
	log.Printf("Success")
	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "Ok")
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
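A note on the pattern: restclient.InClusterConfig() only succeeds inside a pod, because it builds the config from the service-account token and CA bundle mounted under /var/run/secrets/kubernetes.io/serviceaccount together with the KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT environment variables. A minimal sketch of the in-cluster/kubeconfig fallback that several later examples spell out by hand (the helper name and path argument are illustrative):

func loadConfig(kubeconfigPath string) (*restclient.Config, error) {
	// Prefer the in-cluster service-account config when running in a pod.
	if cfg, err := restclient.InClusterConfig(); err == nil {
		return cfg, nil
	}
	// Otherwise fall back to an explicit kubeconfig file.
	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
		&clientcmd.ConfigOverrides{},
	).ClientConfig()
}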
Example #2
func (s *DelegatingAuthenticationOptions) newTokenAccessReview() (authenticationclient.TokenReviewInterface, error) {
	var clientConfig *restclient.Config
	var err error
	if len(s.RemoteKubeConfigFile) > 0 {
		loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.RemoteKubeConfigFile}
		loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})

		clientConfig, err = loader.ClientConfig()

	} else {
		// without the remote kubeconfig file, try to use the in-cluster config.  Most addon API servers will
		// use this path
		clientConfig, err = restclient.InClusterConfig()
	}
	if err != nil {
		return nil, err
	}

	// set high qps/burst limits since this will effectively limit API server responsiveness
	clientConfig.QPS = 200
	clientConfig.Burst = 400

	client, err := authenticationclient.NewForConfig(clientConfig)
	if err != nil {
		return nil, err
	}

	return client.TokenReviews(), nil
}
Example #3
// NewInCluster is a shortcut for calling InClusterConfig() and then New().
func NewInCluster() (*Client, error) {
	cc, err := restclient.InClusterConfig()
	if err != nil {
		return nil, err
	}
	return New(cc)
}
Example #4
func getClient() (client.Interface, error) {
	clientConfig, err := restclient.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to get client config: %v", err)
	}
	osClient, err := client.New(clientConfig)
	if err != nil {
		return nil, fmt.Errorf("error obtaining OpenShift client: %v", err)
	}
	return osClient, nil
}
Example #5
func TestFromEnv(t *testing.T) {
	if _, err := restclient.InClusterConfig(); err != nil {
		t.Skip("This can only be run inside Kubernetes. Skipping.")
	}

	me, err := FromEnv()
	if err != nil {
		t.Errorf("Could not get an environment: %s", err)
	}
	if len(me.Name) == 0 {
		t.Error("Could not get a pod name.")
	}
}
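The guard above doubles as a reusable pattern for any integration test that needs a real cluster: a failing InClusterConfig() means the binary is not running in a pod, so the test skips instead of failing. A sketch of a shared helper, assuming only the standard testing package (the helper name is illustrative):

func requireCluster(t *testing.T) {
	// Skip, rather than fail, when the test runs outside Kubernetes.
	if _, err := restclient.InClusterConfig(); err != nil {
		t.Skip("This can only be run inside Kubernetes. Skipping.")
	}
}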
Example #6
// BuildConfigFromFlags is a helper function that builds configs from a master
// url or a kubeconfig filepath. These are passed in as command line flags for cluster
// components. Warnings should reflect this usage. If neither masterUrl nor kubeconfigPath
// is passed in, we fall back to inClusterConfig. If inClusterConfig fails, we fall back
// to the default config.
func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) {
	if kubeconfigPath == "" && masterUrl == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using the inClusterConfig.  This might not work.")
		kubeconfig, err := restclient.InClusterConfig()
		if err == nil {
			return kubeconfig, nil
		}
		glog.Warning("error creating inClusterConfig, falling back to default config: ", err)
	}
	return NewNonInteractiveDeferredLoadingClientConfig(
		&ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
		&ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig()
}
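A typical call site for the helper above, assuming it is invoked from a cluster component's main (the kubeconfig path is illustrative; passing two empty strings exercises the in-cluster fallback instead):

config, err := BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig")
if err != nil {
	glog.Fatalf("could not build client config: %v", err)
}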
Example #7
// Find all sibling pods in the service and post to their /write handler.
func contactOthers(state *State) {
	sleepTime := 5 * time.Second
	// In a large cluster, getting all endpoints is pretty expensive, so we
	// limit ourselves to sending on average at most 10 such requests per
	// second.
	if sleepTime < time.Duration(*peerCount/10)*time.Second {
		sleepTime = time.Duration(*peerCount/10) * time.Second
	}
	timeout := 5 * time.Minute
	// Similarly, we need to bump the timeout so that it stays reasonable in
	// large clusters.
	if timeout < time.Duration(*peerCount)*time.Second {
		timeout = time.Duration(*peerCount) * time.Second
	}
	defer state.doneContactingPeers()

	config, err := restclient.InClusterConfig()
	if err != nil {
		log.Fatalf("Unable to create config; error: %v\n", err)
	}
	config.ContentType = "application/vnd.kubernetes.protobuf"
	client, err := client.New(config)
	if err != nil {
		log.Fatalf("Unable to create client; error: %v\n", err)
	}
	// Double-check that this worked by fetching the server version.
	if v, err := client.Discovery().ServerVersion(); err != nil {
		log.Fatalf("Unable to get server version: %v\n", err)
	} else {
		log.Printf("Server version: %#v\n", v)
	}

	for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
		eps := getWebserverEndpoints(client)
		if eps.Len() >= *peerCount {
			break
		}
		state.Logf("%v/%v has %v endpoints (%v), which is less than %v as expected. Waiting for all endpoints to come up.", *namespace, *service, len(eps), eps.List(), *peerCount)
	}

	// Do this repeatedly, in case there's some propagation delay with getting
	// newly started pods into the endpoints list.
	for i := 0; i < 15; i++ {
		eps := getWebserverEndpoints(client)
		for ep := range eps {
			state.Logf("Attempting to contact %s", ep)
			contactSingle(ep, state)
		}
		time.Sleep(sleepTime)
	}
}
Example #8
func newBuilderConfigFromEnvironment(out io.Writer) (*builderConfig, error) {
	cfg := &builderConfig{}
	var err error

	cfg.out = out

	// build (BUILD)
	buildStr := os.Getenv("BUILD")
	glog.V(4).Infof("$BUILD env var is %s \n", buildStr)
	cfg.build = &api.Build{}
	if err := runtime.DecodeInto(kapi.Codecs.UniversalDecoder(), []byte(buildStr), cfg.build); err != nil {
		return nil, fmt.Errorf("unable to parse build: %v", err)
	}
	if errs := validation.ValidateBuild(cfg.build); len(errs) > 0 {
		return nil, errors.NewInvalid(unversioned.GroupKind{Kind: "Build"}, cfg.build.Name, errs)
	}
	glog.V(4).Infof("Build: %#v", cfg.build)

	masterVersion := os.Getenv(api.OriginVersion)
	thisVersion := version.Get().String()
	if len(masterVersion) != 0 && masterVersion != thisVersion {
		fmt.Fprintf(cfg.out, "warning: OpenShift server version %q differs from this image %q\n", masterVersion, thisVersion)
	} else {
		glog.V(4).Infof("Master version %q, Builder version %q", masterVersion, thisVersion)
	}

	// sourceSecretsDir (SOURCE_SECRET_PATH)
	cfg.sourceSecretDir = os.Getenv("SOURCE_SECRET_PATH")

	// dockerClient and dockerEndpoint (DOCKER_HOST)
	// usually not set, defaults to docker socket
	cfg.dockerClient, cfg.dockerEndpoint, err = dockerutil.NewHelper().GetClient()
	if err != nil {
		return nil, fmt.Errorf("no Docker configuration defined: %v", err)
	}

	// buildsClient (KUBERNETES_SERVICE_HOST, KUBERNETES_SERVICE_PORT)
	clientConfig, err := restclient.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("cannot connect to the server: %v", err)
	}
	osClient, err := client.New(clientConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to get client: %v", err)
	}
	cfg.buildsClient = osClient.Builds(cfg.build.Namespace)

	return cfg, nil
}
Example #9
// NewClient returns a usable Client. Don't forget to Stop it.
func NewClient(addr string, resyncPeriod time.Duration) (Client, error) {
	var config *restclient.Config
	if addr != "" {
		config = &restclient.Config{Host: addr}
	} else {
		// If no API server address was provided, assume we are running
		// inside a pod. Try to connect to the API server through its
		// Service environment variables, using the default Service
		// Account Token.
		var err error
		if config, err = restclient.InClusterConfig(); err != nil {
			return nil, err
		}
	}

	c, err := unversioned.New(config)
	if err != nil {
		return nil, err
	}

	ec, err := unversioned.NewExtensions(config)
	if err != nil {
		return nil, err
	}

	result := &client{
		quit:             make(chan struct{}),
		resyncPeriod:     resyncPeriod,
		client:           c,
		extensionsClient: ec,
	}

	result.podStore = &cache.StoreToPodLister{Store: result.setupStore(c, "pods", &api.Pod{})}
	result.serviceStore = &cache.StoreToServiceLister{Store: result.setupStore(c, "services", &api.Service{})}
	result.replicationControllerStore = &cache.StoreToReplicationControllerLister{Store: result.setupStore(c, "replicationcontrollers", &api.ReplicationController{})}
	result.nodeStore = &cache.StoreToNodeLister{Store: result.setupStore(c, "nodes", &api.Node{})}

	// We list deployments here to check if this version of kubernetes is >= 1.2.
	// We would use NegotiateVersion, but Kubernetes 1.1 "supports"
	// extensions/v1beta1, but not deployments or replicasets.
	if _, err := ec.Deployments(api.NamespaceAll).List(api.ListOptions{}); err != nil {
		log.Infof("Deployments and ReplicaSets are not supported by this Kubernetes version")
	} else {
		result.deploymentStore = &cache.StoreToDeploymentLister{Store: result.setupStore(ec, "deployments", &extensions.Deployment{})}
		result.replicaSetStore = &cache.StoreToReplicaSetLister{Store: result.setupStore(ec, "replicasets", &extensions.ReplicaSet{})}
	}

	return result, nil
}
Example #10
func createKubeClient(flags *flag.FlagSet, inCluster bool) (*kube_client.Client, error) {
	var config *kube_restclient.Config
	var err error
	if inCluster {
		config, err = kube_restclient.InClusterConfig()
	} else {
		clientConfig := kubectl_util.DefaultClientConfig(flags)
		config, err = clientConfig.ClientConfig()
	}
	if err != nil {
		return nil, fmt.Errorf("error connecting to the client: %v", err)
	}
	config.ContentType = *contentType
	return kube_client.NewOrDie(config), nil
}
Example #11
func makeClient() (*client.Client, error) {
	var cfg *restclient.Config
	var err error

	if *inCluster {
		if cfg, err = restclient.InClusterConfig(); err != nil {
			return nil, err
		}
	} else {
		clientConfig := kubectl_util.DefaultClientConfig(flags)
		if cfg, err = clientConfig.ClientConfig(); err != nil {
			return nil, err
		}
	}
	return client.New(cfg)
}
Example #12
func TestPodClient(t *testing.T) {
	// A pod can't really be mocked efficiently without major filesystem
	// manipulation. So we're testing fully only when this is running inside of
	// a k8s pod.
	if _, err := restclient.InClusterConfig(); err != nil {
		t.Skip("This can only be run inside Kubernetes. Skipping.")
	}

	c, err := PodClient()
	if err != nil {
		t.Errorf("Error constructing client: %s", err)
	}

	if _, err := c.ServerVersion(); err != nil {
		t.Errorf("Failed to connect to given server: %s", err)
	}
}
Example #13
// NewCommandDeployer provides a CLI handler for deploy.
func NewCommandDeployer(name string) *cobra.Command {
	cfg := &config{}

	cmd := &cobra.Command{
		Use:   fmt.Sprintf("%s%s", name, clientcmd.ConfigSyntax),
		Short: "Run the deployer",
		Long:  deployerLong,
		Run: func(c *cobra.Command, args []string) {
			if len(cfg.DeploymentName) == 0 {
				glog.Fatal("deployment is required")
			}
			if len(cfg.Namespace) == 0 {
				glog.Fatal("namespace is required")
			}

			kcfg, err := restclient.InClusterConfig()
			if err != nil {
				glog.Fatal(err)
			}
			kc, err := kclient.New(kcfg)
			if err != nil {
				glog.Fatal(err)
			}
			oc, err := client.New(kcfg)
			if err != nil {
				glog.Fatal(err)
			}

			deployer := NewDeployer(kc, oc)
			if err = deployer.Deploy(cfg.Namespace, cfg.DeploymentName); err != nil {
				glog.Fatal(err)
			}
		},
	}

	cmd.AddCommand(version.NewVersionCommand(name, false))

	flag := cmd.Flags()
	flag.StringVar(&cfg.DeploymentName, "deployment", util.Env("OPENSHIFT_DEPLOYMENT_NAME", ""), "The deployment name to start")
	flag.StringVar(&cfg.Namespace, "namespace", util.Env("OPENSHIFT_DEPLOYMENT_NAMESPACE", ""), "The deployment namespace")

	return cmd
}
Example #14
func newBuilderConfigFromEnvironment() (*builderConfig, error) {
	cfg := &builderConfig{}
	var err error

	// build (BUILD)
	buildStr := os.Getenv("BUILD")
	glog.V(4).Infof("$BUILD env var is %s \n", buildStr)
	cfg.build = &api.Build{}
	if err = runtime.DecodeInto(kapi.Codecs.UniversalDecoder(), []byte(buildStr), cfg.build); err != nil {
		return nil, fmt.Errorf("unable to parse build: %v", err)
	}

	masterVersion := os.Getenv(api.OriginVersion)
	thisVersion := version.Get().String()
	if len(masterVersion) != 0 && masterVersion != thisVersion {
		glog.Warningf("Master version %q does not match Builder image version %q", masterVersion, thisVersion)
	} else {
		glog.V(2).Infof("Master version %q, Builder version %q", masterVersion, thisVersion)
	}

	// sourceSecretsDir (SOURCE_SECRET_PATH)
	cfg.sourceSecretDir = os.Getenv("SOURCE_SECRET_PATH")

	// dockerClient and dockerEndpoint (DOCKER_HOST)
	// usually not set, defaults to docker socket
	cfg.dockerClient, cfg.dockerEndpoint, err = dockerutil.NewHelper().GetClient()
	if err != nil {
		return nil, fmt.Errorf("error obtaining docker client: %v", err)
	}

	// buildsClient (KUBERNETES_SERVICE_HOST, KUBERNETES_SERVICE_PORT)
	clientConfig, err := restclient.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to get client config: %v", err)
	}
	osClient, err := client.New(clientConfig)
	if err != nil {
		return nil, fmt.Errorf("error obtaining OpenShift client: %v", err)
	}
	cfg.buildsClient = osClient.Builds(cfg.build.Namespace)

	return cfg, nil
}
Example #15
func NewSubnetManager() (subnet.Manager, error) {
	cfg, err := restclient.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("unable to initialize inclusterconfig: %v", err)
	}
	c, err := clientset.NewForConfig(cfg)
	if err != nil {
		return nil, fmt.Errorf("unable to initialize client: %v", err)
	}

	podName := os.Getenv("POD_NAME")
	podNamespace := os.Getenv("POD_NAMESPACE")
	if podName == "" || podNamespace == "" {
		return nil, fmt.Errorf("env variables POD_NAME and POD_NAMESPACE must be set")
	}

	pod, err := c.Pods(podNamespace).Get(podName)
	if err != nil {
		return nil, fmt.Errorf("error retrieving pod spec for '%s/%s': %v", podNamespace, podName, err)
	}
	nodeName := pod.Spec.NodeName
	if nodeName == "" {
		return nil, fmt.Errorf("node name not present in pod spec '%s/%s'", podNamespace, podName)
	}

	netConf, err := ioutil.ReadFile(netConfPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read net conf: %v", err)
	}

	sc, err := subnet.ParseConfig(string(netConf))
	if err != nil {
		return nil, fmt.Errorf("error parsing subnet config: %s", err)
	}
	sm, err := newKubeSubnetManager(c, sc, nodeName)
	if err != nil {
		return nil, fmt.Errorf("error creating network manager: %s", err)
	}
	go sm.Run(context.Background())
	return sm, err
}
Example #16
func clientFromConfig(path string) (*restclient.Config, string, error) {
	if path == "-" {
		cfg, err := restclient.InClusterConfig()
		if err != nil {
			return nil, "", fmt.Errorf("cluster config not available: %v", err)
		}
		return cfg, "", nil
	}
	rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: path}
	credentials, err := rules.Load()
	if err != nil {
		return nil, "", fmt.Errorf("the provided credentials %q could not be loaded: %v", path, err)
	}
	cfg := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{})
	config, err := cfg.ClientConfig()
	if err != nil {
		return nil, "", fmt.Errorf("the provided credentials %q could not be used: %v", path, err)
	}
	namespace, _, _ := cfg.Namespace()
	return config, namespace, nil
}
Example #17
func NewKubernetesConfig(certsPath, serviceHost, servicePort string) (*restclient.Config, error) {
	config, err := restclient.InClusterConfig()
	if err == nil {
		return config, nil
	}

	tlsTransport, err := transport.New(&transport.Config{
		TLS: transport.TLSConfig{
			CAFile:   fmt.Sprintf("%s/%s", certsPath, "ca.pem"),
			CertFile: fmt.Sprintf("%s/%s", certsPath, "cert.pem"),
			KeyFile:  fmt.Sprintf("%s/%s", certsPath, "key.pem"),
		},
	})
	if err != nil {
		return nil, errors.Wrap(err, "Couldn't set up tls transport")
	}

	return &restclient.Config{
		Host:      fmt.Sprintf("https://%s:%s", serviceHost, servicePort),
		Transport: tlsTransport,
	}, nil
}
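Here the in-cluster config is tried first, and a manually assembled TLS transport is the out-of-cluster fallback. A hedged usage sketch (the certs directory and service address are illustrative):

// Inside a pod this returns the in-cluster config; outside it falls
// through to the cert-based transport.
config, err := NewKubernetesConfig("/etc/kubernetes/certs", "10.0.0.1", "443")
if err != nil {
	log.Fatalf("no usable Kubernetes config: %v", err)
}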
Example #18
// ClientConfig returns a complete client config
func (c defaultingClientConfig) ClientConfig() (*restclient.Config, error) {
	cfg, err := c.nested.ClientConfig()
	if err == nil {
		return cfg, nil
	}

	if !kclientcmd.IsEmptyConfig(err) {
		return nil, err
	}

	// TODO: need to expose inClusterConfig upstream and use that
	if icc, err := restclient.InClusterConfig(); err == nil {
		glog.V(4).Infof("Using in-cluster configuration")
		return icc, nil
	}

	return nil, errConfigurationMissing{fmt.Errorf(`Missing or incomplete configuration info.  Please login or point to an existing, complete config file:

  1. Via the command-line flag --config
  2. Via the KUBECONFIG environment variable
  3. In your home directory as ~/.kube/config

To view or setup config directly use the 'config' command.`)}
}
Example #19
func (cfg *config) RunDeployer() error {
	if len(cfg.rcName) == 0 {
		return fmt.Errorf("--deployment or OPENSHIFT_DEPLOYMENT_NAME is required")
	}
	if len(cfg.Namespace) == 0 {
		return fmt.Errorf("--namespace or OPENSHIFT_DEPLOYMENT_NAMESPACE is required")
	}

	kcfg, err := restclient.InClusterConfig()
	if err != nil {
		return err
	}
	kc, err := kclient.New(kcfg)
	if err != nil {
		return err
	}
	oc, err := client.New(kcfg)
	if err != nil {
		return err
	}

	deployer := NewDeployer(kc, oc, cfg.Out, cfg.ErrOut, cfg.Until)
	return deployer.Deploy(cfg.Namespace, cfg.rcName)
}
Example #20
func main() {
	flag.Parse()

	glog.Infof("Starting cauldron soak test with queries=%d podsPerNode=%d upTo=%d maxPar=%d",
		*queriesAverage, *podsPerNode, *upTo, *maxPar)

	cc, err := restclient.InClusterConfig()
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}

	client, err := clientset.NewForConfig(cc)
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}

	var nodes *api.NodeList
	for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) {
		nodes, err = client.Core().Nodes().List(api.ListOptions{})
		if err == nil {
			break
		}
		glog.Warningf("Failed to list nodes: %v", err)
	}
	if err != nil {
		glog.Fatalf("Giving up trying to list nodes: %v", err)
	}

	if len(nodes.Items) == 0 {
		glog.Fatalf("Failed to find any nodes.")
	}

	glog.Infof("Found %d nodes on this cluster:", len(nodes.Items))
	for i, node := range nodes.Items {
		glog.Infof("%d: %s", i, node.Name)
	}

	queries := *queriesAverage * len(nodes.Items) * *podsPerNode

	// Create a uniquely named namespace.
	got, err := client.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{GenerateName: "serve-hostnames-"}})
	if err != nil {
		glog.Fatalf("Failed to create namespace: %v", err)
	}
	ns := got.Name
	defer func(ns string) {
		if err := client.Core().Namespaces().Delete(ns, nil); err != nil {
			glog.Warningf("Failed to delete namespace ns: %e", ns, err)
		} else {
			// wait until the namespace disappears
			for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ {
				if _, err := client.Core().Namespaces().Get(ns); err != nil {
					if errors.IsNotFound(err) {
						return
					}
				}
				time.Sleep(time.Second)
			}
		}
	}(ns)
	glog.Infof("Created namespace %s", ns)

	// Create a service for these pods.
	glog.Infof("Creating service %s/serve-hostnames", ns)
	// Make several attempts to create a service.
	var svc *api.Service
	for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) {
		t := time.Now()
		svc, err = client.Core().Services(ns).Create(&api.Service{
			ObjectMeta: api.ObjectMeta{
				Name: "serve-hostnames",
				Labels: map[string]string{
					"name": "serve-hostname",
				},
			},
			Spec: api.ServiceSpec{
				Ports: []api.ServicePort{{
					Protocol:   "TCP",
					Port:       9376,
					TargetPort: intstr.FromInt(9376),
				}},
				Selector: map[string]string{
					"name": "serve-hostname",
				},
			},
		})
		glog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t))
		if err == nil {
			break
		}
		glog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err)
	}
	if err != nil {
		glog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err)
		return
	}
	// Clean up service
	defer func() {
		glog.Infof("Cleaning up service %s/serve-hostnames", ns)
		// Make several attempts to delete the service.
		for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
			if err := client.Core().Services(ns).Delete(svc.Name, nil); err == nil {
				return
			}
			glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err)
		}
	}()

	// Put serve-hostname pods on each node.
	podNames := []string{}
	for i, node := range nodes.Items {
		for j := 0; j < *podsPerNode; j++ {
			podName := fmt.Sprintf("serve-hostname-%d-%d", i, j)
			podNames = append(podNames, podName)
			// Make several attempts
			for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) {
				glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name)
				t := time.Now()
				_, err = client.Core().Pods(ns).Create(&api.Pod{
					ObjectMeta: api.ObjectMeta{
						Name: podName,
						Labels: map[string]string{
							"name": "serve-hostname",
						},
					},
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:  "serve-hostname",
								Image: "gcr.io/google_containers/serve_hostname:v1.4",
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
						NodeName: node.Name,
					},
				})
				glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t))
				if err == nil {
					break
				}
				glog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
			if err != nil {
				glog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err)
				return
			}
		}
	}
	// Clean up the pods
	defer func() {
		glog.Info("Cleaning up pods")
		// Make several attempts to delete the pods.
		for _, podName := range podNames {
			for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
				if err = client.Core().Pods(ns).Delete(podName, nil); err == nil {
					break
				}
				glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
		}
	}()

	glog.Info("Waiting for the serve-hostname pods to be ready")
	for _, podName := range podNames {
		var pod *api.Pod
		for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
			pod, err = client.Core().Pods(ns).Get(podName)
			if err != nil {
				glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout)
				continue
			}
			if pod.Status.Phase == api.PodRunning {
				break
			}
		}
		if pod == nil {
			glog.Warningf("Gave up waiting on pod %s/%s: it was never fetched successfully", ns, podName)
		} else if pod.Status.Phase != api.PodRunning {
			glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase)
		} else {
			glog.Infof("%s/%s is running", ns, podName)
		}
	}

	// Wait for the endpoints to propagate.
	for start := time.Now(); time.Since(start) < endpointTimeout; time.Sleep(10 * time.Second) {
		var resp *http.Response
		resp, err = http.Get(fmt.Sprintf("http://serve-hostnames.%s:9376", ns))
		if err == nil {
			// Close the body so the connection is not leaked.
			resp.Body.Close()
			break
		}
		glog.Infof("After %v while making a request got error %v", time.Since(start), err)
	}
	if err != nil {
		glog.Errorf("Failed to get a response from service: %v", err)
	}

	// Repeatedly make requests.
	for iteration := 0; iteration != *upTo; iteration++ {
		responseChan := make(chan string, queries)
		// Use a channel of size *maxPar to throttle the number
		// of in-flight requests to avoid overloading the service.
		inFlight := make(chan struct{}, *maxPar)
		start := time.Now()
		for q := 0; q < queries; q++ {
			go func(i int, query int) {
				inFlight <- struct{}{}
				t := time.Now()
				resp, err := http.Get(fmt.Sprintf("http://serve-hostnames.%s:9376", ns))
				glog.V(4).Infof("Call to serve-hostnames in namespace %s took %v", ns, time.Since(t))
				if err != nil {
					glog.Warningf("Call failed during iteration %d query %d : %v", i, query, err)
					// If the query failed return a string which starts with a character
					// that can't be part of a hostname.
					responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err)
				} else {
					defer resp.Body.Close()
					hostname, err := ioutil.ReadAll(resp.Body)
					if err != nil {
						responseChan <- fmt.Sprintf("!failed in iteration %d to read body of response: %v", i, err)
					} else {
						responseChan <- string(hostname)
					}
				}
				<-inFlight
			}(iteration, q)
		}
		responses := make(map[string]int, *podsPerNode*len(nodes.Items))
		missing := 0
		for q := 0; q < queries; q++ {
			r := <-responseChan
			glog.V(4).Infof("Got response from %s", r)
			responses[r]++
			// If the returned hostname starts with '!' then it indicates
			// an error response.
			if len(r) > 0 && r[0] == '!' {
				glog.V(3).Infof("Got response %s", r)
				missing++
			}
		}
		if missing > 0 {
			glog.Warningf("Missing %d responses out of %d", missing, queries)
		}
		// Report any nodes that did not respond.
		for n, node := range nodes.Items {
			for i := 0; i < *podsPerNode; i++ {
				name := fmt.Sprintf("serve-hostname-%d-%d", n, i)
				if _, ok := responses[name]; !ok {
					glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration)
				}
			}
		}
		glog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing",
			iteration, time.Since(start), queries-missing, float64(queries-missing)/time.Since(start).Seconds(), missing)
	}
}
Example #21
// GetKubeClientConfig returns rest client configuration based on the passed url.
func GetKubeClientConfig(uri *url.URL) (*kube_rest.Config, error) {
	var (
		kubeConfig *kube_rest.Config
		err        error
	)

	opts := uri.Query()
	configOverrides, err := getConfigOverrides(uri)
	if err != nil {
		return nil, err
	}

	inClusterConfig := defaultInClusterConfig
	if len(opts["inClusterConfig"]) > 0 {
		inClusterConfig, err = strconv.ParseBool(opts["inClusterConfig"][0])
		if err != nil {
			return nil, err
		}
	}

	if inClusterConfig {
		kubeConfig, err = kube_rest.InClusterConfig()
		if err != nil {
			return nil, err
		}

		if configOverrides.ClusterInfo.Server != "" {
			kubeConfig.Host = configOverrides.ClusterInfo.Server
		}
		kubeConfig.GroupVersion = &schema.GroupVersion{Version: configOverrides.ClusterInfo.APIVersion}
		kubeConfig.Insecure = configOverrides.ClusterInfo.InsecureSkipTLSVerify
		if configOverrides.ClusterInfo.InsecureSkipTLSVerify {
			kubeConfig.TLSClientConfig.CAFile = ""
		}
	} else {
		authFile := ""
		if len(opts["auth"]) > 0 {
			authFile = opts["auth"][0]
		}

		if authFile != "" {
			if kubeConfig, err = kube_client_cmd.NewNonInteractiveDeferredLoadingClientConfig(
				&kube_client_cmd.ClientConfigLoadingRules{ExplicitPath: authFile},
				configOverrides).ClientConfig(); err != nil {
				return nil, err
			}
		} else {
			kubeConfig = &kube_rest.Config{
				Host:     configOverrides.ClusterInfo.Server,
				Insecure: configOverrides.ClusterInfo.InsecureSkipTLSVerify,
			}
			kubeConfig.GroupVersion = &schema.GroupVersion{Version: configOverrides.ClusterInfo.APIVersion}
		}
	}
	if len(kubeConfig.Host) == 0 {
		return nil, fmt.Errorf("invalid kubernetes master url specified")
	}
	if len(kubeConfig.GroupVersion.Version) == 0 {
		return nil, fmt.Errorf("invalid kubernetes API version specified")
	}

	useServiceAccount := defaultUseServiceAccount
	if len(opts["useServiceAccount"]) >= 1 {
		useServiceAccount, err = strconv.ParseBool(opts["useServiceAccount"][0])
		if err != nil {
			return nil, err
		}
	}

	if useServiceAccount {
		// If a readable service account token exists, then use it
		if contents, err := ioutil.ReadFile(defaultServiceAccountFile); err == nil {
			kubeConfig.BearerToken = string(contents)
		}
	}

	return kubeConfig, nil
}
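The query string on the source URI drives every branch above. An illustrative caller, assuming a heapster-style source URI (the exact URI shape is an assumption; the query keys match what the function reads):

func configFromSourceURI() (*kube_rest.Config, error) {
	// Force the kubeconfig-file branch instead of the in-cluster one.
	uri, err := url.Parse("kubernetes:?inClusterConfig=false&auth=/etc/heapster/kubeconfig")
	if err != nil {
		return nil, err
	}
	return GetKubeClientConfig(uri)
}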
Example #22
func GetKubeClientConfig(uri *url.URL) (*kube_client.Config, error) {
	var (
		kubeConfig *kube_client.Config
		err        error
	)

	opts := uri.Query()
	configOverrides, err := getConfigOverrides(uri)
	if err != nil {
		return nil, err
	}

	inClusterConfig := defaultInClusterConfig
	if len(opts["inClusterConfig"]) > 0 {
		inClusterConfig, err = strconv.ParseBool(opts["inClusterConfig"][0])
		if err != nil {
			return nil, err
		}
	}

	if inClusterConfig {
		kubeConfig, err = kube_client.InClusterConfig()
		if err != nil {
			return nil, err
		}

		if configOverrides.ClusterInfo.Server != "" {
			kubeConfig.Host = configOverrides.ClusterInfo.Server
		}
		kubeConfig.GroupVersion = &unversioned.GroupVersion{Version: configOverrides.ClusterInfo.APIVersion}
		kubeConfig.Insecure = configOverrides.ClusterInfo.InsecureSkipTLSVerify
		if configOverrides.ClusterInfo.InsecureSkipTLSVerify {
			kubeConfig.TLSClientConfig.CAFile = ""
		}
	} else {
		authFile := ""
		if len(opts["auth"]) > 0 {
			authFile = opts["auth"][0]
		}

		if authFile != "" {
			// Load structured kubeconfig data from the given path.
			loader := &kubeClientCmd.ClientConfigLoadingRules{ExplicitPath: authFile}
			loadedConfig, err := loader.Load()
			if err != nil {
				return nil, err
			}

			// Flatten the loaded data to a particular restclient.Config based on the current context.
			if kubeConfig, err = kubeClientCmd.NewNonInteractiveClientConfig(
				*loadedConfig,
				loadedConfig.CurrentContext,
				&kubeClientCmd.ConfigOverrides{},
				loader).ClientConfig(); err != nil {
				return nil, err
			}
		} else {
			kubeConfig = &kube_client.Config{
				Host:     configOverrides.ClusterInfo.Server,
				Insecure: configOverrides.ClusterInfo.InsecureSkipTLSVerify,
			}
			kubeConfig.GroupVersion = &unversioned.GroupVersion{Version: configOverrides.ClusterInfo.APIVersion}
		}
	}
	if len(kubeConfig.Host) == 0 {
		return nil, fmt.Errorf("invalid kubernetes master url specified")
	}

	useServiceAccount := defaultUseServiceAccount
	if len(opts["useServiceAccount"]) >= 1 {
		useServiceAccount, err = strconv.ParseBool(opts["useServiceAccount"][0])
		if err != nil {
			return nil, err
		}
	}

	if useServiceAccount {
		// If a readable service account token exists, then use it
		if contents, err := ioutil.ReadFile(defaultServiceAccountFile); err == nil {
			kubeConfig.BearerToken = string(contents)
		}
	}

	kubeConfig.ContentType = "application/vnd.kubernetes.protobuf"

	return kubeConfig, nil
}
Example #23
// main function for GLBC.
func main() {
	// TODO: Add a healthz endpoint
	var err error
	var clusterManager *controller.ClusterManager

	// TODO: We can simply parse all go flags with
	// flags.AddGoFlagSet(go_flag.CommandLine)
	// but that pollutes --help output with a ton of standard go flags.
	// We only really need a binary switch from light, v(2) logging to
	// heavier debug style V(4) logging, which we use --verbose for.
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	// Set glog verbosity levels, unconditionally set --alsologtostderr.
	go_flag.Lookup("logtostderr").Value.Set("true")
	if *verbose {
		go_flag.Set("v", "4")
	}
	glog.Infof("Starting GLBC image: %v, cluster name %v", imageVersion, *clusterName)
	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend")
	}

	var config *restclient.Config
	// Create kubeclient
	if *inCluster {
		if config, err = restclient.InClusterConfig(); err != nil {
			glog.Fatalf("error creating client configuration: %v", err)
		}
	} else {
		config, err = clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error creating client configuration: %v", err)
		}
	}

	kubeClient, err := client.NewForConfig(config)
	if err != nil {
		glog.Fatalf("Failed to create client: %v.", err)
	}

	// Wait for the default backend Service. There's no pretty way to do this.
	parts := strings.Split(*defaultSvc, "/")
	if len(parts) != 2 {
		glog.Fatalf("Default backend should take the form namespace/name: %v",
			*defaultSvc)
	}
	defaultBackendNodePort, err := getNodePort(kubeClient, parts[0], parts[1])
	if err != nil {
		glog.Fatalf("Could not configure default backend %v: %v",
			*defaultSvc, err)
	}

	if *inCluster || *useRealCloud {
		// Create cluster manager
		namer, err := newNamer(kubeClient, *clusterName)
		if err != nil {
			glog.Fatalf("%v", err)
		}
		clusterManager, err = controller.NewClusterManager(*configFilePath, namer, defaultBackendNodePort, *healthCheckPath)
		if err != nil {
			glog.Fatalf("%v", err)
		}
	} else {
		// Create fake cluster manager
		clusterManager = controller.NewFakeClusterManager(*clusterName).ClusterManager
	}

	// Start loadbalancer controller
	lbc, err := controller.NewLoadBalancerController(kubeClient, clusterManager, *resyncPeriod, *watchNamespace)
	if err != nil {
		glog.Fatalf("%v", err)
	}
	if clusterManager.ClusterNamer.GetClusterName() != "" {
		glog.V(3).Infof("Cluster name %+v", clusterManager.ClusterNamer.GetClusterName())
	}
	clusterManager.Init(&controller.GCETranslator{LoadBalancerController: lbc})
	go registerHandlers(lbc)
	go handleSigterm(lbc, *deleteAllOnQuit)

	lbc.Run()
	for {
		glog.Infof("Handled quit, awaiting pod deletion.")
		time.Sleep(30 * time.Second)
	}
}
Example #24
func (inClusterClientConfig) ClientConfig() (*restclient.Config, error) {
	return restclient.InClusterConfig()
}
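This one-line adapter exposes the in-cluster config behind a ClientConfig() method so callers can stay agnostic about where the config comes from. A minimal consumption sketch, using an anonymous interface to avoid assuming the full clientcmd.ClientConfig surface:

func loadViaProvider() (*restclient.Config, error) {
	var provider interface {
		ClientConfig() (*restclient.Config, error)
	} = inClusterClientConfig{}
	return provider.ClientConfig()
}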
Example #25
func main() {
	// Flag domain.
	fs := pflag.NewFlagSet("default", pflag.ExitOnError)
	fs.Usage = func() {
		fmt.Fprintf(os.Stderr, "DESCRIPTION\n")
		fmt.Fprintf(os.Stderr, "  fluxd is the agent of flux.\n")
		fmt.Fprintf(os.Stderr, "\n")
		fmt.Fprintf(os.Stderr, "FLAGS\n")
		fs.PrintDefaults()
	}
	// This mirrors how kubectl extracts information from the environment.
	var (
		listenAddr        = fs.StringP("listen", "l", ":3031", "Listen address where /metrics will be served")
		fluxsvcAddress    = fs.String("fluxsvc-address", "wss://cloud.weave.works/api/flux", "Address of the fluxsvc to connect to.")
		token             = fs.String("token", "", "Token to use to authenticate with flux service")
		kubernetesKubectl = fs.String("kubernetes-kubectl", "", "Optional, explicit path to kubectl tool")
	)
	fs.Parse(os.Args)

	// Logger component.
	var logger log.Logger
	{
		logger = log.NewLogfmtLogger(os.Stderr)
		logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC)
		logger = log.NewContext(logger).With("caller", log.DefaultCaller)
	}

	// Platform component.
	var k8s platform.Platform
	{
		restClientConfig, err := restclient.InClusterConfig()
		if err != nil {
			logger.Log("err", err)
			os.Exit(1)
		}

		// When adding a new platform, don't just bash it in. Create a Platform
		// or Cluster interface in package platform, and have kubernetes.Cluster
		// and your new platform implement that interface.
		logger := log.NewContext(logger).With("component", "platform")
		logger.Log("host", restClientConfig.Host)

		cluster, err := kubernetes.NewCluster(restClientConfig, *kubernetesKubectl, logger)
		if err != nil {
			logger.Log("err", err)
			os.Exit(1)
		}

		if services, err := cluster.AllServices("", nil); err != nil {
			logger.Log("services", err)
		} else {
			logger.Log("services", len(services))
		}

		k8s = cluster
	}

	// Instrumentation
	var (
		daemonMetrics transport.DaemonMetrics
	)
	{
		k8s = platform.Instrument(k8s, platform.NewMetrics())
		daemonMetrics.ConnectionDuration = prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "flux",
			Subsystem: "fluxd",
			Name:      "connection_duration_seconds",
			Help:      "Duration in seconds of the current connection to fluxsvc. Zero means unconnected.",
		}, []string{"target"})
	}

	// Connect to fluxsvc
	daemonLogger := log.NewContext(logger).With("component", "client")
	daemon, err := transport.NewDaemon(
		http.DefaultClient,
		flux.Token(*token),
		transport.NewRouter(),
		*fluxsvcAddress,
		k8s,
		daemonLogger,
		daemonMetrics,
	)
	if err != nil {
		logger.Log("err", err)
		os.Exit(1)
	}
	defer daemon.Close()

	// Mechanical components.
	errc := make(chan error)
	go func() {
		c := make(chan os.Signal, 1) // buffered so a signal arriving early is not dropped
		signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
		errc <- fmt.Errorf("%s", <-c)
	}()

	// HTTP transport component, for metrics
	go func() {
		logger.Log("addr", *listenAddr)
		mux := http.NewServeMux()
		mux.Handle("/metrics", promhttp.Handler())
		errc <- http.ListenAndServe(*listenAddr, mux)
	}()

	// Go!
	logger.Log("exiting", <-errc)
}
Example #26
func (o DiscoveryServerOptions) RunDiscoveryServer() error {
	// if we don't have an etcd to back the server, we must be a legacy server
	if len(o.Etcd.StorageConfig.ServerList) == 0 {
		return o.RunLegacyDiscoveryServer()
	}

	// TODO have a "real" external address
	if err := o.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost"); err != nil {
		return fmt.Errorf("error creating self-signed certificates: %v", err)
	}

	genericAPIServerConfig := genericapiserver.NewConfig()
	if _, err := genericAPIServerConfig.ApplySecureServingOptions(o.SecureServing); err != nil {
		return err
	}
	if _, err := genericAPIServerConfig.ApplyDelegatingAuthenticationOptions(o.Authentication); err != nil {
		return err
	}
	if _, err := genericAPIServerConfig.ApplyDelegatingAuthorizationOptions(o.Authorization); err != nil {
		return err
	}
	genericAPIServerConfig.LongRunningFunc = filters.BasicLongRunningRequestCheck(
		sets.NewString("watch", "proxy"),
		sets.NewString("attach", "exec", "proxy", "log", "portforward"),
	)

	var err error
	privilegedLoopbackToken := uuid.NewRandom().String()
	if genericAPIServerConfig.LoopbackClientConfig, err = genericAPIServerConfig.SecureServingInfo.NewSelfClientConfig(privilegedLoopbackToken); err != nil {
		return err
	}

	kubeconfig, err := restclient.InClusterConfig()
	if err != nil {
		return err
	}
	coreAPIServerClient, err := kubeclientset.NewForConfig(kubeconfig)
	if err != nil {
		return err
	}

	config := apiserver.Config{
		GenericConfig:       genericAPIServerConfig,
		RESTOptionsGetter:   &restOptionsFactory{storageConfig: &o.Etcd.StorageConfig},
		CoreAPIServerClient: coreAPIServerClient,
	}

	config.ProxyClientCert, err = ioutil.ReadFile(o.ProxyClientCertFile)
	if err != nil {
		return err
	}
	config.ProxyClientKey, err = ioutil.ReadFile(o.ProxyClientKeyFile)
	if err != nil {
		return err
	}

	server, err := config.Complete().New()
	if err != nil {
		return err
	}
	server.GenericAPIServer.PrepareRun().Run(wait.NeverStop)

	return nil
}
Example #27
// NewIngressController returns a configured Ingress controller
func NewIngressController(backend ingress.Controller) *GenericController {
	var (
		flags = pflag.NewFlagSet("", pflag.ExitOnError)

		defaultSvc = flags.String("default-backend-service", "",
			`Service used to serve a 404 page for the default backend. Takes the form
    	namespace/name. The controller uses the first node port of this Service for
    	the default backend.`)

		ingressClass = flags.String("ingress-class", "",
			`Name of the ingress class to route through this controller.`)

		configMap = flags.String("configmap", "",
			`Name of the ConfigMap that contains the custom configuration to use`)

		publishSvc = flags.String("publish-service", "",
			`Service fronting the ingress controllers. Takes the form
 		namespace/name. The controller will set the endpoint records on the
 		ingress objects to reflect those on the service.`)

		tcpConfigMapName = flags.String("tcp-services-configmap", "",
			`Name of the ConfigMap that contains the definition of the TCP services to expose.
		The key in the map indicates the external port to be used. The value is the name of the
		service with the format namespace/serviceName and the port of the service can be a
		number or the name of the port.
		Ports 80 and 443 are not allowed as external ports; these ports are reserved for the backend.`)

		udpConfigMapName = flags.String("udp-services-configmap", "",
			`Name of the ConfigMap that contains the definition of the UDP services to expose.
		The key in the map indicates the external port to be used. The value is the name of the
		service with the format namespace/serviceName and the port of the service can be a
		number or the name of the port.`)

		resyncPeriod = flags.Duration("sync-period", 60*time.Second,
			`Relist and confirm cloud resources this often.`)

		watchNamespace = flags.String("watch-namespace", api.NamespaceAll,
			`Namespace to watch for Ingress. Default is to watch all namespaces`)

		healthzPort = flags.Int("healthz-port", 10254, "port for healthz endpoint.")

		profiling = flags.Bool("profiling", true, `Enable profiling via web interface host:port/debug/pprof/`)

		defSSLCertificate = flags.String("default-ssl-certificate", "", `Name of the secret
		that contains an SSL certificate to be used as the default for an HTTPS catch-all server`)

		defHealthzURL = flags.String("health-check-path", "/healthz", `Defines
		the URL to be used as the health check inside the default server in NGINX.`)
	)

	flags.AddGoFlagSet(flag.CommandLine)
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	flag.Set("logtostderr", "true")

	glog.Info(backend.Info())

	if *ingressClass != "" {
		glog.Infof("Watching for ingress class: %s", *ingressClass)
	}

	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend-service")
	}

	kubeconfig, err := restclient.InClusterConfig()
	if err != nil {
		kubeconfig, err = clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error configuring the client: %v", err)
		}
	}

	kubeClient, err := clientset.NewForConfig(kubeconfig)
	if err != nil {
		glog.Fatalf("failed to create client: %v", err)
	}

	_, err = k8s.IsValidService(kubeClient, *defaultSvc)
	if err != nil {
		glog.Fatalf("no service with name %v found: %v", *defaultSvc, err)
	}
	glog.Infof("validated %v as the default backend", *defaultSvc)

	if *publishSvc != "" {
		svc, err := k8s.IsValidService(kubeClient, *publishSvc)
		if err != nil {
			glog.Fatalf("no service with name %v found: %v", *publishSvc, err)
		}

		if len(svc.Status.LoadBalancer.Ingress) == 0 {
			// We could poll here, but we instead just exit and rely on k8s to restart us
			glog.Fatalf("service %s does not (yet) have ingress points", *publishSvc)
		}

		glog.Infof("service %v validated as source of Ingress status", *publishSvc)
	}

	if *configMap != "" {
		_, _, err = k8s.ParseNameNS(*configMap)
		if err != nil {
			glog.Fatalf("configmap error: %v", err)
		}
	}

	os.MkdirAll(ingress.DefaultSSLDirectory, 0655)

	config := &Configuration{
		Client:                kubeClient,
		ResyncPeriod:          *resyncPeriod,
		DefaultService:        *defaultSvc,
		IngressClass:          *ingressClass,
		Namespace:             *watchNamespace,
		ConfigMapName:         *configMap,
		TCPConfigMapName:      *tcpConfigMapName,
		UDPConfigMapName:      *udpConfigMapName,
		DefaultSSLCertificate: *defSSLCertificate,
		DefaultHealthzURL:     *defHealthzURL,
		PublishService:        *publishSvc,
		Backend:               backend,
	}

	ic := newIngressController(config)
	go registerHandlers(*profiling, *healthzPort, ic)
	return ic
}
Example #28
func main() {
	// First log our starting config, and then set up.
	log.Infof("Invoked by %v", os.Args)
	flag.Parse()

	// Perform further validation of flags.
	if *deployment == "" {
		log.Fatal("Must specify a deployment.")
	}

	if *threshold < 0 || *threshold > 100 {
		log.Fatalf("Threshold must be between 0 and 100 inclusively, was %d.", threshold)
	}

	log.Infof("Watching namespace: %s, pod: %s, container: %s.", *podNamespace, *podName, *containerName)
	log.Infof("cpu: %s, extra_cpu: %s, memory: %s, extra_memory: %s, storage: %s, extra_storage: %s", *baseCPU, *cpuPerNode, *baseMemory, *memoryPerNode, *baseStorage, *storagePerNode)

	// Set up work objects.
	config, err := restclient.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}

	clientset, err := client.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	k8s := nanny.NewKubernetesClient(*podNamespace, *deployment, *podName, *containerName, clientset)

	var resources []nanny.Resource

	// Monitor only the resources specified.
	if *baseCPU != noValue {
		resources = append(resources, nanny.Resource{
			Base:         resource.MustParse(*baseCPU),
			ExtraPerNode: resource.MustParse(*cpuPerNode),
			Name:         "cpu",
		})
	}

	if *baseMemory != noValue {
		resources = append(resources, nanny.Resource{
			Base:         resource.MustParse(*baseMemory),
			ExtraPerNode: resource.MustParse(*memoryPerNode),
			Name:         "memory",
		})
	}

	if *baseStorage != noValue {
		resources = append(resources, nanny.Resource{
			Base:         resource.MustParse(*baseStorage),
			ExtraPerNode: resource.MustParse(*storagePerNode),
			Name:         "storage",
		})
	}

	log.Infof("Resources: %+v", resources)

	var est nanny.ResourceEstimator
	if *estimator == "linear" {
		est = nanny.LinearEstimator{
			Resources: resources,
		}
	} else if *estimator == "exponential" {
		est = nanny.ExponentialEstimator{
			Resources:   resources,
			ScaleFactor: 1.5,
		}
	} else {
		log.Fatalf("Estimator %s not supported", *estimator)
	}

	// Begin nannying.
	nanny.PollAPIServer(k8s, est, *containerName, pollPeriod, uint64(*threshold))
}
Example #29
		return KubeconfigGetterForSecret(secretRefName)()
	}
}

// KubeconfigGetterForSecret is used to get the kubeconfig from the given secret.
var KubeconfigGetterForSecret = func(secretName string) clientcmd.KubeconfigGetter {
	return func() (*clientcmdapi.Config, error) {
		var data []byte
		if secretName != "" {
			// Get the namespace this is running in from the env variable.
			namespace := os.Getenv("POD_NAMESPACE")
			if namespace == "" {
				return nil, fmt.Errorf("unexpected: POD_NAMESPACE env var returned empty string")
			}
			// Get a client to talk to the k8s apiserver, to fetch secrets from it.
			cc, err := restclient.InClusterConfig()
			if err != nil {
				return nil, fmt.Errorf("error in creating in-cluster client: %s", err)
			}
			client, err := clientset.NewForConfig(cc)
			if err != nil {
				return nil, fmt.Errorf("error in creating in-cluster client: %s", err)
			}
			data = []byte{}
			var secret *api.Secret
			err = wait.PollImmediate(1*time.Second, getSecretTimeout, func() (bool, error) {
				secret, err = client.Core().Secrets(namespace).Get(secretName, metav1.GetOptions{})
				if err == nil {
					return true, nil
				}
				glog.Warningf("error in fetching secret: %s", err)
func main() {
	flag.Parse()
	glog.Info("Kubernetes Elasticsearch logging discovery")

	cc, err := restclient.InClusterConfig()
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}
	client, err := clientset.NewForConfig(cc)

	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}
	namespace := api.NamespaceSystem
	envNamespace := os.Getenv("NAMESPACE")
	if envNamespace != "" {
		if _, err := client.Core().Namespaces().Get(envNamespace); err != nil {
			glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
		}
		namespace = envNamespace
	}

	var elasticsearch *api.Service
	// Look for endpoints associated with the Elasticsearch logging service.
	// First wait for the service to become available.
	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
		elasticsearch, err = client.Core().Services(namespace).Get("elasticsearch-logging")
		if err == nil {
			break
		}
	}
	// If we did not find an elasticsearch logging service then log a warning
	// and return without adding any unicast hosts.
	if elasticsearch == nil {
		glog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
		return
	}

	var endpoints *api.Endpoints
	addrs := []string{}
	// Wait for some endpoints.
	count := 0
	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
		endpoints, err = client.Core().Endpoints(namespace).Get("elasticsearch-logging")
		if err != nil {
			continue
		}
		addrs = flattenSubsets(endpoints.Subsets)
		glog.Infof("Found %s", addrs)
		if len(addrs) > 0 && len(addrs) == count {
			break
		}
		count = len(addrs)
	}
	// If there was an error finding endpoints then log a warning and quit.
	if err != nil {
		glog.Warningf("Error finding endpoints: %v", err)
		return
	}

	glog.Infof("Endpoints = %s", addrs)
	fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(addrs, ", "))
}
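The flattenSubsets helper is not part of this listing. A plausible sketch consistent with how it is used above, collecting the IP of every ready endpoint address (quoting each IP is an assumption, so the printed unicast-hosts line is valid YAML):

func flattenSubsets(subsets []api.EndpointSubset) []string {
	ips := []string{}
	for _, ss := range subsets {
		for _, addr := range ss.Addresses {
			ips = append(ips, fmt.Sprintf("%q", addr.IP))
		}
	}
	return ips
}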