Code example #1
// NewMetricsGrabber builds a MetricsGrabber for the given client. If no registered
// master node is found, grabbing from the Scheduler and ControllerManager is disabled.
func NewMetricsGrabber(c *client.Client, kubelets bool, scheduler bool, controllers bool, apiServer bool) (*MetricsGrabber, error) {
	registeredMaster := false
	masterName := ""
	nodeList, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	if len(nodeList.Items) < 1 {
		glog.Warning("Can't find any Nodes in the API server to grab metrics from")
	}
	for _, node := range nodeList.Items {
		if system.IsMasterNode(&node) {
			registeredMaster = true
			masterName = node.Name
			break
		}
	}
	if !registeredMaster {
		scheduler = false
		controllers = false
		glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler and ControllerManager is disabled.")
	}

	return &MetricsGrabber{
		client:                    c,
		grabFromApiServer:         apiServer,
		grabFromControllerManager: controllers,
		grabFromKubelets:          kubelets,
		grabFromScheduler:         scheduler,
		masterName:                masterName,
		registeredMaster:          registeredMaster,
	}, nil
}
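
A minimal usage sketch (not part of the original file), assuming it lives in the same package as NewMetricsGrabber and that c is an already-configured *client.Client:

// Hypothetical helper: which sources get scraped is decided by NewMetricsGrabber.
func grabberExample(c *client.Client) error {
	// Request metrics from kubelets, the scheduler, the controller manager, and the API server.
	grabber, err := NewMetricsGrabber(c, true, true, true, true)
	if err != nil {
		return err
	}
	if !grabber.registeredMaster {
		glog.Warning("no registered master; scheduler/controller-manager metrics will be skipped")
	}
	return nil
}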
Code example #2
File: run.go Project: ttysteale/kubernetes-api
// waitForPodRunning polls the pod every two seconds until it is Running with all
// containers ready, or reaches a terminal phase (Succeeded or Failed).
func waitForPodRunning(c *client.Client, pod *api.Pod, out io.Writer) (status api.PodPhase, err error) {
	for {
		pod, err := c.Pods(pod.Namespace).Get(pod.Name)
		if err != nil {
			return api.PodUnknown, err
		}
		ready := false
		if pod.Status.Phase == api.PodRunning {
			ready = true
			for _, status := range pod.Status.ContainerStatuses {
				if !status.Ready {
					ready = false
					break
				}
			}
			if ready {
				return api.PodRunning, nil
			}
		}
		if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
			return pod.Status.Phase, nil
		}
		fmt.Fprintf(out, "Waiting for pod %s/%s to be running, status is %s, pod ready: %v\n", pod.Namespace, pod.Name, pod.Status.Phase, ready)
		time.Sleep(2 * time.Second)
		continue
	}
}
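
A hedged example of calling this helper, assuming c and pod already exist and that progress output goes to os.Stdout (requires the os import):

// Hypothetical caller: block until the pod is running or reaches a terminal phase.
func waitExample(c *client.Client, pod *api.Pod) error {
	phase, err := waitForPodRunning(c, pod, os.Stdout)
	if err != nil {
		return err
	}
	if phase != api.PodRunning {
		return fmt.Errorf("pod %s/%s ended in phase %s", pod.Namespace, pod.Name, phase)
	}
	return nil
}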
Code example #3
File: pet_set.go Project: ttysteale/kubernetes-api
// NewPetSetController creates a new petset controller.
func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "petset"})
	pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}}

	psc := &PetSetController{
		kubeClient:       kubeClient,
		blockingPetStore: newUnHealthyPetTracker(pc),
		newSyncer: func(blockingPet *pcb) *petSyncer {
			return &petSyncer{pc, blockingPet}
		},
		queue: workqueue.New(),
	}

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		// lookup the petset and enqueue
		AddFunc: psc.addPod,
		// lookup current and old petset if labels changed
		UpdateFunc: psc.updatePod,
		// lookup petset accounting for deletion tombstones
		DeleteFunc: psc.deletePod,
	})
	psc.podStore.Indexer = podInformer.GetIndexer()
	psc.podController = podInformer.GetController()

	psc.psStore.Store, psc.psController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return psc.kubeClient.Apps().PetSets(api.NamespaceAll).Watch(options)
			},
		},
		&apps.PetSet{},
		petSetResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: psc.enqueuePetSet,
			UpdateFunc: func(old, cur interface{}) {
				oldPS := old.(*apps.PetSet)
				curPS := cur.(*apps.PetSet)
				if oldPS.Status.Replicas != curPS.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for PetSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas)
				}
				psc.enqueuePetSet(cur)
			},
			DeleteFunc: psc.enqueuePetSet,
		},
	)
	// TODO: Watch volumes
	psc.podStoreSynced = psc.podController.HasSynced
	psc.syncHandler = psc.Sync
	return psc
}
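
A sketch of how this controller might be wired up. The podInformer, kubeClient, and stopCh values are assumed to exist, and the Run method is an assumption, since it does not appear in this excerpt:

// Hypothetical wiring for the controller above.
func startPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, stopCh <-chan struct{}) {
	psc := NewPetSetController(podInformer, kubeClient, 30*time.Second)
	go podInformer.Run(stopCh)
	// Run(workers, stopCh) is assumed; it is not shown in the excerpt above.
	go psc.Run(1, stopCh)
}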
Code example #4
// updatePetCount attempts to update the Status.Replicas of the given PetSet, with a single GET/PUT retry.
func updatePetCount(kubeClient *client.Client, ps apps.PetSet, numPets int) (updateErr error) {
	if ps.Status.Replicas == numPets || kubeClient == nil {
		return nil
	}
	psClient := kubeClient.Apps().PetSets(ps.Namespace)
	var getErr error
	for i, ps := 0, &ps; ; i++ {
		glog.V(4).Infof(fmt.Sprintf("Updating replica count for PetSet: %s/%s, ", ps.Namespace, ps.Name) +
			fmt.Sprintf("replicas %d->%d (need %d), ", ps.Status.Replicas, numPets, ps.Spec.Replicas))

		ps.Status = apps.PetSetStatus{Replicas: numPets}
		_, updateErr = psClient.UpdateStatus(ps)
		if updateErr == nil || i >= statusUpdateRetries {
			return updateErr
		}
		if ps, getErr = psClient.Get(ps.Name); getErr != nil {
			return getErr
		}
	}
}
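
A hypothetical caller, assuming kubeClient and ps exist; the target count of 3 is arbitrary:

// Hypothetical: push an observed pet count of 3 into the PetSet's status.
func recordPetCount(kubeClient *client.Client, ps apps.PetSet) {
	if err := updatePetCount(kubeClient, ps, 3); err != nil {
		glog.Errorf("failed to update replica count for %s/%s: %v", ps.Namespace, ps.Name, err)
	}
}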
Code example #5
// dumpClusterInfo dumps the node list and, for each selected namespace, events,
// replication controllers, services, daemon sets, deployments, replica sets, pods,
// and per-pod logs as JSON/text via the configured output writer.
func dumpClusterInfo(f *cmdutil.Factory, cmd *cobra.Command, args []string, out io.Writer) error {
	var c *unversioned.Client
	var err error
	if c, err = f.Client(); err != nil {
		return err
	}
	printer, _, err := kubectl.GetPrinter("json", "")
	if err != nil {
		return err
	}

	nodes, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		return err
	}

	if err := printer.PrintObj(nodes, setupOutputWriter(cmd, out, "nodes.json")); err != nil {
		return err
	}

	var namespaces []string
	if cmdutil.GetFlagBool(cmd, "all-namespaces") {
		namespaceList, err := c.Namespaces().List(api.ListOptions{})
		if err != nil {
			return err
		}
		for ix := range namespaceList.Items {
			namespaces = append(namespaces, namespaceList.Items[ix].Name)
		}
	} else {
		namespaces = cmdutil.GetFlagStringSlice(cmd, "namespaces")
		if len(namespaces) == 0 {
			cmdNamespace, _, err := f.DefaultNamespace()
			if err != nil {
				return err
			}
			namespaces = []string{
				api.NamespaceSystem,
				cmdNamespace,
			}
		}
	}
	for _, namespace := range namespaces {
		// TODO: this is repetitive in the extreme.  Use reflection or
		// something to make this a for loop.
		events, err := c.Events(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(events, setupOutputWriter(cmd, out, path.Join(namespace, "events.json"))); err != nil {
			return err
		}

		rcs, err := c.ReplicationControllers(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(rcs, setupOutputWriter(cmd, out, path.Join(namespace, "replication-controllers.json"))); err != nil {
			return err
		}

		svcs, err := c.Services(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(svcs, setupOutputWriter(cmd, out, path.Join(namespace, "services.json"))); err != nil {
			return err
		}

		sets, err := c.DaemonSets(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(sets, setupOutputWriter(cmd, out, path.Join(namespace, "daemonsets.json"))); err != nil {
			return err
		}

		deps, err := c.Deployments(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(deps, setupOutputWriter(cmd, out, path.Join(namespace, "deployments.json"))); err != nil {
			return err
		}

		rps, err := c.ReplicaSets(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(rps, setupOutputWriter(cmd, out, path.Join(namespace, "replicasets.json"))); err != nil {
			return err
		}

		pods, err := c.Pods(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}

		if err := printer.PrintObj(pods, setupOutputWriter(cmd, out, path.Join(namespace, "pods.json"))); err != nil {
			return err
		}

		for ix := range pods.Items {
			pod := &pods.Items[ix]
			writer := setupOutputWriter(cmd, out, path.Join(namespace, pod.Name, "logs.txt"))
			writer.Write([]byte(fmt.Sprintf("==== START logs for %s/%s ====\n", pod.Namespace, pod.Name)))
			request, err := f.LogsForObject(pod, &api.PodLogOptions{})
			if err != nil {
				return err
			}

			data, err := request.DoRaw()
			if err != nil {
				return err
			}
			writer.Write(data)
			writer.Write([]byte(fmt.Sprintf("==== END logs for %s/%s ====\n", pod.Namespace, pod.Name)))
		}
	}
	dir := cmdutil.GetFlagString(cmd, "output-directory")
	if len(dir) == 0 {
		dir = "."
	}
	if dir != "-" {
		fmt.Fprintf(out, "Cluster info dumped to %s", dir)
	}
	return nil
}
Code example #6
// podClient returns the pod client for the given kubeClient/ns.
func podClient(kubeClient *client.Client, ns string) client.PodInterface {
	return kubeClient.Pods(ns)
}
Code example #7
// claimClient returns the pvcClient for the given kubeClient/ns.
func claimClient(kubeClient *client.Client, ns string) client.PersistentVolumeClaimInterface {
	return kubeClient.PersistentVolumeClaims(ns)
}
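
A sketch that exercises both helpers above (podClient and claimClient), assuming kubeClient and ns exist; the List calls mirror the ListOptions usage in the earlier examples:

// Hypothetical: count pods and persistent volume claims in a namespace.
func listPodsAndClaims(kubeClient *client.Client, ns string) error {
	pods, err := podClient(kubeClient, ns).List(api.ListOptions{})
	if err != nil {
		return err
	}
	claims, err := claimClient(kubeClient, ns).List(api.ListOptions{})
	if err != nil {
		return err
	}
	glog.V(4).Infof("namespace %s: %d pods, %d claims", ns, len(pods.Items), len(claims.Items))
	return nil
}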