// RunVersion prints the client version and, unless --client is set, the
// server version reported by the discovery endpoint.
func RunVersion(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error {
	v := fmt.Sprintf("%#v", version.Get())
	if cmdutil.GetFlagBool(cmd, "short") {
		v = version.Get().GitVersion
	}
	fmt.Fprintf(out, "Client Version: %s\n", v)
	if cmdutil.GetFlagBool(cmd, "client") {
		return nil
	}

	clientset, err := f.ClientSet()
	if err != nil {
		return err
	}

	serverVersion, err := clientset.Discovery().ServerVersion()
	if err != nil {
		return err
	}

	v = fmt.Sprintf("%#v", *serverVersion)
	if cmdutil.GetFlagBool(cmd, "short") {
		v = serverVersion.GitVersion
	}
	fmt.Fprintf(out, "Server Version: %s\n", v)
	return nil
}
// PrintAndExitIfRequested will check if the --version flag was passed
// and, if so, print the version and exit.
func PrintAndExitIfRequested() {
	if *versionFlag == VersionRaw {
		fmt.Printf("%#v\n", version.Get())
		os.Exit(0)
	} else if *versionFlag == VersionTrue {
		fmt.Printf("Kubernetes %s\n", version.Get())
		os.Exit(0)
	}
}
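// A minimal sketch of the flag state PrintAndExitIfRequested assumes. This is
// a hypothetical simplification: the real verflag package registers a custom
// pflag.Value so that a bare --version behaves like --version=true, but the
// control flow above is the same.
const (
	VersionFalse = "false"
	VersionTrue  = "true"
	VersionRaw   = "raw"
)

// versionFlag is assumed to be registered before flags are parsed.
var versionFlag = pflag.String("version", VersionFalse, "Print version information and quit")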
// DefaultKubernetesUserAgent returns the default user agent that clients can use.
func DefaultKubernetesUserAgent() string {
	commit := version.Get().GitCommit
	if len(commit) > 7 {
		commit = commit[:7]
	}
	if len(commit) == 0 {
		commit = "unknown"
	}
	version := version.Get().GitVersion
	seg := strings.SplitN(version, "-", 2)
	version = seg[0]
	return fmt.Sprintf("%s/%s (%s/%s) kubernetes/%s",
		path.Base(os.Args[0]), version, gruntime.GOOS, gruntime.GOARCH, commit)
}
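// Hypothetical usage of DefaultKubernetesUserAgent: attach the agent string to
// a rest client config. For a kubectl binary built from v1.4.0 at commit
// a16c0a7, the result would look like "kubectl/v1.4.0 (linux/amd64) kubernetes/a16c0a7".
// The Host value here is a placeholder, not taken from the source.
config := &restclient.Config{Host: "https://127.0.0.1:6443"}
config.UserAgent = DefaultKubernetesUserAgent()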
// NewVersionCommand creates a command for displaying the version of this binary
func NewVersionCommand(basename string, options Options) *cobra.Command {
	return &cobra.Command{
		Use:   "version",
		Short: "Display version",
		Run: func(c *cobra.Command, args []string) {
			fmt.Printf("%s %v\n", basename, Get())
			fmt.Printf("kubernetes %v\n", kubeversion.Get())
			if options.PrintEtcdVersion {
				fmt.Printf("etcd %v\n", etcdversion.Version)
			}
			if options.PrintClientFeatures {
				features := []string{}
				if tokencmd.BasicEnabled() {
					features = append(features, "Basic-Auth")
				}
				if tokencmd.GSSAPIEnabled() {
					features = append(features, "GSSAPI")
					features = append(features, "Kerberos") // GSSAPI or SSPI
					features = append(features, "SPNEGO")   // GSSAPI or SSPI
				}
				fmt.Printf("features: %s\n", strings.Join(features, " "))
			}
		},
	}
}
// Complete fills in any fields not set that are required to have valid data.
// It mutates the receiver.
func (c *Config) Complete() completedConfig {
	c.GenericConfig.Complete()

	version := version.Get()
	c.GenericConfig.Version = &version

	return completedConfig{c}
}
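// Hypothetical call site for Complete: the returned completedConfig gates
// server construction, mirroring the config.Complete().New() pattern used in
// the test setup functions later in this section. config is assumed to be a
// fully populated *Config.
master, err := config.Complete().New()
if err != nil {
	glog.Fatalf("error constructing master: %v", err)
}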
// ServerVersion records a fake "get version" action and reports the client's
// own compiled-in version as the server version.
func (c *FakeDiscovery) ServerVersion() (*version.Info, error) {
	action := core.ActionImpl{}
	action.Verb = "get"
	action.Resource = unversioned.GroupVersionResource{Resource: "version"}

	c.Invokes(action, nil)

	versionInfo := version.Get()
	return &versionInfo, nil
}
// ServerVersion is the older fake implementation; like FakeDiscovery above,
// it records the action and returns the compiled-in version.
func (c *Fake) ServerVersion() (*version.Info, error) {
	action := ActionImpl{}
	action.Verb = "get"
	action.Resource = "version"

	c.Invokes(action, nil)

	versionInfo := version.Get()
	return &versionInfo, nil
}
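// Hypothetical test usage: because both fakes above return version.Get()
// verbatim, a test can assert the fake "server" version against the
// compiled-in client version. Assumes the fake clientset package used by the
// node status tests later in this section.
client := fake.NewSimpleClientset()
info, err := client.Discovery().ServerVersion()
if err != nil {
	t.Fatalf("unexpected error: %v", err)
}
if expected := version.Get(); !reflect.DeepEqual(expected, *info) {
	t.Errorf("expected %#v, got %#v", expected, *info)
}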
// NewVersionCommand creates a command for displaying the version of this binary
func NewVersionCommand(basename string) *cobra.Command {
	return &cobra.Command{
		Use:   "version",
		Short: "Display version",
		Run: func(c *cobra.Command, args []string) {
			fmt.Printf("%s %v\n", basename, Get())
			fmt.Printf("kubernetes %v\n", kubeversion.Get())
		},
	}
}
// setNodeStatusVersionInfo sets version info for the node.
func (kl *Kubelet) setNodeStatusVersionInfo(node *api.Node) {
	verinfo, err := kl.cadvisor.VersionInfo()
	if err != nil {
		glog.Errorf("Error getting version info: %v", err)
	} else {
		node.Status.NodeInfo.KernelVersion = verinfo.KernelVersion
		node.Status.NodeInfo.OSImage = verinfo.ContainerOsVersion

		runtimeVersion := "Unknown"
		if runtimeVer, err := kl.containerRuntime.Version(); err == nil {
			runtimeVersion = runtimeVer.String()
		}
		node.Status.NodeInfo.ContainerRuntimeVersion = fmt.Sprintf("%s://%s", kl.containerRuntime.Type(), runtimeVersion)

		node.Status.NodeInfo.KubeletVersion = version.Get().String()
		// TODO: kube-proxy might be different version from kubelet in the future
		node.Status.NodeInfo.KubeProxyVersion = version.Get().String()
	}
}
// Run logs the version and flags, starts the SkyDNS server, and then blocks
// serving the healthz endpoints. It does not return.
func (server *KubeDNSServer) Run() {
	glog.Infof("%+v", version.Get())
	pflag.VisitAll(func(flag *pflag.Flag) {
		glog.Infof("FLAG: --%s=%q", flag.Name, flag.Value)
	})
	setupSignalHandlers()
	server.startSkyDNSServer()
	server.kd.Start()
	server.setupHealthzHandlers()
	glog.Infof("Setting up Healthz Handler(/readiness, /cache) on port :%d", server.healthzPort)
	glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", server.healthzPort), nil))
}
// TestVersion tests /version
func TestVersion(t *testing.T) {
	s, etcdserver, _, _ := newMaster(t)
	defer etcdserver.Terminate(t)

	req, _ := http.NewRequest("GET", "/version", nil)
	resp := httptest.NewRecorder()
	s.GenericAPIServer.InsecureHandler.ServeHTTP(resp, req)
	if resp.Code != 200 {
		t.Fatalf("expected http 200, got: %d", resp.Code)
	}

	var info version.Info
	err := json.NewDecoder(resp.Body).Decode(&info)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if !reflect.DeepEqual(version.Get(), info) {
		t.Errorf("Expected %#v, Got %#v", version.Get(), info)
	}
}
// NewVersionCommand creates a command for displaying the version of this binary
func NewVersionCommand(basename string, printEtcdVersion bool) *cobra.Command {
	return &cobra.Command{
		Use:   "version",
		Short: "Display version",
		Run: func(c *cobra.Command, args []string) {
			fmt.Printf("%s %v\n", basename, Get())
			fmt.Printf("kubernetes %v\n", kubeversion.Get())
			if printEtcdVersion {
				fmt.Printf("etcd %v\n", etcdversion.Version)
			}
		},
	}
}
// MatchesServerVersion queries the server to compare the build version
// (git hash) of the client with the server's build version. It returns an error
// if it failed to contact the server or if the versions are not an exact match.
func MatchesServerVersion(client DiscoveryInterface) error {
	cVer := version.Get()
	sVer, err := client.ServerVersion()
	if err != nil {
		return fmt.Errorf("couldn't read version from server: %v", err)
	}
	// GitVersion includes GitCommit and GitTreeState, but best to be safe?
	if cVer.GitVersion != sVer.GitVersion || cVer.GitCommit != sVer.GitCommit || cVer.GitTreeState != sVer.GitTreeState {
		return fmt.Errorf("server version (%#v) differs from client version (%#v)", sVer, cVer)
	}
	return nil
}
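// Hypothetical call site for MatchesServerVersion: fail fast during client
// setup when the client and server builds diverge. clientset is assumed to be
// an already-constructed clientset whose Discovery() satisfies DiscoveryInterface.
if err := MatchesServerVersion(clientset.Discovery()); err != nil {
	glog.Fatalf("client/server version mismatch: %v", err)
}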
// Run runs the CMServer. This should never exit.
func Run(s *options.CMServer) error {
	glog.Infof("%+v", version.Get())
	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(s.ControllerManagerConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}

	// Create the config to talk to federation-apiserver.
	kubeconfigGetter := util.KubeconfigGetterForSecret(KubeconfigSecretName)
	restClientCfg, err := clientcmd.BuildConfigFromKubeconfigGetter(s.Master, kubeconfigGetter)
	if err != nil || restClientCfg == nil {
		// Retry with the deprecated name in 1.4.
		// TODO(madhusudancs): Remove this in 1.5.
		var depErr error
		kubeconfigGetter := util.KubeconfigGetterForSecret(DeprecatedKubeconfigSecretName)
		restClientCfg, depErr = clientcmd.BuildConfigFromKubeconfigGetter(s.Master, kubeconfigGetter)
		if depErr != nil {
			return fmt.Errorf("failed to find the secret containing Federation API server kubeconfig, tried the secret name %s and the deprecated name %s: %v, %v", KubeconfigSecretName, DeprecatedKubeconfigSecretName, err, depErr)
		}
	}

	// Override restClientCfg qps/burst settings from flags
	restClientCfg.QPS = s.APIServerQPS
	restClientCfg.Burst = s.APIServerBurst

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	run := func() {
		err := StartControllers(s, restClientCfg)
		glog.Fatalf("error running controllers: %v", err)
		panic("unreachable")
	}
	run()
	panic("unreachable")
}
func main() {
	config := options.NewKubeDNSConfig()
	config.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	glog.V(0).Infof("version: %+v", version.Get())

	server := app.NewKubeDNSServerDefault(config)
	server.Run()
}
// setUp is a convenience function for setting up for (most) tests.
func setUp(t *testing.T) (*Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) {
	server, storageConfig := etcdtesting.NewUnsecuredEtcd3TestClientServer(t)

	config := &Config{
		GenericConfig:        genericapiserver.NewConfig(),
		APIServerServicePort: 443,
		MasterCount:          1,
	}

	resourceEncoding := genericapiserver.NewDefaultResourceEncodingConfig()
	resourceEncoding.SetVersionEncoding(api.GroupName, registered.GroupOrDie(api.GroupName).GroupVersion, unversioned.GroupVersion{Group: api.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetVersionEncoding(autoscaling.GroupName, *testapi.Autoscaling.GroupVersion(), unversioned.GroupVersion{Group: autoscaling.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), unversioned.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetVersionEncoding(apps.GroupName, *testapi.Apps.GroupVersion(), unversioned.GroupVersion{Group: apps.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetVersionEncoding(extensions.GroupName, *testapi.Extensions.GroupVersion(), unversioned.GroupVersion{Group: extensions.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetVersionEncoding(rbac.GroupName, *testapi.Rbac.GroupVersion(), unversioned.GroupVersion{Group: rbac.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetVersionEncoding(certificates.GroupName, *testapi.Certificates.GroupVersion(), unversioned.GroupVersion{Group: certificates.GroupName, Version: runtime.APIVersionInternal})
	storageFactory := genericapiserver.NewDefaultStorageFactory(*storageConfig, testapi.StorageMediaType(), api.Codecs, resourceEncoding, DefaultAPIResourceConfigSource())

	kubeVersion := version.Get()
	config.GenericConfig.Version = &kubeVersion
	config.StorageFactory = storageFactory
	config.GenericConfig.LoopbackClientConfig = &restclient.Config{APIPath: "/api", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
	config.GenericConfig.APIResourceConfigSource = DefaultAPIResourceConfigSource()
	config.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
	config.GenericConfig.LegacyAPIGroupPrefixes = sets.NewString("/api")
	config.GenericConfig.RequestContextMapper = api.NewRequestContextMapper()
	config.GenericConfig.EnableMetrics = true
	config.EnableCoreControllers = false
	config.KubeletClientConfig = kubeletclient.KubeletClientConfig{Port: 10250}
	config.ProxyTransport = utilnet.SetTransportDefaults(&http.Transport{
		Dial:            func(network, addr string) (net.Conn, error) { return nil, nil },
		TLSClientConfig: &tls.Config{},
	})

	master, err := config.Complete().New()
	if err != nil {
		t.Fatal(err)
	}

	return master, server, *config, assert.New(t)
}
// TestNotRestRoutesHaveAuth checks that special non-routes are behind authz/authn.
func TestNotRestRoutesHaveAuth(t *testing.T) {
	etcdserver, config, _ := setUp(t)
	defer etcdserver.Terminate(t)

	authz := mockAuthorizer{}

	config.LegacyAPIGroupPrefixes = sets.NewString("/apiPrefix")
	config.APIGroupPrefix = "/apiGroupPrefix"
	config.Authorizer = &authz

	config.EnableSwaggerUI = true
	config.EnableIndex = true
	config.EnableProfiling = true
	config.EnableSwaggerSupport = true

	kubeVersion := version.Get()
	config.Version = &kubeVersion

	s, err := config.SkipComplete().New()
	if err != nil {
		t.Fatalf("Error in bringing up the server: %v", err)
	}

	for _, test := range []struct {
		route string
	}{
		{"/"},
		{"/swagger-ui/"},
		{"/debug/pprof/"},
		{"/version"},
	} {
		resp := httptest.NewRecorder()
		req, _ := http.NewRequest("GET", test.route, nil)
		s.Handler.ServeHTTP(resp, req)
		if resp.Code != 200 {
			t.Errorf("route %q expected to work: code %d", test.route, resp.Code)
			continue
		}

		if authz.lastURI != test.route {
			t.Errorf("route %q expected to go through authorization, last route did: %q", test.route, authz.lastURI)
		}
	}
}
// MatchesServerVersion queries the server to compare the build version
// (git hash) of the client with the server's build version. It returns an error
// if it failed to contact the server or if the versions are not an exact match.
func MatchesServerVersion(client *Client, c *Config) error {
	var err error
	if client == nil {
		client, err = New(c)
		if err != nil {
			return err
		}
	}
	clientVersion := version.Get()
	serverVersion, err := client.ServerVersion()
	if err != nil {
		return fmt.Errorf("couldn't read version from server: %v", err)
	}
	if s := *serverVersion; !reflect.DeepEqual(clientVersion, s) {
		return fmt.Errorf("server version (%#v) differs from client version (%#v)", s, clientVersion)
	}
	return nil
}
// setUp is a convenience function for setting up for (most) tests.
func setUp(t *testing.T) (*Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) {
	server, storageConfig := etcdtesting.NewUnsecuredEtcd3TestClientServer(t)

	config := &Config{
		GenericConfig: genericapiserver.NewConfig(),
	}

	resourceEncoding := genericapiserver.NewDefaultResourceEncodingConfig()
	resourceEncoding.SetVersionEncoding(api.GroupName, registered.GroupOrDie(api.GroupName).GroupVersion, unversioned.GroupVersion{Group: api.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetVersionEncoding(autoscaling.GroupName, *testapi.Autoscaling.GroupVersion(), unversioned.GroupVersion{Group: autoscaling.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), unversioned.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetVersionEncoding(apps.GroupName, *testapi.Apps.GroupVersion(), unversioned.GroupVersion{Group: apps.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetVersionEncoding(extensions.GroupName, *testapi.Extensions.GroupVersion(), unversioned.GroupVersion{Group: extensions.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetVersionEncoding(rbac.GroupName, *testapi.Rbac.GroupVersion(), unversioned.GroupVersion{Group: rbac.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetVersionEncoding(certificates.GroupName, *testapi.Certificates.GroupVersion(), unversioned.GroupVersion{Group: certificates.GroupName, Version: runtime.APIVersionInternal})
	storageFactory := genericapiserver.NewDefaultStorageFactory(*storageConfig, testapi.StorageMediaType(), api.Codecs, resourceEncoding, DefaultAPIResourceConfigSource())

	kubeVersion := version.Get()
	config.GenericConfig.Version = &kubeVersion
	config.StorageFactory = storageFactory
	config.GenericConfig.LoopbackClientConfig = &restclient.Config{APIPath: "/api", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
	config.GenericConfig.APIResourceConfigSource = DefaultAPIResourceConfigSource()
	config.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
	config.KubeletClient = client.FakeKubeletClient{}
	config.GenericConfig.LegacyAPIGroupPrefixes = sets.NewString("/api")
	config.GenericConfig.APIGroupPrefix = "/apis"
	config.GenericConfig.ProxyDialer = func(network, addr string) (net.Conn, error) { return nil, nil }
	config.GenericConfig.ProxyTLSClientConfig = &tls.Config{}
	config.GenericConfig.RequestContextMapper = api.NewRequestContextMapper()
	config.EnableCoreControllers = false

	master, err := config.Complete().New()
	if err != nil {
		t.Fatal(err)
	}

	fakeNodeClient := fake.NewSimpleClientset(registrytest.MakeNodeList([]string{"node1", "node2"}, api.NodeResources{}))
	master.nodeClient = fakeNodeClient.Core().Nodes()

	return master, server, *config, assert.New(t)
}
// NewCmdInit defines the `init` command that bootstraps a federation
// control plane inside a set of host clusters.
func NewCmdInit(cmdOut io.Writer, config util.AdminConfig) *cobra.Command {
	cmd := &cobra.Command{
		Use:     "init FEDERATION_NAME --host-cluster-context=HOST_CONTEXT",
		Short:   "init initializes a federation control plane",
		Long:    init_long,
		Example: init_example,
		Run: func(cmd *cobra.Command, args []string) {
			err := initFederation(cmdOut, config, cmd, args)
			cmdutil.CheckErr(err)
		},
	}

	defaultImage := fmt.Sprintf("%s:%s", hyperkubeImageName, version.Get())

	util.AddSubcommandFlags(cmd)
	cmd.Flags().String("dns-zone-name", "", "DNS suffix for this federation. Federated Service DNS names are published with this suffix.")
	cmd.Flags().String("image", defaultImage, "Image to use for federation API server and controller manager binaries.")
	return cmd
}
// MatchesServerVersion queries the server to compare the build version
// (git hash) of the client with the server's build version. It returns an error
// if it failed to contact the server or if the versions are not an exact match.
func MatchesServerVersion(client *Client, c *restclient.Config) error {
	var err error
	if client == nil {
		client, err = New(c)
		if err != nil {
			return err
		}
	}
	cVer := version.Get()
	sVer, err := client.Discovery().ServerVersion()
	if err != nil {
		return fmt.Errorf("couldn't read version from server: %v", err)
	}
	// GitVersion includes GitCommit and GitTreeState, but best to be safe?
	if cVer.GitVersion != sVer.GitVersion || cVer.GitCommit != sVer.GitCommit || cVer.GitTreeState != sVer.GitTreeState {
		return fmt.Errorf("server version (%#v) differs from client version (%#v)", sVer, cVer)
	}
	return nil
}
// NewCmdInit defines the `init` command that bootstraps a federation
// control plane inside a set of host clusters.
func NewCmdInit(cmdOut io.Writer, config util.AdminConfig) *cobra.Command {
	cmd := &cobra.Command{
		Use:     "init FEDERATION_NAME --host-cluster-context=HOST_CONTEXT",
		Short:   "init initializes a federation control plane",
		Long:    init_long,
		Example: init_example,
		Run: func(cmd *cobra.Command, args []string) {
			err := initFederation(cmdOut, config, cmd, args)
			cmdutil.CheckErr(err)
		},
	}

	defaultImage := fmt.Sprintf("%s:%s", hyperkubeImageName, version.Get())

	util.AddSubcommandFlags(cmd)
	cmd.Flags().String("dns-zone-name", "", "DNS suffix for this federation. Federated Service DNS names are published with this suffix.")
	cmd.Flags().String("image", defaultImage, "Image to use for federation API server and controller manager binaries.")
	cmd.Flags().String("dns-provider", "google-clouddns", "DNS provider to be used for this deployment.")
	cmd.Flags().String("etcd-pv-capacity", "10Gi", "Size of persistent volume claim to be used for etcd.")
	cmd.Flags().Bool("dry-run", false, "dry run without sending commands to server.")
	return cmd
}
func TestUpdateNewNodeStatus(t *testing.T) {
	// generate one more than maxImagesInNodeStatus in inputImageList
	inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1)
	testKubelet := newTestKubeletWithImageList(t, inputImageList, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
		{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
	}}).ReactionChain
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 10E9, // 10G
	}
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

	// Make kubelet report that it has sufficient disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}

	expectedNode := &api.Node{
		ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
		Spec:       api.NodeSpec{},
		Status: api.NodeStatus{
			Conditions: []api.NodeCondition{
				{
					Type:               api.NodeOutOfDisk,
					Status:             api.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            "kubelet has sufficient disk space available",
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
				{
					Type:               api.NodeMemoryPressure,
					Status:             api.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            "kubelet has sufficient memory available",
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
				{
					Type:               api.NodeDiskPressure,
					Status:             api.ConditionFalse,
					Reason:             "KubeletHasNoDiskPressure",
					Message:            "kubelet has no disk pressure",
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
				{
					Type:               api.NodeReady,
					Status:             api.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            "kubelet is posting ready status",
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
			},
			NodeInfo: api.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: api.ResourceList{
				api.ResourceCPU:       *resource.NewMilliQuantity(2000, resource.DecimalSI),
				api.ResourceMemory:    *resource.NewQuantity(10E9, resource.BinarySI),
				api.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: api.ResourceList{
				api.ResourceCPU:       *resource.NewMilliQuantity(1800, resource.DecimalSI),
				api.ResourceMemory:    *resource.NewQuantity(9900E6, resource.BinarySI),
				api.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Addresses: []api.NodeAddress{
				{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
				{Type: api.NodeInternalIP, Address: "127.0.0.1"},
				{Type: api.NodeHostName, Address: testKubeletHostname},
			},
			Images: expectedImageList,
		},
	}

	kubelet.updateRuntimeUp()
	if err := kubelet.updateNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	actions := kubeClient.Actions()
	if len(actions) != 2 {
		t.Fatalf("unexpected actions: %v", actions)
	}
	if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
		t.Fatalf("unexpected actions: %v", actions)
	}
	updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node)
	if !ok {
		t.Errorf("unexpected object type")
	}
	for i, cond := range updatedNode.Status.Conditions {
		if cond.LastHeartbeatTime.IsZero() {
			t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
		}
		if cond.LastTransitionTime.IsZero() {
			t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type)
		}
		updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
		updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
	}

	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
		t.Errorf("unexpected node condition order. NodeReady should be last.")
	}

	if maxImagesInNodeStatus != len(updatedNode.Status.Images) {
		t.Errorf("unexpected image list length in node status, expected: %v, got: %v", maxImagesInNodeStatus, len(updatedNode.Status.Images))
	} else if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
		t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
	}
}
func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	clock := testKubelet.fakeClock
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
		{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
	}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 10E9,
	}
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

	// Make kubelet report that it has sufficient disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}

	expectedNode := &api.Node{
		ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
		Spec:       api.NodeSpec{},
		Status: api.NodeStatus{
			Conditions: []api.NodeCondition{
				{
					Type:               api.NodeOutOfDisk,
					Status:             api.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            "kubelet has sufficient disk space available",
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
				{
					Type:               api.NodeMemoryPressure,
					Status:             api.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            "kubelet has sufficient memory available",
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
				{
					Type:               api.NodeDiskPressure,
					Status:             api.ConditionFalse,
					Reason:             "KubeletHasNoDiskPressure",
					Message:            "kubelet has no disk pressure",
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
				{}, // placeholder
			},
			NodeInfo: api.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: api.ResourceList{
				api.ResourceCPU:       *resource.NewMilliQuantity(2000, resource.DecimalSI),
				api.ResourceMemory:    *resource.NewQuantity(10E9, resource.BinarySI),
				api.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: api.ResourceList{
				api.ResourceCPU:       *resource.NewMilliQuantity(1800, resource.DecimalSI),
				api.ResourceMemory:    *resource.NewQuantity(9900E6, resource.BinarySI),
				api.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Addresses: []api.NodeAddress{
				{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
				{Type: api.NodeInternalIP, Address: "127.0.0.1"},
				{Type: api.NodeHostName, Address: testKubeletHostname},
			},
			Images: []api.ContainerImage{
				{
					Names:     []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
					SizeBytes: 456,
				},
				{
					Names:     []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
					SizeBytes: 123,
				},
			},
		},
	}

	checkNodeStatus := func(status api.ConditionStatus, reason string) {
		kubeClient.ClearActions()
		if err := kubelet.updateNodeStatus(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		actions := kubeClient.Actions()
		if len(actions) != 2 {
			t.Fatalf("unexpected actions: %v", actions)
		}
		if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
			t.Fatalf("unexpected actions: %v", actions)
		}
		updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node)
		if !ok {
			t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
		}

		for i, cond := range updatedNode.Status.Conditions {
			if cond.LastHeartbeatTime.IsZero() {
				t.Errorf("unexpected zero last probe timestamp")
			}
			if cond.LastTransitionTime.IsZero() {
				t.Errorf("unexpected zero last transition timestamp")
			}
			updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
			updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
		}

		// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
		lastIndex := len(updatedNode.Status.Conditions) - 1
		if updatedNode.Status.Conditions[lastIndex].Type != api.NodeReady {
			t.Errorf("unexpected node condition order. NodeReady should be last.")
		}
		if updatedNode.Status.Conditions[lastIndex].Message == "" {
			t.Errorf("unexpected empty condition message")
		}
		updatedNode.Status.Conditions[lastIndex].Message = ""
		expectedNode.Status.Conditions[lastIndex] = api.NodeCondition{
			Type:               api.NodeReady,
			Status:             status,
			Reason:             reason,
			LastHeartbeatTime:  unversioned.Time{},
			LastTransitionTime: unversioned.Time{},
		}
		if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
			t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
		}
	}

	// TODO(random-liu): Refactor the unit test to be table driven test.
	// Should report kubelet not ready if the runtime check is out of date
	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
	kubelet.updateRuntimeUp()
	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")

	// Should report kubelet ready if the runtime check is updated
	clock.SetTime(time.Now())
	kubelet.updateRuntimeUp()
	checkNodeStatus(api.ConditionTrue, "KubeletReady")

	// Should report kubelet not ready if the runtime check is out of date
	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
	kubelet.updateRuntimeUp()
	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")

	// Should report kubelet not ready if the runtime check failed
	fakeRuntime := testKubelet.fakeRuntime
	// Inject error into fake runtime status check, node should be NotReady
	fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
	clock.SetTime(time.Now())
	kubelet.updateRuntimeUp()
	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")

	// Test cri integration.
	kubelet.kubeletConfiguration.EnableCRI = true
	fakeRuntime.StatusErr = nil

	// Should report node not ready if runtime status is nil.
	fakeRuntime.RuntimeStatus = nil
	kubelet.updateRuntimeUp()
	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")

	// Should report node not ready if runtime status is empty.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
	kubelet.updateRuntimeUp()
	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")

	// Should report node not ready if RuntimeReady is false.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: false},
			{Type: kubecontainer.NetworkReady, Status: true},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")

	// Should report node ready if RuntimeReady is true.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: true},
			{Type: kubecontainer.NetworkReady, Status: true},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(api.ConditionTrue, "KubeletReady")

	// Should report node not ready if NetworkReady is false.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: true},
			{Type: kubecontainer.NetworkReady, Status: false},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
}
func TestUpdateExistingNodeStatus(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
			Spec:       api.NodeSpec{},
			Status: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:               api.NodeOutOfDisk,
						Status:             api.ConditionTrue,
						Reason:             "KubeletOutOfDisk",
						Message:            "out of disk space",
						LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
					{
						Type:               api.NodeMemoryPressure,
						Status:             api.ConditionFalse,
						Reason:             "KubeletHasSufficientMemory",
						Message:            "kubelet has sufficient memory available",
						LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
					{
						Type:               api.NodeDiskPressure,
						Status:             api.ConditionFalse,
						Reason:             "KubeletHasSufficientDisk",
						Message:            "kubelet has sufficient disk space available",
						LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
					{
						Type:               api.NodeReady,
						Status:             api.ConditionTrue,
						Reason:             "KubeletReady",
						Message:            "kubelet is posting ready status",
						LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
				},
				Capacity: api.ResourceList{
					api.ResourceCPU:    *resource.NewMilliQuantity(3000, resource.DecimalSI),
					api.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
					api.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
				},
				Allocatable: api.ResourceList{
					api.ResourceCPU:    *resource.NewMilliQuantity(2800, resource.DecimalSI),
					api.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
					api.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
				},
			},
		},
	}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 20E9,
	}
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

	// Make kubelet report that it is out of disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}

	expectedNode := &api.Node{
		ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
		Spec:       api.NodeSpec{},
		Status: api.NodeStatus{
			Conditions: []api.NodeCondition{
				{
					Type:               api.NodeOutOfDisk,
					Status:             api.ConditionTrue,
					Reason:             "KubeletOutOfDisk",
					Message:            "out of disk space",
					LastHeartbeatTime:  unversioned.Time{}, // placeholder
					LastTransitionTime: unversioned.Time{}, // placeholder
				},
				{
					Type:               api.NodeMemoryPressure,
					Status:             api.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            "kubelet has sufficient memory available",
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
				{
					Type:               api.NodeDiskPressure,
					Status:             api.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            "kubelet has sufficient disk space available",
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
				{
					Type:               api.NodeReady,
					Status:             api.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            "kubelet is posting ready status",
					LastHeartbeatTime:  unversioned.Time{}, // placeholder
					LastTransitionTime: unversioned.Time{}, // placeholder
				},
			},
			NodeInfo: api.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: api.ResourceList{
				api.ResourceCPU:       *resource.NewMilliQuantity(2000, resource.DecimalSI),
				api.ResourceMemory:    *resource.NewQuantity(20E9, resource.BinarySI),
				api.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: api.ResourceList{
				api.ResourceCPU:       *resource.NewMilliQuantity(1800, resource.DecimalSI),
				api.ResourceMemory:    *resource.NewQuantity(19900E6, resource.BinarySI),
				api.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Addresses: []api.NodeAddress{
				{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
				{Type: api.NodeInternalIP, Address: "127.0.0.1"},
				{Type: api.NodeHostName, Address: testKubeletHostname},
			},
			// images will be sorted from max to min in node status.
			Images: []api.ContainerImage{
				{
					Names:     []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
					SizeBytes: 456,
				},
				{
					Names:     []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
					SizeBytes: 123,
				},
			},
		},
	}

	kubelet.updateRuntimeUp()
	if err := kubelet.updateNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	actions := kubeClient.Actions()
	if len(actions) != 2 {
		t.Errorf("unexpected actions: %v", actions)
	}
	updateAction, ok := actions[1].(core.UpdateAction)
	if !ok {
		t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
	}
	updatedNode, ok := updateAction.GetObject().(*api.Node)
	if !ok {
		t.Errorf("unexpected object type")
	}
	for i, cond := range updatedNode.Status.Conditions {
		// Expect LastHeartbeatTime to be updated to Now, while LastTransitionTime to be the same.
		if old := unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; reflect.DeepEqual(cond.LastHeartbeatTime.Rfc3339Copy().UTC(), old) {
			t.Errorf("Condition %v LastProbeTime: expected \n%v\n, got \n%v", cond.Type, unversioned.Now(), old)
		}
		if got, want := cond.LastTransitionTime.Rfc3339Copy().UTC(), unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; !reflect.DeepEqual(got, want) {
			t.Errorf("Condition %v LastTransitionTime: expected \n%#v\n, got \n%#v", cond.Type, want, got)
		}
		updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
		updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
	}

	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
		t.Errorf("unexpected node condition order. NodeReady should be last.")
	}

	if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
		t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
	}
}
// handleVersion writes the server's version information.
func handleVersion(req *restful.Request, resp *restful.Response) {
	// TODO: use restful's Response methods
	writeRawJSON(http.StatusOK, version.Get(), resp.ResponseWriter)
}
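// A plausible sketch of the writeRawJSON helper that handleVersion relies on,
// assuming the signature used above: marshal the object and write it with a
// JSON content type. The real helper lives alongside the API server route
// installers; this version is illustrative only.
func writeRawJSON(statusCode int, object interface{}, w http.ResponseWriter) {
	output, err := json.MarshalIndent(object, "", "  ")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(statusCode)
	w.Write(output)
}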
func TestClient(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()

	ns := api.NamespaceDefault
	framework.DeleteAllEtcdKeys()
	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	info, err := client.Discovery().ServerVersion()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if e, a := version.Get(), *info; !reflect.DeepEqual(e, a) {
		t.Errorf("expected %#v, got %#v", e, a)
	}

	pods, err := client.Pods(ns).List(api.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(pods.Items) != 0 {
		t.Errorf("expected no pods, got %#v", pods)
	}

	// get a validation error
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "test",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name: "test",
				},
			},
		},
	}

	got, err := client.Pods(ns).Create(pod)
	if err == nil {
		t.Fatalf("unexpected non-error: %v", got)
	}

	// get a created pod
	pod.Spec.Containers[0].Image = "an-image"
	got, err = client.Pods(ns).Create(pod)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name == "" {
		t.Errorf("unexpected empty pod Name %v", got)
	}

	// pod is shown, but not scheduled
	pods, err = client.Pods(ns).List(api.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(pods.Items) != 1 {
		t.Errorf("expected one pod, got %#v", pods)
	}
	actual := pods.Items[0]
	if actual.Name != got.Name {
		t.Errorf("expected pod %#v, got %#v", got, actual)
	}
	if actual.Spec.NodeName != "" {
		t.Errorf("expected pod to be unscheduled, got %#v", actual)
	}
}
// setNodeStatus fills in the Status fields of the given Node, overwriting
// any fields that are currently set.
func (nm *realNodeManager) setNodeStatus(node *api.Node) error {
	// Set addresses for the node.
	if nm.cloud != nil {
		instances, ok := nm.cloud.Instances()
		if !ok {
			return fmt.Errorf("failed to get instances from cloud provider")
		}
		// TODO(roberthbailey): Can we do this without having credentials to talk
		// to the cloud provider?
		// TODO(justinsb): We can if CurrentNodeName() was actually CurrentNode() and returned an interface
		nodeAddresses, err := instances.NodeAddresses(nm.nodeName)
		if err != nil {
			return fmt.Errorf("failed to get node address from cloud provider: %v", err)
		}
		node.Status.Addresses = nodeAddresses
	} else {
		addr := net.ParseIP(nm.hostname)
		if addr != nil {
			node.Status.Addresses = []api.NodeAddress{
				{Type: api.NodeLegacyHostIP, Address: addr.String()},
				{Type: api.NodeInternalIP, Address: addr.String()},
			}
		} else {
			addrs, err := net.LookupIP(node.Name)
			if err != nil {
				return fmt.Errorf("can't get ip address of node %s: %v", node.Name, err)
			} else if len(addrs) == 0 {
				return fmt.Errorf("no ip address for node %v", node.Name)
			} else {
				// check all ip addresses for this node.Name and try to find the first non-loopback IPv4 address.
				// If no match is found, it uses the IP of the interface with gateway on it.
				for _, ip := range addrs {
					if ip.IsLoopback() {
						continue
					}
					if ip.To4() != nil {
						node.Status.Addresses = []api.NodeAddress{
							{Type: api.NodeLegacyHostIP, Address: ip.String()},
							{Type: api.NodeInternalIP, Address: ip.String()},
						}
						break
					}
				}
				if len(node.Status.Addresses) == 0 {
					ip, err := util.ChooseHostInterface()
					if err != nil {
						return err
					}
					node.Status.Addresses = []api.NodeAddress{
						{Type: api.NodeLegacyHostIP, Address: ip.String()},
						{Type: api.NodeInternalIP, Address: ip.String()},
					}
				}
			}
		}
	}

	// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
	// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
	info, err := nm.infoGetter.GetMachineInfo()
	if err != nil {
		// TODO(roberthbailey): This is required for test-cmd.sh to pass.
		// See if the test should be updated instead.
		node.Status.Capacity = api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(0, resource.DecimalSI),
			api.ResourceMemory: resource.MustParse("0Gi"),
			api.ResourcePods:   *resource.NewQuantity(int64(nm.pods), resource.DecimalSI),
		}
		glog.Errorf("Error getting machine info: %v", err)
	} else {
		node.Status.NodeInfo.MachineID = info.MachineID
		node.Status.NodeInfo.SystemUUID = info.SystemUUID
		node.Status.Capacity = CapacityFromMachineInfo(info)
		node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(int64(nm.pods), resource.DecimalSI)
		if node.Status.NodeInfo.BootID != "" && node.Status.NodeInfo.BootID != info.BootID {
			// TODO: This requires a transaction, either both node status is updated
			// and event is recorded or neither should happen, see issue #6055.
			nm.recorder.Eventf(nm.nodeRef, "Rebooted", "Node %s has been rebooted, boot id: %s", nm.nodeName, info.BootID)
		}
		node.Status.NodeInfo.BootID = info.BootID
	}

	verinfo, err := nm.infoGetter.GetVersionInfo()
	if err != nil {
		glog.Errorf("Error getting version info: %v", err)
	} else {
		node.Status.NodeInfo.KernelVersion = verinfo.KernelVersion
		node.Status.NodeInfo.OsImage = verinfo.ContainerOsVersion
		// TODO: Determine whether the runtime is docker or rocket
		node.Status.NodeInfo.ContainerRuntimeVersion = "docker://" + verinfo.DockerVersion
		node.Status.NodeInfo.KubeletVersion = version.Get().String()
		// TODO: kube-proxy might be different version from kubelet in the future
		node.Status.NodeInfo.KubeProxyVersion = version.Get().String()
	}

	node.Status.DaemonEndpoints = *nm.daemonEndpoints

	// Check whether container runtime can be reported as up.
	containerRuntimeUp := nm.infoGetter.ContainerRuntimeUp()
	// Check whether network is configured properly
	networkConfigured := nm.infoGetter.NetworkConfigured()

	currentTime := unversioned.Now()
	var newNodeReadyCondition api.NodeCondition
	var oldNodeReadyConditionStatus api.ConditionStatus
	if containerRuntimeUp && networkConfigured {
		newNodeReadyCondition = api.NodeCondition{
			Type:              api.NodeReady,
			Status:            api.ConditionTrue,
			Reason:            "KubeletReady",
			Message:           "kubelet is posting ready status",
			LastHeartbeatTime: currentTime,
		}
	} else {
		var messages []string
		if !containerRuntimeUp {
			messages = append(messages, "container runtime is down")
		}
		if !networkConfigured {
			messages = append(messages, "network not configured correctly")
		}
		newNodeReadyCondition = api.NodeCondition{
			Type:              api.NodeReady,
			Status:            api.ConditionFalse,
			Reason:            "KubeletNotReady",
			Message:           strings.Join(messages, ","),
			LastHeartbeatTime: currentTime,
		}
	}

	updated := false
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == api.NodeReady {
			oldNodeReadyConditionStatus = node.Status.Conditions[i].Status
			if oldNodeReadyConditionStatus == newNodeReadyCondition.Status {
				newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
			} else {
				newNodeReadyCondition.LastTransitionTime = currentTime
			}
			node.Status.Conditions[i] = newNodeReadyCondition
			updated = true
		}
	}
	if !updated {
		newNodeReadyCondition.LastTransitionTime = currentTime
		node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition)
	}
	if !updated || oldNodeReadyConditionStatus != newNodeReadyCondition.Status {
		if newNodeReadyCondition.Status == api.ConditionTrue {
			nm.recordNodeStatusEvent("NodeReady")
		} else {
			nm.recordNodeStatusEvent("NodeNotReady")
		}
	}

	if oldNodeUnschedulable != node.Spec.Unschedulable {
		if node.Spec.Unschedulable {
			nm.recordNodeStatusEvent("NodeNotSchedulable")
		} else {
			nm.recordNodeStatusEvent("NodeSchedulable")
		}
		oldNodeUnschedulable = node.Spec.Unschedulable
	}
	return nil
}
// handleVersion writes the server's version information.
func handleVersion(req *restful.Request, resp *restful.Response) {
	writeRawJSON(http.StatusOK, version.Get(), resp.ResponseWriter)
}
func (c *AssetConfig) addHandlers(mux *http.ServeMux) error {
	assetHandler, err := c.buildAssetHandler()
	if err != nil {
		return err
	}

	publicURL, err := url.Parse(c.Options.PublicURL)
	if err != nil {
		return err
	}

	masterURL, err := url.Parse(c.Options.MasterPublicURL)
	if err != nil {
		return err
	}

	// Web console assets
	mux.Handle(publicURL.Path, http.StripPrefix(publicURL.Path, assetHandler))

	originResources := sets.NewString()
	k8sResources := sets.NewString()

	versions := []unversioned.GroupVersion{}
	versions = append(versions, registered.GroupOrDie(api.GroupName).GroupVersions...)
	versions = append(versions, registered.GroupOrDie(kapi.GroupName).GroupVersions...)
	deadOriginVersions := sets.NewString(configapi.DeadOpenShiftAPILevels...)
	deadKubernetesVersions := sets.NewString(configapi.DeadKubernetesAPILevels...)
	for _, version := range versions {
		for kind := range kapi.Scheme.KnownTypes(version) {
			if strings.HasSuffix(kind, "List") {
				continue
			}
			resource, _ := meta.KindToResource(version.WithKind(kind), false)
			if latest.OriginKind(version.WithKind(kind)) {
				if !deadOriginVersions.Has(version.String()) {
					originResources.Insert(resource.Resource)
				}
			} else {
				if !deadKubernetesVersions.Has(version.String()) {
					k8sResources.Insert(resource.Resource)
				}
			}
		}
	}

	commonResources := sets.NewString()
	for _, r := range originResources.List() {
		if k8sResources.Has(r) {
			commonResources.Insert(r)
		}
	}
	if commonResources.Len() > 0 {
		return fmt.Errorf("Resources for kubernetes and origin types intersect: %v", commonResources.List())
	}

	// Generated web console config and server version
	config := assets.WebConsoleConfig{
		APIGroupAddr:        masterURL.Host,
		APIGroupPrefix:      KubernetesAPIGroupPrefix,
		MasterAddr:          masterURL.Host,
		MasterPrefix:        OpenShiftAPIPrefix,
		MasterResources:     originResources.List(),
		KubernetesAddr:      masterURL.Host,
		KubernetesPrefix:    KubernetesAPIPrefix,
		KubernetesResources: k8sResources.List(),
		OAuthAuthorizeURI:   OpenShiftOAuthAuthorizeURL(masterURL.String()),
		OAuthRedirectBase:   c.Options.PublicURL,
		OAuthClientID:       OpenShiftWebConsoleClientID,
		LogoutURI:           c.Options.LogoutURL,
		LoggingURL:          c.Options.LoggingPublicURL,
		MetricsURL:          c.Options.MetricsPublicURL,
	}
	kVersionInfo := kversion.Get()
	oVersionInfo := oversion.Get()
	versionInfo := assets.WebConsoleVersion{
		KubernetesVersion: kVersionInfo.GitVersion,
		OpenShiftVersion:  oVersionInfo.GitVersion,
	}

	configPath := path.Join(publicURL.Path, "config.js")
	configHandler, err := assets.GeneratedConfigHandler(config, versionInfo)
	if err != nil {
		return err
	}
	mux.Handle(configPath, assets.GzipHandler(configHandler))

	// Extension scripts
	extScriptsPath := path.Join(publicURL.Path, "scripts/extensions.js")
	extScriptsHandler, err := assets.ExtensionScriptsHandler(c.Options.ExtensionScripts, c.Options.ExtensionDevelopment)
	if err != nil {
		return err
	}
	mux.Handle(extScriptsPath, assets.GzipHandler(extScriptsHandler))

	// Extension stylesheets
	extStylesheetsPath := path.Join(publicURL.Path, "styles/extensions.css")
	extStylesheetsHandler, err := assets.ExtensionStylesheetsHandler(c.Options.ExtensionStylesheets, c.Options.ExtensionDevelopment)
	if err != nil {
		return err
	}
	mux.Handle(extStylesheetsPath, assets.GzipHandler(extStylesheetsHandler))

	// Extension files
	for _, extConfig := range c.Options.Extensions {
		extPath := path.Join(publicURL.Path, "extensions", extConfig.Name) + "/"
		extHandler := assets.AssetExtensionHandler(extConfig.SourceDirectory, extPath, extConfig.HTML5Mode)
		mux.Handle(extPath, http.StripPrefix(extPath, extHandler))
	}

	return nil
}