Example #1
func newK8sClient(conf utils.NetConf, logger *log.Entry) (*kubernetes.Clientset, error) {
	// Some config can be passed in a kubeconfig file
	kubeconfig := conf.Kubernetes.Kubeconfig

	// Values from the kubeconfig can be overridden by settings passed explicitly in the network config.
	configOverrides := &clientcmd.ConfigOverrides{}

	// If an API root is given, make sure we're using the name / port rather than
	// the full URL. Earlier versions of the config required the full `/api/v1/` extension,
	// so split that off to ensure compatibility.
	conf.Policy.K8sAPIRoot = strings.Split(conf.Policy.K8sAPIRoot, "/api/")[0]

	var overridesMap = []struct {
		variable *string
		value    string
	}{
		{&configOverrides.ClusterInfo.Server, conf.Policy.K8sAPIRoot},
		{&configOverrides.AuthInfo.ClientCertificate, conf.Policy.K8sClientCertificate},
		{&configOverrides.AuthInfo.ClientKey, conf.Policy.K8sClientKey},
		{&configOverrides.ClusterInfo.CertificateAuthority, conf.Policy.K8sCertificateAuthority},
		{&configOverrides.AuthInfo.Token, conf.Policy.K8sAuthToken},
	}

	// Using the override map above, populate any non-empty values.
	for _, override := range overridesMap {
		if override.value != "" {
			*override.variable = override.value
		}
	}

	// Also allow the K8sAPIRoot to appear under the "kubernetes" block in the network config.
	if conf.Kubernetes.K8sAPIRoot != "" {
		configOverrides.ClusterInfo.Server = conf.Kubernetes.K8sAPIRoot
	}

	// Use the kubernetes client code to load the kubeconfig file and combine it with the overrides.
	config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig},
		configOverrides).ClientConfig()
	if err != nil {
		return nil, err
	}

	logger.Debugf("Kubernetes config %v", config)

	// Create the clientset
	return kubernetes.NewForConfig(config)
}
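
A minimal usage sketch (hypothetical: it assumes the same utils.NetConf and *log.Entry types as above, and that the CNI network config arrives as JSON on stdin; getK8sClient is not part of the original):

// getK8sClient parses the CNI network config from stdin data and builds the
// clientset from it.
func getK8sClient(stdinData []byte, logger *log.Entry) (*kubernetes.Clientset, error) {
	conf := utils.NetConf{}
	if err := json.Unmarshal(stdinData, &conf); err != nil {
		return nil, fmt.Errorf("failed to parse network configuration: %v", err)
	}
	return newK8sClient(conf, logger)
}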
Example #2
// WipeK8sPods deletes all pods from the K8S_TEST_NS test namespace.
func WipeK8sPods() {
	config, err := clientcmd.DefaultClientConfig.ClientConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	pods, err := clientset.Pods(K8S_TEST_NS).List(v1.ListOptions{})
	if err != nil {
		panic(err)
	}

	for _, pod := range pods.Items {
		err = clientset.Pods(K8S_TEST_NS).Delete(pod.Name, &v1.DeleteOptions{})
		if err != nil {
			panic(err)
		}
	}
}
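
WipeK8sPods panics on any error, so it is only suitable for test code; a hypothetical ginkgo hook using it might look like this:

// Hypothetical suite hook: start every spec with an empty test namespace.
var _ = BeforeEach(func() {
	WipeK8sPods()
})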
Example #3
// BeforeEach gets a client and makes a namespace.
func (f *Framework) BeforeEach() {
	// The fact that we need this feels like a bug in ginkgo.
	// https://github.com/onsi/ginkgo/issues/222
	f.cleanupHandle = AddCleanupAction(f.AfterEach)
	if f.ClientSet == nil {
		By("Creating a kubernetes client")
		config, err := LoadConfig()
		Expect(err).NotTo(HaveOccurred())
		config.QPS = f.options.ClientQPS
		config.Burst = f.options.ClientBurst
		if f.options.GroupVersion != nil {
			config.GroupVersion = f.options.GroupVersion
		}
		if TestContext.KubeAPIContentType != "" {
			config.ContentType = TestContext.KubeAPIContentType
		}
		f.ClientSet, err = internalclientset.NewForConfig(config)
		Expect(err).NotTo(HaveOccurred())
		f.ClientSet_1_5, err = release_1_5.NewForConfig(config)
		Expect(err).NotTo(HaveOccurred())
		clientRepoConfig := getClientRepoConfig(config)
		f.StagingClient, err = staging.NewForConfig(clientRepoConfig)
		Expect(err).NotTo(HaveOccurred())
		f.ClientPool = dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	}

	if f.federated {
		if f.FederationClientset_1_5 == nil {
			By("Creating a release 1.4 federation Clientset")
			var err error
			f.FederationClientset_1_5, err = LoadFederationClientset_1_5()
			Expect(err).NotTo(HaveOccurred())
		}
		By("Waiting for federation-apiserver to be ready")
		err := WaitForFederationApiserverReady(f.FederationClientset_1_5)
		Expect(err).NotTo(HaveOccurred())
		By("federation-apiserver is ready")

		By("Creating a federation namespace")
		ns, err := f.createFederationNamespace(f.BaseName)
		Expect(err).NotTo(HaveOccurred())
		f.FederationNamespace = ns
		By(fmt.Sprintf("Created federation namespace %s", ns.Name))
	}

	By("Building a namespace api object")
	namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
		"e2e-framework": f.BaseName,
	})
	Expect(err).NotTo(HaveOccurred())

	f.Namespace = namespace

	if TestContext.VerifyServiceAccount {
		By("Waiting for a default service account to be provisioned in namespace")
		err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
		Expect(err).NotTo(HaveOccurred())
	} else {
		Logf("Skipping waiting for service account")
	}

	if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
		f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
			inKubemark: ProviderIs("kubemark"),
			masterOnly: TestContext.GatherKubeSystemResourceUsageData == "master",
		})
		if err != nil {
			Logf("Error while creating NewResourceUsageGatherer: %v", err)
		} else {
			go f.gatherer.startGatheringData()
		}
	}

	if TestContext.GatherLogsSizes {
		f.logsSizeWaitGroup = sync.WaitGroup{}
		f.logsSizeWaitGroup.Add(1)
		f.logsSizeCloseChannel = make(chan bool)
		f.logsSizeVerifier = NewLogsVerifier(f.ClientSet, f.logsSizeCloseChannel)
		go func() {
			f.logsSizeVerifier.Run()
			f.logsSizeWaitGroup.Done()
		}()
	}
}
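
For context, a hypothetical e2e spec built on this framework. NewDefaultFramework registers BeforeEach/AfterEach with ginkgo, so each It block runs against a freshly created namespace and a shared clientset:

// Hypothetical spec: the framework's BeforeEach has already created
// f.Namespace and f.ClientSet by the time the It body runs.
var _ = framework.KubeDescribe("Example", func() {
	f := framework.NewDefaultFramework("example")

	It("runs in a freshly created namespace", func() {
		framework.Logf("namespace for this spec: %s", f.Namespace.Name)
	})
})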
Example #4
func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
	// TODO: this should be replaced by a --standalone flag
	standaloneMode := (len(s.APIServerList) == 0 && !s.RequireKubeConfig)

	if s.ExitOnLockContention && s.LockFilePath == "" {
		return errors.New("cannot exit on lock file contention: no lock file specified")
	}

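	// done is signalled by the lock-contention watcher below; unless running
	// in runonce mode, run blocks on it at the very end of this function.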
	done := make(chan struct{})
	if s.LockFilePath != "" {
		glog.Infof("acquiring file lock on %q", s.LockFilePath)
		if err := flock.Acquire(s.LockFilePath); err != nil {
			return fmt.Errorf("unable to acquire file lock on %q: %v", s.LockFilePath, err)
		}
		if s.ExitOnLockContention {
			glog.Infof("watching for inotify events for: %v", s.LockFilePath)
			if err := watchForLockfileContention(s.LockFilePath, done); err != nil {
				return err
			}
		}
	}

	// Set feature gates based on the value in KubeletConfiguration
	err = utilconfig.DefaultFeatureGate.Set(s.KubeletConfiguration.FeatureGates)
	if err != nil {
		return err
	}

	// Register current configuration with /configz endpoint
	cfgz, cfgzErr := initConfigz(&s.KubeletConfiguration)
	if utilconfig.DefaultFeatureGate.DynamicKubeletConfig() {
		// Look for config on the API server. If it exists, replace s.KubeletConfiguration
		// with it and continue. initKubeletConfigSync also starts the background goroutine that checks for new config.

		// Don't do dynamic Kubelet configuration in runonce mode
		if !s.RunOnce {
			remoteKC, err := initKubeletConfigSync(s)
			if err == nil {
				// Update s (KubeletServer) with new config from API server
				s.KubeletConfiguration = *remoteKC
				// Ensure that /configz is up to date with the new config
				if cfgzErr != nil {
					glog.Errorf("was unable to register configz before due to %s, will not be able to set now", cfgzErr)
				} else {
					setConfigz(cfgz, &s.KubeletConfiguration)
				}
				// Update feature gates from the new config
				err = utilconfig.DefaultFeatureGate.Set(s.KubeletConfiguration.FeatureGates)
				if err != nil {
					return err
				}
			} else {
				glog.Errorf("failed to init dynamic Kubelet configuration sync: %v", err)
			}
		}
	}

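	// No dependencies were injected by the caller, so build the default set:
	// cloud provider, API server clients, and a separate event client.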
	if kubeDeps == nil {
		var kubeClient, eventClient *clientset.Clientset
		var externalKubeClient clientgoclientset.Interface
		var cloud cloudprovider.Interface

		if s.CloudProvider != componentconfigv1alpha1.AutoDetectCloudProvider {
			cloud, err = cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
			if err != nil {
				return err
			}
			if cloud == nil {
				glog.V(2).Infof("No cloud provider specified: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
			} else {
				glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
			}
		}

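		// With a bootstrap kubeconfig, obtain a client certificate for this
		// node from the API server before building the main clients.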
		if s.BootstrapKubeconfig != "" {
			nodeName, err := getNodeName(cloud, nodeutil.GetHostname(s.HostnameOverride))
			if err != nil {
				return err
			}
			if err := bootstrapClientCert(s.KubeConfig.Value(), s.BootstrapKubeconfig, s.CertDirectory, nodeName); err != nil {
				return err
			}
		}

		clientConfig, err := CreateAPIServerClientConfig(s)
		if err == nil {
			kubeClient, err = clientset.NewForConfig(clientConfig)
			if err != nil {
				glog.Warningf("New kubeClient from clientConfig error: %v", err)
			}
			// make a separate client for events
			eventClientConfig := *clientConfig
			eventClientConfig.QPS = float32(s.EventRecordQPS)
			eventClientConfig.Burst = int(s.EventBurst)
			eventClient, err = clientset.NewForConfig(&eventClientConfig)
			if err != nil {
				glog.Warningf("Failed to create API Server client: %v", err)
			}
		} else {
			if s.RequireKubeConfig {
				return fmt.Errorf("invalid kubeconfig: %v", err)
			}
			if standaloneMode {
				glog.Warningf("No API client: %v", err)
			}
		}

		// client-go and kubernetes generated clients are incompatible because the runtime
		// and runtime/serializer types have been duplicated in client-go. This means that
		// you can't reasonably convert from one to the other, and it's impossible for a single
		// type to fulfill both interfaces. Because of that, we have to build the clients
		// up from scratch twice.
		// TODO eventually the kubelet should only use the client-go library
		clientGoConfig, err := createAPIServerClientGoConfig(s)
		if err == nil {
			externalKubeClient, err = clientgoclientset.NewForConfig(clientGoConfig)
			if err != nil {
				glog.Warningf("New kubeClient from clientConfig error: %v", err)
			}
		} else {
			if s.RequireKubeConfig {
				return fmt.Errorf("invalid kubeconfig: %v", err)
			}
			if standaloneMode {
				glog.Warningf("No API client: %v", err)
			}
		}

		kubeDeps, err = UnsecuredKubeletDeps(s)
		if err != nil {
			return err
		}

		kubeDeps.Cloud = cloud
		kubeDeps.KubeClient = kubeClient
		kubeDeps.ExternalKubeClient = externalKubeClient
		kubeDeps.EventClient = eventClient
	}

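	// Build the authentication/authorization layer for the kubelet's own API
	// endpoints if the caller did not supply one.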
	if kubeDeps.Auth == nil {
		nodeName, err := getNodeName(kubeDeps.Cloud, nodeutil.GetHostname(s.HostnameOverride))
		if err != nil {
			return err
		}

		auth, err := buildAuth(nodeName, kubeDeps.ExternalKubeClient, s.KubeletConfiguration)
		if err != nil {
			return err
		}
		kubeDeps.Auth = auth
	}

	if kubeDeps.CAdvisorInterface == nil {
		kubeDeps.CAdvisorInterface, err = cadvisor.New(uint(s.CAdvisorPort), s.ContainerRuntime, s.RootDirectory)
		if err != nil {
			return err
		}
	}

	if kubeDeps.ContainerManager == nil {
		if s.SystemCgroups != "" && s.CgroupRoot == "" {
			return fmt.Errorf("invalid configuration: system container was specified and cgroup root was not specified")
		}
		kubeDeps.ContainerManager, err = cm.NewContainerManager(
			kubeDeps.Mounter,
			kubeDeps.CAdvisorInterface,
			cm.NodeConfig{
				RuntimeCgroupsName:    s.RuntimeCgroups,
				SystemCgroupsName:     s.SystemCgroups,
				KubeletCgroupsName:    s.KubeletCgroups,
				ContainerRuntime:      s.ContainerRuntime,
				CgroupsPerQOS:         s.ExperimentalCgroupsPerQOS,
				CgroupRoot:            s.CgroupRoot,
				CgroupDriver:          s.CgroupDriver,
				ProtectKernelDefaults: s.ProtectKernelDefaults,
				EnableCRI:             s.EnableCRI,
			},
			s.ExperimentalFailSwapOn)

		if err != nil {
			return err
		}
	}

	if err := checkPermissions(); err != nil {
		glog.Error(err)
	}

	utilruntime.ReallyCrash = s.ReallyCrashForTesting

	rand.Seed(time.Now().UTC().UnixNano())

	// TODO(vmarmol): Do this through container config.
	oomAdjuster := kubeDeps.OOMAdjuster
	if err := oomAdjuster.ApplyOOMScoreAdj(0, int(s.OOMScoreAdj)); err != nil {
		glog.Warning(err)
	}

	if err := RunKubelet(&s.KubeletConfiguration, kubeDeps, s.RunOnce, standaloneMode); err != nil {
		return err
	}

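	// Serve /healthz on the configured address; wait.Until restarts the
	// listener if it ever returns.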
	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go wait.Until(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(int(s.HealthzPort))), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second, wait.NeverStop)
	}

	if s.RunOnce {
		return nil
	}

	<-done
	return nil
}
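
In the kubelet this function is reached through an exported wrapper in the same package; a rough sketch, following the signature used above:

// Run is the exported entry point: cmd/kubelet parses flags into a
// KubeletServer and hands it here. A nil kubeDeps makes run construct the
// default dependency set itself.
func Run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) error {
	if err := run(s, kubeDeps); err != nil {
		return fmt.Errorf("failed to run Kubelet: %v", err)
	}
	return nil
}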