// RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications:
//   1 Integration tests
//   2 Kubelet binary
//   3 Standalone 'kubernetes' binary
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error {
	kcfg.Hostname = nodeutil.GetHostname(kcfg.HostnameOverride)

	if len(kcfg.NodeName) == 0 {
		// Query the cloud provider for our node name, default to Hostname
		nodeName := kcfg.Hostname
		if kcfg.Cloud != nil {
			var err error
			instances, ok := kcfg.Cloud.Instances()
			if !ok {
				return fmt.Errorf("failed to get instances from cloud provider")
			}
			nodeName, err = instances.CurrentNodeName(kcfg.Hostname)
			if err != nil {
				return fmt.Errorf("error fetching current instance name from cloud provider: %v", err)
			}
			glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)
		}
		kcfg.NodeName = nodeName
	}

	eventBroadcaster := record.NewBroadcaster()
	kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.NodeName})
	eventBroadcaster.StartLogging(glog.V(3).Infof)
	if kcfg.KubeClient != nil {
		glog.V(4).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
	} else {
		glog.Warning("No api server defined - no events will be sent to API server.")
	}
	capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources)

	credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory)

	if builder == nil {
		builder = createAndInitKubelet
	}
	if kcfg.OSInterface == nil {
		kcfg.OSInterface = kubecontainer.RealOS{}
	}
	k, podCfg, err := builder(kcfg)
	if err != nil {
		return fmt.Errorf("failed to create kubelet: %v", err)
	}

	// process pods and exit.
	if kcfg.Runonce {
		if _, err := k.RunOnce(podCfg.Updates()); err != nil {
			return fmt.Errorf("runonce failed: %v", err)
		}
		glog.Infof("Started kubelet as runonce")
	} else {
		startKubelet(k, podCfg, kcfg)
		glog.Infof("Started kubelet")
	}
	return nil
}
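// A minimal, hypothetical caller of RunKubelet might look like the sketch below.
// This is not the real server wiring: the field values are placeholders, and a
// complete KubeletConfig requires many more fields than are shown here. Passing
// a nil builder selects the default createAndInitKubelet path above.
func runKubeletOnceExample() error {
	kcfg := &KubeletConfig{
		RootDirectory: "/var/lib/kubelet", // assumed root directory for illustration
		Runonce:       true,               // process the initial pod set, then exit
	}
	return RunKubelet(kcfg, nil)
}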
// InitializeTLS checks for a configured TLSCertFile and TLSPrivateKeyFile: if unspecified a new self-signed
// certificate and key file are generated. Returns a configured kubelet.TLSOptions object.
func (s *KubeletServer) InitializeTLS() (*kubelet.TLSOptions, error) {
	if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" {
		s.TLSCertFile = path.Join(s.CertDirectory, "kubelet.crt")
		s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "kubelet.key")
		if err := util.GenerateSelfSignedCert(nodeutil.GetHostname(s.HostnameOverride), s.TLSCertFile, s.TLSPrivateKeyFile, nil); err != nil {
			return nil, fmt.Errorf("unable to generate self signed cert: %v", err)
		}
		glog.V(4).Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile)
	}
	tlsOptions := &kubelet.TLSOptions{
		Config: &tls.Config{
			// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability).
			MinVersion: tls.VersionTLS10,
			// Populate PeerCertificates in requests, but don't yet reject connections without certificates.
			ClientAuth: tls.RequestClientCert,
		},
		CertFile: s.TLSCertFile,
		KeyFile:  s.TLSPrivateKeyFile,
	}
	return tlsOptions, nil
}
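// A hypothetical sketch of how the returned TLSOptions could be wired into an
// HTTPS server using the standard net/http package. The handler and listen
// address are placeholders, not the kubelet's actual server setup.
func serveKubeletTLSExample(s *KubeletServer, handler http.Handler) error {
	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}
	server := &http.Server{
		Addr:      ":10250", // assumed listen port for illustration
		Handler:   handler,
		TLSConfig: tlsOptions.Config,
	}
	// ListenAndServeTLS loads the cert/key pair configured or generated above.
	return server.ListenAndServeTLS(tlsOptions.CertFile, tlsOptions.KeyFile)
}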
func (util *RBDUtil) rbdLock(rbd rbd, lock bool) error {
	var err error
	var output, locker string
	var cmd []byte
	var secret_opt []string

	if rbd.Secret != "" {
		secret_opt = []string{"--key=" + rbd.Secret}
	} else {
		secret_opt = []string{"-k", rbd.Keyring}
	}
	// construct lock id using host name and a magic prefix
	lock_id := "kubelet_lock_magic_" + node.GetHostname("")

	l := len(rbd.Mon)
	// avoid mount storm, pick a monitor randomly
	start := rand.Int() % l
	// iterate all monitors until the rbd command succeeds.
	for i := start; i < start+l; i++ {
		mon := rbd.Mon[i%l]
		// cmd "rbd lock list" serves two purposes:
		// for fencing, check if lock already held for this host
		// this edge case happens if host crashes in the middle of acquiring lock and mounting rbd
		// for defencing, get the locker name, something like "client.1234"
		cmd, err = rbd.plugin.execCommand("rbd",
			append([]string{"lock", "list", rbd.Image, "--pool", rbd.Pool, "--id", rbd.Id, "-m", mon}, secret_opt...))
		output = string(cmd)
		if err != nil {
			continue
		}

		if lock {
			// check if lock is already held for this host by matching lock_id and rbd lock id
			if strings.Contains(output, lock_id) {
				// this host already holds the lock, exit
				glog.V(1).Infof("rbd: lock already held for %s", lock_id)
				return nil
			}
			// hold a lock: rbd lock add
			cmd, err = rbd.plugin.execCommand("rbd",
				append([]string{"lock", "add", rbd.Image, lock_id, "--pool", rbd.Pool, "--id", rbd.Id, "-m", mon}, secret_opt...))
		} else {
			// defencing, find locker name by scanning back to the start of the line containing lock_id
			ind := strings.LastIndex(output, lock_id) - 1
			for i := ind; i >= 0; i-- {
				if output[i] == '\n' {
					locker = output[(i + 1):ind]
					break
				}
			}
			// remove a lock: rbd lock remove
			cmd, err = rbd.plugin.execCommand("rbd",
				append([]string{"lock", "remove", rbd.Image, lock_id, locker, "--pool", rbd.Pool, "--id", rbd.Id, "-m", mon}, secret_opt...))
		}

		if err == nil {
			// lock is acquired (or released), stop retrying
			break
		}
	}
	return err
}
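// A hypothetical sketch of how rbdLock might be used around attach and detach:
// the advisory lock is taken (fencing) before the image is mapped on this host
// and released (defencing) after it is unmapped. The surrounding attach/detach
// steps are placeholders, not the plugin's real implementation.
func (util *RBDUtil) attachDetachExample(r rbd) error {
	// Fence: acquire the lock so no other host maps the image concurrently.
	if err := util.rbdLock(r, true); err != nil {
		return err
	}
	// ... map the RBD image and mount it here ...

	// Defence: release the lock once the image has been unmapped.
	return util.rbdLock(r, false)
}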