// StandardErrorMessage translates common errors into a human readable message, or returns
// false if the error is not one of the recognized types. It may also log extended
// information to glog.
//
// This method is generic to the command in use and may be used by non-Kubectl
// commands.
func StandardErrorMessage(err error) (string, bool) {
	if debugErr, ok := err.(debugError); ok {
		glog.V(4).Infof(debugErr.DebugError())
	}
	status, isStatus := err.(kerrors.APIStatus)
	switch {
	case isStatus:
		switch s := status.Status(); {
		case s.Reason == "Unauthorized":
			return fmt.Sprintf("error: You must be logged in to the server (%s)", s.Message), true
		default:
			return fmt.Sprintf("Error from server: %s", err.Error()), true
		}
	case kerrors.IsUnexpectedObjectError(err):
		return fmt.Sprintf("Server returned an unexpected response: %s", err.Error()), true
	}
	switch t := err.(type) {
	case *url.Error:
		glog.V(4).Infof("Connection error: %s %s: %v", t.Op, t.URL, t.Err)
		switch {
		case strings.Contains(t.Err.Error(), "connection refused"):
			host := t.URL
			if server, err := url.Parse(t.URL); err == nil {
				host = server.Host
			}
			return fmt.Sprintf("The connection to the server %s was refused - did you specify the right host or port?", host), true
		}
		return fmt.Sprintf("Unable to connect to the server: %v", t.Err), true
	}
	return "", false
}
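// A minimal usage sketch, not part of the original source: a hypothetical caller that
// prints the friendlier message from StandardErrorMessage when one is available and
// falls back to the raw error otherwise (uses only fmt and os).
func printError(err error) {
	if err == nil {
		return
	}
	if msg, ok := StandardErrorMessage(err); ok {
		fmt.Fprintln(os.Stderr, msg)
		return
	}
	fmt.Fprintf(os.Stderr, "error: %v\n", err)
}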
// Forward those file paths received on candidateCh that are text files to textFileCh.
func filterCandidates(
	candidateCh chan string, textFileCh chan string,
	ftiMgr *FileTypeInfoManager, ctrl GoRtnCntrl) {
	defer ctrl.wg.Done()
	glog.V(1).Infoln("ENTER filterCandidates")
	defer glog.V(1).Infoln("EXIT filterCandidates")
	for {
		select {
		case <-ctrl.stopCh:
			return
		case fp, ok := <-candidateCh:
			if !ok {
				return
			}
			isText := ftiMgr.FindType(fp)
			if isText {
				select {
				case <-ctrl.stopCh:
					return
				case textFileCh <- fp:
				}
			}
		}
	}
}
// newClusterResourceOverride returns an admission controller for containers that
// configurably overrides container resource request/limits
func newClusterResourceOverride(client clientset.Interface, config io.Reader) (admission.Interface, error) {
	parsed, err := ReadConfig(config)
	if err != nil {
		glog.V(5).Infof("%s admission controller loaded with error: (%T) %[2]v", api.PluginName, err)
		return nil, err
	}
	if errs := validation.Validate(parsed); len(errs) > 0 {
		return nil, errs.ToAggregate()
	}
	glog.V(5).Infof("%s admission controller loaded with config: %v", api.PluginName, parsed)
	var internal *internalConfig
	if parsed != nil {
		internal = &internalConfig{
			limitCPUToMemoryRatio:     inf.NewDec(parsed.LimitCPUToMemoryPercent, 2),
			cpuRequestToLimitRatio:    inf.NewDec(parsed.CPURequestToLimitPercent, 2),
			memoryRequestToLimitRatio: inf.NewDec(parsed.MemoryRequestToLimitPercent, 2),
		}
	}
	limitRanger, err := limitranger.NewLimitRanger(client, wrapLimit)
	if err != nil {
		return nil, err
	}
	return &clusterResourceOverridePlugin{
		Handler:     admission.NewHandler(admission.Create),
		config:      internal,
		LimitRanger: limitRanger,
	}, nil
}
// syncLoop is the main loop for processing changes. It watches for changes from
// four channels (file, etcd, server, and http) and creates a union of them. For
// any new change seen, it runs a sync against the desired and running state. If
// no changes are seen to the configuration, it synchronizes the last known desired
// state every sync_frequency seconds. This function never returns.
func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) {
	for {
		select {
		case u := <-updates:
			switch u.Op {
			case SET:
				glog.V(3).Infof("SET: Containers changed")
				kl.pods = u.Pods
				kl.pods = filterHostPortConflicts(kl.pods)
			case UPDATE:
				glog.V(3).Infof("Update: Containers changed")
				kl.pods = updateBoundPods(u.Pods, kl.pods)
				kl.pods = filterHostPortConflicts(kl.pods)
			default:
				panic("syncLoop does not support incremental changes")
			}
		case <-time.After(kl.resyncInterval):
			glog.V(4).Infof("Periodic sync")
		}

		err := handler.SyncPods(kl.pods)
		if err != nil {
			glog.Errorf("Couldn't sync containers: %v", err)
		}
	}
}
// listCollection lists the items in the specified namespace.
// It returns the following:
//   - the list of items in the collection (if found)
//   - a boolean indicating whether the operation is supported
//   - an error if the operation is supported but could not be completed
func listCollection(
	dynamicClient *dynamic.Client,
	opCache operationNotSupportedCache,
	gvr unversioned.GroupVersionResource,
	namespace string,
) (*runtime.UnstructuredList, bool, error) {
	glog.V(5).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr)

	key := operationKey{op: operationList, gvr: gvr}
	if !opCache.isSupported(key) {
		glog.V(5).Infof("namespace controller - listCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr)
		return nil, false, nil
	}

	apiResource := unversioned.APIResource{Name: gvr.Resource, Namespaced: true}
	unstructuredList, err := dynamicClient.Resource(&apiResource, namespace).List(v1.ListOptions{})
	if err == nil {
		return unstructuredList, true, nil
	}

	// This is strange, but we need to special-case both MethodNotSupported and NotFound errors.
	// TODO: https://github.com/kubernetes/kubernetes/issues/22413
	// We have a resource returned in the discovery API that supports no top-level verbs:
	//   /apis/extensions/v1beta1/namespaces/default/replicationcontrollers
	// When working with this resource type, we get a literal "not found" error rather than the
	// expected "method not supported", so remember that this resource does not support delete collection.
	if errors.IsMethodNotSupported(err) || errors.IsNotFound(err) {
		glog.V(5).Infof("namespace controller - listCollection not supported - namespace: %s, gvr: %v", namespace, gvr)
		opCache[key] = true
		return nil, false, nil
	}

	return nil, true, err
}
// makeHandler wraps our ResponseHandlers while timing requests, collecting
// stats, logging, and handling errors.
func makeHandler(handler ResponseHandler) httprouter.Handle {
	return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
		start := time.Now()
		httpCode, err := handler(w, r, p)
		duration := time.Since(start)

		var msg string
		if err != nil {
			msg = err.Error()
		} else if httpCode != http.StatusOK {
			msg = http.StatusText(httpCode)
		}

		if len(msg) > 0 {
			http.Error(w, msg, httpCode)
			stats.RecordEvent(stats.ErroredRequest)
		}

		if len(msg) > 0 || glog.V(2) {
			reqString := r.URL.Path + " " + r.RemoteAddr
			if glog.V(3) {
				reqString = r.URL.RequestURI() + " " + r.RemoteAddr
			}

			if len(msg) > 0 {
				glog.Errorf("[API - %9s] %s (%d - %s)", duration, reqString, httpCode, msg)
			} else {
				glog.Infof("[API - %9s] %s (%d)", duration, reqString, httpCode)
			}
		}

		stats.RecordEvent(stats.HandledRequest)
		stats.RecordTiming(stats.ResponseTime, duration)
	}
}
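// A hedged usage sketch, not part of the original source: wiring makeHandler into an
// httprouter mux. The "/ping" route and pingHandler are hypothetical, and ResponseHandler
// is assumed to be func(http.ResponseWriter, *http.Request, httprouter.Params) (int, error),
// matching how it is called above.
func newExampleRouter() *httprouter.Router {
	pingHandler := func(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
		if _, err := w.Write([]byte("pong")); err != nil {
			return http.StatusInternalServerError, err
		}
		return http.StatusOK, nil
	}
	router := httprouter.New()
	router.GET("/ping", makeHandler(pingHandler))
	return router
}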
func (ts *timeStore) Put(tp TimePoint) error {
	if tp.Value == nil {
		return fmt.Errorf("cannot store TimePoint with nil data")
	}
	if (tp.Timestamp == time.Time{}) {
		return fmt.Errorf("cannot store TimePoint with zero timestamp")
	}
	ts.rwLock.Lock()
	defer ts.rwLock.Unlock()
	if ts.buffer.Len() == 0 {
		glog.V(5).Infof("put pushfront: %v, %v", tp.Timestamp, tp.Value)
		ts.buffer.PushFront(tp)
		return nil
	}
	// Keep the buffer ordered newest-first: insert before the first element
	// that is older than the new TimePoint.
	for elem := ts.buffer.Front(); elem != nil; elem = elem.Next() {
		if tp.Timestamp.After(elem.Value.(TimePoint).Timestamp) {
			glog.V(5).Infof("put insert before: %v, %v, %v", elem, tp.Timestamp, tp.Value)
			ts.buffer.InsertBefore(tp, elem)
			return nil
		}
	}
	glog.V(5).Infof("put pushback: %v, %v", tp.Timestamp, tp.Value)
	ts.buffer.PushBack(tp)
	return nil
}
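// A hedged usage sketch, not part of the original source, illustrating the invariant Put
// maintains: the buffer is kept ordered newest-first. It assumes a caller in the same
// package with an already-constructed *timeStore, and that TimePoint has only the
// Timestamp and Value fields used above.
func exampleTimeStoreOrder(ts *timeStore) {
	now := time.Now()
	_ = ts.Put(TimePoint{Timestamp: now.Add(-time.Minute), Value: "older"})
	_ = ts.Put(TimePoint{Timestamp: now, Value: "newer"})
	// Iteration starts at the newest element, so "newer" is logged first.
	for elem := ts.buffer.Front(); elem != nil; elem = elem.Next() {
		tp := elem.Value.(TimePoint)
		glog.Infof("%v -> %v", tp.Timestamp, tp.Value)
	}
}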
// DecryptProto decrypts a proto using an AEAD and unmarshals the result into dst. The result
// should only be considered written if this function returns true.
func DecryptProto(aead cipher.AEAD, msg string, additionalData []byte, dst proto.Message) bool {
	msgBytes, err := base64.RawURLEncoding.DecodeString(msg)
	if err != nil {
		glog.V(2).Infof("Tried to decrypt proto with invalid base64: %v", err)
		return false
	}

	var msgProto pb.EncryptedMessage
	err = proto.Unmarshal(msgBytes, &msgProto)
	if err != nil {
		glog.V(2).Infof("Tried to decrypt proto with invalid pb.EncryptedMessage: %v", err)
		return false
	}

	// Decrypt in-place.
	plaintext := msgProto.Ciphertext
	plaintext, err = aead.Open(plaintext[:0], msgProto.Nonce, msgProto.Ciphertext, additionalData)
	if err != nil {
		glog.V(2).Infof("Failed to decrypt data: %v", err)
		return false
	}

	err = proto.Unmarshal(plaintext, dst)
	if err != nil {
		glog.V(2).Infof("Failed to unmarshal decrypted proto: %v", err)
		return false
	}
	return true
}
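// A hedged sketch of the inverse operation, not part of the original source. It assumes
// pb.EncryptedMessage exposes only the Nonce and Ciphertext fields used by DecryptProto,
// and that crypto/rand is acceptable for nonce generation; the real encryption side may
// differ.
func encryptProtoSketch(aead cipher.AEAD, src proto.Message, additionalData []byte) (string, error) {
	plaintext, err := proto.Marshal(src)
	if err != nil {
		return "", err
	}
	nonce := make([]byte, aead.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return "", err
	}
	msgProto := pb.EncryptedMessage{
		Nonce:      nonce,
		Ciphertext: aead.Seal(nil, nonce, plaintext, additionalData),
	}
	msgBytes, err := proto.Marshal(&msgProto)
	if err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(msgBytes), nil
}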
// shouldWriteCerts determines if the router should ask the cert manager to write out certificates.
// It will return true if a route is edge or reencrypt and it has all the required (host/key) certificates
// defined. If the route does not have the certificates defined it will log an info message if the
// router is configured with a default certificate and assume the route is meant to be a wildcard. Otherwise
// it will log a warning. The route will still be written but users may receive browser errors
// for a host/cert mismatch.
func (r *templateRouter) shouldWriteCerts(cfg *ServiceAliasConfig) bool {
	if cfg.Certificates == nil {
		return false
	}

	if cfg.TLSTermination == routeapi.TLSTerminationEdge || cfg.TLSTermination == routeapi.TLSTerminationReencrypt {
		if hasRequiredEdgeCerts(cfg) {
			return true
		}

		if cfg.TLSTermination == routeapi.TLSTerminationReencrypt && hasReencryptDestinationCACert(cfg) {
			glog.V(4).Infof("a reencrypt route with host %s does not have an edge certificate, using default router certificate", cfg.Host)
			return true
		}

		msg := fmt.Sprintf("a %s terminated route with host %s does not have the required certificates. The route will still be created but no certificates will be written",
			cfg.TLSTermination, cfg.Host)
		// If a default cert is configured we'll assume it is meant to be a wildcard and only log info,
		// otherwise we'll consider this a warning.
		if len(r.defaultCertificate) > 0 {
			glog.V(4).Info(msg)
		} else {
			glog.Warning(msg)
		}
		return false
	}
	return false
}
func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	glog.V(4).Info("openstack.LoadBalancer() called")

	// TODO: Search for and support Rackspace loadbalancer API, and others.
	network, err := openstack.NewNetworkV2(os.provider, gophercloud.EndpointOpts{
		Region: os.region,
	})
	if err != nil {
		glog.Warningf("Failed to find neutron endpoint: %v", err)
		return nil, false
	}

	compute, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
		Region: os.region,
	})
	if err != nil {
		glog.Warningf("Failed to find compute endpoint: %v", err)
		return nil, false
	}

	glog.V(1).Info("Claiming to support LoadBalancer")

	if os.lbOpts.LBVersion == "v2" {
		return &LbaasV2{LoadBalancer{network, compute, os.lbOpts}}, true
	}
	return &LbaasV1{LoadBalancer{network, compute, os.lbOpts}}, true
}
// AttachDisk attaches the given cinder volume to the compute node running the kubelet.
func (os *OpenStack) AttachDisk(instanceID string, diskName string) (string, error) {
	disk, err := os.getVolume(diskName)
	if err != nil {
		return "", err
	}
	cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
		Region: os.region,
	})
	if err != nil || cClient == nil {
		glog.Errorf("Unable to initialize nova client for region: %s", os.region)
		return "", err
	}
	if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil {
		if instanceID == disk.Attachments[0]["server_id"] {
			glog.V(4).Infof("Disk: %q is already attached to compute: %q", diskName, instanceID)
			return disk.ID, nil
		}
		errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, disk.Attachments[0]["server_id"])
		glog.Error(errMsg)
		return "", errors.New(errMsg)
	}
	// TODO(spothanis): add a read-only flag here if possible.
	_, err = volumeattach.Create(cClient, instanceID, &volumeattach.CreateOpts{
		VolumeID: disk.ID,
	}).Extract()
	if err != nil {
		glog.Errorf("Failed to attach %s volume to %s compute", diskName, instanceID)
		return "", err
	}
	glog.V(2).Infof("Successfully attached %s volume to %s compute", diskName, instanceID)
	return disk.ID, nil
}
func (i *Instances) List(name_filter string) ([]string, error) {
	glog.V(4).Infof("openstack List(%v) called", name_filter)

	opts := servers.ListOpts{
		Name:   name_filter,
		Status: "ACTIVE",
	}
	pager := servers.List(i.compute, opts)

	ret := make([]string, 0)
	err := pager.EachPage(func(page pagination.Page) (bool, error) {
		sList, err := servers.ExtractServers(page)
		if err != nil {
			return false, err
		}
		for _, server := range sList {
			ret = append(ret, server.Name)
		}
		return true, nil
	})
	if err != nil {
		return nil, err
	}

	glog.V(3).Infof("Found %v instances matching %v: %v", len(ret), name_filter, ret)
	return ret, nil
}
// HTTPTransporter.Recv() is already buffered, so we don't need a 'recvLoop' here.
func (m *MesosMessenger) decodeLoop() {
	for {
		select {
		case <-m.stop:
			return
		default:
		}
		msg, err := m.tr.Recv()
		if err != nil {
			if err == discardOnStopError {
				log.V(1).Info("exiting decodeLoop, transport shutting down")
				return
			}
			panic(fmt.Sprintf("unexpected transport error: %v", err))
		}
		log.V(2).Infof("Receiving message %v from %v\n", msg.Name, msg.UPID)
		msg.ProtoMessage = reflect.New(m.installedMessages[msg.Name]).Interface().(proto.Message)
		if err := proto.Unmarshal(msg.Bytes, msg.ProtoMessage); err != nil {
			log.Errorf("Failed to unmarshal message %v: %v\n", msg, err)
			continue
		}
		// TODO(yifan): Catch panic.
		m.installedHandlers[msg.Name](msg.UPID, msg.ProtoMessage)
	}
}
// Start launches a master. It returns an error where possible, but some background processes
// may still be running and the process should exit after it finishes.
func (m *Master) Start() error {
	// Allow privileged containers.
	// TODO: make this configurable and not the default https://github.com/openshift/origin/issues/662
	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: true,
		PrivilegedSources: capabilities.PrivilegedSources{
			HostNetworkSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource},
			HostPIDSources:     []string{kubelettypes.ApiserverSource, kubelettypes.FileSource},
			HostIPCSources:     []string{kubelettypes.ApiserverSource, kubelettypes.FileSource},
		},
	})

	openshiftConfig, err := origin.BuildMasterConfig(*m.config)
	if err != nil {
		return err
	}

	kubeMasterConfig, err := BuildKubernetesMasterConfig(openshiftConfig)
	if err != nil {
		return err
	}

	switch {
	case m.api:
		glog.Infof("Starting master on %s (%s)", m.config.ServingInfo.BindAddress, version.Get().String())
		glog.Infof("Public master address is %s", m.config.AssetConfig.MasterPublicURL)
		if len(m.config.DisabledFeatures) > 0 {
			glog.V(4).Infof("Disabled features: %s", strings.Join(m.config.DisabledFeatures, ", "))
		}
		glog.Infof("Using images from %q", openshiftConfig.ImageFor("<component>"))

		if err := StartAPI(openshiftConfig, kubeMasterConfig); err != nil {
			return err
		}

	case m.controllers:
		glog.Infof("Starting controllers on %s (%s)", m.config.ServingInfo.BindAddress, version.Get().String())
		if len(m.config.DisabledFeatures) > 0 {
			glog.V(4).Infof("Disabled features: %s", strings.Join(m.config.DisabledFeatures, ", "))
		}
		glog.Infof("Using images from %q", openshiftConfig.ImageFor("<component>"))

		if err := startHealth(openshiftConfig); err != nil {
			return err
		}
	}

	if m.controllers {
		// Run the controllers asynchronously (they are not required to be "ready").
		go func() {
			if err := startControllers(openshiftConfig, kubeMasterConfig); err != nil {
				glog.Fatal(err)
			}

			openshiftConfig.Informers.Start(utilwait.NeverStop)
		}()
	}

	return nil
}
// Get as many pod configs as we can from a directory. Return an error iff something
// prevented us from reading anything at all. Do not return an error if only some files
// were problematic.
func (s *sourceFile) extractFromDir(name string) ([]*api.Pod, error) {
	dirents, err := filepath.Glob(filepath.Join(name, "[^.]*"))
	if err != nil {
		return nil, fmt.Errorf("glob failed: %v", err)
	}

	pods := make([]*api.Pod, 0)
	if len(dirents) == 0 {
		return pods, nil
	}

	sort.Strings(dirents)
	for _, path := range dirents {
		statInfo, err := os.Stat(path)
		if err != nil {
			glog.V(1).Infof("Can't get metadata for %q: %v", path, err)
			continue
		}

		switch {
		case statInfo.Mode().IsDir():
			glog.V(1).Infof("Not recursing into config path %q", path)
		case statInfo.Mode().IsRegular():
			pod, err := s.extractFromFile(path)
			if err != nil {
				glog.V(1).Infof("Can't process config file %q: %v", path, err)
			} else {
				pods = append(pods, pod)
			}
		default:
			glog.V(1).Infof("Config path %q is not a directory or file: %v", path, statInfo.Mode())
		}
	}
	return pods, nil
}
func (sns *SNS) query(topic *Topic, message *Message, params map[string]string, resp interface{}) error {
	params["Timestamp"] = time.Now().UTC().Format(time.RFC3339)
	u, err := url.Parse(sns.Region.SNSEndpoint)
	if err != nil {
		return err
	}

	sign(sns.Auth, "GET", "/", params, u.Host)
	u.RawQuery = multimap(params).Encode()
	if glog.V(1) {
		glog.V(1).Infoln("REQ:\n", u.String())
	}
	r, err := http.Get(u.String())
	if err != nil {
		return err
	}
	defer r.Body.Close()

	if glog.V(1) {
		dump, _ := httputil.DumpResponse(r, true)
		glog.V(1).Infoln("DUMP:\n", string(dump))
	}

	if r.StatusCode != 200 {
		return buildError(r)
	}
	err = xml.NewDecoder(r.Body).Decode(resp)
	return err
}
// orphanFinalizer dequeues a node from the orphanQueue, then finds its dependents
// based on the graph maintained by the GC, then removes it from the
// OwnerReferences of its dependents, and finally updates the owner to remove
// the "Orphan" finalizer. The node is added back into the orphanQueue if any of
// these steps fail.
func (gc *GarbageCollector) orphanFinalizer() {
	key, quit := gc.orphanQueue.Get()
	if quit {
		return
	}
	defer gc.orphanQueue.Done(key)
	owner, ok := key.(*node)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("expect *node, got %#v", key))
	}
	// We don't need to lock each element, because they never get updated.
	owner.dependentsLock.RLock()
	dependents := make([]*node, 0, len(owner.dependents))
	for dependent := range owner.dependents {
		dependents = append(dependents, dependent)
	}
	owner.dependentsLock.RUnlock()

	err := gc.orhpanDependents(owner.identity, dependents)
	if err != nil {
		glog.V(6).Infof("orphanDependents for %s failed with %v", owner.identity, err)
		gc.orphanQueue.Add(owner)
		return
	}
	// Update the owner, remove "orphaningFinalizer" from its finalizers list.
	err = gc.removeOrphanFinalizer(owner)
	if err != nil {
		glog.V(6).Infof("removeOrphanFinalizer for %s failed with %v", owner.identity, err)
		gc.orphanQueue.Add(owner)
	}
}
// validate converts all of the arguments on the config into references to objects, or returns an error
func (c *AppConfig) validate() (app.ComponentReferences, app.SourceRepositories, cmdutil.Environment, cmdutil.Environment, error) {
	b := c.refBuilder
	c.addReferenceBuilderComponents(b)
	b.AddGroups(c.Groups)
	refs, repos, errs := b.Result()

	if len(c.Strategy) != 0 && len(repos) == 0 {
		errs = append(errs, fmt.Errorf("when --strategy is specified you must provide at least one source code location"))
	}

	if c.BinaryBuild && (len(repos) > 0 || refs.HasSource()) {
		errs = append(errs, fmt.Errorf("specifying binary builds and source repositories at the same time is not allowed"))
	}

	env, duplicateEnv, envErrs := cmdutil.ParseEnvironmentArguments(c.Environment)
	for _, s := range duplicateEnv {
		glog.V(1).Infof("The environment variable %q was overwritten", s)
	}
	errs = append(errs, envErrs...)

	parms, duplicateParms, parmsErrs := cmdutil.ParseEnvironmentArguments(c.TemplateParameters)
	for _, s := range duplicateParms {
		glog.V(1).Infof("The template parameter %q was overwritten", s)
	}
	errs = append(errs, parmsErrs...)

	return refs, repos, env, parms, errors.NewAggregate(errs)
}
// Serve runs an API server, blocking until the server has shut down.
func (s *Server) Serve() {
	glog.V(0).Info("Starting API on ", s.config.APIConfig.ListenAddr)

	if s.config.APIConfig.ListenLimit != 0 {
		glog.V(0).Info("Limiting connections to ", s.config.APIConfig.ListenLimit)
	}

	grace := &graceful.Server{
		Timeout:          s.config.APIConfig.RequestTimeout.Duration,
		ConnState:        s.connState,
		ListenLimit:      s.config.APIConfig.ListenLimit,
		NoSignalHandling: true,
		Server: &http.Server{
			Addr:         s.config.APIConfig.ListenAddr,
			Handler:      newRouter(s),
			ReadTimeout:  s.config.APIConfig.ReadTimeout.Duration,
			WriteTimeout: s.config.APIConfig.WriteTimeout.Duration,
		},
	}

	s.grace = grace
	grace.SetKeepAlivesEnabled(false)
	grace.ShutdownInitiated = func() { s.stopping = true }

	if err := grace.ListenAndServe(); err != nil {
		if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
			glog.Errorf("Failed to gracefully run API server: %s", err.Error())
			return
		}
	}

	glog.Info("API server shut down cleanly")
}
// Admit determines if the pod should be admitted based on the requested security context
// and the available SCCs.
//
// 1. Find SCCs for the user.
// 2. Find SCCs for the SA. If there is an error retrieving SA SCCs it is not fatal.
// 3. Remove duplicates between the user/SA SCCs.
// 4. Create the providers, includes setting pre-allocated values if necessary.
// 5. Try to generate and validate an SCC with providers. If we find one then admit the pod
//    with the validated SCC. If we don't find any reject the pod and give all errors from the
//    failed attempts.
func (c *constraint) Admit(a kadmission.Attributes) error {
	if a.GetResource() != string(kapi.ResourcePods) {
		return nil
	}

	pod, ok := a.GetObject().(*kapi.Pod)
	// if we can't convert then we don't handle this object so just return
	if !ok {
		return nil
	}

	// get all constraints that are usable by the user
	glog.V(4).Infof("getting security context constraints for pod %s (generate: %s) in namespace %s with user info %v", pod.Name, pod.GenerateName, a.GetNamespace(), a.GetUserInfo())
	matchedConstraints, err := getMatchingSecurityContextConstraints(c.store, a.GetUserInfo())
	if err != nil {
		return kadmission.NewForbidden(a, err)
	}

	// get all constraints that are usable by the SA
	if len(pod.Spec.ServiceAccountName) > 0 {
		userInfo := serviceaccount.UserInfo(a.GetNamespace(), pod.Spec.ServiceAccountName, "")
		glog.V(4).Infof("getting security context constraints for pod %s (generate: %s) with service account info %v", pod.Name, pod.GenerateName, userInfo)
		saConstraints, err := getMatchingSecurityContextConstraints(c.store, userInfo)
		if err != nil {
			return kadmission.NewForbidden(a, err)
		}
		matchedConstraints = append(matchedConstraints, saConstraints...)
	}

	// remove duplicate constraints and sort
	matchedConstraints = deduplicateSecurityContextConstraints(matchedConstraints)
	sort.Sort(ByRestrictions(matchedConstraints))

	providers, errs := c.createProvidersFromConstraints(a.GetNamespace(), matchedConstraints)
	logProviders(pod, providers, errs)

	if len(providers) == 0 {
		return kadmission.NewForbidden(a, fmt.Errorf("no providers available to validate pod request"))
	}

	// all containers in a single pod must validate under a single provider or we will reject the request
	validationErrs := fielderrors.ValidationErrorList{}
	for _, provider := range providers {
		if errs := assignSecurityContext(provider, pod); len(errs) > 0 {
			validationErrs = append(validationErrs, errs.Prefix(fmt.Sprintf("provider %s: ", provider.GetSCCName()))...)
			continue
		}

		// the entire pod validated, annotate and accept the pod
		glog.V(4).Infof("pod %s (generate: %s) validated against provider %s", pod.Name, pod.GenerateName, provider.GetSCCName())
		if pod.ObjectMeta.Annotations == nil {
			pod.ObjectMeta.Annotations = map[string]string{}
		}
		pod.ObjectMeta.Annotations[allocator.ValidatedSCCAnnotation] = provider.GetSCCName()
		return nil
	}

	// we didn't validate against any security context constraint provider, reject the pod and give the errors for each attempt
	glog.V(4).Infof("unable to validate pod %s (generate: %s) against any security context constraint: %v", pod.Name, pod.GenerateName, validationErrs)
	return kadmission.NewForbidden(a, fmt.Errorf("unable to validate against any security context constraint: %v", validationErrs))
}
// validate converts all of the arguments on the config into references to objects, or returns an error
func (c *AppConfig) validate() (app.ComponentReferences, app.SourceRepositories, cmdutil.Environment, cmdutil.Environment, error) {
	b := c.refBuilder
	c.addReferenceBuilderComponents(b)
	b.AddGroups(c.Groups)
	refs, repos, errs := b.Result()

	if len(repos) > 0 {
		repos[0].SetContextDir(c.ContextDir)
		if len(repos) > 1 {
			glog.Warningf("You have specified more than one source repository and a context directory. "+
				"The context directory will be applied to the first repository: %q", repos[0])
		}
	}

	if len(c.Strategy) != 0 && len(repos) == 0 {
		errs = append(errs, fmt.Errorf("when --strategy is specified you must provide at least one source code location"))
	}

	env, duplicateEnv, envErrs := cmdutil.ParseEnvironmentArguments(c.Environment)
	for _, s := range duplicateEnv {
		glog.V(1).Infof("The environment variable %q was overwritten", s)
	}
	errs = append(errs, envErrs...)

	parms, duplicateParms, parmsErrs := cmdutil.ParseEnvironmentArguments(c.TemplateParameters)
	for _, s := range duplicateParms {
		glog.V(1).Infof("The template parameter %q was overwritten", s)
	}
	errs = append(errs, parmsErrs...)

	return refs, repos, env, parms, errors.NewAggregate(errs)
}
func (dc *DisruptionController) deletePod(obj interface{}) {
	pod, ok := obj.(*api.Pod)
	// When a delete is dropped, the relist will notice a pod in the store not
	// in the list, leading to the insertion of a tombstone object which contains
	// the deleted key/value. Note that this value might be stale. If the pod
	// changed labels the new ReplicaSet will not be woken up till the periodic
	// resync.
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			glog.Errorf("Couldn't get object from tombstone %+v", obj)
			return
		}
		pod, ok = tombstone.Obj.(*api.Pod)
		if !ok {
			glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
			return
		}
	}
	glog.V(4).Infof("deletePod called on pod %q", pod.Name)
	pdb := dc.getPdbForPod(pod)
	if pdb == nil {
		glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
		return
	}
	glog.V(4).Infof("deletePod %q -> PDB %q", pod.Name, pdb.Name)
	dc.enqueuePdb(pdb)
}
// WaitForAttach runs on the node to detect if the volume (referenced by LUN) is attached. If attached, the device path is returned.
func (attacher *azureDiskAttacher) WaitForAttach(spec *volume.Spec, lunStr string, timeout time.Duration) (string, error) {
	volumeSource, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	if len(lunStr) == 0 {
		return "", fmt.Errorf("WaitForAttach failed for Azure disk %q: lun is empty", volumeSource.DiskName)
	}

	lun, err := strconv.Atoi(lunStr)
	if err != nil {
		return "", fmt.Errorf("WaitForAttach: wrong lun %q, err: %v", lunStr, err)
	}
	scsiHostRescan(&osIOHandler{})
	exe := exec.New()
	devicePath := ""

	err = wait.Poll(checkSleepDuration, timeout, func() (bool, error) {
		glog.V(4).Infof("Checking Azure disk %q (lun %s) is attached.", volumeSource.DiskName, lunStr)
		if devicePath, err = findDiskByLun(lun, &osIOHandler{}, exe); err == nil {
			glog.V(4).Infof("Successfully found attached Azure disk %q (lun %s, device path %s).", volumeSource.DiskName, lunStr, devicePath)
			return true, nil
		}
		// Log the error, if any, and continue checking periodically.
		glog.V(4).Infof("Error checking whether Azure disk %q is attached: %v", volumeSource.DiskName, err)
		return false, nil
	})
	return devicePath, err
}
func (drp *defaultRegistryPinger) ping(registry string) error {
	healthzCheck := func(proto, registry string) error {
		healthzResponse, err := drp.client.Get(fmt.Sprintf("%s://%s/healthz", proto, registry))
		if err != nil {
			return err
		}
		defer healthzResponse.Body.Close()

		if healthzResponse.StatusCode != http.StatusOK {
			return fmt.Errorf("unexpected status code %d", healthzResponse.StatusCode)
		}

		return nil
	}

	var err error
	for _, proto := range []string{"https", "http"} {
		glog.V(4).Infof("Trying %s for %s", proto, registry)
		err = healthzCheck(proto, registry)
		if err == nil {
			break
		}
		glog.V(4).Infof("Error with %s for %s: %v", proto, registry, err)
	}

	return err
}
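// A self-contained, hedged sketch, not part of the original source, of the same
// https-then-http /healthz fallback used by ping above, written against a plain
// *http.Client instead of the registry pinger's embedded client.
func pingRegistrySketch(client *http.Client, registry string) error {
	var err error
	for _, proto := range []string{"https", "http"} {
		var resp *http.Response
		resp, err = client.Get(fmt.Sprintf("%s://%s/healthz", proto, registry))
		if err != nil {
			continue
		}
		resp.Body.Close()
		if resp.StatusCode == http.StatusOK {
			return nil
		}
		err = fmt.Errorf("unexpected status code %d", resp.StatusCode)
	}
	return err
}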
// Routine for finding candidate files given an argument path glob.
func expandGlob(glob string, candidateCh chan string, ctrl GoRtnCntrl) {
	defer ctrl.wg.Done()
	glog.V(1).Infoln("ENTER expandGlob", glob)
	matches, err := filepath.Glob(glob)
	if err != nil {
		FailWithMessage(false, "Unable to interpret %s: %s", glob, err)
	}
	for _, fileName := range matches {
		if ctrl.ShouldStop() {
			return
		}
		if fileio.IsDirectory(fileName) {
			if len(matches) > 1 {
				fmt.Printf("Expanding directory: %s\n", fileName)
			}
			ctrl.wg.Add(1)
			go expandDir(fileName, candidateCh, ctrl)
		} else {
			select {
			case <-ctrl.stopCh:
				return
			case candidateCh <- fileName:
			}
		}
	}
	glog.V(1).Infoln("EXIT expandGlob", glob)
}
// Edges are added to the graph from each predecessor (pod or replication
// controller) to the images specified by the pod spec's list of containers, as
// long as the image is managed by OpenShift.
func addPodSpecToGraph(g graph.Graph, spec *kapi.PodSpec, predecessor gonum.Node) {
	for j := range spec.Containers {
		container := spec.Containers[j]

		glog.V(4).Infof("Examining container image %q", container.Image)

		ref, err := imageapi.ParseDockerImageReference(container.Image)
		if err != nil {
			util.HandleError(fmt.Errorf("unable to parse DockerImageReference %q: %v", container.Image, err))
			continue
		}

		if len(ref.ID) == 0 {
			glog.V(4).Infof("%q has no image ID", container.Image)
			continue
		}

		imageNode := imagegraph.FindImage(g, ref.ID)
		if imageNode == nil {
			glog.Infof("Unable to find image %q in the graph", ref.ID)
			continue
		}

		glog.V(4).Infof("Adding edge from pod to image")
		g.AddEdge(predecessor, imageNode, ReferencedImageEdgeKind)
	}
}
// fileExtImpliesText reports whether the file extension's MIME type implies a text
// file (yes), implies a binary file (neither result set), or is unknown.
func fileExtImpliesText(ext string) (yes, unknown bool) {
	defer func() {
		glog.V(2).Infof("'%s' -> yes=%v unknown=%v", ext, yes, unknown)
	}()
	if ext == "" {
		unknown = true
		return
	}
	mt := mime.TypeByExtension(ext)
	if strings.HasPrefix(mt, "text/") ||
		strings.HasSuffix(mt, "+xml") ||
		strings.HasSuffix(mt, ".json") ||
		strings.HasSuffix(mt, "+json") {
		// Most likely text.
		yes = true
		glog.V(1).Infof("Most likely a text extension: %s", ext)
		return
	}
	if strings.HasPrefix(mt, "audio/") ||
		strings.HasPrefix(mt, "image/") ||
		strings.HasPrefix(mt, "video/") {
		// Almost certainly not text.
		glog.V(1).Infof("Most likely a binary extension: %s", ext)
		return
	}
	unknown = true
	return
}
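// A brief usage sketch, not part of the original source, showing how a caller might
// interpret fileExtImpliesText's two results; the extensions are illustrative only.
func exampleFileExtChecks() {
	for _, ext := range []string{".txt", ".png", ".xyz"} {
		yes, unknown := fileExtImpliesText(ext)
		switch {
		case yes:
			fmt.Printf("%s: treat as text\n", ext)
		case unknown:
			fmt.Printf("%s: unknown, sniff the content\n", ext)
		default:
			fmt.Printf("%s: treat as binary\n", ext)
		}
	}
}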
func (this *KubernetesActionExecutor) extractPodMetadata(podNamespace, podName string) (map[string]interface{}, []api.Container, error) {
	pod, err := this.KubeClient.Pods(podNamespace).Get(podName)
	if err != nil {
		glog.Errorf("Error getting pod %s: %v", podName, err)
		return nil, nil, err
	}
	glog.V(3).Infof("Successfully got pod %s.", podName)

	params := make(map[string]interface{})
	containers := []api.Container{}
	if pod.Labels == nil {
		params["labels"] = ""
	} else {
		params["labels"] = pod.Labels
	}
	params["restart"] = pod.Spec.RestartPolicy
	params["name"] = podName
	params["namespace"] = podNamespace

	containers = append(containers, pod.Spec.Containers...)

	glog.V(3).Infof("Successfully extracted metadata from pod %s before deleting the original pod.", podName)
	return params, containers, nil
}
// probeContainer probes the liveness/readiness of the given container.
// If the container's liveness probe is unsuccessful, set readiness to false.
// If liveness is successful, do a readiness check and set readiness accordingly.
func (kl *Kubelet) probeContainer(pod *api.Pod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
	// Probe liveness.
	live, err := kl.probeContainerLiveness(pod, status, container, dockerContainer)
	if err != nil {
		glog.V(1).Infof("Liveness probe errored: %v", err)
		kl.readiness.set(dockerContainer.ID, false)
		return probe.Unknown, err
	}
	if live != probe.Success {
		glog.V(1).Infof("Liveness probe unsuccessful: %v", live)
		kl.readiness.set(dockerContainer.ID, false)
		return live, nil
	}

	// Probe readiness.
	ready, err := kl.probeContainerReadiness(pod, status, container, dockerContainer)
	if err == nil && ready == probe.Success {
		glog.V(3).Infof("Readiness probe successful: %v", ready)
		kl.readiness.set(dockerContainer.ID, true)
		return probe.Success, nil
	}

	glog.V(1).Infof("Readiness probe failed/errored: %v, %v", ready, err)
	kl.readiness.set(dockerContainer.ID, false)

	containerID := dockertools.DockerID(dockerContainer.ID)
	ref, ok := kl.getRef(containerID)
	if !ok {
		glog.Warningf("No ref for pod '%v' - '%v'", containerID, container.Name)
	} else {
		kl.recorder.Eventf(ref, "unhealthy", "Readiness Probe Failed %v - %v", containerID, container.Name)
	}
	return ready, err
}
func (obj *MungeObject) doWaitStatus(pending bool, requiredContexts []string, c chan error) {
	config := obj.config
	for {
		status := obj.GetStatusState(requiredContexts)
		var done bool
		if pending {
			done = (status == "pending")
		} else {
			done = (status != "pending")
		}
		if done {
			c <- nil
			return
		}
		if config.DryRun {
			glog.V(4).Infof("PR# %d is not pending, would wait 30 seconds, but --dry-run was set", *obj.Issue.Number)
			c <- nil
			return
		}
		sleepTime := 30 * time.Second
		// If the time was explicitly set, use that instead.
		if config.PendingWaitTime != nil {
			sleepTime = *config.PendingWaitTime
		}
		if pending {
			glog.V(4).Infof("PR# %d is not pending, waiting for %f seconds", *obj.Issue.Number, sleepTime.Seconds())
		} else {
			glog.V(4).Infof("PR# %d is pending, waiting for %f seconds", *obj.Issue.Number, sleepTime.Seconds())
		}
		time.Sleep(sleepTime)
	}
}