Example #1
// NewScheme creates a new Scheme. This scheme is pluggable by default.
func NewScheme() *Scheme {
	s := &Scheme{
		gvkToType:        map[schema.GroupVersionKind]reflect.Type{},
		typeToGVK:        map[reflect.Type][]schema.GroupVersionKind{},
		unversionedTypes: map[reflect.Type]schema.GroupVersionKind{},
		unversionedKinds: map[string]reflect.Type{},
		cloner:           conversion.NewCloner(),
		fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{},
		defaulterFuncs:            map[reflect.Type]func(interface{}){},
	}
	s.converter = conversion.NewConverter(s.nameFunc)

	if err := s.AddConversionFuncs(DefaultEmbeddedConversions()...); err != nil {
		panic(err)
	}

	// Enable map[string][]string conversions by default
	if err := s.AddConversionFuncs(DefaultStringConversions...); err != nil {
		panic(err)
	}
	if err := s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil {
		panic(err)
	}
	if err := s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil {
		panic(err)
	}
	return s
}
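The interesting part of NewScheme is the pair of maps it initializes: gvkToType and typeToGVK let the scheme translate between Go types and group/version/kind identifiers in both directions. Below is a minimal, self-contained sketch of that idea; the names (GVK, miniScheme, register, newObject) are made up for illustration and are not the real runtime.Scheme API.

package main

import (
	"fmt"
	"reflect"
)

// GVK is a stand-in for schema.GroupVersionKind.
type GVK struct{ Group, Version, Kind string }

// miniScheme keeps the same two-way mapping that NewScheme initializes above.
type miniScheme struct {
	gvkToType map[GVK]reflect.Type
	typeToGVK map[reflect.Type][]GVK
}

func newMiniScheme() *miniScheme {
	return &miniScheme{
		gvkToType: map[GVK]reflect.Type{},
		typeToGVK: map[reflect.Type][]GVK{},
	}
}

// register records obj's concrete type under the given GVK in both maps.
func (s *miniScheme) register(gvk GVK, obj interface{}) {
	t := reflect.TypeOf(obj).Elem()
	s.gvkToType[gvk] = t
	s.typeToGVK[t] = append(s.typeToGVK[t], gvk)
}

// newObject allocates a fresh instance of the type registered for gvk.
func (s *miniScheme) newObject(gvk GVK) (interface{}, bool) {
	t, ok := s.gvkToType[gvk]
	if !ok {
		return nil, false
	}
	return reflect.New(t).Interface(), true
}

type Pod struct{ Name string }

func main() {
	s := newMiniScheme()
	s.register(GVK{Version: "v1", Kind: "Pod"}, &Pod{})
	obj, _ := s.newObject(GVK{Version: "v1", Kind: "Pod"})
	fmt.Printf("%T\n", obj) // *main.Pod
}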
// syncService will sync the Service with the given key, creating, updating, or
// cleaning up its load-balancer state as needed. This function is not meant to be
// invoked concurrently with the same key.
func (s *ServiceController) syncService(key string) error {
	startTime := time.Now()
	var cachedService *cachedService
	var retryDelay time.Duration
	defer func() {
		glog.V(4).Infof("Finished syncing service %q (%v)", key, time.Since(startTime))
	}()
	// obj holds the latest service info from apiserver
	objFromStore, exists, err := s.serviceStore.Indexer.GetByKey(key)
	if err != nil {
		glog.Errorf("Unable to retrieve service %v from store: %v", key, err)
		s.queue.Add(key)
		return err
	}
	if !exists {
		// service absence in store means watcher caught the deletion, ensure LB info is cleaned
		glog.Infof("Service has been deleted %v", key)
		err, retryDelay = s.processServiceDeletion(key)
	}
	// Create a copy before modifying the obj to prevent race condition with
	// other readers of obj from store.
	obj, err := conversion.NewCloner().DeepCopy(objFromStore)
	if err != nil {
		glog.Errorf("Error in deep copying service %v retrieved from store: %v", key, err)
		s.queue.Add(key)
		return err
	}

	if exists {
		service, ok := obj.(*v1.Service)
		if ok {
			cachedService = s.serviceCache.getOrCreate(key)
			err, retryDelay = s.processServiceUpdate(cachedService, service, key)
		} else {
			tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
			if !ok {
				return fmt.Errorf("Object contained wasn't a service or a deleted key: %+v", obj)
			}
			glog.Infof("Found tombstone for %v", key)
			err, retryDelay = s.processServiceDeletion(tombstone.Key)
		}
	}

	if retryDelay != 0 {
		s.enqueueService(obj)
	} else if err != nil {
		runtime.HandleError(fmt.Errorf("Failed to process service. Not retrying: %v", err))
	}
	return nil
}
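The deep copy before mutation is the key pattern in syncService: objects returned by the informer store are shared with every other reader, so the controller clones objFromStore before doing anything that might modify it. Below is a stripped-down illustration of why the copy matters; the Service struct and deepCopy helper are hypothetical stand-ins, not the controller's real types.

package main

import "fmt"

// Service is a stand-in for the cached API object.
type Service struct {
	Name   string
	Labels map[string]string
}

// deepCopy returns an independent copy, including the Labels map.
func deepCopy(in *Service) *Service {
	out := &Service{Name: in.Name, Labels: map[string]string{}}
	for k, v := range in.Labels {
		out.Labels[k] = v
	}
	return out
}

func main() {
	// The store hands every caller the same pointer.
	shared := &Service{Name: "frontend", Labels: map[string]string{"tier": "web"}}

	// Wrong: mutating the shared object races with other readers of the store.
	// shared.Labels["synced"] = "true"

	// Right: copy first, then mutate only the copy, as syncService does above.
	own := deepCopy(shared)
	own.Labels["synced"] = "true"

	fmt.Println(shared.Labels) // map[tier:web]
	fmt.Println(own.Labels)    // map[synced:true tier:web]
}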
// tryUpdateNodeStatus tries to update node status to the API server. It also
// updates the kubelet's pod CIDR from the node spec before setting the status.
func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
	// In large clusters, GET and PUT operations on Node objects coming
	// from here are the majority of load on apiserver and etcd.
	// To reduce the load on etcd, we are serving GET operations from
	// apiserver cache (the data might be slightly delayed but it doesn't
	// seem to cause more conflict - the delays are pretty small).
	// If it results in a conflict, all retries are served directly from etcd.
	opts := metav1.GetOptions{}
	if tryNumber == 0 {
		opts.ResourceVersion = "0"
	}
	node, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName), opts)
	if err != nil {
		return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
	}

	clonedNode, err := conversion.NewCloner().DeepCopy(node)
	if err != nil {
		return fmt.Errorf("error clone node %q: %v", kl.nodeName, err)
	}

	originalNode, ok := clonedNode.(*v1.Node)
	if !ok || originalNode == nil {
		return fmt.Errorf("failed to cast %q node object %#v to v1.Node", kl.nodeName, clonedNode)
	}

	kl.updatePodCIDR(node.Spec.PodCIDR)

	kl.setNodeStatus(node)
	// Patch the current status on the API server
	updatedNode, err := nodeutil.PatchNodeStatus(kl.kubeClient, types.NodeName(kl.nodeName), originalNode, node)
	if err != nil {
		return err
	}
	// If update finishes successfully, mark the volumeInUse as reportedInUse to indicate
	// those volumes are already updated in the node's status
	kl.volumeManager.MarkVolumesAsReportedInUse(updatedNode.Status.VolumesInUse)
	return nil
}
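tryUpdateNodeStatus takes a tryNumber so that only the very first attempt is served from the apiserver cache (ResourceVersion "0"); if that stale read causes a conflict, later attempts fall back to reads backed by etcd. Here is a hedged, self-contained sketch of a retry wrapper built around that idea; the names updateNodeStatus and getNode are assumptions for illustration, not the kubelet's actual helpers.

package main

import (
	"errors"
	"fmt"
)

// getNode is a stand-in for the apiserver GET. fromCache mirrors passing
// ResourceVersion "0" on the first attempt in tryUpdateNodeStatus above.
func getNode(fromCache bool) (string, error) {
	if fromCache {
		return "node (possibly stale, served from the apiserver cache)", nil
	}
	return "node (served from etcd)", nil
}

// updateNodeStatus retries the given attempt function a fixed number of times;
// only the first attempt is allowed to read from the cache.
func updateNodeStatus(retries int, try func(attempt int) error) error {
	for i := 0; i < retries; i++ {
		if err := try(i); err != nil {
			fmt.Println("attempt failed, will retry:", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	_ = updateNodeStatus(5, func(attempt int) error {
		node, err := getNode(attempt == 0)
		if err != nil {
			return err
		}
		fmt.Println("attempt", attempt, "got:", node)
		// A conflict on the status PATCH would be returned as an error here,
		// so the next attempt (attempt > 0) skips the cache.
		return nil
	})
}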
// tryRegisterWithApiServer makes an attempt to register the given node with
// the API server, returning a boolean indicating whether the attempt was
// successful.  If a node with the same name already exists, it reconciles the
// value of the annotation for controller-managed attach-detach of attachable
// persistent volumes for the node.  If a node of the same name exists but has
// a different externalID value, it attempts to delete that node so that a
// later attempt can recreate it.
func (kl *Kubelet) tryRegisterWithApiServer(node *v1.Node) bool {
	_, err := kl.kubeClient.Core().Nodes().Create(node)
	if err == nil {
		return true
	}

	if !apierrors.IsAlreadyExists(err) {
		glog.Errorf("Unable to register node %q with API server: %v", kl.nodeName, err)
		return false
	}

	existingNode, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName), metav1.GetOptions{})
	if err != nil {
		glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err)
		return false
	}
	if existingNode == nil {
		glog.Errorf("Unable to register node %q with API server: no node instance returned", kl.nodeName)
		return false
	}

	clonedNode, err := conversion.NewCloner().DeepCopy(existingNode)
	if err != nil {
		glog.Errorf("Unable to clone %q node object %#v: %v", kl.nodeName, existingNode, err)
		return false
	}

	originalNode, ok := clonedNode.(*v1.Node)
	if !ok || originalNode == nil {
		glog.Errorf("Unable to cast %q node object %#v to v1.Node", kl.nodeName, clonedNode)
		return false
	}

	if existingNode.Spec.ExternalID == node.Spec.ExternalID {
		glog.Infof("Node %s was previously registered", kl.nodeName)

		// Edge case: the node was previously registered; reconcile
		// the value of the controller-managed attach-detach
		// annotation.
		requiresUpdate := kl.reconcileCMADAnnotationWithExistingNode(node, existingNode)
		if requiresUpdate {
			if _, err := nodeutil.PatchNodeStatus(kl.kubeClient, types.NodeName(kl.nodeName),
				originalNode, existingNode); err != nil {
				glog.Errorf("Unable to reconcile node %q with API server: error updating node: %v", kl.nodeName, err)
				return false
			}
		}

		return true
	}

	glog.Errorf(
		"Previously node %q had externalID %q; now it is %q; will delete and recreate.",
		kl.nodeName, node.Spec.ExternalID, existingNode.Spec.ExternalID,
	)
	if err := kl.kubeClient.Core().Nodes().Delete(node.Name, nil); err != nil {
		glog.Errorf("Unable to register node %q with API server: error deleting old node: %v", kl.nodeName, err)
	} else {
		glog.Info("Deleted old node object %q", kl.nodeName)
	}

	return false
}
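tryRegisterWithApiServer only reports success or failure for a single attempt; its caller is expected to keep retrying with a growing delay until registration succeeds. The following is a minimal sketch of such a caller, with a made-up name (registerWithBackoff) rather than the kubelet's real registration loop.

package main

import (
	"fmt"
	"time"
)

// registerWithBackoff keeps calling try until it reports success, doubling the
// delay between attempts up to a cap, mirroring how a caller of
// tryRegisterWithApiServer would behave.
func registerWithBackoff(try func() bool, initial, max time.Duration) {
	delay := initial
	for {
		if try() {
			return
		}
		time.Sleep(delay)
		delay *= 2
		if delay > max {
			delay = max
		}
	}
}

func main() {
	attempts := 0
	registerWithBackoff(func() bool {
		attempts++
		fmt.Println("registration attempt", attempts)
		return attempts >= 3 // succeed on the third try
	}, 10*time.Millisecond, 100*time.Millisecond)
}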