func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, schedulable bool) error {
	n, err := findMyself(client)
	if err != nil {
		return err
	}

	n.ObjectMeta.Labels["kubeadm.alpha.kubernetes.io/role"] = "master"

	if !schedulable {
		taintsAnnotation, err := json.Marshal([]api.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
		if err != nil {
			return err
		}
		n.ObjectMeta.Annotations[api.TaintsAnnotationKey] = string(taintsAnnotation)
	}

	if _, err := client.Nodes().Update(n); err != nil {
		if apierrs.IsConflict(err) {
			fmt.Println("<master/apiclient> temporarily unable to update master node metadata due to conflict (will retry)")
			time.Sleep(apiCallRetryInterval)
			// Return the result of the retry so a failed retry is not
			// silently reported as success.
			return attemptToUpdateMasterRoleLabelsAndTaints(client, schedulable)
		}
		return err
	}

	return nil
}
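// A minimal sketch of the same retry pattern as a bounded loop instead of
// unbounded recursion. retryOnConflict, isConflict, maxRetries and interval
// are illustrative names, not part of the original code.
func retryOnConflict(maxRetries int, interval time.Duration, isConflict func(error) bool, fn func() error) error {
	var err error
	for i := 0; i < maxRetries; i++ {
		if err = fn(); err == nil || !isConflict(err) {
			// Success, or a non-conflict error that retrying will not fix.
			return err
		}
		time.Sleep(interval)
	}
	return err
}

// Hypothetical usage with the function above:
//
//	err := retryOnConflict(5, apiCallRetryInterval, apierrs.IsConflict, func() error {
//		_, err := client.Nodes().Update(n)
//		return err
//	})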
// Update updates an existing node api object by looking up the given
// hostname. The updated node merges the given slave attribute labels
// and annotations with the found api object.
func Update(
	client *clientset.Clientset,
	hostname string,
	slaveAttrLabels, annotations map[string]string,
) (n *api.Node, err error) {
	for i := 0; i < clientRetryCount; i++ {
		n, err = client.Nodes().Get(hostname)
		if err != nil {
			return nil, fmt.Errorf("error getting node %q: %v", hostname, err)
		}
		if n == nil {
			return nil, fmt.Errorf("no node instance returned for %q", hostname)
		}

		// Update labels derived from Mesos slave attributes; keep all other labels.
		n.Labels = mergeMaps(
			filterMap(n.Labels, IsNotSlaveAttributeLabel),
			slaveAttrLabels,
		)
		n.Annotations = mergeMaps(n.Annotations, annotations)

		n, err = client.Nodes().Update(n)
		if err == nil {
			return n, nil
		}

		log.Infof("retry %d/%d: error updating node %v err %v", i, clientRetryCount, n, err)
		time.Sleep(time.Duration(i) * clientRetryInterval)
	}

	return nil, err
}
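// Minimal sketches of the mergeMaps and filterMap helpers that Update relies
// on; the real implementations in the repo may differ in signature or detail.

// filterMap keeps only the entries for which pred returns true.
func filterMap(m map[string]string, pred func(k, v string) bool) map[string]string {
	result := map[string]string{}
	for k, v := range m {
		if pred(k, v) {
			result[k] = v
		}
	}
	return result
}

// mergeMaps merges maps left to right; later maps win on key collisions.
func mergeMaps(ms ...map[string]string) map[string]string {
	result := map[string]string{}
	for _, m := range ms {
		for k, v := range m {
			result[k] = v
		}
	}
	return result
}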
// It's safe to do this for alpha, as we don't have HA and there is no way we
// can get more than one node here (TODO(phase1+) use os.Hostname)
func findMyself(client *clientset.Clientset) (*api.Node, error) {
	nodeList, err := client.Nodes().List(api.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("unable to list nodes [%v]", err)
	}
	if len(nodeList.Items) < 1 {
		return nil, fmt.Errorf("no nodes found")
	}
	node := &nodeList.Items[0]
	return node, nil
}
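// A sketch of the TODO(phase1+) above: resolve the local hostname and fetch
// that node directly instead of listing all nodes and taking the first one.
// findMyselfByHostname is an illustrative name; assumes an "os" import.
func findMyselfByHostname(client *clientset.Clientset) (*api.Node, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return nil, fmt.Errorf("unable to get hostname [%v]", err)
	}
	node, err := client.Nodes().Get(hostname)
	if err != nil {
		return nil, fmt.Errorf("unable to get node %q [%v]", hostname, err)
	}
	return node, nil
}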
// getNode gets the node object from the apiserver.
func getNode(c *clientset.Clientset) (*api.Node, error) {
	nodes, err := c.Nodes().List(api.ListOptions{})
	Expect(err).NotTo(HaveOccurred(), "should be able to list nodes")
	if nodes == nil {
		return nil, fmt.Errorf("the node list is nil")
	}
	Expect(len(nodes.Items) > 1).NotTo(BeTrue(), "the cluster should have no more than 1 node")
	if len(nodes.Items) == 0 {
		return nil, fmt.Errorf("empty node list: %+v", nodes)
	}
	return &nodes.Items[0], nil
}
// Create creates a new node api object with the given hostname,
// slave attribute labels and annotations.
func Create(
	client *clientset.Clientset,
	hostName string,
	slaveAttrLabels, annotations map[string]string,
) (*api.Node, error) {
	n := api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: hostName,
		},
		Spec: api.NodeSpec{
			ExternalID: hostName,
		},
		Status: api.NodeStatus{
			Phase: api.NodePending,
			// WORKAROUND(sttts): make sure that the Ready condition is the
			// first one. The kube-ui v3 depends on this assumption.
			// TODO(sttts): remove this workaround when kube-ui v4 is used or we
			// merge this with the status update in the controller manager.
			Conditions: []api.NodeCondition{
				{
					Type:              api.NodeReady,
					Status:            api.ConditionTrue,
					Reason:            slaveReadyReason,
					Message:           slaveReadyMessage,
					LastHeartbeatTime: unversioned.Now(),
				},
			},
		},
	}

	n.Labels = mergeMaps(
		map[string]string{"kubernetes.io/hostname": hostName},
		slaveAttrLabels,
	)
	n.Annotations = annotations

	// try to create
	return client.Nodes().Create(&n)
}
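// Hypothetical glue tying Create and Update together: try to create the node
// first, and fall back to Update if it already exists. errors.IsAlreadyExists
// follows the same errors package used in Update above; createOrUpdate itself
// is an assumed name, not necessarily what the repo provides.
func createOrUpdate(
	client *clientset.Clientset,
	hostName string,
	slaveAttrLabels, annotations map[string]string,
) (*api.Node, error) {
	n, err := Create(client, hostName, slaveAttrLabels, annotations)
	if errors.IsAlreadyExists(err) {
		return Update(client, hostName, slaveAttrLabels, annotations)
	}
	return n, err
}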