Example #1
// setEndpoints sets the endpoints for the given service.
// In a multi-master scenario, only the master publishes an endpoint.
// See SchedulerServer.bootstrap.
func (m *SchedulerServer) setEndpoints(serviceName string, ip net.IP, port int) error {
	// The setting we want to find.
	want := []api.EndpointSubset{{
		Addresses: []api.EndpointAddress{{IP: ip.String()}},
		Ports:     []api.EndpointPort{{Port: port, Protocol: api.ProtocolTCP}},
	}}

	ctx := api.NewDefaultContext()
	e, err := m.client.Endpoints(api.NamespaceValue(ctx)).Get(serviceName)
	createOrUpdate := m.client.Endpoints(api.NamespaceValue(ctx)).Update
	if err != nil {
		if errors.IsNotFound(err) {
			createOrUpdate = m.client.Endpoints(api.NamespaceValue(ctx)).Create
		}
		e = &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:      serviceName,
				Namespace: api.NamespaceDefault,
			},
		}
	}
	if !reflect.DeepEqual(e.Subsets, want) {
		e.Subsets = want
		glog.Infof("setting endpoints for master service %q to %#v", serviceName, e)
		_, err = createOrUpdate(e)
		return err
	}
	// We didn't make any changes, so there is no need to call update.
	return nil
}
Example #2
// createSchedulerServiceIfNeeded will create the specified service if it
// doesn't already exist.
func (m *SchedulerServer) createSchedulerServiceIfNeeded(serviceName string, servicePort int) error {
	ctx := api.NewDefaultContext()
	if _, err := m.client.Services(api.NamespaceValue(ctx)).Get(serviceName); err == nil {
		// The service already exists.
		return nil
	}
	svc := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: api.NamespaceDefault,
			Labels:    map[string]string{"provider": "k8sm", "component": "scheduler"},
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}},
			// maintained by this code, not by the pod selector
			Selector:        nil,
			SessionAffinity: api.ServiceAffinityNone,
		},
	}
	if m.ServiceAddress != nil {
		svc.Spec.ClusterIP = m.ServiceAddress.String()
	}
	_, err := m.client.Services(api.NamespaceValue(ctx)).Create(svc)
	if err != nil && errors.IsAlreadyExists(err) {
		err = nil
	}
	return err
}
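
Taken together with Example #1, the service created above has Selector: nil, so its endpoints are never filled in by a pod selector; the scheduler publishes them itself via setEndpoints. A hedged sketch of how the two helpers might be combined during bootstrap follows; the service name, IP, and port are illustrative assumptions, not values from the source.

// Hypothetical bootstrap sequence (sketch only; "k8sm-scheduler", the IP, and port 10251 are illustrative).
func (m *SchedulerServer) publishSelfSketch() error {
	const serviceName = "k8sm-scheduler"
	if err := m.createSchedulerServiceIfNeeded(serviceName, 10251); err != nil {
		return err
	}
	// advertise this scheduler instance as the sole endpoint of the selector-less service
	return m.setEndpoints(serviceName, net.ParseIP("10.2.3.4"), 10251)
}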
Example #3
func (t *Tester) testListMatchLabels(obj runtime.Object, assignFn AssignFunc) {
	ctx := t.TestContext()
	testLabels := map[string]string{"key": "value"}

	foo1 := copyOrDie(obj)
	t.setObjectMeta(foo1, "foo1")
	foo2 := copyOrDie(obj)
	foo2Meta := t.getObjectMetaOrFail(foo2)
	foo2Meta.Name = "foo2"
	foo2Meta.Namespace = api.NamespaceValue(ctx)
	foo2Meta.Labels = testLabels

	existing := assignFn([]runtime.Object{foo1, foo2})
	filtered := []runtime.Object{existing[1]}

	selector := labels.SelectorFromSet(labels.Set(testLabels))
	listObj, err := t.storage.(rest.Lister).List(ctx, selector, fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != len(filtered) {
		t.Errorf("unexpected number of items: %v", len(items))
	}
	if !api.Semantic.DeepEqual(filtered, items) {
		t.Errorf("expected: %#v, got: %#v", filtered, items)
	}
}
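
The filtering above relies on labels.SelectorFromSet, which builds an equality-based selector from a label set. A minimal sketch of its matching behavior (not from the source; the label values are illustrative):

func selectorSketch() {
	sel := labels.SelectorFromSet(labels.Set{"key": "value"})
	_ = sel.Matches(labels.Set{"key": "value", "extra": "x"}) // true: every selector pair is present
	_ = sel.Matches(labels.Set{"key": "other"})               // false: the value differs
}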
Example #4
// TestValidNamespace validates that namespace rules are enforced on a resource prior to create or update
func TestValidNamespace(t *testing.T) {
	ctx := api.NewDefaultContext()
	namespace, _ := api.NamespaceFrom(ctx)
	resource := api.ReplicationController{}
	if !api.ValidNamespace(ctx, &resource.ObjectMeta) {
		t.Errorf("expected success")
	}
	if namespace != resource.Namespace {
		t.Errorf("expected resource to have the default namespace assigned during validation")
	}
	resource = api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: "other"}}
	if api.ValidNamespace(ctx, &resource.ObjectMeta) {
		t.Errorf("Expected error that resource and context errors do not match because resource has different namespace")
	}
	ctx = api.NewContext()
	if api.ValidNamespace(ctx, &resource.ObjectMeta) {
		t.Errorf("Expected error that resource and context errors do not match since context has no namespace")
	}

	ctx = api.NewContext()
	ns := api.NamespaceValue(ctx)
	if ns != "" {
		t.Errorf("Expected the empty string")
	}
}
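
For reference, a minimal sketch of how the context helpers used throughout these examples relate to one another (not from the source; the "demo" namespace is illustrative):

func contextHelpersSketch() {
	ctx := api.NewContext()               // a context with no namespace attached
	empty := api.NamespaceValue(ctx)      // "" when the context carries no namespace
	ctx = api.WithNamespace(ctx, "demo")  // attach a namespace to the context
	ns, ok := api.NamespaceFrom(ctx)      // ns == "demo", ok == true
	also := api.NamespaceValue(ctx)       // "demo"; NamespaceValue discards the ok flag
	defaultCtx := api.NewDefaultContext() // shorthand for a context carrying api.NamespaceDefault
	_, _, _, _, _ = empty, ns, ok, also, defaultCtx
}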
Example #5
// Bind just does a POST binding RPC.
func (b *binder) Bind(binding *api.Binding) error {
	glog.V(2).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name)
	ctx := api.WithNamespace(api.NewContext(), binding.Namespace)
	return b.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
	// TODO: use Pods interface for binding once clusters are upgraded
	// return b.Pods(binding.Namespace).Bind(binding)
}
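
Note that the WithNamespace/NamespaceValue round trip above simply hands the binding's own namespace back; a tiny sketch of that equivalence (an observation, not code from the source):

func bindingNamespaceSketch(binding *api.Binding) string {
	// equivalent to returning binding.Namespace directly
	return api.NamespaceValue(api.WithNamespace(api.NewContext(), binding.Namespace))
}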
Example #6
File: plugin.go  Project: qinguoan/vulcan
// this pod may be out of sync with respect to the API server registry:
//      this pod   |  apiserver registry
//    -------------|----------------------
//      host=.*    |  404           ; pod was deleted
//      host=.*    |  5xx           ; failed to sync, try again later?
//      host=""    |  host=""       ; perhaps no updates to process?
//      host=""    |  host="..."    ; pod has been scheduled and assigned, is there a task assigned? (check TaskIdKey in binding?)
//      host="..." |  host=""       ; pod is no longer scheduled, does it need to be re-queued?
//      host="..." |  host="..."    ; perhaps no updates to process?
//
// TODO(jdef) this needs an integration test
func (s *schedulingPlugin) reconcileTask(t *podtask.T) {
	log.V(1).Infof("reconcile pod %v, assigned to slave %q", t.Pod.Name, t.Spec.AssignedSlave)
	ctx := api.WithNamespace(api.NewDefaultContext(), t.Pod.Namespace)
	pod, err := s.client.Pods(api.NamespaceValue(ctx)).Get(t.Pod.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			// attempt to delete
			if err = s.deleter.deleteOne(&Pod{Pod: &t.Pod}); err != nil && err != noSuchPodErr && err != noSuchTaskErr {
				log.Errorf("failed to delete pod: %v: %v", t.Pod.Name, err)
			}
		} else {
			//TODO(jdef) other errors should probably trigger a retry (w/ backoff).
			//For now, drop the pod on the floor
			log.Warningf("aborting reconciliation for pod %v: %v", t.Pod.Name, err)
		}
		return
	}

	log.Infof("pod %v scheduled on %q according to apiserver", pod.Name, pod.Spec.NodeName)
	if t.Spec.AssignedSlave != pod.Spec.NodeName {
		if pod.Spec.NodeName == "" {
			// pod is unscheduled.
			// it's possible that we dropped the pod in the scheduler error handler
			// because of task misalignment with the pod (task.Has(podtask.Launched) == true)

			podKey, err := podtask.MakePodKey(ctx, pod.Name)
			if err != nil {
				log.Error(err)
				return
			}

			s.api.Lock()
			defer s.api.Unlock()

			if _, state := s.api.tasks().ForPod(podKey); state != podtask.StateUnknown {
				//TODO(jdef) reconcile the task
				log.Errorf("task already registered for pod %v", pod.Name)
				return
			}

			now := time.Now()
			log.V(3).Infof("reoffering pod %v", podKey)
			s.qr.reoffer(&Pod{
				Pod:      pod,
				deadline: &now,
			})
		} else {
			// pod is scheduled.
			// not sure how this happened behind our backs. attempt to reconstruct
			// at least a partial podtask.T record.
			//TODO(jdef) reconcile the task
			log.Errorf("pod already scheduled: %v", pod.Name)
		}
	} else {
		//TODO(jdef) for now, ignore the fact that the rest of the spec may be different
		//and assume that our knowledge of the pod aligns with that of the apiserver
		log.Error("pod reconciliation does not support updates; not yet implemented")
	}
}
Example #7
func (t *Tester) setObjectMeta(obj runtime.Object, name string) {
	meta := t.getObjectMetaOrFail(obj)
	meta.Name = name
	if t.clusterScope {
		meta.Namespace = api.NamespaceNone
	} else {
		meta.Namespace = api.NamespaceValue(t.TestContext())
	}
	meta.GenerateName = ""
}
Example #8
func (t *Tester) testGetMimatchedNamespace(obj runtime.Object) {
	ctx1 := api.WithNamespace(api.NewContext(), "bar1")
	ctx2 := api.WithNamespace(api.NewContext(), "bar2")
	objMeta := t.getObjectMetaOrFail(obj)
	objMeta.Name = "foo4"
	objMeta.Namespace = api.NamespaceValue(ctx1)
	_, err := t.storage.(rest.Creater).Create(ctx1, obj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	_, err = t.storage.(rest.Getter).Get(ctx2, "foo4")
	if t.clusterScope {
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
	} else {
		if !errors.IsNotFound(err) {
			t.Errorf("unexpected error returned: %#v", err)
		}
	}
}
Example #9
// testGetDifferentNamespace ensures same-name objects in different namespaces do not clash
func (t *Tester) testGetDifferentNamespace(obj runtime.Object) {
	if t.clusterScope {
		t.Fatalf("the test does not work in in cluster-scope")
	}

	objMeta := t.getObjectMetaOrFail(obj)
	objMeta.Name = "foo5"

	ctx1 := api.WithNamespace(api.NewContext(), "bar3")
	objMeta.Namespace = api.NamespaceValue(ctx1)
	_, err := t.storage.(rest.Creater).Create(ctx1, obj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	ctx2 := api.WithNamespace(api.NewContext(), "bar4")
	objMeta.Namespace = api.NamespaceValue(ctx2)
	_, err = t.storage.(rest.Creater).Create(ctx2, obj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	got1, err := t.storage.(rest.Getter).Get(ctx1, objMeta.Name)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	got1Meta := t.getObjectMetaOrFail(got1)
	if got1Meta.Name != objMeta.Name {
		t.Errorf("unexpected name of object: %#v, expected: %s", got1, objMeta.Name)
	}
	if got1Meta.Namespace != api.NamespaceValue(ctx1) {
		t.Errorf("unexpected namespace of object: %#v, expected: %s", got1, api.NamespaceValue(ctx1))
	}

	got2, err := t.storage.(rest.Getter).Get(ctx2, objMeta.Name)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	got2Meta := t.getObjectMetaOrFail(got2)
	if got2Meta.Name != objMeta.Name {
		t.Errorf("unexpected name of object: %#v, expected: %s", got2, objMeta.Name)
	}
	if got2Meta.Namespace != api.NamespaceValue(ctx2) {
		t.Errorf("unexpected namespace of object: %#v, expected: %s", got2, api.NamespaceValue(ctx2))
	}
}
Example #10
// async continuation of LaunchTask
func (k *KubernetesExecutor) launchTask(driver bindings.ExecutorDriver, taskId string, pod *api.Pod) {
	deleteTask := func() {
		k.lock.Lock()
		defer k.lock.Unlock()
		delete(k.tasks, taskId)
		k.resetSuicideWatch(driver)
	}

	// TODO(k8s): use Pods interface for binding once clusters are upgraded
	// return b.Pods(binding.Namespace).Bind(binding)
	if pod.Spec.NodeName == "" {
		//HACK(jdef): cloned binding construction from k8s plugin/pkg/scheduler/scheduler.go
		binding := &api.Binding{
			ObjectMeta: api.ObjectMeta{
				Namespace:   pod.Namespace,
				Name:        pod.Name,
				Annotations: make(map[string]string),
			},
			Target: api.ObjectReference{
				Kind: "Node",
				Name: pod.Annotations[meta.BindingHostKey],
			},
		}

		// forward the annotations that the scheduler wants to apply
		for k, v := range pod.Annotations {
			binding.Annotations[k] = v
		}

		// create binding on apiserver
		log.Infof("Binding '%v/%v' to '%v' with annotations %+v...", pod.Namespace, pod.Name, binding.Target.Name, binding.Annotations)
		ctx := api.WithNamespace(api.NewContext(), binding.Namespace)
		err := k.client.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
		if err != nil {
			deleteTask()
			k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
				messages.CreateBindingFailure))
			return
		}
	} else {
		// post annotations update to apiserver
		patch := struct {
			Metadata struct {
				Annotations map[string]string `json:"annotations"`
			} `json:"metadata"`
		}{}
		patch.Metadata.Annotations = pod.Annotations
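		// the anonymous struct marshals to a merge-patch body of the form {"metadata":{"annotations":{...}}}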
		patchJson, _ := json.Marshal(patch)
		log.V(4).Infof("Patching annotations %v of pod %v/%v: %v", pod.Annotations, pod.Namespace, pod.Name, string(patchJson))
		err := k.client.Patch(api.MergePatchType).RequestURI(pod.SelfLink).Body(patchJson).Do().Error()
		if err != nil {
			log.Errorf("Error updating annotations of ready-to-launch pod %v/%v: %v", pod.Namespace, pod.Name, err)
			deleteTask()
			k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
				messages.AnnotationUpdateFailure))
			return
		}
	}

	podFullName := container.GetPodFullName(pod)

	// allow a recently failed-over scheduler the chance to recover the task/pod binding:
	// it may have failed and recovered before the apiserver is able to report the updated
	// binding information. replays of this status event will signal to the scheduler that
	// the apiserver should be up-to-date.
	data, err := json.Marshal(api.PodStatusResult{
		ObjectMeta: api.ObjectMeta{
			Name:     podFullName,
			SelfLink: "/podstatusresult",
		},
	})
	if err != nil {
		deleteTask()
		log.Errorf("failed to marshal pod status result: %v", err)
		k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
			err.Error()))
		return
	}

	k.lock.Lock()
	defer k.lock.Unlock()

	// Add the task.
	task, found := k.tasks[taskId]
	if !found {
		log.V(1).Infof("task %v not found, probably killed: aborting launch, reporting lost", taskId)
		k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
		return
	}

	//TODO(jdef) check for duplicate pod name, if found send TASK_ERROR

	// from here on, we need to delete containers associated with the task
	// upon it going into a terminal state
	task.podName = podFullName
	k.pods[podFullName] = pod

	// send the new pod to the kubelet which will spin it up
	update := kubelet.PodUpdate{
		Op:   kubelet.ADD,
		Pods: []*api.Pod{pod},
	}
	k.updateChan <- update

	statusUpdate := &mesos.TaskStatus{
		TaskId:  mutil.NewTaskID(taskId),
		State:   mesos.TaskState_TASK_STARTING.Enum(),
		Message: proto.String(messages.CreateBindingSuccess),
		Data:    data,
	}
	k.sendStatus(driver, statusUpdate)

	// Delay reporting 'task running' until container is up.
	psf := podStatusFunc(func() (*api.PodStatus, error) {
		status, err := k.podStatusFunc(k.kl, pod)
		if err != nil {
			return nil, err
		}
		status.Phase = kubelet.GetPhase(&pod.Spec, status.ContainerStatuses)
		hostIP, err := k.kl.GetHostIP()
		if err != nil {
			log.Errorf("Cannot get host IP: %v", err)
		} else {
			status.HostIP = hostIP.String()
		}
		return status, nil
	})

	go k._launchTask(driver, taskId, podFullName, psf)
}