Example #1
func filterInvalidPods(pods []api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
	names := util.StringSet{}
	for i := range pods {
		pod := &pods[i]
		var errlist []error
		if errs := validation.ValidatePod(pod); len(errs) != 0 {
			errlist = append(errlist, errs...)
			// If validation fails, don't trust it any further -
			// even Name could be bad.
		} else {
			name := kubelet.GetPodFullName(pod)
			if names.Has(name) {
				errlist = append(errlist, apierrs.NewFieldDuplicate("name", pod.Name))
			} else {
				names.Insert(name)
			}
		}
		if len(errlist) > 0 {
			name := bestPodIdentString(pod)
			err := utilerrors.NewAggregate(errlist)
			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
			recorder.Eventf(pod, "failedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
			continue
		}
		filtered = append(filtered, pod)
	}
	return
}
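As a rough illustration (not part of the original source), a config source could wrap filterInvalidPods like this; the helper name, the "file" source label, and the dropped-pod warning are assumptions added here:

func acceptPodsFromFile(pods []api.Pod, recorder record.EventRecorder) []*api.Pod {
	// filterInvalidPods drops pods that fail validation or reuse a name
	// within the same source; the wrapper only reports how many were rejected.
	filtered := filterInvalidPods(pods, "file", recorder)
	if dropped := len(pods) - len(filtered); dropped > 0 {
		glog.Warningf("Dropped %d invalid pod(s) from source %q", dropped, "file")
	}
	return filtered
}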
Example #2
// LaunchTask is called when the executor receives a request to launch a task.
func (k *KubernetesExecutor) LaunchTask(driver mesos.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	log.Infof("Launch task %v\n", taskInfo)

	if !k.registered {
		log.Warningf("Ignore launch task because the executor is disconnected\n")
		k.sendStatusUpdate(taskInfo.GetTaskId(),
			mesos.TaskState_TASK_FAILED, "Executor not registered yet")
		return
	}

	taskId := taskInfo.GetTaskId().GetValue()
	if _, found := k.tasks[taskId]; found {
		log.Warningf("Task already launched\n")
		// Not to send back TASK_RUNNING here, because
		// may be duplicated messages or duplicated task id.
		return
	}

	// Get the container manifest from the taskInfo.
	var manifest api.ContainerManifest
	if err := yaml.Unmarshal(taskInfo.GetData(), &manifest); err != nil {
		log.Warningf("Failed to extract yaml data from the taskInfo.data %v\n", err)
		k.sendStatusUpdate(taskInfo.GetTaskId(),
			mesos.TaskState_TASK_FAILED, "Failed to extract yaml data")
		return
	}

	// TODO(nnielsen): Verify this assumption. Manifest IDs have been
	// marked as deprecated.
	podID := manifest.ID

	// Add the task.
	k.tasks[taskId] = &kuberTask{
		mesosTaskInfo:     taskInfo,
		containerManifest: &manifest,
	}

	k.pods = append(k.pods, kubelet.Pod{
		Name:      podID,
		Namespace: "etcd",
		Manifest:  manifest,
	})

	getPidInfo := func(name string) (api.PodInfo, error) {
		podFullName := kubelet.GetPodFullName(&kubelet.Pod{Name: name, Namespace: "etcd"})

		// Only ErrNoContainersInPod is surfaced (meaning the pod is not
		// running yet); any other error is ignored and the info is
		// returned as-is.
		info, err := k.kl.GetPodInfo(podFullName)
		if err == kubelet.ErrNoContainersInPod {
			return nil, err
		}

		return info, nil
	}

	// TODO(nnielsen): Fail if container is already running.

	// Checkpoint pods.

	// Send the pod updates to the channel.
	// TODO(yifan): Replace SET with ADD when it's implemented.
	// TODO(nnielsen): Incoming launch requests end up destroying already
	// running pods.
	update := kubelet.PodUpdate{
		Pods: k.pods,
		Op:   kubelet.SET,
	}
	k.updateChan <- update

	// Delay reporting 'task running' until container is up.
	go func() {
		expires := time.Now().Add(launchGracePeriod)
		for {
			now := time.Now()
			if now.After(expires) {
				log.Warningf("Launch expired grace period of '%v'", launchGracePeriod)
				break
			}

			// We need to poll the kubelet for the pod state, as
			// there is no existing event / push model for this.
			time.Sleep(containerPollTime)

			info, err := getPidInfo(podID)
			if err != nil {
				continue
			}

			log.V(2).Infof("Found pod info: '%v'", info)
			data, err := json.Marshal(info)
			if err != nil {
				log.Warningf("Failed to marshal pod info: %v", err)
			}

			statusUpdate := &mesos.TaskStatus{
				TaskId:  taskInfo.GetTaskId(),
				State:   mesos.NewTaskState(mesos.TaskState_TASK_RUNNING),
				Message: proto.String("Pod '" + podID + "' is running"),
				Data:    data,
			}

			if err := k.driver.SendStatusUpdate(statusUpdate); err != nil {
				log.Warningf("Failed to send status update%v, %v", err)
			}

			// TODO(nnielsen): Monitor health of container and report if lost.
			go func() {
				for {
					time.Sleep(containerPollTime)
					_, err := getPidInfo(podID)
					if err != nil {
						k.sendStatusUpdate(taskInfo.GetTaskId(), mesos.TaskState_TASK_LOST, "Task lost: container disappeared")
						return
					}
				}
			}()

			return
		}
		k.sendStatusUpdate(taskInfo.GetTaskId(), mesos.TaskState_TASK_LOST, "Task lost: launch failed")
	}()
}
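The launch goroutine above inlines a poll-with-deadline loop. A standalone sketch of that pattern, with hypothetical names (waitForPodInfo, the getInfo callback) and the grace/poll durations passed in explicitly, could look like this:

func waitForPodInfo(getInfo func() (api.PodInfo, error), grace, poll time.Duration) (api.PodInfo, bool) {
	deadline := time.Now().Add(grace)
	for time.Now().Before(deadline) {
		// As in LaunchTask above, the kubelet exposes no push/event API
		// here, so polling is the only option.
		time.Sleep(poll)
		if info, err := getInfo(); err == nil {
			return info, true
		}
	}
	return nil, false
}

A caller like LaunchTask could then report TASK_RUNNING when the second return value is true and TASK_LOST otherwise; the executor keeps the equivalent loop inline instead of using a helper.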
Example #3
func (s *podStorage) merge(source string, change interface{}) (adds, updates, deletes *kubelet.PodUpdate) {
	s.podLock.Lock()
	defer s.podLock.Unlock()

	adds = &kubelet.PodUpdate{Op: kubelet.ADD}
	updates = &kubelet.PodUpdate{Op: kubelet.UPDATE}
	deletes = &kubelet.PodUpdate{Op: kubelet.REMOVE}

	pods := s.pods[source]
	if pods == nil {
		pods = make(map[string]*api.Pod)
	}

	update := change.(kubelet.PodUpdate)
	switch update.Op {
	case kubelet.ADD, kubelet.UPDATE:
		if update.Op == kubelet.ADD {
			glog.V(4).Infof("Adding new pods from source %s : %v", source, update.Pods)
		} else {
			glog.V(4).Infof("Updating pods from source %s : %v", source, update.Pods)
		}

		filtered := filterInvalidPods(update.Pods, source, s.recorder)
		for _, ref := range filtered {
			name := kubelet.GetPodFullName(ref)
			if existing, found := pods[name]; found {
				if !reflect.DeepEqual(existing.Spec, ref.Spec) {
					// this is an update
					existing.Spec = ref.Spec
					updates.Pods = append(updates.Pods, *existing)
					continue
				}
				// this is a no-op
				continue
			}
			// this is an add
			if ref.Annotations == nil {
				ref.Annotations = make(map[string]string)
			}
			ref.Annotations[kubelet.ConfigSourceAnnotationKey] = source
			pods[name] = ref
			adds.Pods = append(adds.Pods, *ref)
		}

	case kubelet.REMOVE:
		glog.V(4).Infof("Removing a pod %v", update)
		for _, value := range update.Pods {
			name := kubelet.GetPodFullName(&value)
			if existing, found := pods[name]; found {
				// this is a delete
				delete(pods, name)
				deletes.Pods = append(deletes.Pods, *existing)
				continue
			}
			// this is a no-op
		}

	case kubelet.SET:
		glog.V(4).Infof("Setting pods for source %s : %v", source, update)
		s.markSourceSet(source)
		// Clear the old map entries by just creating a new map
		oldPods := pods
		pods = make(map[string]*api.Pod)

		filtered := filterInvalidPods(update.Pods, source, s.recorder)
		for _, ref := range filtered {
			name := kubelet.GetPodFullName(ref)
			if existing, found := oldPods[name]; found {
				pods[name] = existing
				if !reflect.DeepEqual(existing.Spec, ref.Spec) {
					// this is an update
					existing.Spec = ref.Spec
					updates.Pods = append(updates.Pods, *existing)
					continue
				}
				// this is a no-op
				continue
			}
			if ref.Annotations == nil {
				ref.Annotations = make(map[string]string)
			}
			ref.Annotations[kubelet.ConfigSourceAnnotationKey] = source
			pods[name] = ref
			adds.Pods = append(adds.Pods, *ref)
		}

		for name, existing := range oldPods {
			if _, found := pods[name]; !found {
				// this is a delete
				deletes.Pods = append(deletes.Pods, *existing)
			}
		}

	default:
		glog.Warningf("Received invalid update type: %v", update)

	}

	s.pods[source] = pods
	return adds, updates, deletes
}
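A hypothetical consumer of merge might fan the three batches out to a single updates channel, skipping empty ones; the dispatch function and the out channel are illustrative additions, not part of podStorage:

func dispatch(s *podStorage, source string, change interface{}, out chan<- kubelet.PodUpdate) {
	adds, updates, deletes := s.merge(source, change)
	// Only forward batches that actually contain pods.
	for _, u := range []*kubelet.PodUpdate{adds, updates, deletes} {
		if len(u.Pods) > 0 {
			out <- *u
		}
	}
}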