Code example #1
File: executor.go Project: pologood/kubernetes
// Reregistered is called when the executor is successfully re-registered with the slave.
// This can happen when the slave fails over.
func (k *KubernetesExecutor) Reregistered(driver bindings.ExecutorDriver, slaveInfo *mesos.SlaveInfo) {
	if k.isDone() {
		return
	}
	log.Infof("Reregistered with slave %v\n", slaveInfo)
	if !(&k.state).transition(disconnectedState, connectedState) {
		log.Errorf("failed to reregister/transition to a connected state")
	}

	if slaveInfo != nil {
		_, err := node.CreateOrUpdate(k.client, slaveInfo.GetHostname(), node.SlaveAttributesToLabels(slaveInfo.Attributes))
		if err != nil {
			log.Errorf("cannot update node labels: %v", err)
		}
	}

	if slaveInfo != nil && k.nodeInfos != nil {
		// make sure nodeInfos is not nil and send new NodeInfo
		k.lock.Lock()
		defer k.lock.Unlock()
		if k.isDone() {
			return
		}
		k.nodeInfos <- nodeInfo(slaveInfo, nil)
	}
}
Code example #2
File: executor.go Project: pologood/kubernetes
// Registered is called when the executor is successfully registered with the slave.
func (k *KubernetesExecutor) Registered(driver bindings.ExecutorDriver,
	executorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {
	if k.isDone() {
		return
	}
	log.Infof("Executor %v of framework %v registered with slave %v\n",
		executorInfo, frameworkInfo, slaveInfo)
	if !(&k.state).transition(disconnectedState, connectedState) {
		log.Errorf("failed to register/transition to a connected state")
	}

	if executorInfo != nil && executorInfo.Data != nil {
		k.initializeStaticPodsSource(executorInfo.Data)
	}

	if slaveInfo != nil {
		_, err := node.CreateOrUpdate(k.client, slaveInfo.GetHostname(), node.SlaveAttributesToLabels(slaveInfo.Attributes))
		if err != nil {
			log.Errorf("cannot update node labels: %v", err)
		}
	}

	// emit an empty update to allow the mesos "source" to be marked as seen
	k.lock.Lock()
	defer k.lock.Unlock()
	k.sendPodUpdate(&kubetypes.PodUpdate{
		Pods: []*api.Pod{},
		Op:   kubetypes.SET,
	})

	if slaveInfo != nil && k.nodeInfos != nil {
		k.nodeInfos <- nodeInfo(slaveInfo, executorInfo) // leave it behind the upper lock to avoid panics
	}
}
Code example #3
File: scheduler.go Project: rettori/kubernetes
// New creates a new KubernetesScheduler
func New(config Config) *KubernetesScheduler {
	var k *KubernetesScheduler
	k = &KubernetesScheduler{
		schedcfg:          &config.Schedcfg,
		RWMutex:           new(sync.RWMutex),
		executor:          config.Executor,
		executorGroup:     uid.Parse(config.Executor.ExecutorId.GetValue()).Group(),
		PodScheduler:      config.Scheduler,
		client:            config.Client,
		etcdClient:        config.EtcdClient,
		failoverTimeout:   config.FailoverTimeout,
		reconcileInterval: config.ReconcileInterval,
		nodeRegistrator:   node.NewRegistrator(config.Client, config.LookupNode),
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				// the node must be registered and have up-to-date labels
				n := config.LookupNode(o.GetHostname())
				if n == nil || !node.IsUpToDate(n, node.SlaveAttributesToLabels(o.GetAttributes())) {
					return false
				}

				// the executor IDs must not identify a kubelet-executor with a group that doesn't match ours
				for _, eid := range o.GetExecutorIds() {
					execuid := uid.Parse(eid.GetValue())
					if execuid.Name() == execcfg.DefaultInfoID && execuid.Group() != k.executorGroup {
						return false
					}
				}

				return true
			},
			DeclineOffer: func(id string) <-chan error {
				errOnce := proc.NewErrorOnce(k.terminate)
				errOuter := k.asRegisteredMaster.Do(func() {
					var err error
					defer errOnce.Report(err)
					offerId := mutil.NewOfferID(id)
					filters := &mesos.Filters{}
					_, err = k.driver.DeclineOffer(offerId, filters)
				})
				return errOnce.Send(errOuter).Err()
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     config.Schedcfg.OfferLingerTTL.Duration,
			TTL:           config.Schedcfg.OfferTTL.Duration,
			ListenerDelay: config.Schedcfg.ListenerDelay.Duration,
		}),
		slaveHostNames:    slave.NewRegistry(),
		taskRegistry:      podtask.NewInMemoryRegistry(),
		reconcileCooldown: config.ReconcileCooldown,
		registration:      make(chan struct{}),
		asRegisteredMaster: proc.DoerFunc(func(proc.Action) <-chan error {
			return proc.ErrorChanf("cannot execute action with unregistered scheduler")
		}),
	}
	return k
}
Code example #4
File: executor.go Project: robertabbott/kubernetes
// Registered is called when the executor is successfully registered with the slave.
func (k *Executor) Registered(
	driver bindings.ExecutorDriver,
	executorInfo *mesos.ExecutorInfo,
	frameworkInfo *mesos.FrameworkInfo,
	slaveInfo *mesos.SlaveInfo,
) {
	if k.isDone() {
		return
	}

	log.Infof(
		"Executor %v of framework %v registered with slave %v\n",
		executorInfo, frameworkInfo, slaveInfo,
	)

	if !(&k.state).transition(disconnectedState, connectedState) {
		log.Errorf("failed to register/transition to a connected state")
	}

	if executorInfo != nil && executorInfo.Data != nil {
		err := k.initializeStaticPodsSource(slaveInfo.GetHostname(), executorInfo.Data)
		if err != nil {
			log.Errorf("failed to initialize static pod configuration: %v", err)
		}
	}

	annotations, err := executorInfoToAnnotations(executorInfo)
	if err != nil {
		log.Errorf(
			"cannot get node annotations from executor info %v error %v",
			executorInfo, err,
		)
	}

	if slaveInfo != nil {
		_, err := node.CreateOrUpdate(
			k.client,
			slaveInfo.GetHostname(),
			node.SlaveAttributesToLabels(slaveInfo.Attributes),
			annotations,
		)

		if err != nil {
			log.Errorf("cannot update node labels: %v", err)
		}
	}

	// emit an empty update to allow the mesos "source" to be marked as seen
	k.lock.Lock()
	defer k.lock.Unlock()
	k.sendPodsSnapshot()

	if slaveInfo != nil && k.nodeInfos != nil {
		k.nodeInfos <- nodeInfo(slaveInfo, executorInfo) // leave it behind the upper lock to avoid panics
	}
}
Code example #5
File: executor.go Project: ethernetdan/kubernetes
// Registered is called when the executor is successfully registered with the slave.
func (k *Executor) Registered(
	driver bindings.ExecutorDriver,
	executorInfo *mesos.ExecutorInfo,
	frameworkInfo *mesos.FrameworkInfo,
	slaveInfo *mesos.SlaveInfo,
) {
	if k.isDone() {
		return
	}

	log.Infof(
		"Executor %v of framework %v registered with slave %v\n",
		executorInfo, frameworkInfo, slaveInfo,
	)

	if !(&k.state).transition(disconnectedState, connectedState) {
		log.Errorf("failed to register/transition to a connected state")
	}

	k.initializeStaticPodsSource(executorInfo)

	annotations, err := annotationsFor(executorInfo)
	if err != nil {
		log.Errorf(
			"cannot get node annotations from executor info %v error %v",
			executorInfo, err,
		)
	}

	if slaveInfo != nil {
		_, err := k.nodeAPI.createOrUpdate(
			slaveInfo.GetHostname(),
			node.SlaveAttributesToLabels(slaveInfo.Attributes),
			annotations,
		)

		if err != nil {
			log.Errorf("cannot update node labels: %v", err)
		}
	}

	k.lock.Lock()
	defer k.lock.Unlock()

	if slaveInfo != nil && k.nodeInfos != nil {
		k.nodeInfos <- nodeInfo(slaveInfo, executorInfo) // leave it behind the upper lock to avoid panics
	}
}
Code example #6
File: executor.go Project: JingGe/kubernetes
// Reregistered is called when the executor is successfully re-registered with the slave.
// This can happen when the slave fails over.
func (k *KubernetesExecutor) Reregistered(driver bindings.ExecutorDriver, slaveInfo *mesos.SlaveInfo) {
	if k.isDone() {
		return
	}
	log.Infof("Reregistered with slave %v\n", slaveInfo)
	if !(&k.state).transition(disconnectedState, connectedState) {
		log.Errorf("failed to reregister/transition to a connected state")
	}

	if slaveInfo != nil {
		_, err := node.CreateOrUpdate(k.client, slaveInfo.GetHostname(), node.SlaveAttributesToLabels(slaveInfo.Attributes))
		if err != nil {
			log.Errorf("cannot update node labels: %v", err)
		}
	}

	k.initialRegistration.Do(k.onInitialRegistration)
}
Code example #7
File: framework.go Project: robertabbott/kubernetes
// ResourceOffers is called when the scheduler receives some offers from the master.
func (k *framework) ResourceOffers(driver bindings.SchedulerDriver, offers []*mesos.Offer) {
	log.V(2).Infof("Received offers %+v", offers)

	// Record the offers in the global offer map as well as each slave's offer map.
	k.offers.Add(offers)
	for _, offer := range offers {
		slaveId := offer.GetSlaveId().GetValue()
		k.slaveHostNames.Register(slaveId, offer.GetHostname())

		// create api object if not existing already
		if k.nodeRegistrator != nil {
			labels := node.SlaveAttributesToLabels(offer.GetAttributes())
			_, err := k.nodeRegistrator.Register(offer.GetHostname(), labels)
			if err != nil {
				log.Error(err)
			}
		}
	}
}
Code example #8
File: pod_task_test.go Project: Joshzx/kubernetes
func TestNodeSelector(t *testing.T) {
	t.Parallel()

	newNode := func(hostName string, l map[string]string) *api.Node {
		nodeLabels := map[string]string{"kubernetes.io/hostname": hostName}
		if l != nil {
			for k, v := range l {
				nodeLabels[k] = v
			}
		}
		return &api.Node{
			ObjectMeta: api.ObjectMeta{
				Name:   hostName,
				Labels: nodeLabels,
			},
			Spec: api.NodeSpec{
				ExternalID: hostName,
			},
		}
	}
	node1 := newNode("node1", node.SlaveAttributesToLabels([]*mesos.Attribute{
		newTextAttribute("rack", "a"),
		newTextAttribute("gen", "2014"),
		newScalarAttribute("num", 42.0),
	}))
	node2 := newNode("node2", node.SlaveAttributesToLabels([]*mesos.Attribute{
		newTextAttribute("rack", "b"),
		newTextAttribute("gen", "2015"),
		newScalarAttribute("num", 0.0),
	}))
	labels3 := node.SlaveAttributesToLabels([]*mesos.Attribute{
		newTextAttribute("rack", "c"),
		newTextAttribute("gen", "2015"),
		newScalarAttribute("old", 42),
	})
	labels3["some.other/label"] = "43"
	node3 := newNode("node3", labels3)

	tests := []struct {
		selector map[string]string
		node     *api.Node
		ok       bool
		desc     string
	}{
		{map[string]string{"k8s.mesosphere.io/attribute-rack": "a"}, node1, true, "label value matches"},
		{map[string]string{"k8s.mesosphere.io/attribute-rack": "b"}, node1, false, "label value does not match"},
		{map[string]string{"k8s.mesosphere.io/attribute-rack": "a", "k8s.mesosphere.io/attribute-gen": "2014"}, node1, true, "multiple required labels match"},
		{map[string]string{"k8s.mesosphere.io/attribute-rack": "a", "k8s.mesosphere.io/attribute-gen": "2015"}, node1, false, "one label does not match"},
		{map[string]string{"k8s.mesosphere.io/attribute-rack": "a", "k8s.mesosphere.io/attribute-num": "42"}, node1, true, "scalar label matches"},
		{map[string]string{"k8s.mesosphere.io/attribute-rack": "a", "k8s.mesosphere.io/attribute-num": "43"}, node1, false, "scalar label does not match"},

		{map[string]string{"kubernetes.io/hostname": "node1"}, node1, true, "hostname label matches"},
		{map[string]string{"kubernetes.io/hostname": "node2"}, node1, false, "hostname label does not match"},
		{map[string]string{"kubernetes.io/hostname": "node2"}, node2, true, "hostname label matches"},

		{map[string]string{"some.other/label": "43"}, node1, false, "non-slave attribute does not match"},
		{map[string]string{"some.other/label": "43"}, node3, true, "non-slave attribute matches"},
	}

	defaultPredicate := NewDefaultPredicate(mresource.DefaultDefaultContainerCPULimit, mresource.DefaultDefaultContainerMemLimit)

	for _, ts := range tests {
		task, _ := fakePodTask("foo")
		task.Pod.Spec.NodeSelector = ts.selector
		offer := &mesos.Offer{
			Resources: []*mesos.Resource{
				mutil.NewScalarResource("cpus", t_min_cpu),
				mutil.NewScalarResource("mem", t_min_mem),
			},
			Hostname: &ts.node.Name,
		}
		if got, want := defaultPredicate(task, offer, ts.node), ts.ok; got != want {
			t.Fatalf("expected acceptance of offer for selector %v to be %v, got %v: %q", ts.selector, want, got, ts.desc)
		}
	}
}
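
The test above also documents the shape of the conversion that every example in this list relies on: each Mesos slave attribute becomes a node label under the k8s.mesosphere.io/attribute- prefix, with scalar values rendered in their shortest decimal form (42.0 matches the selector value "42"). Below is a minimal, self-contained sketch of that mapping; the attribute type and slaveAttributesToLabels helper are simplified stand-ins for illustration, not the project's actual node.SlaveAttributesToLabels implementation.

package main

import (
	"fmt"
	"strconv"
)

// attribute is a simplified stand-in for *mesos.Attribute, carrying either a
// text or a scalar value.
type attribute struct {
	name   string
	text   *string
	scalar *float64
}

// slaveAttributesToLabels sketches the mapping exercised by TestNodeSelector:
// every attribute becomes a label keyed "k8s.mesosphere.io/attribute-<name>",
// with scalars formatted in their shortest decimal representation.
func slaveAttributesToLabels(attrs []attribute) map[string]string {
	labels := map[string]string{}
	for _, a := range attrs {
		key := "k8s.mesosphere.io/attribute-" + a.name
		switch {
		case a.text != nil:
			labels[key] = *a.text
		case a.scalar != nil:
			labels[key] = strconv.FormatFloat(*a.scalar, 'g', -1, 64)
		}
	}
	return labels
}

func main() {
	rack, num := "a", 42.0
	fmt.Println(slaveAttributesToLabels([]attribute{
		{name: "rack", text: &rack},
		{name: "num", scalar: &num},
	}))
	// prints: map[k8s.mesosphere.io/attribute-num:42 k8s.mesosphere.io/attribute-rack:a]
}
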
Code example #9
File: framework.go Project: robertabbott/kubernetes
// New creates a new Framework
func New(config Config) Framework {
	var k *framework
	k = &framework{
		schedulerConfig:   &config.SchedulerConfig,
		RWMutex:           new(sync.RWMutex),
		client:            config.Client,
		failoverTimeout:   config.FailoverTimeout,
		reconcileInterval: config.ReconcileInterval,
		nodeRegistrator:   node.NewRegistrator(config.Client, config.LookupNode),
		executorId:        config.ExecutorId,
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				// the node must be registered and have up-to-date labels
				n := config.LookupNode(o.GetHostname())
				if n == nil || !node.IsUpToDate(n, node.SlaveAttributesToLabels(o.GetAttributes())) {
					return false
				}

				eids := len(o.GetExecutorIds())
				switch {
				case eids > 1:
					// at most one executor id expected. More than one means that
					// the given node is seriously in trouble.
					return false

				case eids == 1:
					// the executor id must match, otherwise the running executor
					// is incompatible with the current scheduler configuration.
					if eid := o.GetExecutorIds()[0]; eid.GetValue() != config.ExecutorId.GetValue() {
						return false
					}
				}

				return true
			},
			DeclineOffer: func(id string) <-chan error {
				errOnce := proc.NewErrorOnce(k.terminate)
				errOuter := k.asRegisteredMaster.Do(func() {
					var err error
					defer errOnce.Report(err)
					offerId := mutil.NewOfferID(id)
					filters := &mesos.Filters{}
					_, err = k.driver.DeclineOffer(offerId, filters)
				})
				return errOnce.Send(errOuter).Err()
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     config.SchedulerConfig.OfferLingerTTL.Duration,
			TTL:           config.SchedulerConfig.OfferTTL.Duration,
			ListenerDelay: config.SchedulerConfig.ListenerDelay.Duration,
		}),
		slaveHostNames:    newSlaveRegistry(),
		reconcileCooldown: config.ReconcileCooldown,
		registration:      make(chan struct{}),
		asRegisteredMaster: proc.DoerFunc(func(proc.Action) <-chan error {
			return proc.ErrorChanf("cannot execute action with unregistered scheduler")
		}),
		storeFrameworkId: config.StoreFrameworkId,
		lookupNode:       config.LookupNode,
	}
	return k
}
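
Examples #3 and #9 fold the offer-compatibility rules into the Compat closure. Pulled out for readability, the executor-ID part of example #9's check reduces to the sketch below; plain strings stand in for *mesos.ExecutorID values, and the function name is made up for this illustration.

package main

import "fmt"

// executorIDsCompatible mirrors the executor-ID rules from example #9's
// Compat closure: more than one running executor marks the node as
// incompatible, and a single executor must match the scheduler's
// configured executor ID.
func executorIDsCompatible(offerExecutorIDs []string, ourExecutorID string) bool {
	switch {
	case len(offerExecutorIDs) > 1:
		// at most one executor id expected; more than one means the
		// node is seriously in trouble
		return false
	case len(offerExecutorIDs) == 1:
		// the running executor must match the current configuration
		return offerExecutorIDs[0] == ourExecutorID
	}
	// no executor running on the node: compatible
	return true
}

func main() {
	fmt.Println(executorIDsCompatible(nil, "k8s-executor"))                      // true
	fmt.Println(executorIDsCompatible([]string{"k8s-executor"}, "k8s-executor")) // true
	fmt.Println(executorIDsCompatible([]string{"other"}, "k8s-executor"))        // false
	fmt.Println(executorIDsCompatible([]string{"a", "b"}, "k8s-executor"))       // false
}
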