Example #1
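// NewPodFitsResourcesPredicate returns a predicate that reports whether an
// offer carries enough cpu and mem for the pod of task t, using c and m as
// the default per-container limits.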
func NewPodFitsResourcesPredicate(c mresource.CPUShares, m mresource.MegaBytes) func(t *T, offer *mesos.Offer, _ *api.Node) bool {
	return func(t *T, offer *mesos.Offer, _ *api.Node) bool {
		// find offered cpu and mem
		var (
			offeredCpus mresource.CPUShares
			offeredMem  mresource.MegaBytes
		)
		for _, resource := range offer.Resources {
			if resource.GetName() == "cpus" {
				offeredCpus = mresource.CPUShares(resource.GetScalar().GetValue())
			}

			if resource.GetName() == "mem" {
				offeredMem = mresource.MegaBytes(resource.GetScalar().GetValue())
			}
		}

		// calculate cpu and mem sum over all containers of the pod
		// TODO (@sttts): also support pod.spec.resources.limit.request
		// TODO (@sttts): take into account the executor resources
		cpu := mresource.CPUForPod(&t.Pod, c)
		mem := mresource.MemForPod(&t.Pod, m)
		log.V(4).Infof("trying to match offer with pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
		if (cpu > offeredCpus) || (mem > offeredMem) {
			log.V(3).Infof("not enough resources for pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
			return false
		}
		return true
	}
}
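A sketch of how this factory might be wired up (the limit values and the call site are illustrative assumptions, not taken from the source):

// Build the predicate once with assumed cluster-wide default limits, then
// apply it to each (task, offer) pairing under consideration.
fitsResources := NewPodFitsResourcesPredicate(
	mresource.CPUShares(0.25), // hypothetical default per-container CPU limit
	mresource.MegaBytes(64.0), // hypothetical default per-container memory limit
)
if fitsResources(task, offer, nil) { // the *api.Node argument is unused here
	// the offer has enough cpus and mem for the task's pod
}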
Example #2
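// AcceptOffer reports whether the given offer can run the pod of task t:
// it must come from the requested node (if any), satisfy the pod's
// NodeSelector, supply the required ports, and offer enough cpu and mem.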
func (t *T) AcceptOffer(offer *mesos.Offer) bool {
	if offer == nil {
		return false
	}

	// if the user has specified a target host, make sure this offer is for that host
	if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName {
		return false
	}

	// check the NodeSelector
	if len(t.Pod.Spec.NodeSelector) > 0 {
		slaveLabels := map[string]string{}
		for _, a := range offer.Attributes {
			if a.GetType() == mesos.Value_TEXT {
				slaveLabels[a.GetName()] = a.GetText().GetValue()
			}
		}
		selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(slaveLabels)) {
			return false
		}
	}

	// check that the pod's required ports can be mapped onto this offer
	if _, err := t.mapper.Generate(t, offer); err != nil {
		log.V(3).Info(err)
		return false
	}

	// find offered cpu and mem
	var (
		offeredCpus mresource.CPUShares
		offeredMem  mresource.MegaBytes
	)
	for _, resource := range offer.Resources {
		if resource.GetName() == "cpus" {
			offeredCpus = mresource.CPUShares(resource.GetScalar().GetValue())
		}

		if resource.GetName() == "mem" {
			offeredMem = mresource.MegaBytes(resource.GetScalar().GetValue())
		}
	}

	// calculate cpu and mem sum over all containers of the pod
	// TODO (@sttts): also support pod.spec.resources.limit.request
	// TODO (@sttts): take into account the executor resources
	cpu := mresource.PodCPULimit(&t.Pod)
	mem := mresource.PodMemLimit(&t.Pod)
	log.V(4).Infof("trying to match offer with pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
	if (cpu > offeredCpus) || (mem > offeredMem) {
		log.V(3).Infof("not enough resources for pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
		return false
	}

	return true
}
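The NodeSelector check above reduces to a subset match between the pod's selector and the slave's TEXT attributes. A minimal standalone illustration (the attribute names and values are invented for the example):

// Offer attributes of type TEXT become slave labels; the offer is acceptable
// only if every key/value pair of the pod's NodeSelector appears among them.
slaveLabels := labels.Set{"rack": "a", "zone": "us-east-1a"} // from offer.Attributes
selector := labels.SelectorFromSet(labels.Set{"rack": "a"})  // from pod.Spec.NodeSelector
matched := selector.Matches(slaveLabels)                     // true: subset match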
Example #3
	clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/healthz"
	"k8s.io/kubernetes/pkg/master/ports"
	etcdstorage "k8s.io/kubernetes/pkg/storage/etcd"
	"k8s.io/kubernetes/pkg/tools"
)

const (
	defaultMesosMaster       = "localhost:5050"
	defaultMesosUser         = "******" // should have privs to execute docker and iptables commands
	defaultReconcileInterval = 300 // task reconciliation interval in seconds (5m)
	defaultReconcileCooldown = 15 * time.Second
	defaultNodeRelistPeriod  = 5 * time.Minute
	defaultFrameworkName     = "Kubernetes"
	defaultExecutorCPUs      = mresource.CPUShares(0.25)  // initial CPU allocated for executor
	defaultExecutorMem       = mresource.MegaBytes(128.0) // initial memory allocated for executor
)
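
A minimal sketch of how these scalar defaults might be consumed at startup (illustrative only; the actual flag and config wiring lies outside this excerpt):

// defaultReconcileInterval is a bare number of seconds and must be converted
// to a time.Duration before use; the other defaults already carry their
// types (CPUShares, MegaBytes, time.Duration).
reconcileInterval := time.Duration(defaultReconcileInterval) * time.Second // 300s == 5m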

type SchedulerServer struct {
	Port                int
	Address             net.IP
	EnableProfiling     bool
	AuthPath            string
	APIServerList       []string
	EtcdServerList      []string
	EtcdConfigFile      string
	AllowPrivileged     bool
	ExecutorPath        string
	ProxyPath           string
	MesosMaster         string