func (cache *schedCache) getSlavePid(slaveId *mesos.SlaveID) *upid.UPID {
    if slaveId == nil {
        log.V(3).Infoln("SlaveId == nil, returning empty UPID")
        return nil
    }
    // Guard the map read; the other accessors below take the lock, and an
    // unsynchronized read here would race with putSlavePid/removeSlavePid.
    cache.lock.RLock()
    defer cache.lock.RUnlock()
    return cache.savedSlavePids[slaveId.GetValue()]
}
func (sched *testScheduler) FrameworkMessage(dr SchedulerDriver, execId *mesos.ExecutorID, slaveId *mesos.SlaveID, data string) {
    log.Infoln("Sched.FrameworkMessage() called.")
    sched.s.NotNil(slaveId)
    sched.s.Equal(slaveId.GetValue(), "test-slave-001")
    sched.s.NotNil(execId)
    sched.s.NotNil(data)
    sched.s.Equal("test-data-999", data) // data is already a string; no conversion needed
    sched.ch <- true
}
// SlaveLost is called when some slave is lost.
func (k *KubernetesScheduler) SlaveLost(driver bindings.SchedulerDriver, slaveId *mesos.SlaveID) {
    log.Infof("Slave %v is lost", slaveId)
    sid := slaveId.GetValue()
    k.offers.InvalidateForSlave(sid)

    // TODO(jdef): delete the slave from our internal list? Probably not, since we may still
    // need to reconcile tasks. It would be nice to flag the slave as lost so that we could
    // periodically flush lost slaves older than X that no tasks or pods reference (see the
    // sketch below). Unfinished tasks/pods will be dropped; use a replication controller if
    // you want pods to be restarted when slaves die.
}
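// A minimal, hypothetical sketch of the flush idea described in the TODO above: record a
// timestamp when SlaveLost fires, then periodically drop records older than maxAge that
// nothing references. None of these names exist in the actual scheduler.
type lostSlaves struct {
    lostAt map[string]time.Time // slaveId value -> when SlaveLost fired
}

func (l *lostSlaves) markLost(sid string) {
    l.lostAt[sid] = time.Now()
}

// flush removes entries older than maxAge; stillReferenced reports whether any task or pod
// still points at the slave.
func (l *lostSlaves) flush(maxAge time.Duration, stillReferenced func(string) bool) {
    cutoff := time.Now().Add(-maxAge)
    for sid, t := range l.lostAt {
        if t.Before(cutoff) && !stillReferenced(sid) {
            delete(l.lostAt, sid)
        }
    }
}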
func (s *Scheduler) SlaveLost(driver scheduler.SchedulerDriver, slave *mesos.SlaveID) {
    Logger.Infof("[SlaveLost] %s", slave.GetValue())
}
func (sched *testScheduler) SlaveLost(dr SchedulerDriver, slaveId *mesos.SlaveID) {
    log.Infoln("Sched.SlaveLost() called.")
    sched.s.NotNil(slaveId)
    sched.s.Equal(slaveId.GetValue(), "test-slave-001")
    sched.ch <- true
}
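// The testScheduler callbacks above signal completion over sched.ch. A test would typically
// block on that channel with a timeout, along these lines (a sketch; the timeout value and
// failure message are illustrative):
select {
case <-sched.ch:
    // The callback fired and its assertions ran.
case <-time.After(5 * time.Second):
    sched.s.Fail("timed out waiting for scheduler callback")
}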
// ExecutorLost logs the lost executor, the slave it ran on, and its exit status.
func (sched *Scheduler) ExecutorLost(driver sched.SchedulerDriver, executorID *mesos.ExecutorID, slaveID *mesos.SlaveID, status int) {
    glog.Errorf("lost executor %s on slave %s with status %d", executorID.GetValue(), slaveID.GetValue(), status)
}
// SlaveLost logs the lost slave.
func (sched *Scheduler) SlaveLost(driver sched.SchedulerDriver, slaveID *mesos.SlaveID) {
    glog.Errorf("lost slave %s", slaveID.GetValue())
}
// FrameworkMessage logs messages sent by executors back to the framework.
func (sched *Scheduler) FrameworkMessage(driver sched.SchedulerDriver, executorID *mesos.ExecutorID, slaveID *mesos.SlaveID, data string) {
    glog.Errorf("got framework message from executor %s on slave %s: %q", executorID.GetValue(), slaveID.GetValue(), data)
}
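// The three callbacks above implement part of mesos-go's scheduler.Scheduler interface. A
// driver would be wired up roughly like this (a sketch; newScheduler, the framework name,
// and the master address are assumptions, not taken from the code above):
config := sched.DriverConfig{
    Scheduler: newScheduler(),
    Framework: &mesos.FrameworkInfo{
        User: proto.String(""), // Mesos fills in the current user when empty
        Name: proto.String("example-framework"),
    },
    Master: "127.0.0.1:5050",
}
driver, err := sched.NewMesosSchedulerDriver(config)
if err != nil {
    glog.Fatalf("unable to create scheduler driver: %v", err)
}
if stat, err := driver.Run(); err != nil {
    glog.Fatalf("framework stopped with status %s and error: %v", stat.String(), err)
}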
// removeSlavePid drops the cached libprocess PID for the given slave.
func (cache *schedCache) removeSlavePid(slaveId *mesos.SlaveID) {
    cache.lock.Lock()
    delete(cache.savedSlavePids, slaveId.GetValue())
    cache.lock.Unlock()
}
// containsSlavePid reports whether a PID is cached for the given slave.
func (cache *schedCache) containsSlavePid(slaveId *mesos.SlaveID) bool {
    cache.lock.RLock()
    defer cache.lock.RUnlock()
    _, ok := cache.savedSlavePids[slaveId.GetValue()]
    return ok
}
// putSlavePid caches the libprocess PID for the given slave.
func (cache *schedCache) putSlavePid(slaveId *mesos.SlaveID, pid *upid.UPID) {
    cache.lock.Lock()
    cache.savedSlavePids[slaveId.GetValue()] = pid
    cache.lock.Unlock()
}
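// For context, the schedCache accessors above imply roughly this shape (a sketch inferred
// from the methods; newSchedCache is an assumed constructor, and the real cache may carry
// additional fields):
type schedCache struct {
    lock           sync.RWMutex
    savedSlavePids map[string]*upid.UPID // slaveId value -> slave libprocess PID
}

func newSchedCache() *schedCache {
    return &schedCache{
        savedSlavePids: make(map[string]*upid.UPID),
    }
}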