// Registered is called when the scheduler successfully registers with a Mesos master.
func (s *StackDeployScheduler) Registered(driver scheduler.SchedulerDriver, id *mesos.FrameworkID, master *mesos.MasterInfo) {
	Logger.Infof("[Registered] framework: %s master: %s:%d", id.GetValue(), master.GetHostname(), master.GetPort())

	s.Storage.FrameworkID = id.GetValue()
	s.Storage.Save()
	s.driver = driver
}
// Registered is called when the scheduler successfully registers with a Mesos
// master; it persists the framework ID and kicks off task reconciliation.
func (s *Scheduler) Registered(driver scheduler.SchedulerDriver, id *mesos.FrameworkID, master *mesos.MasterInfo) {
	Logger.Infof("[Registered] framework: %s master: %s:%d", id.GetValue(), master.GetHostname(), master.GetPort())

	s.cluster.frameworkID = id.GetValue()
	s.cluster.Save()
	s.driver = driver
	s.reconcileTasks(true)
}
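// The framework ID saved in the two callbacks above is what lets a restarted
// scheduler re-register as the same framework instead of starting a new one.
// A minimal sketch of that pattern, not taken from the source: "storage" and
// the field values are hypothetical, and "proto" here is the protobuf runtime
// package (e.g. github.com/gogo/protobuf/proto), not the mesos proto package.
frameworkInfo := &mesos.FrameworkInfo{
	User: proto.String(""), // let Mesos fill in the current user
	Name: proto.String("example-framework"),
}
if storage.FrameworkID != "" {
	// Reuse the persisted ID so the master treats this as the same framework.
	frameworkInfo.Id = &mesos.FrameworkID{Value: proto.String(storage.FrameworkID)}
}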
// Registered is called when the scheduler successfully registers with a Mesos master.
func (s *eremeticScheduler) Registered(driver sched.SchedulerDriver, frameworkID *mesos.FrameworkID, masterInfo *mesos.MasterInfo) {
	log.Debugf("Framework %s registered with master %s", frameworkID.GetValue(), masterInfo.GetHostname())

	if !s.initialised {
		// An empty status slice requests implicit reconciliation: the master
		// sends updates for every task it knows about for this framework.
		driver.ReconcileTasks([]*mesos.TaskStatus{})
		s.initialised = true
	} else {
		s.Reconcile(driver)
	}
}
// Reregistered is called when the scheduler re-registers with a newly elected Mesos master.
func (s *Scheduler) Reregistered(driver mesossched.SchedulerDriver, masterInfo *mesosproto.MasterInfo) {
	logrus.WithFields(logrus.Fields{
		"master_id": masterInfo.GetId(),
		"master":    masterInfo.GetHostname(),
	}).Debug("Framework re-registered with master.")

	if !s.initialised {
		// An empty status slice requests implicit reconciliation: the master
		// sends updates for every task it knows about for this framework.
		driver.ReconcileTasks([]*mesosproto.TaskStatus{})
		s.initialised = true
	} else {
		s.Reconcile(driver)
	}
}
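// Both registration paths above fall back to an explicit Reconcile once the
// scheduler is already initialised. That method is not shown here; this is a
// minimal sketch of what such a helper can look like, assuming the scheduler
// tracks its known task IDs in a hypothetical s.taskIDs slice ("proto" again
// being the protobuf runtime package). It is an illustration, not the
// project's actual implementation.
func (s *Scheduler) Reconcile(driver mesossched.SchedulerDriver) {
	// Explicit reconciliation: send one TaskStatus per task we believe we
	// own; the master replies with the current status of each.
	statuses := make([]*mesosproto.TaskStatus, 0, len(s.taskIDs))
	for _, id := range s.taskIDs {
		statuses = append(statuses, &mesosproto.TaskStatus{
			TaskId: &mesosproto.TaskID{Value: proto.String(id)},
			State:  mesosproto.TaskState_TASK_STAGING.Enum(),
		})
	}
	if _, err := driver.ReconcileTasks(statuses); err != nil {
		logrus.WithError(err).Error("Failed to reconcile tasks")
	}
}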
func masterHostPort(info *mesos.MasterInfo) string {
	host := info.GetHostname()
	if host == "" {
		// unpack IPv4
		octets := make([]byte, 4)
		binary.BigEndian.PutUint32(octets, info.GetIp())
		ipv4 := net.IP(octets)
		host = ipv4.String()
	}
	return net.JoinHostPort(host, masterPort(info))
}
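// masterPort is referenced above but not shown. A one-line sketch under the
// assumption that it simply formats MasterInfo.Port as a string for
// net.JoinHostPort (strconv assumed imported); the name is the only thing
// taken from the source, the body is illustrative.
func masterPort(info *mesos.MasterInfo) string {
	return strconv.Itoa(int(info.GetPort()))
}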
func (d *ZkLeaderDetector) onLeaderChange(info *mesos.MasterInfo) {
	if info == nil {
		log.Errorln("No leader available in Zookeeper")
	} else {
		leader := ""
		if host := info.GetHostname(); host != "" {
			leader = host
		} else {
			// unpack IPv4
			octets := make([]byte, 4, 4)
			binary.BigEndian.PutUint32(octets, info.GetIp())
			ipv4 := net.IP(octets)
			leader = ipv4.String()
		}
		leader = fmt.Sprintf("%s:%d", leader, info.GetPort())
		log.Infoln("New master in Zookeeper", leader)
		d.newLeader <- &leader
	}
}
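// A minimal sketch of a consumer for the newLeader channel above, assuming a
// hypothetical component that re-points its master connection whenever
// ZooKeeper elects a new leader; the reconnect step is only indicated.
go func() {
	for leader := range d.newLeader {
		log.Infoln("Switching to new master", *leader)
		// e.g. reconnect the scheduler driver or HTTP client here
	}
}()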
func extractMasterAddress(info *mesos.MasterInfo) (host string, port int) {
	if info != nil {
		host = info.GetAddress().GetHostname()
		if host == "" {
			host = info.GetAddress().GetIp()
		}
		if host != "" {
			// use port from Address
			port = int(info.GetAddress().GetPort())
		} else {
			// deprecated: get host and port directly from MasterInfo (and not Address)
			host = info.GetHostname()
			if host == "" {
				host = unpackIPv4(info.GetIp())
			}
			port = int(info.GetPort())
		}
	}
	return
}
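// unpackIPv4 is referenced above but not defined in this snippet. A sketch
// mirroring the inline octet unpacking used in the other snippets (assumes
// encoding/binary and net are imported); the same conversion presumably also
// backs the packedIpToString helper used further down.
func unpackIPv4(ip uint32) string {
	// Treat the uint32 as a big-endian packed IPv4 address and render it
	// in dotted-quad form.
	octets := make([]byte, 4)
	binary.BigEndian.PutUint32(octets, ip)
	return net.IP(octets).String()
}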
func (m *Mesos) hostFromMasterInfo(mi *mesosproto.MasterInfo) MesosHost {
	var ipstring = ""
	var port = ""

	if mi != nil {
		if host := mi.GetHostname(); host != "" {
			ip, err := net.LookupIP(host)
			if err != nil {
				ipstring = host
			} else {
				for _, i := range ip {
					four := i.To4()
					if four != nil {
						ipstring = i.String()
						break
					}
				}
				// If net.LookupIP returned no IPv4 addresses, ipstring is
				// still empty here; the disabled fallback below would use
				// the hostname instead.
				// ipstring = host
			}
		} else {
			// No hostname: unpack the IPv4 address packed into the uint32 field.
			octets := make([]byte, 4, 4)
			binary.BigEndian.PutUint32(octets, mi.GetIp())
			ipv4 := net.IP(octets)
			ipstring = ipv4.String()
		}
	}

	if len(ipstring) > 0 {
		port = fmt.Sprint(mi.GetPort())
	}

	return MesosHost{
		host:     ipstring,
		port:     port,
		isLeader: false,
	}
}
func ProtoBufToMesosHost(mi *proto.MasterInfo) *MesosHost {
	ipstring := ""
	port := ""

	log.WithField("mi.GetHostname()", mi.GetHostname()).Debug("protobuf MasterInfo")
	log.WithField("mi.GetIp()", packedIpToString(mi.GetIp())).Debug("protobuf MasterInfo")
	log.WithField("mi.GetPort()", fmt.Sprint(mi.GetPort())).Debug("protobuf MasterInfo")

	if host := mi.GetHostname(); host != "" {
		if ip, err := net.LookupIP(host); err == nil {
			for _, i := range ip {
				if four := i.To4(); four != nil {
					ipstring = i.String()
					break
				}
			}
		}
	}
	if ipstring == "" {
		ipstring = packedIpToString(mi.GetIp())
	}
	if ipstring == "" {
		ipstring = mi.GetHostname()
	}
	if len(ipstring) > 0 {
		port = fmt.Sprint(mi.GetPort())
	}

	return &MesosHost{
		Host:         mi.GetHostname(),
		Ip:           ipstring,
		Port:         int(mi.GetPort()),
		PortString:   port,
		IsLeader:     false,
		IsRegistered: false,
	}
}
func (s *Scheduler) Reregistered(driver scheduler.SchedulerDriver, master *mesos.MasterInfo) {
	Logger.Infof("[Reregistered] master: %s:%d", master.GetHostname(), master.GetPort())

	s.driver = driver
}
func (s *Scheduler) Registered(driver scheduler.SchedulerDriver, id *mesos.FrameworkID, master *mesos.MasterInfo) {
	Logger.Infof("[Registered] framework: %s master: %s:%d", id.GetValue(), master.GetHostname(), master.GetPort())

	s.driver = driver
}
func (u *mesosUpstream) sync() {
	// Ensure only one sync runs at a time; concurrent callers wait for the
	// in-flight sync to finish and then return.
	var syncing int32
	syncing = atomic.AddInt32(&u.syncing, 1)
	if syncing > 1 {
		atomic.AddInt32(&u.syncing, -1)
		u.syncWg.Wait()
		return
	}
	u.syncWg.Add(1)
	defer func() {
		u.syncWg.Done()
		atomic.AddInt32(&u.syncing, -1)
		u.lastSync = time.Now()
	}()

	var state mesosState
	var masterHosts []string

	// Resolve the master hosts either from ZooKeeper (zk://) or directly
	// from the configured address list.
	if path, err := url.Parse(u.mesosMaster); err == nil {
		switch path.Scheme {
		case "zk":
			if path.Path == "" || path.Path == "/" {
				log.Printf("[ERROR] no path specified for mesos zk lookup \"%s\"", u.mesosMaster)
				return
			}
			zookeeperPath := path.Path
			if zookeeperPath[0] != '/' {
				zookeeperPath = "/" + zookeeperPath
			}
			if zoo, _, err := zk.Connect(strings.Split(path.Host, ","), 10*time.Second); err == nil {
				defer zoo.Close()
				if children, _, err := zoo.Children(zookeeperPath); err == nil {
					sort.Strings(children)
					for _, child := range children {
						if strings.HasPrefix(child, "info_") {
							if data, _, err := zoo.Get(zookeeperPath + "/" + child); err == nil {
								masterInfo := new(mesosproto.MasterInfo)
								if err := masterInfo.Unmarshal(data); err == nil {
									masterHosts = []string{fmt.Sprintf("%s:%d", masterInfo.GetHostname(), masterInfo.GetPort())}
									break
								} else {
									log.Printf("[ERROR] parsing mesos master from zookeeper. \"%s\"", err.Error())
									return
								}
							} else {
								log.Printf("[ERROR] getting mesos master from zookeeper. \"%s\"", err.Error())
								return
							}
						}
					}
				} else {
					log.Printf("[ERROR] getting mesos masters from zookeeper. \"%s\"", err.Error())
					return
				}
			}
		case "http", "https":
			masterHosts = strings.Split(path.Host, ",")
		default:
			log.Printf("[ERROR] unknown scheme in parsing mesos master url \"%s\"", u.mesosMaster)
			return
		}
	} else {
		masterHosts = strings.Split(u.mesosMaster, ",")
	}
	if len(masterHosts) == 0 {
		log.Printf("[ERROR] No reachable masters.")
		return
	}

	// Fetch /state.json from the first reachable master.
	var masterErr error
	for _, host := range masterHosts {
		if resp, err := http.Get("http://" + host + "/state.json"); err == nil {
			defer resp.Body.Close()
			if err := json.NewDecoder(resp.Body).Decode(&state); err == nil {
				masterErr = nil
				break
			} else {
				masterErr = err
			}
		} else {
			masterErr = err
		}
	}
	if masterErr != nil {
		log.Printf("[ERROR] Failed to reach masters. \"%s\"", masterErr.Error())
		return
	}

	if len(state.Frameworks) == 0 {
		log.Println("[WARNING] No frameworks found running.")
		return
	}

	// Collect running tasks of the configured framework/task as upstream hosts.
	hosts := make(proxy.HostPool, 0, 4)
	for _, framework := range state.Frameworks {
		if framework.Name == u.framework {
			for _, task := range framework.Tasks {
				if task.Name == u.taskName && task.State == "TASK_RUNNING" {
					host := &proxy.UpstreamHost{
						Name:         task.SlaveId,
						Conns:        0,
						Fails:        0,
						FailTimeout:  u.FailTimeout,
						Unhealthy:    false,
						ExtraHeaders: u.proxyHeaders,
						CheckDown: func(upstream *mesosUpstream) proxy.UpstreamHostDownFunc {
							return func(uh *proxy.UpstreamHost) bool {
								if uh.Unhealthy {
									return true
								}
								if uh.Fails >= upstream.MaxFails && upstream.MaxFails != 0 {
									return true
								}
								return false
							}
						}(u),
					}
					if u.Port > 0 {
						host.Name = host.Name + ":" + strconv.Itoa(u.Port)
					} else if u.Port < 0 {
						// A negative Port selects the Nth port from the task's
						// "ports" resource (a string like "[31000-31001]").
						idx := (u.Port * -1) - 1
						if len(task.Resources.Ports) > 2 {
							portResource := task.Resources.Ports[1 : len(task.Resources.Ports)-1]
							ports := strings.Split(portResource, " ")
							if idx < len(ports) {
								selectedPort := ports[idx]
								if strings.Index(selectedPort, "-") != -1 {
									selectedPort = strings.Split(selectedPort, "-")[0]
									host.Name = host.Name + ":" + selectedPort
								}
							} else {
								continue
							}
						} else {
							continue
						}
					}
					hosts = append(hosts, host)
				}
			}
			break
		}
	}

	// Rewrite slave IDs to scheme://hostname:port using the slave list.
	for _, host := range hosts {
		id, port := func() (string, string) {
			k := strings.Split(host.Name, ":")
			return k[0], k[1]
		}()
		for _, slave := range state.Slaves {
			if id == slave.Id {
				host.Name = u.Scheme + "://" + slave.Hostname + ":" + port
				break
			}
		}
	}

	// Reuse matching entries from the previous pool so their connection and
	// failure counters survive the refresh.
	oldPool := u.Hosts()
	isSame := len(oldPool) == len(hosts)
	for i, host := range hosts {
		found := false
		for _, oldHost := range oldPool {
			if oldHost.Name == host.Name {
				hosts[i] = oldHost
				found = true
				break
			}
		}
		if !found {
			isSame = false
		}
	}

	for _, host := range hosts {
		if host.ReverseProxy == nil {
			if baseUrl, err := url.Parse(host.Name); err == nil {
				host.ReverseProxy = proxy.NewSingleHostReverseProxy(baseUrl, "")
			} else {
				return
			}
		}
	}

	// Only swap in the new pool if it actually changed, health checking first
	// when a health check path is configured.
	if !isSame {
		if u.HealthCheck.Path != "" {
			u.healthCheck(hosts)
		}
		u.hosts.Store(hosts)
	}
}
func masterString(info *mesos.MasterInfo) string {
	return fmt.Sprintf("Id %v Ip %v Hostname %v Port %v Version %v Pid %v",
		info.GetId(), info.GetIp(), info.GetHostname(), info.GetPort(), info.GetVersion(), info.GetPid())
}
// Registered is called when the scheduler successfully registers with a Mesos master.
func (s *eremeticScheduler) Registered(_ sched.SchedulerDriver, frameworkID *mesos.FrameworkID, masterInfo *mesos.MasterInfo) {
	log.Debugf("Framework %s registered with master %s", frameworkID.GetValue(), masterInfo.GetHostname())
}
func validMasterInfo(info *mesos.MasterInfo) bool {
	return info.GetHostname() != "" || info.GetIp() != 0
}
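// A hedged example of how validMasterInfo and masterString might be combined
// in a leader-detection callback. The detector type, the OnMasterChanged name
// and the use of the standard library log package are hypothetical, not from
// the source.
func (d *detector) OnMasterChanged(info *mesos.MasterInfo) {
	if info == nil || !validMasterInfo(info) {
		log.Println("No valid master information available")
		return
	}
	log.Println("Leading master changed:", masterString(info))
}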