Example #1
func switchCase() {
	var arg = flag.String("foo", "", "...")
	var bar int
	flag.Lookup("logtostderr").Value.Set("true")
	flag.Parse()
	switch *arg {
	case "":

	case "+1":
		bar = bar + 1
	case "+2":
		bar = bar + 2
	default:
		bar = bar + 3
	}
	switch bar {
	case 0:
		glog.Infoln("foo is empty, do nothing")
	case 1:
		glog.Infoln("foo is +1")
	case 2:
		glog.Infoln("foo is +2")
	case 3:
		glog.Infoln("foo is other case")
	}
}
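The example above flips glog's own logtostderr flag programmatically; glog registers its flags (-logtostderr, -v, -vmodule, ...) in an init function, so they exist once the package is imported. A minimal, self-contained sketch of typical glog setup:
// Run with: go run main.go -logtostderr -v=2
package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Parse()       // glog's flags are registered in its init()
	defer glog.Flush() // glog buffers output; flush before exit

	glog.Infoln("always printed")
	if glog.V(2) { // guarded logging, enabled at -v=2 or higher
		glog.Infoln("printed only at verbosity 2 or higher")
	}
}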
Example #2
func main() {
	flag.Parse()
	glog.Infoln("START. Use 'botbot -help' for command line options.")

	storage := common.NewPostgresStorage()
	defer storage.Close()

	queue := common.NewRedisQueue()

	botbot := NewBotBot(storage, queue)

	// Listen for incoming commands
	go botbot.listen(LISTEN_QUEUE_PREFIX)

	// Start the main loop
	go botbot.mainLoop()

	// Start an HTTP server to serve the stats from expvar.
	// Run it in a goroutine so the signal handling below is reachable.
	go func() {
		log.Fatal(http.ListenAndServe(":3030", nil))
	}()

	// Trap stop signals (Ctrl-C, kill) to exit.
	// Note: SIGKILL cannot be caught, and signal.Notify needs a buffered channel.
	kill := make(chan os.Signal, 1)
	signal.Notify(kill, syscall.SIGINT, syscall.SIGTERM)

	// Wait for a stop signal
	<-kill
	glog.Infoln("Graceful shutdown")
	botbot.shutdown()

	glog.Infoln("Bye")
}
Example #3
func useGet(query string) {
	uri := googleAPI + "?q=" + url.QueryEscape(query)
	glog.Infoln("uri:", uri)
	resp, err := http.Get(uri)
	if err != nil {
		glog.Errorln("err:", err)
		return
	}
	defer resp.Body.Close()
	glog.Infof("resp: %#v", resp)
}
Example #4
func init() {
	client, err := db.Client()
	if err != nil {
		glog.Errorln(err)
		return
	}
	defer db.Release(client)

	if len(config.Cfg.Metrics.AddScript) > 0 {
		if addSha, err = client.Cmd("SCRIPT", "LOAD", config.Cfg.Metrics.AddScript).Str(); err != nil {
			glog.Errorln(err)
		} else {
			glog.Infoln("ADD SHA", addSha)
		}
	}

	if len(config.Cfg.Metrics.GetScript) > 0 {
		if getSha, err = client.Cmd("SCRIPT", "LOAD", config.Cfg.Metrics.GetScript).Str(); err != nil {
			glog.Errorln(err)
		} else {
			glog.Infoln("GET SHA", getSha)
		}
	}

	if len(config.Cfg.Metrics.TtlScript) > 0 {
		if ttlSha, err = client.Cmd("SCRIPT", "LOAD", config.Cfg.Metrics.TtlScript).Str(); err != nil {
			glog.Errorln(err)
		} else {
			glog.Infoln("TTL SHA", ttlSha)
		}
	}
}
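SCRIPT LOAD only caches the Lua source and returns its SHA1 digest; the cached scripts would later be invoked with EVALSHA. A hedged sketch in the same client style (the radix-like Cmd API and the script's key/argument contract are assumptions here):
// Hypothetical invocation of the script cached above; the number of keys
// and the argument layout depend on the actual Lua script.
resp := client.Cmd("EVALSHA", addSha, 1, "metrics:requests", 42)
if resp.Err != nil {
	glog.Errorln(resp.Err)
}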
Example #5
// Main loop for dispatching SyncQueue
// TODO exit
func Dispatch() {
	var err error
	for {
		select {
		case si := <-SyncQueue:
			if si.FullSync {
				syncAlbum(si)
			} else if si, err = NewSyncItemPhoto(si.Filename); err != nil {
				glog.Errorln(err)
			} else {
				syncAlbum(si)
			}
		case ai := <-AlbumQueue:
			switch {
			case ai.MetaUpdate:
				glog.Infoln("Updating Album meta")
				if err = updateMeta(ai.AlbumId); err != nil {
					glog.Errorln(err)
				} else {
					glog.Infof("Metainfo was updated for albumId %v", ai.AlbumId)
				}
			case ai.StatusUpdate:
				glog.Infoln("Reloading albums")
				if err = updateAlbums(); err != nil {
					glog.Errorln(err)
				} else {
					glog.Infoln("Albums were reloaded")
				}
			}
		}
	}
}
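Example #6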
// Implementing Pool interface
func (p *PGPool) DelConn(ws *websocket.Conn) error {
	for i := range p.conns {
		// Find connection
		if p.conns[i] == ws {
			// Remove connection
			p.conns = append(p.conns[:i], p.conns[i+1:]...)

			if glog.V(INFOLOG_LEVEL_ABOUT_CONNS) {
				glog.Infoln("Connection was found and removed")
			}

			// Stop all child goroutines if empty pool
			if p.IsEmpty() {
				if glog.V(INFOLOG_LEVEL_ABOUT_POOLS) {
					glog.Infoln("Pool is empty")
				}

				if p.cancel != nil {
					p.cancel()
					if glog.V(INFOLOG_LEVEL_ABOUT_POOLS) {
						glog.Infoln("Pool goroutines were canceled")
					}
				} else {
					glog.Errorln("CancelFunc is nil")
				}
			}

			return nil
		}
	}

	return errors.New("Cannot delete connection: " +
		"connection was not found in pool")
}
Example #7
func (driver *MesosSchedulerDriver) resourcesOffered(from *upid.UPID, pbMsg proto.Message) {
	log.V(2).Infoln("Handling resource offers.")

	msg := pbMsg.(*mesos.ResourceOffersMessage)
	if driver.status == mesos.Status_DRIVER_ABORTED {
		log.Infoln("Ignoring ResourceOffersMessage, the driver is aborted!")
		return
	}

	if !driver.connected {
		log.Infoln("Ignoring ResourceOffersMessage, the driver is not connected!")
		return
	}

	pidStrings := msg.GetPids()
	if len(pidStrings) != len(msg.Offers) {
		log.Errorln("Ignoring offers, Offer count does not match Slave PID count.")
		return
	}

	for i, offer := range msg.Offers {
		if pid, err := upid.Parse(pidStrings[i]); err == nil {
			driver.cache.putOffer(offer, pid)
			log.V(2).Infof("Cached offer %s from SlavePID %s", offer.Id.GetValue(), pid)
		} else {
			log.Warningf("Failed to parse offer PID %q: %v", pidStrings[i], err)
		}
	}

	driver.withScheduler(func(s Scheduler) { s.ResourceOffers(driver, msg.Offers) })
}
Example #8
// StartCluster starts a k8s cluster on the specified Host.
func StartCluster(h sshAble, ip string, config MachineConfig) error {
	commands := []string{stopCommand, GetStartCommand(ip)}
	if config.DeployRegistry {
		commands = append(commands, `
cd /var/lib/minishift;
sudo /usr/local/bin/openshift admin registry --service-account=registry --config=openshift.local.config/master/admin.kubeconfig;
sudo /usr/local/bin/openshift cli patch service docker-registry -p '{"spec": {"type": "NodePort"}}' --config=openshift.local.config/master/admin.kubeconfig
`)
	}
	if config.DeployRouter {
		commands = append(commands, `
cd /var/lib/minishift;
sudo /usr/local/bin/openshift admin policy add-scc-to-user hostnetwork -z router --config=openshift.local.config/master/admin.kubeconfig;
sudo /usr/local/bin/openshift admin router --service-account=router --config=openshift.local.config/master/admin.kubeconfig
`)
	}
	for _, cmd := range commands {
		glog.Infoln(cmd)
		output, err := h.RunSSHCommand(cmd)
		glog.Infoln(output)
		if err != nil {
			return err
		}
	}

	return nil
}
Example #9
func (sched *ExampleScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.Infoln("Status update: task", status.TaskId.GetValue(), "is in state", status.State.Enum().String())
	for _, task := range sched.tasks {
		if task.name == status.TaskId.GetValue() &&
			(status.GetState() == mesos.TaskState_TASK_FINISHED ||
				status.GetState() == mesos.TaskState_TASK_LOST ||
				status.GetState() == mesos.TaskState_TASK_KILLED ||
				status.GetState() == mesos.TaskState_TASK_FAILED ||
				status.GetState() == mesos.TaskState_TASK_ERROR) {

			// No matter what the outcome was, move to finished state so that we can unreserve resources
			task.state = FinishedState
		}
	}

	if status.GetState() == mesos.TaskState_TASK_LOST ||
		status.GetState() == mesos.TaskState_TASK_KILLED ||
		status.GetState() == mesos.TaskState_TASK_FAILED ||
		status.GetState() == mesos.TaskState_TASK_ERROR {
		log.Infoln(
			"Task", status.TaskId.GetValue(),
			"is in unexpected state", status.State.String(),
			"with message", status.GetMessage(),
			". Unreserving resources",
		)
	}
}
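Several StatusUpdate handlers in this collection repeat the same chain of state comparisons. A small helper, not part of the original example, could collapse it (the mesos.TaskState constants are the ones used above):
// isTerminal reports whether a task state is one of the terminal states
// checked in the example above.
func isTerminal(s mesos.TaskState) bool {
	switch s {
	case mesos.TaskState_TASK_FINISHED,
		mesos.TaskState_TASK_LOST,
		mesos.TaskState_TASK_KILLED,
		mesos.TaskState_TASK_FAILED,
		mesos.TaskState_TASK_ERROR:
		return true
	}
	return false
}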
Example #10
// go run main.go -logtostderr
func syncRuntime() {
	var wg sync.WaitGroup
	var urls = []string{
		"http://www.baidu.com/",
		"http://dict.youdao.com/w/currency/#keyfrom=dict2.top",
		"https://docs.mongodb.com/manual/mongo/",
		"http://www.runoob.com/mongodb/mongodb-query.html",
		"http://studygolang.com/articles/2059",
	}
	glog.Infoln("fetching url..")
	for _, url := range urls {
		// Increment the WaitGroup counter.
		wg.Add(1)
		// Launch a goroutine to fetch the URL.
		go func(url string) {
			// Decrement the counter when the goroutine completes.
			defer wg.Done()
			glog.Infoln("fetch url:", url)
			// Fetch the URL.
			r, err := http.Get(url)
			if err != nil {
				glog.Errorln("fetch error:", err)
				return
			}
			defer r.Body.Close()
			glog.Infof("status: %s, code: %d, url is %s", r.Status, r.StatusCode, url)
		}(url)
	}
	// Wait for all HTTP fetches to complete.
	wg.Wait()
}
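Example #11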
func (ecwrap *EngineContainerWrap) OpenNewTask(job string) (string, error) {
	name := ecwrap.engineId + "_" + job
	ecwrap.jobc = JobContext{
		containerName: name,
		dirname:       path.Join(*jobsdir, name),
	}

	glog.Infoln("new task ", ecwrap.engineId, ecwrap.jobc.containerName, ecwrap.jobc.dirname)
	// build container mount
	if err := os.MkdirAll(ecwrap.jobc.dirname, 0700); err != nil {
		glog.Errorln("could not create work dir for engine", ecwrap.engineId, ecwrap.jobc.containerName, ecwrap.jobc.dirname)
		return "", errors.New("error creating work dir")
	}

	docker_create_tokens := append(
		append([]string{"create", "-v", name + ":" + ecwrap.MountPoint, "-u", strconv.Itoa(ecwrap.User), "--name", ecwrap.jobc.containerName}, ecwrap.RunFlags...),
		[]string{ecwrap.Image, "/bin/bash", "-c", ecwrap.Cmd}...)
	docker_create_cmd := exec.Command("docker", docker_create_tokens...)
	docker_create_cmd.Stdout = os.Stdout
	docker_create_cmd.Stderr = os.Stderr
	glog.Infoln("volume create Command", docker_create_cmd.Args)
	err := docker_create_cmd.Run()
	if err != nil {
		return "", err
	}
	glog.Infoln("created work dir for engine", ecwrap.engineId, ecwrap.jobc.dirname)
	return path.Join(ecwrap.jobc.dirname, ecwrap.InputFileName), nil
}
Example #12
func Query(ctx context.Context, req *http.Request) (int, []byte, error) {
	m := metric{}
	defer func(m *metric) {
		metricc <- *m
	}(&m)

	if glog.V(10) {
		dump, _ := httputil.DumpRequest(req, true)
		glog.Infoln(string(dump))
	}

	start := time.Now()
	resp, err := ctxhttp.Do(ctx, nil, req)
	m.latency = time.Since(start).Nanoseconds()
	if err != nil {
		return 0, nil, err
	}

	if glog.V(10) {
		dump, _ := httputil.DumpResponse(resp, true)
		glog.Infoln(string(dump))
	}

	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return 0, nil, err
	}

	m.bytes = len(body)
	m.success = true
	return resp.StatusCode, body, nil
}
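Query sends exactly one metric per call on the metricc channel, even on early error returns, thanks to the deferred send. The metric struct and channel are not shown in the example; a minimal sketch of the assumed consumer side:
// Inferred from the assignments in Query; the field layout is an assumption.
type metric struct {
	latency int64 // request latency in nanoseconds
	bytes   int   // response body size
	success bool  // true only when the body was read without error
}

var metricc = make(chan metric, 64)

// consumeMetrics drains the channel; run it in its own goroutine.
func consumeMetrics() {
	for m := range metricc {
		glog.Infof("latency=%dns bytes=%d success=%v", m.latency, m.bytes, m.success)
	}
}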
Example #13
func (this *client) Trigger(t Trigger) (<-chan interface{}, chan<- int, error) {
	stop := make(chan int)
	events := make(chan interface{}, 8)

	var cStop chan<- int
	var cStopped <-chan error
	var err error
	switch t := t.(type) {
	case Create:
		cStop, cStopped, err = this.Watch(t.Path.String(),
			func(e Event) {
				if e.Type == EventNodeCreated {
					events <- e
				}
			})
		if err != nil {
			return nil, nil, err
		}
	case Change:
		cStop, cStopped, err = this.Watch(t.Path.String(),
			func(e Event) {
				if e.Type == EventNodeDataChanged {
					events <- e
				}
			})
		if err != nil {
			return nil, nil, err
		}
	case Delete:
		cStop, cStopped, err = this.Watch(t.Path.String(),
			func(e Event) {
				if e.Type == EventNodeDeleted {
					events <- e
				}
			})
		if err != nil {
			return nil, nil, err
		}
	case Members:
		// TODO - Implement the matching criteria using min/max/delta, etc.
		cStop, cStopped, err = this.WatchChildren(t.Path.String(),
			func(e Event) {
				if e.Type == EventNodeChildrenChanged {
					events <- e
				}
			})
		if err != nil {
			return nil, nil, err
		}
	default:
		return nil, nil, errors.New("unsupported trigger type")
	}
	go func() {
		// Stop the watch
		c := <-stop
		cStop <- c
		glog.Infoln("Waiting for user callbacks to finish")
		<-cStopped
		glog.Infoln("Stopped.")
	}()
	return events, stop, nil
}
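A possible caller, shown as a sketch: each trigger type carries a Path, the returned events channel delivers matching watch events, and sending on stop tears the watch down. The zkClient and watchPath names are hypothetical:
// Hypothetical usage; Create is one of the trigger types handled above.
events, stop, err := zkClient.Trigger(Create{Path: watchPath})
if err != nil {
	glog.Errorln(err)
	return
}
e := <-events // blocks until the node is created
glog.Infoln("node created:", e)
stop <- 0 // stop the watch and wait for the callbacks to finish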
Example #14
// Restart a chatbot
func (nm *NetworkManager) restart(botId int) {

	glog.Infoln("Restarting bot ", botId)

	var config *common.BotConfig

	// Find configuration for this bot

	botConfigs := nm.storage.BotConfig()
	for _, botConf := range botConfigs {
		if botConf.Id == botId {
			config = botConf
			break
		}
	}

	if config == nil {
		glog.Infoln("Could not find configuration for bot", botId, "- bot will not run.")
		nm.Lock()
		delete(nm.chatbots, botId)
		nm.Unlock()
		return
	}

	nm.Lock()
	nm.chatbots[botId] = nm.Connect(config)
	nm.Unlock()
}
Example #15
func setup() {
	flag.Parse()
	numCPU := runtime.NumCPU()
	glog.Infoln("NumCPU", numCPU)
	if envMaxProcs := os.Getenv("GOMAXPROCS"); envMaxProcs == "" {
		if numCPU > 1 {
			// Consuming N-1 appears to greatly reduce per-request latency in loaded systems.
			runtime.GOMAXPROCS(numCPU - 1)
		}
	}
	glog.Infoln("GOMAXPROCS", runtime.GOMAXPROCS(0))

	var d db.DB
	switch *useDB {
	case "cassandra":
		d = cassandradb.New()
	default:
		glog.Fatalln("Unknown DB:", *useDB)
	}

	if err := d.Init(); err != nil {
		glog.Fatalln("An error occurred initializing the DB:", err)
	}
	handlers.InitializeAndRegister(d)
}
Example #16
func (sched *Scheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.Infoln("Status update: task", status.TaskId.GetValue(), "is in state", status.State.Enum().String())

	if status.GetState() == mesos.TaskState_TASK_FINISHED {
		sched.tasksFinished++
		log.Infof("%v of %v tasks finished.", sched.tasksFinished, sched.totalTasks)
	}

	//TODO if a job is finished, failed, error, lost, killed
	// figure out how this impacts dependent jobs and update job graph

	/*
		  //never shut down framework!
			if sched.tasksFinished >= sched.totalTasks {
				log.Infoln("Total tasks completed, stopping framework.")
				driver.Stop(false)
			}
	*/

	/*
		if status.GetState() == mesos.TaskState_TASK_LOST ||
			status.GetState() == mesos.TaskState_TASK_KILLED ||
			status.GetState() == mesos.TaskState_TASK_FAILED {
			log.Infoln(
				"Aborting because task", status.TaskId.GetValue(),
				"is in unexpected state", status.State.String(),
				"with message", status.GetMessage(),
			)
			driver.Abort()
		}
	*/
}
Example #17
func main() {
	httpAddr := flag.String("http", "127.0.0.1:5000", "address and port to listen on")
	httpDocroot := flag.String("root", "www", "HTTP document root for static web files")
	dataPath := flag.String("data", "/usr/local/var/lib/shadowcaster", "data directory (for indexes and such)")
	flag.Parse()

	Config = config{
		IndexPath:        *dataPath,
		HTTPAddr:         *httpAddr,
		HTTPDocumentRoot: *httpDocroot}

	// Run consistency checks on the indexes.
	glog.Infoln("Running consistency checks on the indexes")
	if err := CheckIndexes(*dataPath); err != nil {
		glog.Fatalln(err)
	}
	glog.Infoln("Consistency checks passed")

	// Set up the HTTP handling.
	http.HandleFunc("/movies/", HandleMovies)
	http.HandleFunc("/movies/setdir", HandleSetMovieDir)
	http.HandleFunc("/movies/status", HandleMovieStatus)
	http.HandleFunc("/tv/", HandleTV)
	http.HandleFunc("/music/", HandleMusic)
	http.HandleFunc("/pictures/", HandlePictures)
	http.HandleFunc("/settings/", HandleSettings)
	http.Handle("/", http.FileServer(http.Dir(*httpDocroot)))
	glog.Infof("Listening on %v", *httpAddr)
	if err := http.ListenAndServe(*httpAddr, nil); err != nil {
		glog.Fatalln(err)
	}
	glog.Infoln("ShadowCaster offline")
}
Example #18
func (self *manager) HandleStatusMessage(statusMessage *mesosproto.StatusUpdateMessage) {
	glog.Infof("Status Update %v\n", statusMessage)
	status := statusMessage.GetUpdate().GetStatus()

	switch status.GetState() {
	case mesosproto.TaskState_TASK_RUNNING:
		task, _ := taskRegistry.Fetch(status.GetTaskId().GetValue())
		task.Running = true
		task.SlaveID = status.GetSlaveId().GetValue()
		managerInterface.UpdateTaskWithDockerInfo(task, status.GetData())
	case mesosproto.TaskState_TASK_FAILED:
		taskRegistry.Delete(status.GetTaskId().GetValue())
		glog.Infoln("Task Failed: ", status.GetTaskId().GetValue())
	case mesosproto.TaskState_TASK_LOST:
		switch {
		case strings.Contains(status.GetMessage(), "Task has duplicate ID"):
			// ignore
		case strings.Contains(status.GetMessage(), "is no longer valid"):
			task, _ := taskRegistry.Fetch(status.GetTaskId().GetValue())
			task.RequestSent = false
		default:
			taskRegistry.Delete(status.GetTaskId().GetValue())
		}
		glog.Infoln("Task Lost: ", status.GetTaskId().GetValue())
	case mesosproto.TaskState_TASK_FINISHED:
		taskRegistry.Delete(status.GetTaskId().GetValue())
		glog.Infoln("Task Finished: ", status.GetTaskId().GetValue())
	case mesosproto.TaskState_TASK_KILLED:
		taskRegistry.Delete(status.GetTaskId().GetValue())
		glog.Infoln("Task Killed: ", status.GetTaskId().GetValue())
	}

	self.acknowledgeStatusUpdate(statusMessage)
}
Example #19
func main() {

	buildInfo := version.BuildInfo()
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s\n", buildInfo.Notice())
		fmt.Fprintf(os.Stderr, "flags:\n")
		flag.PrintDefaults()
	}

	// Parse after Usage is set so -help prints the custom notice.
	flag.Parse()

	glog.Infoln(buildInfo.Notice())
	buildInfo.HandleFlag()

	// Two server cores running on different ports. Note that quitquitquit will
	// only shut down the server it was requested on, but not the other one.
	// Kernel signals will shut down both.
	stopped1 := startServer(*port)
	stopped2 := startServer(*port + 1)

	for range []int{1, 2} {
		select {
		case <-stopped1:
		case <-stopped2:
		}
	}
	glog.Infoln("Bye")
}
Example #20
func (driver *MesosSchedulerDriver) frameworkReregistered(from *upid.UPID, pbMsg proto.Message) {
	log.V(1).Infoln("Handling Scheduler re-registered event.")
	msg := pbMsg.(*mesos.FrameworkReregisteredMessage)

	if driver.status == mesos.Status_DRIVER_ABORTED {
		log.Infoln("Ignoring FrameworkReregisteredMessage from master, driver is aborted!")
		return
	}
	if driver.connected {
		log.Infoln("Ignoring FrameworkReregisteredMessage from master, driver is already connected!")
		return
	}
	if !driver.masterPid.Equal(from) {
		log.Warningf("ignoring framework re-registered message because it was sent from '%v' instead of leading master '%v'", from, driver.masterPid)
		return
	}

	// TODO(vv) detect if message was from leading-master (sched.cpp)
	log.Infof("Framework re-registered with ID [%s] ", msg.GetFrameworkId().GetValue())
	driver.connected = true
	driver.failover = false
	driver.connection = uuid.NewUUID()

	driver.withScheduler(func(s Scheduler) { s.Reregistered(driver, msg.GetMasterInfo()) })

}
Example #21
// StartHost starts a host VM.
func StartHost(api libmachine.API, config MachineConfig) (*host.Host, error) {
	exists, err := api.Exists(constants.MachineName)
	if err != nil {
		return nil, errors.Wrapf(err, "Error checking if host exists: %s", constants.MachineName)
	}
	if !exists {
		return createHost(api, config)
	}

	glog.Infoln("Machine exists!")
	h, err := api.Load(constants.MachineName)
	if err != nil {
		return nil, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
	}

	s, err := h.Driver.GetState()
	if err != nil {
		return nil, errors.Wrap(err, "Error getting state for host")
	}
	glog.Infoln("Machine state: ", s)

	if s != state.Running {
		if err := h.Driver.Start(); err != nil {
			return nil, errors.Wrapf(err, "Error starting stopped host")
		}
		if err := api.Save(h); err != nil {
			return nil, errors.Wrapf(err, "Error saving started host")
		}
	}

	if err := h.ConfigureAuth(); err != nil {
		return nil, errors.Wrap(err, "Error configuring auth on host")
	}
	return h, nil
}
Example #22
// stop expects to be guarded by eventLock
func (driver *MesosSchedulerDriver) stop(cause error, failover bool) (mesos.Status, error) {
	log.Infoln("Stopping the scheduler driver")
	if stat := driver.status; stat != mesos.Status_DRIVER_RUNNING {
		return stat, fmt.Errorf("Unable to Stop, expected driver status %s, but is %s", mesos.Status_DRIVER_RUNNING, stat)
	}

	if driver.connected && !failover {
		// unregister the framework
		log.Infoln("Unregistering the scheduler driver")
		message := &mesos.UnregisterFrameworkMessage{
			FrameworkId: driver.frameworkInfo.Id,
		}
		//TODO(jdef) this is actually a little racy: we send an 'unregister' message but then
		// immediately afterward the messenger is stopped in driver._stop(). so the unregister message
		// may not actually end up being sent out.
		if err := driver.send(driver.masterPid, message); err != nil {
			log.Errorf("Failed to send UnregisterFramework message while stopping driver: %v\n", err)
			if cause == nil {
				cause = &ErrDriverAborted{}
			}
			return driver._stop(cause, mesos.Status_DRIVER_ABORTED)
		}
		time.Sleep(2 * time.Second)
	}

	// stop messenger
	return driver._stop(cause, mesos.Status_DRIVER_STOPPED)
}
Example #23
func (sched *SdcScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.Infoln("Status update: task", status.TaskId.GetValue(), "is in state", status.State.Enum().String())

	if status.GetState() == mesos.TaskState_TASK_FINISHED {
		sched.tasksFinished++
		// Executing KillTask here causes a TASK_LOST to be detected, which stops the framework
		// driver.KillTask(status.TaskId)
		// log.Infoln("!! Status update: task", status.TaskId.GetValue(), " is in state ", status.State.Enum().String())
		// return
	}

	if sched.tasksFinished >= sched.totalTasks {
		// log.Infoln("Total tasks completed, stopping framework.")
		log.Infoln("Total tasks completed.")
		sched.tasksFinished = 0
		sched.totalTasks = 0
		sched.tasksLaunched = 0
		// driver.Stop(false)
	}

	if status.GetState() == mesos.TaskState_TASK_LOST ||
		status.GetState() == mesos.TaskState_TASK_KILLED ||
		status.GetState() == mesos.TaskState_TASK_FAILED ||
		status.GetState() == mesos.TaskState_TASK_ERROR {
		log.Infoln(
			"Aborting because task", status.TaskId.GetValue(),
			"is in unexpected state", status.State.String(),
			"with message", status.GetMessage(),
		)
		driver.Abort()
	}
}
Example #24
func (s *Streamer) Subscribe(pg Playground, ws *websocket.Conn) error {
	if ws == nil {
		return errors.New("Cannot subscribe: passed nil connection")
	}

	defer func() {
		if !s.isEmpty() && !s.running() {
			if glog.V(INFOLOG_LEVEL_ABOUT_SERVER) {
				glog.Infoln("Starting streamer")
			}
			if err := s.start(); err != nil {
				glog.Errorln("Cannot start stream:", err)
			}
		}
	}()

	var stm = s.getStreamWithPlayground(pg)
	if stm.hasSubscriber(ws) {
		return errors.New("Cannot subscribe: " +
			"connection is already subscribed")
	}

	if glog.V(INFOLOG_LEVEL_ABOUT_CONNS) {
		glog.Infoln("Creating subscriber to stream")
	}

	stm.addSubscriber(ws)

	return nil
}
Example #25
func main() {
	launchTimeout :=
		flag.Uint("launch-timeout", 240,
			"Seconds to retry launching an etcd instance for before giving up. "+
				"This should be long enough for a port occupied by a killed process "+
				"to be vacated.")
	flag.Parse()
	log.Infoln("Starting etcd Executor")

	dconfig := executor.DriverConfig{
		Executor: etcdexecutor.New(
			time.Duration(*launchTimeout) * time.Second,
		),
	}
	driver, err := executor.NewMesosExecutorDriver(dconfig)

	if err != nil {
		log.Errorln("Unable to create an ExecutorDriver:", err)
		return
	}

	_, err = driver.Start()
	if err != nil {
		log.Infoln("Got error:", err)
		return
	}
	log.Infoln("Executor process has started and is running.")
	driver.Join()
}
Example #26
func (sched *NoneScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	taskId := status.GetTaskId().GetValue()
	log.Infoln("Status update: task", taskId, "is in state", status.State.Enum().String())

	c := sched.queue.GetCommandById(taskId)
	if c == nil {
		log.Errorln("Unable to find command for task", taskId)
		driver.Abort()
		return
	}
	if c.Status.GetState() == status.GetState() {
		// ignore repeated status updates
		return
	}
	c.Status = status

	// send status update to CommandHandler
	if status.GetState() == mesos.TaskState_TASK_RUNNING {
		sched.handler.CommandRunning(c)
	} else if status.GetState() == mesos.TaskState_TASK_FINISHED {
		sched.handler.CommandEnded(c)
		sched.handler.CommandFinished(c)
	} else if status.GetState() == mesos.TaskState_TASK_FAILED ||
		status.GetState() == mesos.TaskState_TASK_LOST ||
		status.GetState() == mesos.TaskState_TASK_KILLED {
		sched.handler.CommandEnded(c)
		sched.handler.CommandFailed(c)
	}

	// stop if Commands channel was closed and all tasks are finished
	if sched.queue.Closed() && !sched.handler.HasRunningTasks() {
		log.Infoln("All tasks finished, stopping framework.")
		sched.handler.FinishAllCommands()
		driver.Stop(false)
	}
}
Example #27
// Update the channels based on new configuration, leaving old ones and joining new ones
func (bot *ircBot) updateChannels(newChannels []*common.Channel) {
	glog.Infoln("[Info] Starting bot.updateChannels")
	bot.RLock()
	channels := bot.channels
	bot.RUnlock()

	glog.V(3).Infoln("[Debug] newChannels: ", newChannels, "bot.channels:", channels)

	if isEqual(newChannels, channels) {
		if glog.V(2) {
			glog.Infoln("Channels are unchanged for bot:", bot.nick)
		}
		return
	}
	glog.Infoln("[Info] The channels the bot is connected to need to be updated")

	// PART old ones
	for _, channel := range channels {
		if !isIn(channel, newChannels) {
			glog.Infoln("[Info] Parting old channel: ", channel.Credential())
			bot.part(channel.Credential())
		}
	}

	// JOIN new ones
	for _, channel := range newChannels {
		if !isIn(channel, channels) {
			glog.Infoln("[Info] Joining new channel: ", channel.Credential())
			bot.join(channel.Credential())
		}
	}
	bot.Lock()
	bot.channels = newChannels
	bot.Unlock()
}
Example #28
// Set up the handlers, then start the server and start processing requests.
func (w *HTTPServer) Run() {
	mux := http.NewServeMux() // don't use default to allow multiple instances

	port := getInputOrConfig(w.Port, "HTTP_PORT") //TODO: This is dependent upon mqtt func, needs moving - lightbulb

	pem := ""
	key := ""

	for param := range w.Param {
		switch p := param.(type) {
		case flow.Tag:
			switch p.Tag {
			case "certfile":
				f := p.Msg.(string)
				if _, err := os.Stat(f); err == nil {
					glog.Infoln("Using Certfile:", f)
					pem = f
				}
			case "certkey":
				f := p.Msg.(string)
				if _, err := os.Stat(f); err == nil {
					glog.Infoln("Using Keyfile:", f)
					key = f
				}
			}
		}
	}

	info, _ := NewHttpEndpointInfo(port, pem, key)

	for m := range w.Handlers {
		tag := m.(flow.Tag)
		switch v := tag.Msg.(type) {
		case string:
			h := createHandler(tag.Tag, v, info)
			mux.Handle(tag.Tag, &flowHandler{h, w})
		case http.Handler:
			mux.Handle(tag.Tag, &flowHandler{v, w})
		}
	}

	go func() {
		// will stay running until an error is returned or the app ends
		defer flow.DontPanic()
		glog.Infoln("http server starting on", info.uri.Host)
		var err error
		if info.uri.Scheme == "https" {
			err = http.ListenAndServeTLS(info.uri.Host, info.pem, info.key, mux)
		} else {
			err = http.ListenAndServe(info.uri.Host, mux)
		}
		glog.Fatal(err) // ListenAndServe only returns on a non-nil error
	}()
	// TODO: this is a hack to make sure the server is ready
	// better would be to interlock the goroutine with the listener being ready
	time.Sleep(50 * time.Millisecond)
}
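The TODO above can be addressed by splitting Listen from Serve, so the port is bound before the goroutine starts; a sketch under that assumption (for https, the listener would be wrapped with tls.NewListener instead of using ListenAndServeTLS):
// Bind first so callers know the listener is ready; names match the example.
ln, err := net.Listen("tcp", info.uri.Host)
if err != nil {
	glog.Fatal(err)
}
glog.Infoln("http listening on", info.uri.Host)
go func() {
	defer flow.DontPanic()
	glog.Fatal(http.Serve(ln, mux)) // Serve only returns on error
}()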
Example #29
func establish(conn net.Conn, addr string, arg Args) error {
	switch arg.Protocol {
	case "ss": // shadowsocks
		return nil
	case "socks", "socks5":
		host, port, err := net.SplitHostPort(addr)
		if err != nil {
			return err
		}
		p, _ := strconv.Atoi(port)
		// TODO: support bind and udp
		req := gosocks5.NewRequest(gosocks5.CmdConnect, &gosocks5.Addr{
			Type: gosocks5.AddrDomain,
			Host: host,
			Port: uint16(p),
		})
		rep, err := requestSocks5(conn, req)
		if err != nil {
			return err
		}
		if rep.Rep != gosocks5.Succeeded {
			return errors.New("Service unavailable")
		}
	case "http":
		fallthrough
	default:
		req := &http.Request{
			Method:     "CONNECT",
			URL:        &url.URL{Host: addr},
			Host:       addr,
			ProtoMajor: 1,
			ProtoMinor: 1,
			Header:     make(http.Header),
		}
		req.Header.Set("Proxy-Connection", "keep-alive")
		if arg.User != nil {
			req.Header.Set("Proxy-Authorization",
				"Basic "+base64.StdEncoding.EncodeToString([]byte(arg.User.String())))
		}
		if err := req.Write(conn); err != nil {
			return err
		}
		if glog.V(LDEBUG) {
			dump, _ := httputil.DumpRequest(req, false)
			glog.Infoln(string(dump))
		}

		resp, err := http.ReadResponse(bufio.NewReader(conn), req)
		if err != nil {
			return err
		}
		if glog.V(LDEBUG) {
			dump, _ := httputil.DumpResponse(resp, false)
			glog.Infoln(string(dump))
		}
		if resp.StatusCode != http.StatusOK {
			return errors.New(resp.Status)
		}
	}

	return nil
}
Example #30
// connect to the server. Here we keep retrying, with a linearly increasing
// delay, until we manage to Dial to the server.
func (bot *ircBot) connect() (conn io.ReadWriteCloser) {

	var (
		err     error
		counter int
	)

	connectTimeout := time.After(0)

	bot.Lock()
	bot.isConnecting = true
	bot.isAuthenticating = false
	bot.Unlock()

	for {
		select {
		case <-connectTimeout:
			counter++
			connectTimeout = nil
			glog.Infoln("[Info] Connecting to IRC server: ", bot.address)
			conn, err = tls.Dial("tcp", bot.address, nil) // Always try TLS first
			if err == nil {
				glog.Infoln("Connected: TLS secure")
				return conn
			} else if _, ok := err.(x509.HostnameError); ok {
				glog.Errorln("Could not connect using TLS because: ", err)
				// Certificate might not match. This happens on irc.cloudfront.net
				insecure := &tls.Config{InsecureSkipVerify: true}
				conn, err = tls.Dial("tcp", bot.address, insecure)

				if err == nil && isCertValid(conn.(*tls.Conn)) {
					glog.Infoln("Connected: TLS with awkward certificate")
					return conn
				}
			} else if _, ok := err.(x509.UnknownAuthorityError); ok {
				glog.Errorln("x509.UnknownAuthorityError : ", err)
				insecure := &tls.Config{InsecureSkipVerify: true}
				conn, err = tls.Dial("tcp", bot.address, insecure)
				if err == nil {
					glog.Infoln("Connected: TLS with an x509.UnknownAuthorityError", err)
					return conn
				}
			} else {
				glog.Errorln("Could not establish a tls connection", err)
			}

			conn, err = net.Dial("tcp", bot.address)
			if err == nil {
				glog.Infoln("Connected: Plain text insecure")
				return conn
			}
			// TODO (yml) At some point we might want to panic
			delay := 5 * counter
			glog.Infoln("IRC connect error:", err, "- will attempt to reconnect in", delay, "seconds")
			connectTimeout = time.After(time.Duration(delay) * time.Second)
		}
	}
}