Example 1
// initSubscribers performs one-time initialization of the Postgres listener and
// goroutine for event delivery, termination and subscription management.
func (n *Notifier) initSubscribers(dbUrl string) error {
	n.l = pq.NewListener(dbUrl, 20*time.Millisecond, time.Hour, nil)
	go func() {
		for {
			select {
			case <-n.terminated:
				return
			case <-n.exit:
				n.l.UnlistenAll()
				n.l.Close()
				n.db.Close()
				for _, localL := range n.subscribers {
					close(localL)
				}
				close(n.terminated)
			case cmd := <-n.ch:
				cmd.fun()
				close(cmd.ack)
			case pgn := <-n.l.Notify:
				if pgn != nil {
					localN, err := n.makeNotification(pgn)
					if err != nil {
						log.Printf("Error parsing inbound notification %v: %v", pgn, err)
					} else {
						for _, sub := range n.subscribers {
							sub <- localN
						}
					}
				}
			}
		}
	}()

	return nil
}
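Example 1 does not show how subscribers are registered, but the cmd case in the loop suggests that changes to n.subscribers are funneled through n.ch so that only the event-loop goroutine ever touches the map. A minimal sketch of how a registration method could use that pattern (the command struct, the Notification element type and the map key are assumptions, not code from the original project):

// Subscribe is a hypothetical helper built on the cmd/ack pattern above.
func (n *Notifier) Subscribe(key string) chan Notification {
	sub := make(chan Notification, 16) // buffered so the event loop is not blocked by a slow reader
	ack := make(chan struct{})
	n.ch <- command{
		ack: ack,
		fun: func() { n.subscribers[key] = sub }, // executed inside the event-loop goroutine
	}
	<-ack // the event loop closes cmd.ack once fun has run
	return sub
}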
Example 2
func waitForNotification(dbcluter DBCluster, parition string) {
	conninfo := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=require", "read", dbcluter.Password, dbcluter.Parition[parition].Write.IP, "regentmarkets")
	listener := pq.NewListener(conninfo, 5*time.Second, 10*time.Second, nil)
	db, err := sql.Open("postgres", conninfo)
	if err != nil {
		log.Fatal(err)
	}
	if err = db.Ping(); err != nil {
		log.Fatal(err)
	}
	err = listener.Listen("transactions_watcher")
	checkErr(err)
	fmt.Println("Listing to", parition)

	var redisdb redis.Conn
	var notification *pq.Notification
	for {
		select {
		case notification = <-listener.Notify:
			if notification != nil {
				redisconn(&redisdb)
				publish(redisdb, regexp.MustCompile(",").Split(notification.Extra, -1))
			}

		case <-time.After(60 * time.Second):
			fmt.Println("no notifications for 60 seconds...")
		}
	}
}
Example 3
func (cmd *activityCmd) run(ctx scope.Context, args []string) error {
	// Get cluster in order to load config.
	_, err := getCluster(ctx)
	if err != nil {
		return fmt.Errorf("cluster error: %s", err)
	}

	listener := pq.NewListener(backend.Config.DB.DSN, 200*time.Millisecond, 5*time.Second, nil)
	if err := listener.Listen("broadcast"); err != nil {
		return fmt.Errorf("pq listen error: %s", err)
	}

	defer func() {
		ctx.Cancel()
		ctx.WaitGroup().Wait()
	}()

	// Start metrics server.
	ctx.WaitGroup().Add(1)
	go activity.Serve(ctx, cmd.addr)

	// Start scanner.
	ctx.WaitGroup().Add(1)
	activity.ScanLoop(ctx, listener)

	return nil
}
Example 4
func main() {
	conninfo := "dbname=gotraining user=root password=root port=7705 sslmode=disable"

	_, err := sql.Open("postgres", conninfo)
	if err != nil {
		panic(err)
	}

	reportProblem := func(ev pq.ListenerEventType, err error) {
		if err != nil {
			fmt.Println(err.Error())
		}
	}

	listener := pq.NewListener(conninfo, 10*time.Second, time.Minute, reportProblem)
	err = listener.Listen("events")
	if err != nil {
		panic(err)
	}

	fmt.Println("Start monitoring PostgreSQL...")
	for {
		waitForNotification(listener)
	}
}
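Example 4 loops forever over waitForNotification(listener) but does not include the helper. A minimal sketch of such a helper, assuming it follows the same select-on-Notify-with-timeout pattern as the other examples in this collection:

func waitForNotification(l *pq.Listener) {
	select {
	case n := <-l.Notify:
		if n == nil {
			return // a nil notification signals a dropped and re-established connection
		}
		fmt.Printf("received notification on %q: %s\n", n.Channel, n.Extra)
	case <-time.After(90 * time.Second):
		// Nothing arrived for a while; check that the connection is still alive.
		go func() {
			if err := l.Ping(); err != nil {
				fmt.Println("listener ping failed:", err)
			}
		}()
	}
}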
Example 5
func Init() {
	var err error

	db, err = sql.Open("postgres", *dataSource)
	if err != nil {
		debug.Fatal(err)
	}

	create := func(name, content string) {
		err = createTable(name, content)
		if err != nil {
			debug.Fatal(err)
		}
	}

	// primary tables
	create("user_", createUserSQL)
	create("project", createProjectSQL)
	create("task", createTaskSQL)
	create("milestone", createMilestoneSQL)
	create("member", createMemberSQL)
	create("worker", createWorkerSQL)
	create("friend", createFriendSQL)
	create("chat", createChatSQL)

	// secondary tables
	create("featured_project", createFeaturedProjectSQL)
	create("user_tag", createUserTagSQL)
	create("task_tag", createTaskTagSQL)
	create("tag", createTagSQL)

	// setup listener
	if *listenerEnabled {
		listener = pq.NewListener(*dataSource, 1*time.Second, time.Minute, func(ev pq.ListenerEventType, err error) {
			if err != nil {
				log.Fatal(err)
			}
		})

		if err := listener.Listen("chat"); err != nil {
			log.Fatal(err)
		}

		go func() {
			for notification := range listener.Notify {
				if notification == nil {
					// a nil notification signals a dropped and re-established connection
					continue
				}
				if Notify != nil {
					Notify(notification.Channel, notification.Extra)
				}
			}
		}()
	} else {
		debug.Warn("PostgreSQL listener is disabled")
	}
}
Example 6
func listenMessages() {
	revel.TRACE.Println("Setting up db listening")
	var Spec, _ = revel.Config.String("db.spec")
	var listener = pq.NewListener(Spec, 10*time.Second, time.Minute, listenerEventsHandler)
	var err = listener.Listen("new_message")
	if err != nil {
		panic(err)
	}
	waitForNotification(listener)
}
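Example 6 passes a listenerEventsHandler callback that is not shown. Judging from the reportProblem callbacks in the other examples, it is presumably just an error logger of this shape (a sketch, not the original project's code):

func listenerEventsHandler(ev pq.ListenerEventType, err error) {
	if err != nil {
		revel.ERROR.Println("pq listener event error:", err)
	}
}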
Example 7
func (repo *postgresRepository) ensureListener() {
	repo.m.Lock()
	defer repo.m.Unlock()

	if repo.listener == nil {
		repo.listener = pq.NewListener(repo.connectionString, time.Second, time.Minute, nil)

		kit.SafeGo(repo.listenDaemon)
	}
}
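The listenDaemon started through kit.SafeGo is not shown either. Assuming it only drains the listener's Notify channel and keeps the connection alive, it might look roughly like this (handleNotification is a hypothetical method, not part of the original repository):

func (repo *postgresRepository) listenDaemon() {
	for {
		select {
		case n := <-repo.listener.Notify:
			if n == nil {
				continue // connection was lost and re-established
			}
			repo.handleNotification(n.Channel, n.Extra) // hypothetical dispatch
		case <-time.After(90 * time.Second):
			if err := repo.listener.Ping(); err != nil {
				return
			}
		}
	}
}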
Example 8
func run(config Config) {
	listener := pq.NewListener(config.PostgresURL, 10*time.Second, time.Minute, errorReporter)
	err := listener.Listen("urlwork")
	if err != nil {
		log.Fatal(err)
	}

	rabbitchannel := make(chan string, 100)

	go func() {
		cfg := new(tls.Config)
		cfg.InsecureSkipVerify = true
		conn, err := amqp.DialTLS(config.RabbitMQURL, cfg)
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		ch, err := conn.Channel()
		if err != nil {
			log.Fatal(err)
		}
		defer ch.Close()

		for {
			payload := <-rabbitchannel
			log.Println(payload)
			err := ch.Publish("urlwork", "todo", false, false, amqp.Publishing{
				ContentType: "text/plain",
				Body:        []byte(payload),
			})
			if err != nil {
				log.Fatal(err)
			}
		}
	}()

	for {
		select {
		case notification := <-listener.Notify:
			if notification != nil {
				rabbitchannel <- notification.Extra
			}
		case <-time.After(90 * time.Second):
			go func() {
				err := listener.Ping()
				if err != nil {
					log.Fatal(err)
				}
			}()
		}
	}
}
Example 9
// newPgListener creates and returns the pglistener from the pq package.
func newPgListener(pgconninfo string) (*pq.Listener, error) {

	// create a callback function to monitor connection state changes
	pgEventCallback := func(ev pq.ListenerEventType, err error) {
		if err != nil {
			fmt.Println("pgbroadcast: ", err.Error())
		}
	}

	// create the listener
	l := pq.NewListener(pgconninfo, 10*time.Second, time.Minute, pgEventCallback)

	return l, nil
}
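A possible way to use newPgListener; the connection string and channel name here are placeholders:

func runPgBroadcast(conninfo string) error {
	l, err := newPgListener(conninfo)
	if err != nil {
		return err
	}
	if err := l.Listen("pgbroadcast"); err != nil {
		return err
	}
	for n := range l.Notify {
		if n == nil {
			continue // connection was lost and re-established
		}
		fmt.Println("pgbroadcast:", n.Channel, n.Extra)
	}
	return nil
}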
Example 10
func run(config Config) {
	purl := fmt.Sprintf("user=%s password=%s host=%s port=%s dbname=%s sslmode=%s", config.DbUser, config.DbPassword, config.DbHost, config.DbPort, config.DbName, config.SslMode)
	listener := pq.NewListener(purl, 10*time.Second, time.Minute, errorReporter)
	err := listener.Listen("usertrigger")
	if err != nil {
		log.Fatal(err)
	}

	rabbitchannel := make(chan string, 100)

	//Code for STOMP
	go func() {
		rabbitHost := fmt.Sprintf("%s:%s", config.RabbitHost, config.RabbitPort)
		conn, err := stomp.Dial("tcp", rabbitHost,
			stomp.ConnOpt.Login(config.RabbitUser, config.RabbitPassword),
			stomp.ConnOpt.AcceptVersion(stomp.V11),
			stomp.ConnOpt.AcceptVersion(stomp.V12),
			stomp.ConnOpt.Host(config.RabbitVHost),
			stomp.ConnOpt.Header("nonce", "B256B26D320A"))

		if err != nil {
			log.Fatal(err)
		}
		defer conn.Disconnect()

		for {
			payload := <-rabbitchannel
			log.Println(payload)
			err = conn.Send(config.RabbitQueue, "text/plain", []byte(payload))
			if err != nil {
				log.Fatal(err)
			}
		}
	}()

	for {
		select {
		case notification := <-listener.Notify:
			if notification != nil {
				rabbitchannel <- notification.Extra
			}
		case <-time.After(90 * time.Second):
			go func() {
				err := listener.Ping()
				if err != nil {
					log.Fatal(err)
				}
			}()
		}
	}
}
Example 11
File: jobs.go Project: logan/heim
func (jql *jobQueueListener) background(wg *sync.WaitGroup) {
	ctx := jql.Backend.ctx.Fork()
	logger := jql.Backend.logger

	defer ctx.WaitGroup().Done()

	listener := pq.NewListener(jql.Backend.dsn, 200*time.Millisecond, 5*time.Second, nil)
	if err := listener.Listen("job_item"); err != nil {
		// TODO: manage this more nicely
		panic("job listen: " + err.Error())
	}
	logger.Printf("job listener started")

	// Signal to constructor that we're ready to handle operations.
	wg.Done()

	keepalive := time.NewTicker(30 * time.Second)
	defer keepalive.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-keepalive.C:
			// Ping to make sure the database connection is still live.
			if err := listener.Ping(); err != nil {
				logger.Printf("job listener ping: %s\n", err)
				jql.Backend.ctx.Terminate(fmt.Errorf("job listener ping: %s", err))
				return
			}
		case notice := <-listener.Notify:
			if notice == nil {
				logger.Printf("job listener: received nil notification")
				// A nil notice indicates a loss of connection.
				// For now it's easier to just shut down and force job
				// processor to restart.
				jql.Backend.ctx.Terminate(ErrPsqlConnectionLost)
				return
			}

			jql.m.Lock()
			if c, ok := jql.cs[notice.Extra]; ok {
				c.Signal()
			}
			jql.m.Unlock()
		}
	}
}
Example 12
File: command.go Project: ACPK/atc
func (cmd *ATCCommand) constructDB(logger lager.Logger) (*db.SQLDB, db.PipelineDBFactory, error) {
	dbConn, err := migrations.LockDBAndMigrate(logger.Session("db.migrations"), "postgres", cmd.PostgresDataSource)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to migrate database: %s", err)
	}

	listener := pq.NewListener(cmd.PostgresDataSource, time.Second, time.Minute, nil)
	bus := db.NewNotificationsBus(listener, dbConn)

	explainDBConn := db.Explain(logger, dbConn, clock.NewClock(), 500*time.Millisecond)
	sqlDB := db.NewSQL(logger.Session("db"), explainDBConn, bus)

	pipelineDBFactory := db.NewPipelineDBFactory(logger.Session("db"), explainDBConn, bus, sqlDB)

	return sqlDB, pipelineDBFactory, err
}
Example 13
func waitForNotification(dbcluter DBCluster, parition string) {
	conninfo := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=require", "read", dbcluter.Password, dbcluter.Parition[parition].Write.IP, "regentmarkets")
	listener := pq.NewListener(conninfo, 5*time.Second, 10*time.Second, nil)
	err := listener.Listen("transactions_watcher")
	checkErr(err)

	redisdb, err := redis.DialURL(os.Getenv("REDIS_URL"))
	checkErr(err)
	var notification *pq.Notification
	for {
		select {
		case notification = <-listener.Notify:
			if notification != nil {
				publish(redisdb, regexp.MustCompile(",").Split(notification.Extra, -1))
			}
		}
	}
}
Example 14
// Listen executes `LISTEN channel`. Uses f to handle notifications received on channel.
// On error it logs error messages (if a logger exists).
func (db *Database) Listen(channel string, f func(payload ...string)) error {
	// Create a new listener only if Listen is called for the first time
	if db.listener == nil {
		db.listenerCallbacks = make(map[string]func(...string))

		reportProblem := func(ev pq.ListenerEventType, err error) {
			if err != nil && db.logger != nil {
				db.printLog(err.Error())
			}
		}
		db.listener = pq.NewListener(db.connectionString, 10*time.Second, time.Minute, reportProblem)

		if db.listener == nil {
			return errors.New("Unable to create a new listener")
		}

		// detach event handler
		go func() {
			for {
				select {
				case notification := <-db.listener.Notify:
					if notification == nil {
						continue
					}
					go db.listenerCallbacks[notification.Channel](notification.Extra)
				case <-time.After(90 * time.Second):
					go func() {
						if db.listener.Ping() != nil {
							db.printLog(fmt.Sprintf("Error checking server connection for channel %s\n", channel))
							return
						}
					}()
				}
			}
		}()
	}

	if _, alreadyIn := db.listenerCallbacks[channel]; alreadyIn {
		return errors.New("Already subscribed to channel " + channel)
	}

	db.listenerCallbacks[channel] = f

	if err := db.listener.Listen(channel); err != nil {
		return err
	}

	return nil
}
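Hypothetical usage of the Listen wrapper above; the channel name and handler body are illustrative:

err := db.Listen("orders", func(payload ...string) {
	for _, p := range payload {
		log.Println("orders notification:", p)
	}
})
if err != nil {
	log.Println("could not listen on orders:", err)
}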
Example 15
// pgstr is the connection string for Postgres
func NewEventService(pgstr string) (event *EventService) {

	event = &EventService{Channels: map[string]Channel{}}

	reportProblem := func(ev pq.ListenerEventType, err error) {
		if err != nil {
			event.handleErrorAll(err)
		}
	}

	event.Listener = pq.NewListener(pgstr, 10*time.Second, time.Minute, reportProblem)

	go func() {
		var msg Message
		for {
			select {
			case n := <-event.Listener.Notify:
				if n == nil {
					continue
				}
				if ch, ok := event.Channels[n.Channel]; ok {
					ch.MessageCount++
					err := json.Unmarshal([]byte(n.Extra), &msg)
					// the message could not be parsed
					if err != nil {
						ch.handleError(err)
						continue
					}
					// the message was parsed and the client was found
					if client, ok := ch.Clients[msg.Target]; ok {
						go client.EventHandler(msg.Body)
					}
				}
				continue
			case <-time.After(60 * time.Second):
				go func() {
					err := event.Listener.Ping()
					if err != nil {
						event.handleErrorAll(err)
					}
				}()
				continue
			}
		}
	}()

	return
}
Example 16
func waitForNotification(clientdb ClientDB, company string) {
	conninfo := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=require", "postgres", clientdb.Password, clientdb.Company[company].Write.IP, "test")
	listener := pq.NewListener(conninfo, 10*time.Second, time.Minute, nil)
	err := listener.Listen("getwork")
	checkErr(err)

	redisdb, err := redis.DialURL(os.Getenv("REDIS_URL"))
	checkErr(err)
	var notification *pq.Notification
	for {
		select {
		case notification = <-listener.Notify:
			if notification != nil {
				publish(redisdb, regexp.MustCompile(",").Split(notification.Extra, -1))
			}

		case <-time.After(60 * time.Second):
			fmt.Println("no notifications for 60 seconds...")
		}
	}
}
Example 17
func NewDispatcher(conf DBConf) (d *Dispatcher, err error) {
	c, ok := conf[env()]
	if !ok {
		err = errors.New("Configuration for \"" + env() + "\" environment not found.")
		return
	}

	d = &Dispatcher{
		done:      make(chan bool),
		waitGroup: &sync.WaitGroup{},
		listener:  pq.NewListener(c.Conn, 10*time.Second, time.Minute, nil),
		clients:   NewClientsPool(),
		tasks:     make(map[int64]autogram.BackgroundTask),
	}

	d.db, err = sql.Open(c.Driver, c.Conn)
	if err != nil {
		return
	}
	return
}
Example 18
func main() {
	config_dsn := os.Getenv("CP_DSN")
	if config_dsn == "" {
		config_dsn = "postgres://*****:*****@10.10.42.23:5432/cachepusher?sslmode=disable"
	}

	config_redis := os.Getenv("CP_REDIS")
	if config_redis == "" {
		config_redis = "10.10.42.23:6379"
	}

	config_cacheprefix := os.Getenv("CP_PREFIX")
	if config_cacheprefix == "" {
		config_cacheprefix = "customer"
	}

	db, err := sqlx.Connect("postgres", config_dsn)
	if err != nil {
		log.Print("[Postgresql] ", err)
		return
	}

	r := redis.NewClient(&redis.Options{
		Addr: config_redis,
	})
	_, err = r.Ping().Result()
	if err != nil {
		log.Print("[Redis] ", err)
		return
	}

	log.Print("Clearing cache")
	keys, err := r.Keys(fmt.Sprintf("%s:*", config_cacheprefix)).Result()
	if err != nil {
		log.Print(err)
	}
	r.Pipelined(func(r *redis.Pipeline) error {
		for _, key := range keys {
			err = r.Del(key).Err()
			if err != nil {
				log.Print(err)
			}
		}
		return nil
	})

	listener := pq.NewListener(config_dsn, 10*time.Second, time.Minute, func(ev pq.ListenerEventType, err error) {
		if err != nil {
			log.Print(err)
		}
	})

	err = listener.Listen("customer_updated")
	if err != nil {
		log.Print(err)
		return
	}
	err = listener.Listen("customer_deleted")
	if err != nil {
		log.Print(err)
		return
	}

	/** Wait until we have set up the listener to get notifications before we trigger the warmup **/
	log.Print("Triggering cache warmup")
	_, err = db.Exec("select customer_warmup()")
	if err != nil {
		log.Print(err)
	}

	log.Printf("Listening for updates")
	for {
		select {
		case n := <-listener.Notify:
			if n == nil {
				continue
			}
			cachekey := fmt.Sprintf("%s:%s", config_cacheprefix, n.Extra)
			switch n.Channel {
			case "customer_deleted":
				log.Printf("DEL %s", cachekey)
				err = r.Del(cachekey).Err()
				if err != nil {
					log.Print(err)
				}
			case "customer_updated":
				var customerJson string
				err = db.Get(&customerJson, "select customer_get($1)", n.Extra)
				if err != nil {
					log.Print(err)
				}
				log.Printf("SET %s %s", cachekey, customerJson)
				err = r.Set(cachekey, customerJson, 0).Err()
				if err != nil {
					log.Print(err)
				}
			}
		// Make sure our connection stays up
		case <-time.After(90 * time.Second):
			log.Print("LISTEN PING")
			go func() {
				err = listener.Ping()
				if err != nil {
					log.Print(err)
				}
			}()
		}
	}
}
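Example 18 relies on the database raising customer_updated and customer_deleted notifications, but the notifying side is not shown. One common way to produce them is a row-level trigger that calls pg_notify; a sketch of such setup code follows (table and column names are assumptions, not taken from the original project):

// setupCustomerTriggers installs a hypothetical trigger that publishes the
// customer id on customer_updated / customer_deleted whenever a row changes.
func setupCustomerTriggers(db *sqlx.DB) error {
	_, err := db.Exec(`
		CREATE OR REPLACE FUNCTION notify_customer_change() RETURNS trigger AS $$
		BEGIN
			IF TG_OP = 'DELETE' THEN
				PERFORM pg_notify('customer_deleted', OLD.id::text);
				RETURN OLD;
			END IF;
			PERFORM pg_notify('customer_updated', NEW.id::text);
			RETURN NEW;
		END;
		$$ LANGUAGE plpgsql;

		DROP TRIGGER IF EXISTS customer_change ON customer;
		CREATE TRIGGER customer_change
			AFTER INSERT OR UPDATE OR DELETE ON customer
			FOR EACH ROW EXECUTE PROCEDURE notify_customer_change();
	`)
	return err
}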
Example 19
File: main.go Project: utako/atc
func main() {
	flag.Parse()

	if !*dev && (*httpUsername == "" || (*httpHashedPassword == "" && *httpPassword == "")) {
		fatal(errors.New("must specify -httpUsername and -httpPassword or -httpHashedPassword or turn on dev mode"))
	}

	if _, err := os.Stat(*templatesDir); err != nil {
		fatal(errors.New("directory specified via -templates does not exist"))
	}

	if _, err := os.Stat(*publicDir); err != nil {
		fatal(errors.New("directory specified via -public does not exist"))
	}

	logger := lager.NewLogger("atc")

	logLevel := lager.INFO
	if *dev {
		logLevel = lager.DEBUG
	}

	sink := lager.NewReconfigurableSink(lager.NewWriterSink(os.Stdout, lager.DEBUG), logLevel)
	logger.RegisterSink(sink)

	var err error

	var dbConn Db.Conn

	for {
		dbConn, err = migration.Open(*sqlDriver, *sqlDataSource, migrations.Migrations)
		if err != nil {
			if strings.Contains(err.Error(), " dial ") {
				logger.Error("failed-to-open-db", err)
				time.Sleep(5 * time.Second)
				continue
			}

			fatal(err)
		}

		break
	}

	dbConn = Db.Explain(logger, dbConn, 500*time.Millisecond)

	listener := pq.NewListener(*sqlDataSource, time.Second, time.Minute, nil)
	bus := Db.NewNotificationsBus(listener)

	db := Db.NewSQL(logger.Session("db"), dbConn, bus)
	pipelineDBFactory := Db.NewPipelineDBFactory(logger.Session("db"), dbConn, bus, db)

	var configDB Db.ConfigDB
	configDB = Db.PlanConvertingConfigDB{db}

	var resourceTypesNG []atc.WorkerResourceType
	err = json.Unmarshal([]byte(*resourceTypes), &resourceTypesNG)
	if err != nil {
		logger.Fatal("invalid-resource-types", err)
	}

	var workerClient worker.Client
	if *gardenAddr != "" {
		workerClient = worker.NewGardenWorker(
			gclient.New(gconn.NewWithLogger(
				*gardenNetwork,
				*gardenAddr,
				logger.Session("garden-connection"),
			)),
			clock.NewClock(),
			-1,
			resourceTypesNG,
			"linux",
			[]string{},
		)
	} else {
		workerClient = worker.NewPool(worker.NewDBWorkerProvider(db, logger))
	}

	resourceTracker := resource.NewTracker(workerClient)
	gardenFactory := exec.NewGardenFactory(workerClient, resourceTracker, func() string {
		guid, err := uuid.NewV4()
		if err != nil {
			panic("not enough entropy to generate guid: " + err.Error())
		}

		return guid.String()
	})
	execEngine := engine.NewExecEngine(gardenFactory, engine.NewBuildDelegateFactory(db), db)

	engine := engine.NewDBEngine(engine.Engines{execEngine}, db, db)

	var webValidator auth.Validator

	if *httpUsername != "" && *httpHashedPassword != "" {
		webValidator = auth.BasicAuthHashedValidator{
			Username:       *httpUsername,
			HashedPassword: *httpHashedPassword,
		}
	} else if *httpUsername != "" && *httpPassword != "" {
		webValidator = auth.BasicAuthValidator{
			Username: *httpUsername,
			Password: *httpPassword,
		}
	} else {
		webValidator = auth.NoopValidator{}
	}

	callbacksURL, err := url.Parse(*callbacksURLString)
	if err != nil {
		fatal(err)
	}

	drain := make(chan struct{})

	apiHandler, err := api.NewHandler(
		logger,            // logger lager.Logger,
		webValidator,      // validator auth.Validator,
		pipelineDBFactory, // pipelineDBFactory db.PipelineDBFactory,

		configDB, // configDB db.ConfigDB,

		db, // buildsDB buildserver.BuildsDB,
		db, // workerDB workerserver.WorkerDB,
		db, // pipeDB pipes.PipeDB,
		db, // pipelinesDB db.PipelinesDB,

		config.ValidateConfig,       // configValidator configserver.ConfigValidator,
		callbacksURL.String(),       // peerURL string,
		buildserver.NewEventHandler, // eventHandlerFactory buildserver.EventHandlerFactory,
		drain, // drain <-chan struct{},

		engine,       // engine engine.Engine,
		workerClient, // workerClient worker.Client,

		sink, // sink *lager.ReconfigurableSink,

		*cliDownloadsDir, // cliDownloadsDir string,
	)
	if err != nil {
		fatal(err)
	}

	radarSchedulerFactory := pipelines.NewRadarSchedulerFactory(
		resourceTracker,
		*checkInterval,
		db,
		engine,
		db,
	)

	webHandler, err := web.NewHandler(
		logger,
		webValidator,
		radarSchedulerFactory,
		db,
		pipelineDBFactory,
		configDB,
		*templatesDir,
		*publicDir,
		engine,
	)
	if err != nil {
		fatal(err)
	}

	webMux := http.NewServeMux()
	webMux.Handle("/api/v1/", apiHandler)
	webMux.Handle("/", webHandler)

	var httpHandler http.Handler

	httpHandler = webMux

	if !*publiclyViewable {
		httpHandler = auth.Handler{
			Handler:   httpHandler,
			Validator: webValidator,
		}
	}

	// copy Authorization header as ATC-Authorization cookie for websocket auth
	httpHandler = auth.CookieSetHandler{
		Handler: httpHandler,
	}

	httpHandler = httpmetrics.Wrap(httpHandler)

	webListenAddr := fmt.Sprintf("%s:%d", *webListenAddress, *webListenPort)
	debugListenAddr := fmt.Sprintf("%s:%d", *debugListenAddress, *debugListenPort)

	syncer := pipelines.NewSyncer(
		logger.Session("syncer"),
		db,
		pipelineDBFactory,
		func(pipelineDB Db.PipelineDB) ifrit.Runner {
			return grouper.NewParallel(os.Interrupt, grouper.Members{
				{
					pipelineDB.ScopedName("radar"),
					rdr.NewRunner(
						logger.Session(pipelineDB.ScopedName("radar")),
						*noop,
						db,
						radarSchedulerFactory.BuildRadar(pipelineDB),
						pipelineDB,
						1*time.Minute,
					),
				},
				{
					pipelineDB.ScopedName("scheduler"),
					&sched.Runner{
						Logger: logger.Session(pipelineDB.ScopedName("scheduler")),

						Locker: db,
						DB:     pipelineDB,

						Scheduler: radarSchedulerFactory.BuildScheduler(pipelineDB),

						Noop: *noop,

						Interval: 10 * time.Second,
					},
				},
			})
		},
	)

	buildTracker := builds.NewTracker(
		logger.Session("build-tracker"),
		db,
		engine,
	)

	memberGrouper := []grouper.Member{
		{"web", http_server.New(webListenAddr, httpHandler)},

		{"debug", http_server.New(debugListenAddr, http.DefaultServeMux)},

		{"drainer", ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
			close(ready)

			<-signals

			close(drain)

			return nil
		})},

		{"pipelines", pipelines.SyncRunner{
			Syncer:   syncer,
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"builds", builds.TrackerRunner{
			Tracker:  buildTracker,
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},
	}

	group := grouper.NewParallel(os.Interrupt, memberGrouper)

	running := ifrit.Envoke(sigmon.New(group))

	logger.Info("listening", lager.Data{
		"web":   webListenAddr,
		"debug": debugListenAddr,
	})

	err = <-running.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}
}
Example 20
func main() {
	InitErrorLog(os.Stderr)

	if len(os.Args) != 2 {
		printUsage()
		os.Exit(1)
	} else if os.Args[1] == "--help" {
		printUsage()
		os.Exit(1)
	}

	err := readConfigFile(os.Args[1])
	if err != nil {
		elog.Fatalf("error while reading configuration file: %s", err)
	}
	if len(Config.Databases) == 0 {
		elog.Fatalf("at least one database must be configured")
	}

	listenAddr := net.JoinHostPort(Config.ListenAddress, strconv.Itoa(Config.ListenPort))
	tcpAddr, err := net.ResolveTCPAddr("tcp", listenAddr)
	if err != nil {
		elog.Fatalf("could not resolve listen address: %s", err)
	}
	l, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		elog.Fatalf("could not open listen socket: %s", err)
	}

	var m sync.Mutex
	var connStatusNotifier chan struct{}

	listenerStateChange := func(ev pq.ListenerEventType, err error) {
		switch ev {
		case pq.ListenerEventConnectionAttemptFailed:
			elog.Warningf("Listener: could not connect to the database: %s", err.Error())

		case pq.ListenerEventDisconnected:
			elog.Warningf("Listener: lost connection to the database: %s", err.Error())
			m.Lock()
			close(connStatusNotifier)
			connStatusNotifier = nil
			m.Unlock()

		case pq.ListenerEventReconnected,
			pq.ListenerEventConnected:
			elog.Logf("Listener: connected to the database")
			m.Lock()
			connStatusNotifier = make(chan struct{})
			m.Unlock()
		}
	}

	// make sure pq.Listener doesn't pick up any env variables
	os.Clearenv()

	clientConnectionString := fmt.Sprintf("fallback_application_name=allas %s", Config.ClientConnInfo)
	listener := pq.NewListener(clientConnectionString,
		250*time.Millisecond, 5*time.Minute,
		listenerStateChange)

	nd := notifydispatcher.NewNotifyDispatcher(listener)
	nd.SetBroadcastOnConnectionLoss(false)
	nd.SetSlowReaderEliminationStrategy(notifydispatcher.NeglectSlowReaders)

	for {
		c, err := l.Accept()
		if err != nil {
			panic(err)
		}

		var myConnStatusNotifier chan struct{}

		m.Lock()
		if connStatusNotifier == nil {
			m.Unlock()
			go RejectFrontendConnection(c)
			continue
		} else {
			myConnStatusNotifier = connStatusNotifier
		}
		m.Unlock()

		newConn := NewFrontendConnection(c, nd, myConnStatusNotifier)
		go newConn.mainLoop(Config.StartupParameters, Config.Databases)
	}
}
Example 21
func (b *Backend) background(wg *sync.WaitGroup) {
	ctx := b.ctx.Fork()
	logger := b.logger

	defer ctx.WaitGroup().Done()

	listener := pq.NewListener(b.dsn, 200*time.Millisecond, 5*time.Second, nil)
	if err := listener.Listen("broadcast"); err != nil {
		// TODO: manage this more nicely
		panic("pq listen: " + err.Error())
	}
	logger.Printf("pq listener started")

	peerWatcher := b.cluster.Watch()
	keepalive := time.NewTicker(3 * cluster.TTL / 4)
	defer keepalive.Stop()

	// Signal to constructor that we're ready to handle client connections.
	wg.Done()

	for {
		select {
		case <-ctx.Done():
			return
		case <-keepalive.C:
			if b.desc != nil {
				if err := b.cluster.Update(b.desc); err != nil {
					logger.Printf("cluster: keepalive error: %s", err)
				}
			}
			// Ping to make sure the database connection is still live.
			if err := listener.Ping(); err != nil {
				logger.Printf("pq ping: %s\n", err)
				b.ctx.Terminate(fmt.Errorf("pq ping: %s", err))
				return
			}
		case event := <-peerWatcher:
			b.Lock()
			switch e := event.(type) {
			case *cluster.PeerJoinedEvent:
				logger.Printf("cluster: peer %s joining with era %s", e.ID, e.Era)
				b.peers[e.ID] = e.Era
			case *cluster.PeerAliveEvent:
				if prevEra := b.peers[e.ID]; prevEra != e.Era {
					b.invalidatePeer(ctx, e.ID, prevEra)
					logger.Printf("cluster: peer %s changing era from %s to %s", e.ID, prevEra, e.Era)
				}
				b.peers[e.ID] = e.Era
			case *cluster.PeerLostEvent:
				logger.Printf("cluster: peer %s departing", e.ID)
				if era, ok := b.peers[e.ID]; ok {
					b.invalidatePeer(ctx, e.ID, era)
					delete(b.peers, e.ID)
				}
			}
			b.Unlock()
		case notice := <-listener.Notify:
			if notice == nil {
				logger.Printf("pq listen: received nil notification")
				// A nil notice indicates a loss of connection. We could
				// re-snapshot for all connected clients, but for now it's
				// easier to just shut down and force everyone to reconnect.
				b.ctx.Terminate(ErrPsqlConnectionLost)
				return
			}

			var msg BroadcastMessage

			if err := json.Unmarshal([]byte(notice.Extra), &msg); err != nil {
				logger.Printf("error: pq listen: invalid broadcast: %s", err)
				logger.Printf("         payload: %#v", notice.Extra)
				continue
			}

			// Check for UserID- if so, notify user instead of room
			if msg.UserID != "" {
				for _, lm := range b.listeners {
					if err := lm.NotifyUser(ctx, msg.UserID, msg.Event, msg.Exclude...); err != nil {
						logger.Printf("error: pq listen: notify user error on userID %s: %s", msg.Room, err)
					}
				}
				continue
			}

			// Check for global ban, which is a special-case broadcast.
			if msg.Room == "" && msg.Event.Type == proto.BounceEventType {
				for _, lm := range b.listeners {
					if err := lm.Broadcast(ctx, msg.Event, msg.Exclude...); err != nil {
						logger.Printf("error: pq listen: bounce broadcast error on %s: %s", msg.Room, err)
					}
				}
				continue
			}

			// TODO: if room name is empty, broadcast globally
			if lm, ok := b.listeners[msg.Room]; ok {
				logger.Printf("broadcasting %s to %s", msg.Event.Type, msg.Room)
				if err := lm.Broadcast(ctx, msg.Event, msg.Exclude...); err != nil {
					logger.Printf("error: pq listen: broadcast error on %s: %s", msg.Room, err)
				}
			}

			if msg.Event.Type == proto.PartEventType {
				payload, err := msg.Event.Payload()
				if err != nil {
					continue
				}
				if presence, ok := payload.(*proto.PresenceEvent); ok {
					if c, ok := b.partWaiters[presence.SessionID]; ok {
						c <- struct{}{}
					}
				}
			}
		}
	}
}
Example 22
func (a *app) listen() {

	listener := pq.NewListener(a.config.Connect.DSN(), minReconnectInterval, maxReconnectInterval, func(event pq.ListenerEventType, err error) {

		if err != nil {

			log.Errorf("Postgres listen: %v", err)

			return
		}

		log.Debugf("Postgres notify send event: %v", event)
	})

	if err := listener.Listen(channelTasks); err != nil {
		log.Errorf("Postgres listen %s: %v", channelTasks, err)
	}
	if err := listener.Listen(channelStopContainer); err != nil {
		log.Errorf("Postgres listen %s: %v", channelStopContainer, err)
	}

	var (
		events          = listener.NotificationChannel()
		checkTasks      = time.Tick(time.Minute)
		checkContainers = time.Tick(time.Minute * 10)
	)

	for {

		select {

		case event := <-events:

			if event == nil {

				continue
			}

			log.Debugf("Received from [%s] playload: %s", event.Channel, event.Extra)

			switch event.Channel {
			case channelTasks:

				var task Task

				if err := json.Unmarshal([]byte(event.Extra), &task); err != nil {

					log.Errorf("Could not unmarshal notify playload: %v", err)

					continue
				}

				var accept bool

				if err := a.connect.Get(&accept, "SELECT accept FROM build.accept($1)", task.BuildID); err == nil {

					if accept {

						a.tasks <- task
					}

				} else {

					log.Debugf("Error when accepting a task: %v", err)
				}

			case channelStopContainer:

				var container struct {
					ContainerID string    `json:"container_id"`
					CreatedAt   time.Time `json:"created_at"`
				}

				if err := json.Unmarshal([]byte(event.Extra), &container); err != nil {

					log.Errorf("Could not unmarshal notify playload: %v", err)

					continue
				}

				log.Warnf("Remove container because build stopped by timeout: %s", container.ContainerID)

				a.docker.RemoveContainer(container.ContainerID)
			}

		case <-checkTasks:

			log.Debug("Checking for new tasks")

			for {

				var task Task

				if err := a.connect.Get(&task, "SELECT build_id, created_at FROM build.fetch()"); err != nil {

					if err != sql.ErrNoRows {

						log.Errorf("Could not fetch new task: %v", err)
					}

					break
				}

				a.tasks <- task
			}

			if _, err := a.connect.Exec("SELECT build.gc()"); err != nil {

				log.Errorf("Error when checking the lost builds: %v", err)
			}

		case <-checkContainers:

			if containers, err := a.docker.ListContainers(); err == nil {

				for _, container := range containers {

					if strings.HasPrefix(container.Name, "/pci-seq-") && container.CreatedAt.Add(containerZombieTTL).Before(time.Now()) {

						log.Warnf("Container was running too long time, destroy: %s", container.Name)

						container.Destroy()
					}
				}
			}
		}
	}
}
Example 23
// RegisterScanListener "subscribes" to the notifications published to the scan_listener notifier.
// It has as input the usual db attributes and returns an int64 channel which can be consumed
// for newly created scan id's.
func (db *DB) RegisterScanListener(dbname, user, password, hostport, sslmode string) <-chan int64 {

	log := logger.GetLogger()

	reportProblem := func(ev pq.ListenerEventType, err error) {
		if err != nil {
			log.WithFields(logrus.Fields{
				"error": err.Error(),
			}).Error("Listener Error")
		}
	}

	listenerChan := make(chan int64)
	listenerName := "scan_listener"

	connInfo := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s",
		user, password, hostport, dbname, sslmode)

	go func() {

		listener := pq.NewListener(connInfo, 100*time.Millisecond, 10*time.Second, reportProblem)
		err := listener.Listen(listenerName)

		if err != nil {
			log.WithFields(logrus.Fields{
				"listener": listenerName,
				"error":    err.Error(),
			}).Error("could not listen for notification")
			close(listenerChan)
			return
		}

		for m := range listener.Notify {
			if m == nil {
				continue
			}
			sid := m.Extra
			if !db.acquireScan(sid) {
				// skip this scan if we didn't acquire it
				continue
			}
			// scan was acquired, inform the scanner to launch it
			id, err := strconv.ParseInt(sid, 10, 64)
			if err != nil {
				log.WithFields(logrus.Fields{
					"scan_id": sid,
					"error":   err.Error(),
				}).Error("could not decode acquired notification")
				continue
			}
			listenerChan <- id
			log.WithFields(logrus.Fields{
				"scan_id": id,
			}).Debug("Acquired notification.")
		}

	}()

	// Launch a goroutine that relaunches scans that have not yet been processed
	go func() {
		for {
			// don't requeue scans more than 3 times
			_, err := db.Exec(`UPDATE scans SET ack = false, timestamp = NOW()
				             WHERE completion_perc = 0 AND attempts < 3 AND ack = true
						   AND timestamp < NOW() - INTERVAL '10 minute'`)
			if err != nil {
				log.WithFields(logrus.Fields{
					"error": err,
				}).Error("Could not run zero completion update query")
			}
			_, err = db.Exec(fmt.Sprintf(`SELECT pg_notify('%s', ''||id )
						      FROM scans
						      WHERE ack=false
						      ORDER BY id ASC
						      LIMIT 1000`, listenerName))
			if err != nil {
				log.WithFields(logrus.Fields{
					"error": err,
				}).Error("Could not run unacknowledged scans periodic check.")
			}
			time.Sleep(3 * time.Minute)
		}
	}()
	return listenerChan
}
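Consuming the channel returned by RegisterScanListener could look like this; the connection parameters are placeholders and runScan is a hypothetical worker:

scanIDs := db.RegisterScanListener("observatory", "dbuser", "dbpass", "localhost:5432", "disable")
for id := range scanIDs {
	log.Printf("acquired scan %d, launching worker", id)
	go runScan(id)
}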
Example 24
	"github.com/concourse/atc"
	"github.com/concourse/atc/db"
	"github.com/concourse/atc/event"
)

var _ = Describe("One-off Builds", func() {
	var atcProcess ifrit.Process
	var dbListener *pq.Listener
	var atcPort uint16
	var pipelineDBFactory db.PipelineDBFactory

	BeforeEach(func() {
		dbLogger := lagertest.NewTestLogger("test")
		postgresRunner.Truncate()
		dbConn = postgresRunner.Open()
		dbListener = pq.NewListener(postgresRunner.DataSourceName(), time.Second, time.Minute, nil)
		bus := db.NewNotificationsBus(dbListener, dbConn)
		sqlDB = db.NewSQL(dbLogger, dbConn, bus)
		pipelineDBFactory = db.NewPipelineDBFactory(dbLogger, dbConn, bus, sqlDB)

		atcProcess, atcPort = startATC(atcBin, 1)
	})

	AfterEach(func() {
		ginkgomon.Interrupt(atcProcess)

		Expect(dbConn.Close()).To(Succeed())
		Expect(dbListener.Close()).To(Succeed())
	})

	Describe("viewing a list of builds", func() {
Example 25
func setupDatabaseListener() {
	if !checkDatabaseHandleValid(db) {
		return
	}

	//create/replace function for notifyPhoneNumber()
	if _, err := db.Exec(`CREATE or REPLACE FUNCTION notifyPhoneNumber() RETURNS trigger AS $$
 			BEGIN  
  			  IF TG_OP='DELETE' THEN
    				EXECUTE FORMAT('NOTIFY notifyphonenumber, ''%s''', OLD.PhoneNumber); 
  				ELSE
    				EXECUTE FORMAT('NOTIFY notifyphonenumber, ''%s''', NEW.PhoneNumber); 
  				END IF;
  			RETURN NULL;
 			END;  
		$$ LANGUAGE plpgsql;`); err != nil {
		log.Println(err)
	} else {
		log.Println("Successfully create/replace function notifyPhoneNumber()")
	}

	//check for trigger existence
	var triggerExist bool
	if err := db.QueryRow(`SELECT EXISTS(
		SELECT 1
			FROM pg_trigger
			WHERE tgname='inprogresschange')`).Scan(&triggerExist); err != nil {
		log.Println(err)
	}
	if !triggerExist {
		if _, err := db.Exec(`CREATE TRIGGER inprogresschange AFTER INSERT OR UPDATE OR DELETE
 			ON inprogress
 			FOR EACH ROW 
 			EXECUTE PROCEDURE notifyPhoneNumber();`); err != nil {
			log.Println(err)
		} else {
			log.Println("Successfully create trigger inprogresschange")
		}
	} else {
		log.Println("Trigger inprogresschange exists.")
	}

	//Create handler for logging listener errors
	reportProblem := func(ev pq.ListenerEventType, err error) {
		if err != nil {
			fmt.Println(err.Error())
		}
	}

	//Listen for table updates
	var listenerObj *pq.Listener
	listenerObj = pq.NewListener(os.Getenv("DATABASE_URL"), 10*time.Second, time.Minute, reportProblem)

	err := listenerObj.Listen("notifyphonenumber")
	if err != nil {
		log.Println(err)
	}

	//Find our session PID so we can ignore notifications from ourselves
	var pid int
	fmt.Println("Getting session PID...")
	if rows, err := db.Query(`SELECT * 
		FROM pg_stat_activity 
		WHERE pid = pg_backend_pid();`); err != nil {
		log.Println(err)
	} else {
		var i interface{} //empty interface to read unneeded columns into
		for rows.Next() {
			if err := rows.Scan(&i, &i, &pid, &i, &i, &i, &i, &i, &i, &i, &i, &i, &i, &i, &i, &i, &i, &i); err != nil {
				log.Println(err)
			} else {
				fmt.Println("Session PID:", pid)
			}
		}
	}

	//Monitor for notifications in the background
	go func() {
		for {
			notificationObj := <-listenerObj.Notify
			if notificationObj == nil {
				continue
			}
			fmt.Printf("Backend PID %v\nChannel %v\nPayload %v\n", notificationObj.BePid, notificationObj.Channel, notificationObj.Extra)
			//Get updated row from database if the notifying PID is not this instance's PID
			if pid != notificationObj.BePid {
				if updatedRows := selectRowsFromTableByPhoneNumber("inprogress", notificationObj.Extra); updatedRows == nil {
					log.Println("could not fetch updated rows for", notificationObj.Extra)
				} else {
					//We handle the possibility of deleted rows in loadPickupRowsIntoMemory, since we can only enumerate over the rows object once
					loadPickupRowsIntoMemory(&pickups, updatedRows, notificationObj)
				}
			}
		}
	}()
}
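The pg_stat_activity scan above is only used to find the current session's PID. Postgres exposes that directly through pg_backend_pid(), so a simpler query (a suggestion, not part of the original code) would be:

var pid int
if err := db.QueryRow(`SELECT pg_backend_pid()`).Scan(&pid); err != nil {
	log.Println(err)
} else {
	fmt.Println("Session PID:", pid)
}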