Example #1
File: server.go Project: logan/heim
func Serve(ctx scope.Context, addr string) {
	http.Handle("/metrics", prometheus.Handler())

	listener, err := net.Listen("tcp", addr)
	if err != nil {
		// Don't fall through with a nil listener; signal shutdown and exit.
		ctx.Terminate(err)
		ctx.WaitGroup().Done()
		return
	}

	closed := false
	m := sync.Mutex{}
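	// closeListener may run from both the shutdown watcher goroutine and the
	// normal exit path below, so it is guarded to close the listener only once.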
	closeListener := func() {
		m.Lock()
		if !closed {
			listener.Close()
			closed = true
		}
		m.Unlock()
	}

	// Spin off goroutine to watch ctx and close listener if shutdown requested.
	go func() {
		<-ctx.Done()
		closeListener()
	}()

	if err := http.Serve(listener, nil); err != nil {
		fmt.Printf("http[%s]: %s\n", addr, err)
		ctx.Terminate(err)
	}

	closeListener()
	ctx.WaitGroup().Done()
}
Example #2
File: serve.go Project: logan/heim
func (cmd *serveEmbedCmd) run(ctx scope.Context, args []string) error {
	listener, err := net.Listen("tcp", cmd.addr)
	if err != nil {
		return err
	}

	closed := false
	m := sync.Mutex{}
	closeListener := func() {
		m.Lock()
		if !closed {
			listener.Close()
			closed = true
		}
		m.Unlock()
	}

	// Spin off goroutine to watch ctx and close listener if shutdown requested.
	go func() {
		<-ctx.Done()
		closeListener()
	}()

	if err := http.Serve(listener, cmd); err != nil {
		fmt.Printf("http[%s]: %s\n", cmd.addr, err)
		return err
	}

	closeListener()
	ctx.WaitGroup().Done()
	return ctx.Err()
}
Example #3
File: worker.go Project: logan/heim
func (cmd *workerCmd) run(ctx scope.Context, args []string) error {
	if len(args) < 1 {
		fmt.Printf("Usage: %s\r\n", cmd.usage())
		// TODO: list queues
		return nil
	}

	fmt.Printf("getting config\n")
	cfg, err := getConfig(ctx)
	if err != nil {
		return err
	}

	fmt.Printf("getting heim\n")
	heim, err := cfg.Heim(ctx)
	if err != nil {
		fmt.Printf("heim error: %s\n", err)
		return err
	}

	defer func() {
		ctx.Cancel()
		ctx.WaitGroup().Wait()
	}()

	// Start metrics server.
	fmt.Printf("starting server\n")
	ctx.WaitGroup().Add(1)
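	// worker.Serve is expected to call ctx.WaitGroup().Done() before it returns.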
	go worker.Serve(ctx, cmd.addr)

	// Start scanner.
	return worker.Loop(ctx, heim, cmd.worker, args[0])
}
Example #4
File: serve.go Project: robot0x/heim
func controller(
	ctx scope.Context, addr string, b proto.Backend, kms security.KMS, c cluster.Cluster) error {

	if addr != "" {
		ctrl, err := console.NewController(ctx, addr, b, kms, c)
		if err != nil {
			return err
		}

		if backend.Config.Console.HostKey != "" {
			if err := ctrl.AddHostKey(backend.Config.Console.HostKey); err != nil {
				return err
			}
		} else {
			if err := ctrl.AddHostKeyFromCluster(backend.Config.Cluster.ServerID); err != nil {
				return err
			}
		}

		for _, authKey := range backend.Config.Console.AuthKeys {
			if authKey == "" {
				continue
			}
			if err := ctrl.AddAuthorizedKeys(authKey); err != nil {
				return err
			}
		}

		ctx.WaitGroup().Add(1)
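		// ctrl.Serve runs until shutdown; it is expected to call ctx.WaitGroup().Done() when it exits.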
		go ctrl.Serve()
	}
	return nil
}
Example #5
func (c *Controller) background(ctx scope.Context) {
	defer ctx.WaitGroup().Done()

	var lastStatCheck time.Time
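	// Refresh the queue gauges at most once per StatsInterval, then claim and
	// process a single job; any error ends the loop.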
	for {
		logging.Logger(ctx).Printf("[%s] background loop", c.w.QueueName())
		if time.Since(lastStatCheck) > StatsInterval {
			logging.Logger(ctx).Printf("[%s] collecting stats", c.w.QueueName())
			stats, err := c.jq.Stats(ctx)
			if err != nil {
				logging.Logger(ctx).Printf("error: %s stats: %s", c.w.QueueName(), err)
				return
			}
			lastStatCheck = time.Now()
			labels := map[string]string{"queue": c.w.QueueName()}
			claimedGauge.With(labels).Set(float64(stats.Claimed))
			dueGauge.With(labels).Set(float64(stats.Due))
			waitingGauge.With(labels).Set(float64(stats.Waiting))
		}
		if err := c.processOne(ctx); err != nil {
			// TODO: retry a couple times before giving up
			logging.Logger(ctx).Printf("error: %s: %s", c.w.QueueName(), err)
			return
		}
	}
}
Example #6
File: emails.go Project: logan/heim
func (et *EmailTracker) Send(
	ctx scope.Context, js jobs.JobService, templater *templates.Templater, deliverer emails.Deliverer,
	account proto.Account, to, templateName string, data interface{}) (
	*emails.EmailRef, error) {

	if to == "" {
		to, _ = account.Email()
	}

	sf, err := snowflake.New()
	if err != nil {
		return nil, err
	}
	msgID := fmt.Sprintf("<%s@%s>", sf, deliverer.LocalName())

	ref, err := emails.NewEmail(templater, msgID, to, templateName, data)
	if err != nil {
		return nil, err
	}
	ref.AccountID = account.ID()

	jq, err := js.GetQueue(ctx, jobs.EmailQueue)
	if err != nil {
		return nil, err
	}

	payload := &jobs.EmailJob{
		AccountID: account.ID(),
		EmailID:   ref.ID,
	}
	job, err := jq.AddAndClaim(ctx, jobs.EmailJobType, payload, "immediate", jobs.EmailJobOptions...)
	if err != nil {
		return nil, err
	}

	ref.JobID = job.ID

	et.m.Lock()
	if et.emailsByAccount == nil {
		et.emailsByAccount = map[snowflake.Snowflake][]*emails.EmailRef{}
	}
	et.emailsByAccount[account.ID()] = append(et.emailsByAccount[account.ID()], ref)
	et.m.Unlock()

	child := ctx.Fork()
	child.WaitGroup().Add(1)
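	// The deferred Done inside the job callback below releases this WaitGroup entry.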

	go job.Exec(child, func(ctx scope.Context) error {
		defer ctx.WaitGroup().Done()

		logging.Logger(ctx).Printf("delivering to %s\n", to)
		if err := deliverer.Deliver(ctx, ref); err != nil {
			return err
		}
		return nil
	})

	return ref, nil
}
Example #7
File: loop.go Project: logan/heim
func Loop(ctx scope.Context, heim *proto.Heim, workerName, queueName string) error {
	fmt.Printf("Loop\n")
	ctrl, err := NewController(ctx, heim, workerName, queueName)
	if err != nil {
		fmt.Printf("error: %s\n", err)
		return err
	}

	ctx.WaitGroup().Add(1)
	go ctrl.background(ctx)
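	// Block until background (and anything else registered on this context) calls Done.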
	ctx.WaitGroup().Wait()
	return ctx.Err()
}
Example #8
File: retention.go Project: logan/heim
func (cmd *retentionCmd) run(ctx scope.Context, args []string) error {
	heim, b, err := getHeimWithPsqlBackend(ctx)
	if err != nil {
		return err
	}

	defer func() {
		ctx.Cancel()
		ctx.WaitGroup().Wait()
		heim.Backend.Close()
	}()

	// start metrics server
	ctx.WaitGroup().Add(1)
	go retention.Serve(ctx, cmd.addr)

	// start expiration scanner
	ctx.WaitGroup().Add(1)
	go retention.ExpiredScanLoop(ctx, heim.Cluster, b, cmd.interval)

	// start delete scanner
	ctx.WaitGroup().Add(1)
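	// Note: no `go` here; run blocks until the delete scan loop exits.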
	retention.DeleteScanLoop(ctx, heim.Cluster, b, cmd.interval)

	return nil
}
Example #9
File: retention.go Project: robot0x/heim
func (cmd *retentionCmd) run(ctx scope.Context, args []string) error {
	c, err := getCluster(ctx)
	if err != nil {
		return err
	}

	b, err := getBackend(ctx, c)
	if err != nil {
		return fmt.Errorf("backend error: %s", err)
	}
	defer b.Close()

	defer func() {
		ctx.Cancel()
		ctx.WaitGroup().Wait()
	}()

	// start metrics server
	ctx.WaitGroup().Add(1)
	go retention.Serve(ctx, cmd.addr)

	// start expiration scanner
	ctx.WaitGroup().Add(1)
	go retention.ExpiredScanLoop(ctx, c, b, cmd.interval)

	// start delete scanner
	ctx.WaitGroup().Add(1)
	retention.DeleteScanLoop(ctx, c, b, cmd.interval)

	return nil
}
Example #10
File: activity.go Project: robot0x/heim
func (cmd *activityCmd) run(ctx scope.Context, args []string) error {
	// Get cluster in order to load config.
	_, err := getCluster(ctx)
	if err != nil {
		return fmt.Errorf("cluster error: %s", err)
	}

	listener := pq.NewListener(backend.Config.DB.DSN, 200*time.Millisecond, 5*time.Second, nil)
	if err := listener.Listen("broadcast"); err != nil {
		return fmt.Errorf("pq listen error: %s", err)
	}

	defer func() {
		ctx.Cancel()
		ctx.WaitGroup().Wait()
	}()

	// Start metrics server.
	ctx.WaitGroup().Add(1)
	go activity.Serve(ctx, cmd.addr)

	// Start scanner.
	ctx.WaitGroup().Add(1)
	activity.ScanLoop(ctx, listener)

	return nil
}
Example #11
File: presence.go Project: robot0x/heim
func (cmd *presenceCmd) run(ctx scope.Context, args []string) error {
	c, err := getCluster(ctx)
	if err != nil {
		return err
	}

	b, err := getBackend(ctx, c)
	if err != nil {
		return fmt.Errorf("backend error: %s", err)
	}
	defer b.Close()

	defer func() {
		ctx.Cancel()
		ctx.WaitGroup().Wait()
	}()

	// Start metrics server.
	ctx.WaitGroup().Add(1)
	go presence.Serve(ctx, cmd.addr)

	// Start scanner.
	ctx.WaitGroup().Add(1)
	presence.ScanLoop(ctx, c, b, cmd.interval)

	return nil
}
Example #12
File: server.go Project: logan/heim
func (ctrl *Controller) interact(ctx scope.Context, conn net.Conn) {
	defer ctx.WaitGroup().Done()

	_, nchs, reqs, err := ssh.NewServerConn(conn, ctrl.config)
	if err != nil {
		return
	}

	go ssh.DiscardRequests(reqs)

	for nch := range nchs {
		if nch.ChannelType() != "session" {
			nch.Reject(ssh.UnknownChannelType, "unknown channel type")
			continue
		}
		ch, reqs, err := nch.Accept()
		if err != nil {
			return
		}
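		// Handle the session concurrently so further channels can be accepted.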
		go ctrl.filterClientRequests(reqs)
		go ctrl.terminal(ctx, ch)
	}
}
Example #13
File: scanner.go Project: logan/heim
func ScanLoop(ctx scope.Context, c cluster.Cluster, pb *psql.Backend, interval time.Duration) {
	defer ctx.WaitGroup().Done()

	errCount := 0
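	// Scan once per interval until the context is cancelled; terminate once
	// consecutive errors exceed maxErrors.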
	for {
		t := time.After(interval)
		select {
		case <-ctx.Done():
			return
		case <-t:
			if err := scan(ctx.Fork(), c, pb); err != nil {
				errCount++
				fmt.Printf("scan error [%d/%d]: %s", errCount, maxErrors, err)
				if errCount > maxErrors {
					fmt.Printf("maximum scan errors exceeded, terminating\n")
					ctx.Terminate(fmt.Errorf("maximum scan errors exceeded"))
					return
				}
				continue
			}
			errCount = 0
		}
	}
}
Example #14
File: scanner.go Project: logan/heim
func ScanLoop(ctx scope.Context, listener *pq.Listener) {
	defer ctx.WaitGroup().Done()

	logger := logging.Logger(ctx)
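	// Consume pq notifications until the context is cancelled, incrementing a
	// per-room counter for each broadcast event type.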
	for {
		select {
		case <-ctx.Done():
			logger.Printf("received cancellation signal, shutting down")
			return
		case notice := <-listener.Notify:
			if notice == nil {
				logger.Printf("received nil from listener")
				continue
			}

			var msg psql.BroadcastMessage

			if err := json.Unmarshal([]byte(notice.Extra), &msg); err != nil {
				logger.Printf("error: pq listen: invalid broadcast: %s", err)
				logger.Printf("         payload: %#v", notice.Extra)
				continue
			}

			switch msg.Event.Type {
			case proto.BounceEventType:
				bounceActivity.WithLabelValues(msg.Room).Inc()
			case proto.JoinEventType:
				joinActivity.WithLabelValues(msg.Room).Inc()
			case proto.PartEventType:
				partActivity.WithLabelValues(msg.Room).Inc()
			case proto.SendEventType:
				messageActivity.WithLabelValues(msg.Room).Inc()
			}
		}
	}
}
Example #15
File: presence.go Project: logan/heim
func (cmd *presenceCmd) run(ctx scope.Context, args []string) error {
	heim, b, err := getHeimWithPsqlBackend(ctx)
	if err != nil {
		return err
	}

	defer func() {
		ctx.Cancel()
		ctx.WaitGroup().Wait()
		heim.Backend.Close()
	}()

	// Start metrics server.
	ctx.WaitGroup().Add(1)
	go presence.Serve(ctx, cmd.addr)

	// Start scanner.
	ctx.WaitGroup().Add(1)
	presence.ScanLoop(ctx, heim.Cluster, b, cmd.interval)

	return nil
}
Example #16
func (et *EmailTracker) Send(
	ctx scope.Context, js jobs.JobService, templater *templates.Templater, deliverer emails.Deliverer,
	account proto.Account, templateName string, data interface{}) (
	*emails.EmailRef, error) {

	// choose a Message-ID
	sf, err := snowflake.New()
	if err != nil {
		return nil, err
	}
	domain := "heim"
	if deliverer != nil {
		domain = deliverer.LocalName()
	}
	msgID := fmt.Sprintf("<%s@%s>", sf, domain)

	// choose an address to send to
	to := ""
	/*
	   requireVerifiedAddress := true
	   switch templateName {
	   case proto.WelcomeEmail, proto.RoomInvitationWelcomeEmail, proto.PasswordResetEmail:
	       requireVerifiedAddress = false
	   }
	*/
	for _, pid := range account.PersonalIdentities() {
		if pid.Namespace() == "email" {
			/*
			   if !pid.Verified() && requireVerifiedAddress {
			       continue
			   }
			*/
			to = pid.ID()
			break
		}
	}
	if to == "" {
		fmt.Printf("no email address to deliver to\n")
		return nil, fmt.Errorf("account has no email address to deliver %s to", templateName)
	}

	// construct the email
	ref, err := emails.NewEmail(templater, msgID, to, templateName, data)
	if err != nil {
		return nil, err
	}
	ref.AccountID = account.ID()

	// get underlying JobQueue so we can add-and-claim in the same transaction as the email insert
	abstractQueue, err := js.GetQueue(ctx, jobs.EmailQueue)
	if err != nil {
		return nil, err
	}
	jq := abstractQueue.(*JobQueueBinding)

	t, err := et.Backend.DbMap.Begin()
	if err != nil {
		return nil, err
	}

	// insert job first, so we know what JobID to associate with the email when we insert it
	payload := &jobs.EmailJob{
		AccountID: account.ID(),
		EmailID:   ref.ID,
	}
	job, err := jq.addAndClaim(ctx, t, jobs.EmailJobType, payload, "immediate", jobs.EmailJobOptions...)
	if err != nil {
		rollback(ctx, t)
		return nil, err
	}
	ref.JobID = job.ID

	// insert the email
	var email Email
	email.FromBackend(ref)
	if err := t.Insert(&email); err != nil {
		rollback(ctx, t)
		return nil, err
	}

	// finalize and spin off first delivery attempt
	if err := t.Commit(); err != nil {
		return nil, err
	}

	child := ctx.Fork()
	child.WaitGroup().Add(1)
	go job.Exec(child, func(ctx scope.Context) error {
		defer ctx.WaitGroup().Done()

		logging.Logger(ctx).Printf("delivering to %s\n", to)
		if deliverer == nil {
			return fmt.Errorf("deliverer not configured")
		}
		if err := deliverer.Deliver(ctx, ref); err != nil {
			return err
		}
		if _, err := et.Backend.DbMap.Exec("UPDATE email SET delivered = $2 WHERE id = $1", ref.ID, ref.Delivered); err != nil {
			// Even if we fail to mark the email as delivered, don't return an
			// error so the job still gets completed. We wouldn't want to spam
			// someone just because of a DB issue.
			logging.Logger(ctx).Printf("error marking email %s/%s as delivered: %s", account.ID(), ref.ID, err)
		}
		return nil
	})

	return ref, nil
}
Example #17
func (et *EmailTracker) Send(
	ctx scope.Context, js jobs.JobService, templater *templates.Templater, deliverer emails.Deliverer,
	account proto.Account, templateName string, data interface{}) (
	*emails.EmailRef, error) {

	sf, err := snowflake.New()
	if err != nil {
		return nil, err
	}
	msgID := fmt.Sprintf("<%s@%s>", sf, deliverer.LocalName())

	to := ""
	/*
		requireVerifiedAddress := true
		switch templateName {
		case proto.WelcomeEmail, proto.RoomInvitationWelcomeEmail, proto.PasswordResetEmail:
			requireVerifiedAddress = false
		}
	*/
	for _, pid := range account.PersonalIdentities() {
		if pid.Namespace() == "email" {
			/*
				if !pid.Verified() && requireVerifiedAddress {
					continue
				}
			*/
			to = pid.ID()
			break
		}
	}
	if to == "" {
		fmt.Printf("no email address to deliver to\n")
		return nil, fmt.Errorf("account has no email address to deliver %s to", templateName)
	}

	ref, err := emails.NewEmail(templater, msgID, to, templateName, data)
	if err != nil {
		return nil, err
	}
	ref.AccountID = account.ID()

	jq, err := js.GetQueue(ctx, jobs.EmailQueue)
	if err != nil {
		return nil, err
	}

	payload := &jobs.EmailJob{
		AccountID: account.ID(),
		EmailID:   ref.ID,
	}
	job, err := jq.AddAndClaim(ctx, jobs.EmailJobType, payload, "immediate", jobs.EmailJobOptions...)
	if err != nil {
		return nil, err
	}

	ref.JobID = job.ID

	et.m.Lock()
	if et.emailsByAccount == nil {
		et.emailsByAccount = map[snowflake.Snowflake][]*emails.EmailRef{}
	}
	et.emailsByAccount[account.ID()] = append(et.emailsByAccount[account.ID()], ref)
	et.m.Unlock()

	child := ctx.Fork()
	child.WaitGroup().Add(1)

	go job.Exec(child, func(ctx scope.Context) error {
		defer ctx.WaitGroup().Done()

		logging.Logger(ctx).Printf("delivering to %s\n", to)
		if err := deliverer.Deliver(ctx, ref); err != nil {
			return err
		}
		return nil
	})

	return ref, nil
}