Example #1
func Update() {
	if _, err := database.Exec(`
		INSERT OR IGNORE INTO comparison (code) SELECT code FROM new;
		DELETE FROM new;
		VACUUM;
	`); err != nil {
		fmt.Println("데이터를 정리할 수 없습니다.")
		fmt.Println(err)
		return
	}

	stmt, err := database.Prepare("INSERT INTO comparison (code) VALUES (?)")
	if err != nil {
		fmt.Println(err)
		return
	}

	http.Handle("/", engine(UpdateIndexHandler))

	codec := CodePool()
	notifier := shutdown.First()

	for {
		select {
		case code := <-codec:
			database.Lock()
			stmt.Exec(string(code))
			database.Unlock()
		case n := <-notifier:
			stmt.Close()
			close(n)
			return
		}
	}
}
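
The select loop above follows the acknowledgement contract of the shutdown notifier: shutdown.First() registers the goroutine for the first shutdown stage, and the channel n received from it must be closed once cleanup is finished so the shutdown manager can continue. A minimal, self-contained sketch of that contract, assuming the notifier comes from github.com/klauspost/shutdown (the import path and the OnSignal call are assumptions, not shown in the example):

package main

import (
	"log"
	"os"
	"time"

	"github.com/klauspost/shutdown"
)

func worker() {
	notifier := shutdown.First()
	for {
		select {
		case <-time.After(time.Second):
			log.Println("working")
		case n := <-notifier:
			// First shutdown stage has begun: finish up, then close n
			// to tell the shutdown manager we are done.
			log.Println("cleaning up")
			close(n)
			return
		}
	}
}

func main() {
	// Assumed helper: perform shutdown and exit with code 0 on interrupt.
	shutdown.OnSignal(0, os.Interrupt)
	go worker()
	select {} // block; the shutdown package exits the process
}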
Example #2
func Bloat() {
	stmt, err := database.Prepare("INSERT INTO new (code) VALUES (?)")
	if err != nil {
		fmt.Println(err)
		return
	}

	http.Handle("/", engine(BloatHandler))
	http.Handle("/card", engine(CardHandler))

	codec := CodePool()
	notifier := shutdown.First()

	for {
		select {
		case code := <-codec:
			database.Lock()
			stmt.Exec(string(code))
			database.Unlock()
		case n := <-notifier:
			stmt.Close()
			close(n)
			return
		}
	}
}
Example #3
// NewServer will read the supplied config file,
// and return a new server.
// A file watcher will be set up to monitor the
// configuration file and reload settings if changes
// are detected.
func NewServer(config string) (*Server, error) {
	s := &Server{handler: NewReverseProxy()}
	err := s.ReadConfig(config, true)
	if err != nil {
		return nil, err
	}

	// Add config file watcher/reloader.
	if s.Config.WatchConfig {
		watcher, err := fsnotify.NewWatcher()
		if err != nil {
			return nil, err
		}
		err = watcher.Add(config)
		if err != nil {
			return nil, err
		}
		log.Println("Watching", config)
		// We want the watcher to exit in the first stage.
		go func() {
			// Get a first stage shutdown notification
			exit := shutdown.First()
			for {
				select {
				// Event on config file.
				case event := <-watcher.Events:
					switch event.Op {
					// Editor may do rename -> write -> delete, so we should not follow
					// the old file
					case fsnotify.Rename:
						watcher.Remove(event.Name)
						watcher.Add(config)
					case fsnotify.Remove:
						continue
					}
					log.Println("Reloading configuration")
					err := s.ReadConfig(event.Name, false)
					if err != nil {
						log.Println("Error reloading configuration:", err)
						log.Println("Configuration NOT applied")
					} else {
						log.Println("Configuration applied")
					}

					// Server is shutting down
				case n := <-exit:
					watcher.Remove(config)
					close(n)
					return
				}
			}
		}()
	}
	return s, nil
}
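
A brief usage sketch for NewServer; the configuration path and the caller are hypothetical, not part of the example. No explicit teardown is needed for the watcher goroutine: when the first shutdown stage starts it removes the watch on the config file and closes its notifier.

// Hypothetical caller (the config path is for illustration only).
srv, err := NewServer("reverseproxy.toml")
if err != nil {
	log.Fatalln("unable to create server:", err)
}
// Serve requests with srv here; the config watcher reloads settings on
// changes and shuts itself down during the first shutdown stage.
_ = srv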
Example #4
// startMonitor will monitor stats of the backend.
// It will at times require BOTH the rt and Stats mutexes.
// This means that no other goroutine should acquire
// both at the same time.
func (b *backend) startMonitor() {
	s := b.rt
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	exit := shutdown.First()
	end := b.closeMonitor
	previous := time.Now()

	for {
		select {
		case <-ticker.C:
			elapsed := time.Since(previous)
			previous = time.Now()
			s.mu.Lock()
			b.Stats.mu.Lock()
			if s.requests == 0 {
				b.Stats.Latency.Add(0)
				b.Stats.FailureRate.Add(0)
			} else {
				b.Stats.Latency.Add(float64(s.latencySum) / float64(elapsed) / float64(s.requests))
				b.Stats.FailureRate.Add(float64(s.errors) / float64(s.requests))
			}
			s.requests = 0
			s.errors = 0
			s.latencySum = 0
			s.mu.Unlock()

			// Perform health check
			b.healthCheck()

			if b.Stats.Healthy && b.Stats.healthFailures > 5 {
				log.Println("5 Consequtive health tests failed. Marking as unhealty.")
				b.Stats.Healthy = false
			}
			if !b.Stats.Healthy && b.Stats.healthFailures == 0 {
				log.Println("Health check succeeded. Marking as healty")
				b.Stats.Healthy = true
			}
			b.Stats.mu.Unlock()
		case n := <-end:
			exit.Cancel()
			close(n)
			return
		case n := <-exit:
			close(n)
			return
		}
	}
}
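
The end case above implies a stop handshake on b.closeMonitor: the caller sends a channel and waits for it to be closed. A sketch of that counterpart, assuming closeMonitor has type chan chan struct{} (stopMonitor itself is hypothetical and not part of the example):

// stopMonitor asks the monitor goroutine to stop and waits for it to
// acknowledge by closing the supplied channel.
func (b *backend) stopMonitor() {
	done := make(chan struct{})
	b.closeMonitor <- done // handled by the `end` case in startMonitor
	<-done                 // startMonitor closes this channel before returning
}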
Example #5
// MonitorInventory will monitor the inventory file
// and reload the inventory if changes are detected.
// The monitor can be shut down by sending a channel on
// (Server).exitMonInv. The monitor will exit and close
// the supplied channel.
func (s *Server) MonitorInventory() error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	file := s.Config.InventoryFile
	err = watcher.Add(file)
	if err != nil {
		return err
	}

	// Create channel to stop monitoring
	stop := make(chan chan struct{})
	s.exitMonInv = stop

	log.Println("Watching", file)
	// We want the watcher to exit in the first stage.
	go func() {
		// Get a first stage shutdown notification
		exit := shutdown.First()
		for {
			select {
			// Event on config file.
			case event := <-watcher.Events:
				switch event.Op {
				// Editor may do rename -> write -> delete, so we should not follow
				// the old file
				case fsnotify.Rename:
					watcher.Remove(event.Name)
					watcher.Add(file)
				case fsnotify.Remove:
					continue
				}
				log.Println("Reloading inventory")
				s.mu.RLock()
				bec := s.Config.Backend
				s.mu.RUnlock()

				inv, err := ReadInventory(event.Name, bec)
				if err != nil {
					log.Println("Error reloading inventory:", err)
					log.Println("New inventory NOT applied")
					continue
				}

				// Update the load balancer
				s.mu.RLock()
				lb, err := NewLoadBalancer(s.Config.LoadBalancing, inv)
				if err != nil {
					log.Println(err)
					log.Println("New inventory NOT applied")
					s.mu.RUnlock()
					continue
				}
				s.handler.SetBackends(lb)
				s.mu.RUnlock()

				log.Println("New inventory applied")
			// Server is shutting down
			case n := <-exit:
				log.Println("Monitor exiting")
				watcher.Remove(file)
				close(n)
				return
				// Monitor must stop
			case n := <-stop:
				exit.Cancel()
				watcher.Remove(file)
				close(n)
				log.Println("No longer watching", file)
				return
			}
		}
	}()
	return nil
}
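
The doc comment above spells out the stop contract: send a channel on (Server).exitMonInv and wait for the monitor to close it. A small sketch of a caller doing exactly that (the StopMonitorInventory name is hypothetical; exitMonInv has type chan chan struct{} as created in the example):

// StopMonitorInventory stops the inventory monitor started by
// MonitorInventory and waits until it has acknowledged.
func (s *Server) StopMonitorInventory() {
	if s.exitMonInv == nil {
		return
	}
	done := make(chan struct{})
	s.exitMonInv <- done // received by the `stop` case in the monitor goroutine
	<-done               // the monitor closes the channel after removing the watch
	s.exitMonInv = nil
}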