Exemplo n.º 1
0
// DefaultConfig returns the default configuration map for the SQL key
// backend: the slash-joined list of registered database/sql drivers, a
// sample Postgres connection string, and the key-lookup query template.
func (sql *SQLKeyBackend) DefaultConfig() map[string]interface{} {
	defaults := make(map[string]interface{}, 3)
	defaults["driver"] = strings.Join(gosql.Drivers(), "/")
	defaults["connection_string"] = "host=localhost port=5432 user=apiplexy password=apiplexy dbname=apiplexy"
	defaults["query"] = "SELECT key_id, realm, quota_name, json_data FROM table WHERE id = :key_id AND type = :key_type"
	return defaults
}
Exemplo n.º 2
0
// DefaultConfig returns the default configuration map for the SQL DB
// backend: the slash-joined list of registered database/sql drivers, a
// sample Postgres connection string, and table creation disabled.
func (sql *SQLDBBackend) DefaultConfig() map[string]interface{} {
	defaults := make(map[string]interface{}, 3)
	defaults["driver"] = strings.Join(gosql.Drivers(), "/")
	defaults["connection_string"] = "host=localhost port=5432 user=apiplexy password=apiplexy dbname=apiplexy"
	defaults["create_tables"] = false
	return defaults
}
Exemplo n.º 3
0
// driverRegistered reports whether a database/sql driver named e has
// been registered via sql.Register.
func driverRegistered(e string) bool {
	for _, name := range sql.Drivers() {
		if name == e {
			return true
		}
	}
	return false
}
Exemplo n.º 4
0
// TestMain configures the package-level plugin against an in-memory
// sqlite database before running the suite, exiting non-zero when the
// backend cannot be initialized.
func TestMain(m *testing.M) {
	plugin = apiplexy.ManagementBackendPlugin(&SQLDBBackend{})
	config := map[string]interface{}{
		"driver":            "sqlite3",
		"connection_string": ":memory:",
		"create_tables":     true,
	}
	if err := plugin.Configure(config); err != nil {
		fmt.Printf("Couldn't initialize in-memory sqlite DB for testing. %s\n", err.Error())
		fmt.Printf("Available drivers: %s\n", strings.Join(sql.Drivers(), ", "))
		os.Exit(1)
	}
	os.Exit(m.Run())
}
Exemplo n.º 5
0
Arquivo: pg.go Projeto: xtudouh/web
// Init opens the global xorm Engine using the first registered
// database/sql driver and the DSN from the [database] configuration
// section, then applies connection-pool limits and the log adapter.
// It panics when no driver is registered or the engine cannot be
// created, since the application cannot run without a database.
func Init() {
	drivers := sql.Drivers()
	if len(drivers) == 0 {
		// Fail with a clear message instead of the opaque
		// index-out-of-range panic sql.Drivers()[0] would produce.
		panic("database: no database/sql drivers registered")
	}
	// NOTE(review): picking drivers[0] depends on driver registration
	// order; with more than one driver linked in, the choice is
	// arbitrary — confirm only one driver is imported by this binary.
	var err error
	Engine, err = xorm.NewEngine(drivers[0], conf.String("database", "DSN"))
	if err != nil {
		panic(err)
	}
	// Echo SQL statements in every environment except release builds.
	if conf.ENV != "release" {
		Engine.ShowSQL = true
	}
	Engine.SetMaxOpenConns(conf.Int("database", "MAX_CONNECTION", 10))
	Engine.SetMaxIdleConns(conf.Int("database", "MAX_IDLE_CONNECTION", 50))
	Engine.SetLogger(&logAdapter{})
}
Exemplo n.º 6
0
// init registers a ":trace" variant of every database/sql driver,
// wrapping the original driver in a logging proxy that routes each
// statement through the package logger via log.Println.
func init() {
	logger = new(Logger)
	logger.output = func(n int, out string) error {
		log.Println(out)
		return nil
	}
	for _, driver := range sql.Drivers() {
		// Skip drivers that are already trace wrappers.
		if strings.Contains(driver, ":trace") {
			continue
		}
		// Open a throwaway handle purely to obtain the underlying
		// driver.Driver; sql.Open does not connect until first use.
		db, err := sql.Open(driver, "")
		if err != nil {
			// The original ignored this error; skip unusable drivers
			// rather than registering a proxy around a nil handle.
			log.Println("trace: cannot open driver", driver, err)
			continue
		}
		sql.Register(driver+":trace", proxy.NewTraceProxy(db.Driver(), logger))
		// Close immediately instead of defer-in-loop, which would hold
		// every handle open until init returns.
		db.Close()
	}
}
Exemplo n.º 7
0
// TestMysql prints the registered database/sql drivers so a test run
// shows which drivers are linked into the binary.
func TestMysql(t *testing.T) {
	drivers := sql.Drivers()
	fmt.Println(drivers)
}
Exemplo n.º 8
0
// server is the flowd daemon entry point.  It starts the daily-rolled
// info log, installs roll-time statistics burping, writes
// run/flowd.pid, logs the runtime environment, opens configured
// databases and prepares their statements, opens the fdr/xdr/qdr
// detail-record logs, spawns the os-exec and flow workers, then loops
// publishing heartbeat and memstat summaries until every flow worker
// has exited.
func (conf *config) server(par *parse) {

	start_time := Now()
	//  start a rolled logger to flowd-Dow.log, rolled daily
	info_log_ch := make(file_byte_chan)

	roll_start, roll_end := info_log_ch.roll_Dow(
		Sprintf("%s/flowd", conf.log_directory), "log", true)
	roll_when_start := "today"
	roll_when_end := "yesterday"
	boot_sample := flow_worker_sample{}

	//  accullate per roll statistics on roll_sample channel.
	//  burp those stats into end of closing log file and
	//  start of new log file.

	roll_sample := make(chan flow_worker_sample)
	go func() {
		today_sample := flow_worker_sample{}

		//  format one timestamped log entry for a roll boundary.
		roll_entry := func(format string, args ...interface{}) []byte {
			return []byte(Sprintf("%s: %s\n",
				Now().Format("2006/01/02 15:04:05"),
				Sprintf(format, args...),
			))
		}

		for {
			var entries [5][]byte

			//  assemble final and initial log entries for both
			//  old and new log files.

			roll_entries := func(when, old, new string) [][]byte {

				entries[0] = roll_entry(
					"%s: %s",
					when,
					today_sample.String(),
				)
				entries[1] = roll_entry(
					"%s: %s",
					"boot",
					boot_sample.String(),
				)
				z, off := Now().Zone()
				entries[2] = roll_entry(
					"uptime: %s, time (zone=%s offset=%d)",
					Since(start_time),
					z, off)
				tense := ""
				if when == roll_when_end {
					tense = "ed"
				}
				entries[3] = roll_entry(
					"roll%s %s -> %s",
					tense,
					old,
					new,
				)
				entries[4] = roll_entry("go version: %s",
					runtime.Version())

				return entries[:]
			}

			select {

			//  rolling to new log file.  entries at end of old
			//  log file.

			case fr := <-roll_start:
				if fr == nil {
					return
				}
				fr.entries = roll_entries(
					roll_when_start,
					fr.open_path,
					fr.roll_path)
				roll_start <- fr

			//  closed previous log file, entries at begining of
			//  new log file

			case fr := <-roll_end:
				if fr == nil {
					return
				}
				fr.entries = roll_entries(
					roll_when_end,
					fr.roll_path,
					fr.open_path)

				//  make last entry in previous same as first
				//  entry in new current.
				e4 := fr.entries[3]
				fr.entries[3] = fr.entries[2]
				fr.entries[2] = fr.entries[1]
				fr.entries[1] = fr.entries[0]
				fr.entries[0] = e4

				roll_end <- fr
				today_sample = flow_worker_sample{}

			// update daily roll stats in between rolls

			case sam := <-roll_sample:
				today_sample.fdr_count++
				today_sample.wall_duration += sam.wall_duration
				today_sample.ok_count += sam.ok_count
				today_sample.fault_count += sam.fault_count
			}
		}
	}()

	info := info_log_ch.info
	WARN := info_log_ch.WARN

	//  leave removes the pid file, says goodbye, and exits with status.
	leave := func(status int) {

		//  NOTE(review): unlink is invoked on a nil *file receiver —
		//  presumably the project's file type tolerates nil; confirm.
		var f *file
		f.unlink("run/flowd.pid")

		info("good bye, cruel world")
		Sleep(Second)
		os.Exit(status)
	}

	defer func() {
		if r := recover(); r != nil {
			info_log_ch.ERROR("panic: %s", r)
			leave(1)
		}
	}()

	go func() {
		//  signal.Notify requires a buffered channel: with an
		//  unbuffered one a SIGTERM delivered before the receive
		//  below would be dropped (go vet flags this).
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGTERM)
		s := <-c
		server_leaving = true
		info("caught signal: %s", s)
		leave(1)
	}()

	info("hello, world")

	//  create run/flowd.pid file
	{
		pid := os.Getpid()

		pid_path := "run/flowd.pid"

		_, err := os.OpenFile(pid_path, os.O_RDONLY, 0)
		if err == nil {
			WARN("process id file exists: %s", pid_path)
			//  Note: write the pid of the previous process
			WARN("another flowd process may be running")
		} else if !os.IsNotExist(err) {
			panic(err)
		}

		f, err := os.Create(pid_path)
		if err != nil {
			panic(err)
		}
		_, err = f.WriteString(Sprintf("%d\n", pid))
		if err != nil {
			panic(err)
		}
		err = f.Close()
		if err != nil {
			panic(err)
		}
		info("process id %d written to file: %s", pid, pid_path)
	}
	info("go version: %s", runtime.Version())
	info("number of cpus: %d", runtime.NumCPU())
	info("GOROOT(): %s", runtime.GOROOT())
	info("GOMAXPROCS(0): %d", runtime.GOMAXPROCS(0))
	info("heartbeat duration: %s", conf.heartbeat_duration)
	info("memstats duration: %s", conf.memstats_duration)

	//  list environment variables
	{
		env := os.Environ()
		info("listing %d environment variables ...", len(env))
		sort.Strings(env)

		for _, n := range env {
			info("	%s", n)
		}
	}

	info("enumerated dependency order: %d commands/queries/tail",
		len(par.depend_order))
	for i, n := range par.depend_order {
		info("	% 3d: %s", i, n)
	}

	info("looking up paths for %d commands", len(conf.command))
	for _, cmd := range conf.command {
		fp, err := exec.LookPath(cmd.path)

		//  should not abort if the file does not exist in the path
		if err != nil {
			panic(err)
		}
		cmd.full_path = fp
		info("	%s -> %s", cmd.path, cmd.full_path)
	}

	info("os exec capacity: %d", conf.os_exec_capacity)
	osx_q := make(os_exec_chan, conf.os_exec_capacity)

	info("spawning %d os exec workers", conf.os_exec_worker_count)
	for i := uint16(0); i < conf.os_exec_worker_count; i++ {
		go osx_q.worker_flowd_execv()
	}

	info("opening brr tail %s (cap=%d) for %s biod.brr",
		conf.tail.name, conf.brr_capacity, conf.tail.path)
	tail := &tail{
		name:            conf.tail.name,
		path:            conf.tail.path,
		output_capacity: conf.brr_capacity,
	}
	brr_chan := tail.forever()

	//  installed database drivers ...
	{
		d := sql.Drivers()
		info("%d installed database/sql drivers", len(d))
		for _, n := range d {
			info("	%s", n)
		}
	}

	//  open databases
	if len(conf.sql_database) > 0 {
		info("opening %d databases", len(conf.sql_database))
		for _, db := range conf.sql_database {
			var err error

			n := Sprintf("%s/%s",
				db.name,
				db.driver_name,
			)
			if db.data_source_name == "" {
				//  bug fix: the %s verb previously had no
				//  matching argument (vet: missing arg).
				info("	%s: no data source, so using default", n)
			} else {
				info("	%s: data source name: %s",
					n, db.data_source_name)
			}
			db.opendb, err = sql.Open(
				db.driver_name,
				db.data_source_name,
			)
			if err != nil {
				panic(Sprintf("%s: %s", db.name, err))
			}
			defer db.opendb.Close()

			info("	%s: max idle connections: %d",
				n, db.max_idle_conns)
			db.opendb.SetMaxIdleConns(db.max_idle_conns)

			info("	%s: max open connections: %d",
				n, db.max_open_conns)
			db.opendb.SetMaxOpenConns(db.max_open_conns)
		}
	} else {
		info("no sql databases defined (ok): %s", conf.path)
	}

	info("preparing %d sql query row declarations", len(conf.sql_query_row))
	for _, q := range conf.sql_query_row {
		var err error

		info("	%s", q.name)
		q.stmt, err = q.sql_database.opendb.Prepare(q.statement)
		if err != nil {
			panic(Sprintf("%s: %s", q.name, err))
		}
		defer q.stmt.Close()
	}

	info("preparing %d sql exec declarations", len(conf.sql_exec))
	for _, ex := range conf.sql_exec {
		var err error

		ex.stmt = make([]*sql.Stmt, len(ex.statement))
		info("	%s (%d statements)", ex.name, len(ex.statement))
		for i, s := range ex.statement {
			info("		#%d: %s",
				i,
				TrimRight((TrimSpace(s))[:20], "\n"),
			)
			ex.stmt[i], err = ex.sql_database.opendb.Prepare(s)
			if err != nil {
				panic(Sprintf("%s: %s", ex.name, err))
			}
			defer ex.stmt[i].Close()
		}
	}

	//  flow detail record log file

	info("fdr file roll duration: %s", conf.fdr_roll_duration)
	path := Sprintf("%s/flowd", conf.log_directory)
	info("opening fdr log file: %s.fdr", path)
	fdr_log_chan := make(file_byte_chan)
	fdr_log_chan.roll_epoch(path, "fdr", conf.fdr_roll_duration, false)

	//  execution detail record log file

	info("xdr file roll duration: %s", conf.xdr_roll_duration)
	path = Sprintf("%s/flowd", conf.log_directory)
	info("opening xdr log file: %s.xdr", path)
	xdr_log_chan := make(file_byte_chan)
	xdr_log_chan.roll_epoch(path, "xdr", conf.xdr_roll_duration, false)

	//  query detail log file

	info("qdr file roll duration: %s", conf.qdr_roll_duration)
	path = Sprintf("%s/flowd", conf.log_directory)
	info("opening qdr log file: %s.qdr", path)
	qdr_log_chan := make(file_byte_chan)
	qdr_log_chan.roll_epoch(path, "qdr", conf.qdr_roll_duration, false)

	//  start a sequence channel for the fdr records

	seq_q := make(chan uint64, conf.brr_capacity)
	go func() {
		seq := uint64(1)
		for {
			seq_q <- seq
			seq++
		}
	}()

	info("spawning %d flow workers", conf.flow_worker_count)
	flow_sample_ch := make(chan flow_worker_sample, conf.brr_capacity)
	for i := uint16(1); i <= conf.flow_worker_count; i++ {
		go (&flow_worker{
			id: uint16(<-seq_q),

			parse: par,

			brr_chan:         brr_chan,
			os_exec_chan:     osx_q,
			fdr_log_chan:     fdr_log_chan,
			xdr_log_chan:     xdr_log_chan,
			qdr_log_chan:     qdr_log_chan,
			info_log_chan:    info_log_ch,
			flow_sample_chan: flow_sample_ch,

			seq_chan: seq_q,
		}).flow()
	}

	//  monitor the incremental samples sent by the workers
	//  and periodically publish the stats

	info("starting logger for incremental stats")
	sample := flow_worker_sample{}

	//  stat burped on every heart beat
	active_count := conf.flow_worker_count
	worker_stats := make([]int, conf.flow_worker_count)

	heartbeat := NewTicker(conf.heartbeat_duration)
	hb := float64(conf.heartbeat_duration) / float64(Second)

	memstat_tick := NewTicker(conf.memstats_duration)

	info("flow starvation threshold: %d", flow_starvation_how_busy)
	//  wait for all workers to finish
	for active_count > 0 {
		select {

		//  fdr sample from flow worker
		case sam := <-flow_sample_ch:
			//  a negative worker_id signals that worker's exit
			if sam.worker_id < 0 {
				info("worker #%d exited", -sam.worker_id)
				active_count--
				break
			}
			sample.fdr_count++
			sample.ok_count += sam.ok_count
			sample.fault_count += sam.fault_count
			sample.wall_duration += sam.wall_duration
			worker_stats[sam.worker_id-1]++

			//  update roll level sample stats
			roll_sample <- sam

		//  burp out stats every heartbeat

		case <-heartbeat.C:

			//  dump open database count

			for n, sql := range conf.sql_database {
				db := sql.opendb
				msg := "no open connections"
				if db != nil {
					tense := "s"

					oc := db.Stats().OpenConnections
					if oc == 1 {
						tense = ""
					}
					msg = Sprintf("%d open connection%s",
						oc,
						tense,
					)
				}
				info("sql database: %s: %s", n, msg)
			}

			bl := len(brr_chan)

			sfc := sample.fdr_count
			switch {

			//  no completed flows seen, no blob requests in queue
			case sfc == 0 && bl == 0:
				info("blob requests in queue: 0")
				continue

			//  no completed flows seen, but unresolved exist
			case sfc == 0:
				WARN("no fdr samples seen in %.0f sec", hb)
				WARN("all jobs may be running > %.0f sec", hb)
				WARN("perhaps increase heartbeat duration")
				continue
			}
			info("%s sample: %s", conf.heartbeat_duration, sample)

			//  help debug starvation of flow thread

			info("summary of %d active workers ...", active_count)
			idle_count := active_count
			max_busy := 0
			for i, c := range worker_stats {
				if c > 0 {
					if c > max_busy {
						max_busy = c
					}
					info("	#%d: %d flows", i+1, c)
					worker_stats[i] = 0
					idle_count--
				} else {
					info("	#%d: idle", i+1)
				}
			}

			//  check that all flow workers are busy.
			//  warn about existence of idle workers when at
			//  least one worker appears to be be "busy".
			//  Note: wouldn't standard deviation be better measure?

			if idle_count == 0 {
				info("all %d flow workers busy", active_count)
			} else if max_busy >= flow_starvation_how_busy {
				info_log_ch.WARN("%d flow workers stagnant",
					idle_count)
			}

			//  update accumulated totals
			boot_sample.fdr_count += sfc
			boot_sample.ok_count += sample.ok_count
			boot_sample.fault_count += sample.fault_count
			boot_sample.wall_duration += sample.wall_duration

			sample = flow_worker_sample{}

			info("boot: %s", boot_sample)
			info("blob requests in queue: %d", bl)
		case <-memstat_tick.C:
			var m runtime.MemStats

			now := Now()
			runtime.ReadMemStats(&m)
			info("%s MemStat for %d routines, read in %s",
				conf.memstats_duration,
				runtime.NumGoroutine(),
				Since(now),
			)
			info("--")
			info("        Allocated in Use: %d bytes", m.Alloc)
			info("         Total Allocated: %d bytes", m.TotalAlloc)
			info("             From System: %d bytes", m.Sys)
			info("         Pointer Lookups: %d lookups", m.Lookups)
			info("                 Mallocs: %d mallocs", m.Mallocs)
			info("                   Frees: %d frees", m.Frees)
			info("   Heap Allocated in Use: %d bytes", m.HeapAlloc)
			info("        Heap From System: %d bytes", m.HeapSys)
			info("      Heap in Idle Spans: %d bytes", m.HeapIdle)
			info("   Heap in Non-Idle Span: %d bytes", m.HeapInuse)
			info("     Heap Released to OS: %d bytes",
				m.HeapReleased)
			info("Heap Total Alloc Objects: %d", m.HeapObjects)
			info("--")
		}
	}
	leave(0)
}