func TestGetSessionToken(t *testing.T) { conf := jconfig.LoadConfig("../config.json") auth := aws.Auth{conf.GetString("AccessKeyId"), conf.GetString("SecretAccessKey")} region := aws.USEast sts := New(auth, region) _, err := sts.GetSessionToken() if err != nil { t.Error("There was an error getting the session token.") } }
func setupNewDynamo() { if dynamo == nil { conf := jconfig.LoadConfig("../config.json") auth := aws.Auth{conf.GetString("AccessKeyId"), conf.GetString("SecretAccessKey")} region := aws.USEast sts := sts.New(auth, region) resp, _ := sts.GetSessionToken() auth = aws.Auth{resp.AccessKeyId, resp.SecretAccessKey} dynamo = New(auth, region, resp.SessionToken) } }
func init() { var stage string // Get stage flag passed in. Default to test flag.StringVar(&stage, "stage", "test", "flag for deployment stage") flag.Parse() if stage == "test" { setupTestEnvironment() } else { // Retrieve config from balanced.conf config := jconfig.LoadConfig(stage + "/balanced.conf") apiRoot = config.GetString("balanced_api_root") apiKey = config.GetString("balanced_api_key") marketplaceId = config.GetString("balanced_marketplace_id") } }
func New(db *bolt.DB) httpHandler { config := jconfig.LoadConfig("config.json") key := config.GetString("session_secret") var err error str, err = store.New(db, store.Config{}, []byte(key)) if err != nil { panic(err) } sessionName = config.GetString("session_name") return func(h http.Handler) http.Handler { return http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { session, err := GetSession(r) if err != nil { util.Error(err, w) return } if t, ok := session.Values["expires"]; ok { if time.Now().Before(t.(time.Time)) { // authed if u, ok := session.Values["user"]; ok { w.Header().Set("X-User", u.(string)) } w.Header().Set("X-Expires", t.(time.Time).String()) h.ServeHTTP(w, r) return } } user, pass, ok := r.BasicAuth() if ok { if checkUserIn(user, pass, w, r) { h.ServeHTTP(w, r) return } } if r.URL.Path == "/api/v1/auth/" { h.ServeHTTP(w, r) return } w.WriteHeader(http.StatusUnauthorized) }, ) } }
func parseZonefiles() { config := jconfig.LoadConfig("config.json") dir := config.GetString("zonefile_dir") p := zonefile.New() files := []string{ //"20141113-net.zone.gz", "20140621-biz.zone.gz", "20140622-biz.zone.gz", "20141210-biz.zone.gz", } for _, f := range files { err := p.SetupGunzipFile(dir + f) if err != nil { log.Error("Unable to setup %s: %s", f, err) return } p.Parse() } }
func Run() { // TODO: set up logger and config here //InitLogger() //InitMemcache() //web.SetLogger(Logger) // TODO: add error checking config := jconfig.LoadConfig("app.conf") web.Config.StaticDir = config.GetString("StaticDirectory") web.Config.Addr = config.GetString("Host") web.Config.Port = config.GetInt("Port") web.Config.CookieSecret = config.GetString("CookieSecret") switch config.GetString("SessionStore") { case "redis": host := config.GetString("SessionHost") port := strconv.Itoa(config.GetInt("SessionPort")) sessionStore = store.NewRedisStore("tcp:" + host + ":" + port) } addr := web.Config.Addr + ":" + strconv.Itoa(web.Config.Port) web.Run(addr) }
// setup uses config.json to try and connect to Postgres. Warning, this method can panic. func (c *connection) setup() error { config := jconfig.LoadConfig("config.json") user := config.GetString("db_user") pass := config.GetString("db_pass") name := config.GetString("db_name") if len(user+pass+name) == 0 { panic("error setup config file for database connection") } conn, err := sql.Open("postgres", fmt.Sprintf("user=%s password=%s dbname=%s sslmode=disable", user, pass, name, ), ) if err != nil { return err } c.db = conn return nil }
func main() { config := jconfig.LoadConfig("conf.json") //confBE := config.GetArray("backend") backendPool := make([]*Backend, 10) for i, v := range config.GetArray("backend") { log.Info("backend %d = %s", i, v) be, _ := NewBackend(v.(string)) backendPool[i] = be } listenStr := config.GetString("listen_host") + ":" + config.GetString("listen_port") listenAddr, err := net.ResolveTCPAddr("tcp", listenStr) if err != nil { log.Error(err) } log.Info("Starting the server on %s", listenAddr) listener, err := net.ListenTCP("tcp", listenAddr) if err != nil { log.Error("error listening:%s", err.Error()) log.Close() os.Exit(1) } bc := 0 //backend counter for { conn, err := listener.AcceptTCP() if err != nil { log.Error("Error accept:%s", err.Error()) log.Close() os.Exit(1) } go ProxyFunc(conn, backendPool[bc]) bc = (bc + 1) % len(backendPool) } }
func init() { config := jconfig.LoadConfig(os.Getenv("GOPATH") + "/assets/config/aws.json") accessKey = config.GetString("aws_access_key") secretKey = config.GetString("aws_secret_key") }
// wsContext bundles the per-connection state for one websocket client.
type wsContext struct {
	UserID   string          //user id
	LoggedIn bool            //login status. true if logged in
	Ws       *websocket.Conn //websocket object
}

//construct new websocket context
// The new context starts logged out; UserID is left at its zero value until
// the client authenticates.
func newWSContext(ws *websocket.Conn) *wsContext {
	w := new(wsContext)
	w.Ws = ws
	w.LoggedIn = false
	return w
}

//Config is this application configuration
var Config = jconfig.LoadConfig("config.json")

// main wires up the websocket endpoint (/irc/) and the static UI file
// server, starts the context map and endpoint publisher, and serves on the
// configured host_port.
// NOTE(review): this function continues past the end of the visible chunk;
// the remainder (closing braces, any further statements) is not shown here.
func main() {
	log.LoadConfiguration("timber.xml")
	r := mux.NewRouter()
	r.Handle("/irc/", websocket.Handler(wsMain))
	r.PathPrefix("/").Handler(http.FileServer(http.Dir("../ui/build/")))
	ContextMapInit()
	go EndpointPublisher()
	log.Debug("Starting ircboks server ..")
	if err := http.ListenAndServe(Config.GetString("host_port"), r); err != nil {
		log.Error("ListenAndServer error :", err.Error())
func init() { config := jconfig.LoadConfig("/etc/aws.conf") accessKey = config.GetString("aws_access_key") secretKey = config.GetString("aws_secret_key") }
// main function func main() { args := os.Args if args == nil || len(args) < 2 { fmt.Println("Usage: tcp2kafka ./default.json") return } const Compiler = "gc" const GOARCH string = "amd64" const GOOS string = "linux" runtime.GOMAXPROCS(runtime.NumCPU()) CONFIG = jconfig.LoadConfig(args[1]) port := CONFIG.GetString("port") if port == "" { port = "8090" } ln, err := net.Listen("tcp", fmt.Sprintf(":%s", port)) if err != nil { log.Fatal(err) } err = initProducers(CONFIG) if err != nil { log.Println(err) return } // dumpError timer dumpTimer := make(chan bool, 1) go makeTimer(dumpTimer, 1500) defer close(dumpTimer) // push to msgChan timer pushTimer := make(chan bool, 1) go makeTimer(pushTimer, 1000) defer close(pushTimer) // pull from msgChan timer publishTimer := make(chan bool, 1) go makeTimer(publishTimer, 1000) defer close(publishTimer) for _, p := range PRODUCERS { go dumpErrors(p, dumpTimer) } // 定长阻塞队列 msgChan := make(chan []byte, 10000*10000) defer close(msgChan) // 初始化发送routine for i := 0; i < runtime.NumCPU(); i++ { go publishMsg(msgChan, publishTimer) } for { conn, err := ln.Accept() if err != nil { log.Println(err) continue } go handle(conn, msgChan, pushTimer) } }
func Init() *SES { config := jconfig.LoadConfig("/etc/aws.conf") return &SES{config.GetString("aws_access_key"), config.GetString("aws_secret_key")} }
// returns an operation filter which uses a consistent hash to determine // if the operation will be accepted for processing. can be used to distribute work. // name: the name of the worker creating this filter. e.g. "Harry" // configFile: a file path to a json document. the document should contain // an object with a property named 'workers' which is a list of // all the workers participating. e.g. // { "workers": ["Tom", "Dick", "Harry"] } func ConsistentHashFilterFromFile(name string, configFile string) (gtm.OpFilter, error) { config := jconfig.LoadConfig(configFile) workers := config.GetArray("workers") return ConsistentHashFilter(name, workers) }