Code example #1
File: s3.go Project: straup/go-whosonfirst-s3
func NewSync(auth aws.Auth, region aws.Region, acl aws_s3.ACL, bucket string, prefix string, log chan string) *Sync {

	numCPUs := runtime.NumCPU() * 2
	runtime.GOMAXPROCS(numCPUs)

	pool, _ := tunny.CreatePoolGeneric(numCPUs).Open()

	s := aws_s3.New(auth, region)
	b := s.Bucket(bucket)

	return &Sync{
		ACL:    acl,
		Bucket: *b,
		Prefix: prefix,
		Pool:   *pool,
		Log:    log,
	}
}
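
Example #1 only constructs the pool and stores it; none of the constructor-style examples on this page show the pool being used afterwards. Below is a minimal, self-contained sketch of the lifecycle these snippets rely on (open a generic pool, hand it closures via SendWork, close it when done). It assumes the legacy tunny release that still exposes CreatePoolGeneric under the github.com/jeffail/tunny import path; the worker count and the print statement are placeholders, not code from the project above.

package main

import (
	"fmt"
	"runtime"

	"github.com/jeffail/tunny"
)

func main() {
	numCPUs := runtime.NumCPU()

	// A "generic" pool has no fixed job function; each worker simply runs
	// whatever closure it is handed via SendWork.
	pool, err := tunny.CreatePoolGeneric(numCPUs).Open()
	if err != nil {
		panic(err)
	}
	defer pool.Close()

	for i := 0; i < 10; i++ {
		n := i
		pool.SendWork(func() {
			fmt.Println("processing item", n)
		})
	}
}

The CreatePoolGeneric/Open/SendWork/Close calls mirror what the examples on this page actually use; everything inside the submitted closure is illustrative only.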
Code example #2
File: analytics.go Project: TykTechnologies/tyk
func (r *RedisAnalyticsHandler) Init() {
	if config.AnalyticsConfig.EnableGeoIP {
		go r.reloadDB()
	}

	analytics.Store.Connect()
	var err error

	ps := config.AnalyticsConfig.PoolSize
	if ps == 0 {
		ps = 50
	}

	AnalyticsPool, err = tunny.CreatePoolGeneric(ps).Open()
	if err != nil {
		log.Error("Failed to init analytics pool")
	}
}
Code example #3
File: s3.go Project: whosonfirst/go-whosonfirst-s3
func NewSync(creds *credentials.Credentials, region string, acl string, bucket string, prefix string, procs int, debug bool, logger *log.WOFLogger) *Sync {

	runtime.GOMAXPROCS(procs)

	workpool, _ := tunny.CreatePoolGeneric(procs).Open()
	retries := pool.NewLIFOPool()

	cfg := aws.NewConfig()
	cfg.WithRegion(region)

	if creds != nil {
		cfg.WithCredentials(creds)
	}

	sess := session.New(cfg)

	svc := s3.New(sess)

	ttp := new(time.Duration)

	return &Sync{
		Service:       svc,
		ACL:           acl,
		Bucket:        bucket,
		Prefix:        prefix,
		WorkPool:      *workpool,
		Debug:         debug,
		Dryrun:        false,
		Logger:        logger,
		Scheduled:     0,
		Completed:     0,
		Skipped:       0,
		Error:         0,
		Success:       0,
		Retried:       0,
		TimeToProcess: ttp,
		Retries:       retries,
		MaxRetries:    25.0, // maybe allow this to be user-defined ?
	}
}
Code example #4
func NewWOFClone(source string, dest string, procs int, logger *log.WOFLogger) (*WOFClone, error) {

	// https://golang.org/src/net/http/filetransport.go

	u, err := url.Parse(source)

	if err != nil {
		return nil, err
	}

	var cl *http.Client

	if u.Scheme == "file" {

		root := u.Path

		if !strings.HasSuffix(root, "/") {
			root = root + "/"
		}

		/*
			Pay attention to what's going on here. Absent tweaking the URL to
			fetch in the 'Fetch' method the following will not work. To make
			this work *without* tweaking the URL you would need to specify
			the root as '/' which just seems like a bad idea. The fear of
			blindly opening up the root level directory on the file system in
			this context may seem a bit premature (not to mention silly) but
			measure twice and all that good stuff...
			See also: https://code.google.com/p/go/issues/detail?id=2113
			(20160112/thisisaaronland)
		*/

		t := &http.Transport{}
		t.RegisterProtocol("file", http.NewFileTransport(http.Dir(root)))

		cl = &http.Client{Transport: t}
	} else {
		cl = &http.Client{}
	}

	runtime.GOMAXPROCS(procs)

	workpool, _ := tunny.CreatePoolGeneric(procs).Open()
	retries := pool.NewLIFOPool()

	/*

		This gets triggered in the 'Process' function to ensure that
		we don't exit out of 'CloneMetaFile' before all the goroutines
		to write new files to disk actually finish ... you know, writing
		to disk (20160606/thisisaaronland)
	*/

	writesync := new(sync.WaitGroup)

	ch := make(chan bool)

	c := WOFClone{
		Success:    0,
		Error:      0,
		Skipped:    0,
		Source:     source,
		Dest:       dest,
		Logger:     logger,
		MaxRetries: 25.0, // maybe allow this to be user-defined ?
		client:     cl,
		workpool:   workpool,
		writesync:  writesync,
		retries:    retries,
		timer:      time.Now(),
		done:       ch,
	}

	go func(c *WOFClone) {

		for {
			select {
			case <-c.done:
				// a bare 'break' here would only exit the select statement,
				// not the for loop, so return to actually stop the status loop
				return
			case <-time.After(1 * time.Second):
				c.Status()
			}
		}
	}(&c)

	return &c, nil
}
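
A small aside on the status goroutine above: re-arming time.After on every iteration allocates a fresh timer each second. A ticker-based variant (a sketch, not code from the project, reusing the same c.done and c.Status() hooks) reuses one timer and stops it when the clone finishes:

	go func(c *WOFClone) {

		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()

		for {
			select {
			case <-c.done:
				return
			case <-ticker.C:
				c.Status()
			}
		}
	}(&c)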
Code example #5
File: bmo.go Project: cosmicturtle/bmo
func (bmo *BMO) Compute(input *os.File) {

	var err error
	var cur *r.Cursor
	var session *r.Session

	// set up database connection pool
	session, err = r.Connect(r.ConnectOpts{
		Addresses:     bmo.nodes,
		Database:      bmo.database,
		DiscoverHosts: true,
	})
	if err != nil {
		// check the connection error before touching the session
		log.Fatalln(err)
	}
	session.SetMaxOpenConns(POOL_SIZE)
	// ensure table is present
	var tableNames []string
	cur, err = r.DB(bmo.database).TableList().Run(session)
	if err != nil {
		log.Fatalln(err)
	}
	cur.All(&tableNames)
	set := make(map[string]bool)
	for _, v := range tableNames {
		set[v] = true
	}
	if !set[bmo.table] {
		log.Println("Creating table ", bmo.table)
		_, err = r.DB(bmo.database).TableCreate(bmo.table).RunWrite(session)
		if err != nil {
			log.Fatalln("Error creating table: ", err) // Fatalln already exits
		}
	}

	// deliver the messages
	decoder := json.NewDecoder(input)
	ms := make([]Message, INSERT_BATCH_SIZE)
	var m *Message
	var i uint64
	var ignoreLast bool

	pool, _ := tunny.CreatePoolGeneric(POOL_SIZE).Open()
	defer pool.Close()

	table := r.Table(bmo.table)
	insertOptions := r.InsertOpts{Durability: "soft"}

	insert := func() {
		j := i
		if !ignoreLast {
			j += 1
		}
		_, err = table.Insert(ms[:j], insertOptions).RunWrite(session)
		if err != nil {
			log.Fatal(err) // Fatal already exits the program
		}
	}

	for {
		i = bmo.seq % INSERT_BATCH_SIZE
		m = &ms[i]
		err = decoder.Decode(&m)

		switch {
		case err == io.EOF:
			ignoreLast = true
			pool.SendWork(insert)
			return
		case err != nil:
			ignoreLast = true
			pool.SendWork(insert)
			log.Fatal("Can't parse json input, \"", err, "\". Object #", bmo.seq, ", after ", m)
		default:
			if i+1 == INSERT_BATCH_SIZE {
				ignoreLast = false
				pool.SendWork(insert)
			}
		}

		bmo.seq += 1
	}
}
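
The ignoreLast / ms[:j] arithmetic in Compute is easy to misread: i is the slot the current message was (or would have been) decoded into, and the flush must include that slot only when the decode succeeded. A standalone illustration of just that arithmetic, with a hypothetical batch size of 4 standing in for INSERT_BATCH_SIZE (none of this is code from the bmo project):

package main

import "fmt"

// batchSize stands in for INSERT_BATCH_SIZE; the value 4 is made up for
// this illustration.
const batchSize = 4

// flushCount reports how many slots of the batch buffer should be flushed
// when slot i either completed a full batch (ignoreLast=false) or failed
// to decode at EOF / on error (ignoreLast=true).
func flushCount(i uint64, ignoreLast bool) uint64 {
	if ignoreLast {
		return i // slot i never received a valid message
	}
	return i + 1 // slot i holds a valid message, include it in the flush
}

func main() {
	fmt.Println(flushCount(batchSize-1, false)) // full batch: flush all 4 slots
	fmt.Println(flushCount(1, true))            // EOF after 1 message: flush ms[:1]
}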