Example #1
// SetSync controls synchronous event mode. When set to true, a function call
// to generate an event does not return until the event has been processed.
func (logger *Logger) SetSync(enabled bool) {
	if enabled {
		atomic.StoreUint32(&logger.syncEnabled, 1)
	} else {
		atomic.StoreUint32(&logger.syncEnabled, 0)
	}
}
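The read side is the natural counterpart. A minimal sketch of how the flag might be consumed (the isSync helper is an assumption, not part of the original package):

// isSync reports whether synchronous event mode is enabled.
// Hypothetical read-side counterpart to SetSync above.
func (logger *Logger) isSync() bool {
	return atomic.LoadUint32(&logger.syncEnabled) == 1
}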
Example #2
func (c *conf) setBinaryOption(opt *uint32, value bool) {
	if value {
		atomic.StoreUint32(opt, 1)
		return
	}
	atomic.StoreUint32(opt, 0)
}
Example #3
// Wait waits for the queue to finish processing all transfers. Once Wait is
// called, Add will no longer add transferables to the queue. Any failed
// transfers will be automatically retried once.
func (q *TransferQueue) Wait() {
	if q.batcher != nil {
		q.batcher.Exit()
	}

	q.wait.Wait()

	// Handle any retries
	close(q.retriesc)
	q.retrywait.Wait()
	atomic.StoreUint32(&q.retrying, 1)

	if len(q.retries) > 0 && q.batcher != nil {
		tracerx.Printf("tq: retrying %d failed transfers", len(q.retries))
		for _, t := range q.retries {
			q.Add(t)
		}
		q.batcher.Exit()
		q.wait.Wait()
	}

	atomic.StoreUint32(&q.retrying, 0)

	close(q.apic)
	close(q.transferc)
	close(q.errorc)

	for _, watcher := range q.watchers {
		close(watcher)
	}

	q.meter.Finish()
	q.errorwait.Wait()
}
Example #4
func testThrottling(t *testing.T, protocol int) {
	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheLimit
	hashes, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester()
	tester.newPeer("peer", protocol, hashes, blocks)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(blocks []*Block) {
		atomic.StoreUint32(&blocked, uint32(len(blocks)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (the lock is the reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached int
		for start := time.Now(); time.Since(start) < time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.downloader.queue.lock.RLock()
			cached = len(tester.downloader.queue.blockPool)
			tester.downloader.queue.lock.RUnlock()

			if cached == blockCacheLimit || len(tester.ownBlocks)+cached+int(atomic.LoadUint32(&blocked)) == targetBlocks+1 {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
		if cached != blockCacheLimit && len(tester.ownBlocks)+cached+int(atomic.LoadUint32(&blocked)) != targetBlocks+1 {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, target %v)", cached, blockCacheLimit, len(tester.ownBlocks), targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	if len(tester.ownBlocks) > targetBlocks+1 {
		t.Fatalf("target block count mismatch: have %v, want %v", len(tester.ownBlocks), targetBlocks+1)
	}
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}
Example #5
func (b *Bool) Set(v bool) {
	if v {
		atomic.StoreUint32(&b.v, 1)
		return
	}
	atomic.StoreUint32(&b.v, 0)
}
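Since Go 1.19 the standard library provides sync/atomic.Bool, which expresses the same 0/1 encoding directly; a minimal sketch of the equivalent:

// Equivalent wrapper built on the stock atomic.Bool type (Go 1.19+).
type Bool struct {
	v atomic.Bool
}

func (b *Bool) Set(v bool) { b.v.Store(v) }
func (b *Bool) Get() bool  { return b.v.Load() }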
Example #6
func SetAllHeadersDone(res bool) {
	if res {
		atomic.StoreUint32(&allheadersdone, 1)
	} else {
		atomic.StoreUint32(&allheadersdone, 0)
	}
}
Example #7
func (j *Job) Run() {
	// If the job panics, just print a stack trace.
	// Don't let the whole process die.
	defer func() {
		if err := recover(); err != nil {
			if revelError := revel.NewErrorFromPanic(err); revelError != nil {
				revel.ERROR.Print(err, "\n", revelError.Stack)
			} else {
				revel.ERROR.Print(err, "\n", string(debug.Stack()))
			}
		}
	}()

	if !selfConcurrent {
		j.running.Lock()
		defer j.running.Unlock()
	}

	if workPermits != nil {
		workPermits <- struct{}{}
		defer func() { <-workPermits }()
	}

	atomic.StoreUint32(&j.status, 1)
	defer atomic.StoreUint32(&j.status, 0)

	j.inner.Run()
}
Example #8
// Switches between startup (fast) and maintenance (slow) sampling speeds.
func (b *Bootstrapper) SetMode(startup bool) {
	if startup {
		atomic.StoreUint32(&b.phase, uint32(0))
	} else {
		atomic.StoreUint32(&b.phase, uint32(1))
	}
}
Example #9
// EnableDebugLogs has log messages directed to standard output if enable is true.
func EnableDebugLogs(enable bool) {
	if enable {
		atomic.StoreUint32(&enableDebugLogs, 1)
	} else {
		atomic.StoreUint32(&enableDebugLogs, 0)
	}
}
Example #10
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string, params ...interface{}) error {
	// Don't crash on a lazy user
	if opts == nil {
		opts = new(CallOpts)
	}
	// Make sure we have a contract to operate on, and bail out otherwise
	if (opts.Pending && atomic.LoadUint32(&c.pendingHasCode) == 0) || (!opts.Pending && atomic.LoadUint32(&c.latestHasCode) == 0) {
		if code, err := c.caller.HasCode(c.address, opts.Pending); err != nil {
			return err
		} else if !code {
			return ErrNoCode
		}
		if opts.Pending {
			atomic.StoreUint32(&c.pendingHasCode, 1)
		} else {
			atomic.StoreUint32(&c.latestHasCode, 1)
		}
	}
	// Pack the input, call and unpack the results
	input, err := c.abi.Pack(method, params...)
	if err != nil {
		return err
	}
	output, err := c.caller.ContractCall(c.address, input, opts.Pending)
	if err != nil {
		return err
	}
	return c.abi.Unpack(result, method, output)
}
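The pendingHasCode/latestHasCode fields cache a successful HasCode probe so concurrent callers stop paying for it after the first success. A stripped-down sketch of the same check-then-publish pattern (the checkOnce helper is illustrative, not part of the original):

// checkOnce runs an expensive check until it first succeeds, then publishes
// the success in an atomic flag so later callers skip the check entirely.
func checkOnce(flag *uint32, check func() (bool, error)) error {
	if atomic.LoadUint32(flag) == 1 {
		return nil // a previous call already verified this
	}
	ok, err := check()
	if err != nil {
		return err
	}
	if !ok {
		return ErrNoCode
	}
	atomic.StoreUint32(flag, 1)
	return nil
}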
Example #11
func (j *Job) Run() {
	start := time.Now()
	// If the job panics, just print a stack trace.
	// Don't let the whole process die.
	defer func() {
		if err := recover(); err != nil {
			var buf bytes.Buffer
			logger := log.New(&buf, "JobRunner Log: ", log.Lshortfile)
			logger.Print(err, "\n", string(debug.Stack()))
		}
	}()

	if !selfConcurrent {
		j.running.Lock()
		defer j.running.Unlock()
	}

	if workPermits != nil {
		workPermits <- struct{}{}
		defer func() { <-workPermits }()
	}

	atomic.StoreUint32(&j.status, 1)
	j.StatusUpdate()

	defer j.StatusUpdate()
	defer atomic.StoreUint32(&j.status, 0)

	j.inner.Run()

	end := time.Now()
	j.Latency = end.Sub(start).String()

}
Example #12
// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer *peer) {
	// Short circuit if no peers are available
	if peer == nil {
		return
	}
	// Make sure the peer's TD is higher than our own
	currentBlock := pm.blockchain.CurrentBlock()
	td := pm.blockchain.GetTd(currentBlock.Hash())

	pHead, pTd := peer.Head()
	if pTd.Cmp(td) <= 0 {
		return
	}
	// Otherwise try to sync with the downloader
	mode := downloader.FullSync
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		mode = downloader.FastSync
	}
	if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
		return
	}
	atomic.StoreUint32(&pm.synced, 1) // Mark initial sync done

	// If fast sync was enabled, and we synced up, disable it
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		// Disable fast sync if we indeed have something in our chain
		if pm.blockchain.CurrentBlock().NumberU64() > 0 {
			glog.V(logger.Info).Infof("fast sync complete, auto disabling")
			atomic.StoreUint32(&pm.fastSync, 0)
		}
	}
}
Example #13
func updateNow() {
	t := time.Now()
	dt := uint32(t.Year()%100*10000 + int(t.Month())*100 + t.Day())
	tm := uint32(t.Hour()*10000 + t.Minute()*100 + t.Second())
	atomic.StoreUint32(&lastDate, dt)
	atomic.StoreUint32(&lastTime, tm)
	lastDateTimeStr = fmt.Sprintf("%04d %06d", dt%10000, tm)
}
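Readers would fetch the two words with matching atomic loads; note that lastDateTimeStr is a plain string assignment and is not covered by the atomics, so it needs separate synchronization. A sketch of the read side (the loadNow name is an assumption):

// loadNow returns the last recorded date and time words.
// Hypothetical read-side counterpart to updateNow above.
func loadNow() (date, clock uint32) {
	return atomic.LoadUint32(&lastDate), atomic.LoadUint32(&lastTime)
}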
Example #14
// Toggle enables or disables logging.
func (el *ErrLog) Toggle(toggle bool) {
	el32 := unsafe.Pointer(el)
	if toggle {
		atomic.StoreUint32((*uint32)(el32), 1)
	} else {
		atomic.StoreUint32((*uint32)(el32), 0)
	}
}
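The unsafe cast is only valid if a uint32 is the first field of ErrLog; a sketch of the layout this code assumes (field names are illustrative):

// Layout assumed by Toggle: the flag must be the first field so that a
// *ErrLog can be reinterpreted as a *uint32.
type ErrLog struct {
	enabled uint32
	// ... remaining fields ...
}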
Example #15
// Get returns buffer with length of n.
func (p *BufferPool) Get(n int) []byte {
	atomic.AddUint32(&p.get, 1)

	if poolNum := p.poolNum(n); poolNum == 0 {
		// Fast path.
		if b, ok := p.pool[0].Get().([]byte); ok {
			switch {
			case cap(b) > n:
				atomic.AddUint32(&p.less, 1)
				return b[:n]
			case cap(b) == n:
				atomic.AddUint32(&p.equal, 1)
				return b[:n]
			default:
				panic("not reached")
			}
		} else {
			atomic.AddUint32(&p.miss, 1)
		}

		return make([]byte, n, p.baseline0)
	} else {
		sizePtr := &p.size[poolNum-1]

		if b, ok := p.pool[poolNum].Get().([]byte); ok {
			switch {
			case cap(b) > n:
				atomic.AddUint32(&p.less, 1)
				return b[:n]
			case cap(b) == n:
				atomic.AddUint32(&p.equal, 1)
				return b[:n]
			default:
				atomic.AddUint32(&p.greater, 1)
				if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
					p.pool[poolNum].Put(b)
				}
			}
		} else {
			atomic.AddUint32(&p.miss, 1)
		}

		if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
			if size == 0 {
				atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
			} else {
				sizeMissPtr := &p.sizeMiss[poolNum-1]
				if atomic.AddUint32(sizeMissPtr, 1) == 20 {
					atomic.StoreUint32(sizePtr, uint32(n))
					atomic.StoreUint32(sizeMissPtr, 0)
				}
			}
			return make([]byte, n)
		} else {
			return make([]byte, n, size)
		}
	}
}
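A minimal usage sketch, assuming the package also exposes a NewBufferPool constructor and a Put method for returning buffers:

func example() {
	pool := NewBufferPool(64 * 1024) // baseline buffer size
	buf := pool.Get(4096)            // len(buf) == 4096
	// ... use buf ...
	pool.Put(buf) // hand the buffer back for reuse
}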
Example #16
// collect starts when we create a new Rps instance.
// Every second it updates rps and resets the counter to 0.
func (rc *Rps) collect() {
	for {
		select {
		case <-rc.ticker:
			atomic.StoreUint32(rc.rps, atomic.LoadUint32(rc.count))
			atomic.StoreUint32(rc.count, 0)
		}
	}
}
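A sketch of the surrounding type and a reader; the pointer fields follow the arguments passed to the atomic calls above, while the Current method is an assumption:

// Rps tracks requests per second: handlers increment count, and collect
// snapshots it into rps once per tick before zeroing it.
type Rps struct {
	rps    *uint32
	count  *uint32
	ticker <-chan time.Time
}

// Current returns the rate captured at the last tick.
func (rc *Rps) Current() uint32 {
	return atomic.LoadUint32(rc.rps)
}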
Example #17
func SetListenTCP(yes bool, global bool) {
	if yes {
		atomic.StoreUint32(&listen_tcp, 1)
	} else {
		atomic.StoreUint32(&listen_tcp, 0)
	}
	if global {
		// Make sure mutex_cfg is locked while calling this one
		CFG.Net.ListenTCP = yes
	}
}
Example #18
func SetListenTCP(yes bool, global bool) {
	if yes {
		atomic.StoreUint32(&listen_tcp, 1)
	} else {
		atomic.StoreUint32(&listen_tcp, 0)
	}
	if global {
		mutex_cfg.Lock()
		CFG.Net.ListenTCP = yes
		mutex_cfg.Unlock()
	}
}
Example #19
// Update a given pool from an etcd notification.
func (m *Manager) updatePool(node *etcd.Node) {
	pool := m.getPool(node.Key[1:])
	if pool == nil {
		return // We're not monitoring this domain, nothing to be done
	}

	pool.Lock()
	defer pool.Unlock()

	// Init the host pool with new hosts
	hosts := strings.Split(node.Value, "|")
	pool.hosts.Assign(hosts)

	// For each new host, create a peer connection if it doesn't exist yet
	for _, host := range hosts {
		if peer := pool.peers[host]; peer != nil {
			atomic.StoreUint32(&peer.shutdown, 0)
		} else {
			peer := &Peer{
				shutdown:  0,
				reconnect: 0,
				refCount:  0,
				pool:      pool,
			}
			peer.connect(host)
			pool.peers[host] = peer
		}
	}

	var del bool
	var h string

	// For each deleted host, mark it as being shutdown
	for host, peer := range pool.peers {
		del = true
		for _, h = range hosts {
			if h == host {
				del = false
				break
			}
		}
		if del {
			atomic.StoreUint32(&peer.shutdown, 1)
		}
	}

	// Notify hosts change
	select {
	case pool.notif <- 1:
	default:
	}
}
Example #20
// WUnlock relinquishes our write lock.
func (l *lock) WUnlock() {
	// If more writers attempted our lock, write&^highBit will be >1, and
	// the try that got 2 will be waiting in pending state. That waiter
	// will now see we have unlocked, so we can leave.
	write := atomic.AddUint32(&l.write, highBit)
	if write&^highBit > 1 {
		return
	}
	// If nobody else attempted the lock by the time we added the high bit,
	// we must let readers continue (first) and then reset our write lock.
	atomic.StoreUint32(&l.read, 0)
	atomic.StoreUint32(&l.write, 0)
}
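For context, a rough sketch of the two-word lock this method operates on; the real definitions live elsewhere in the package, so treat the layout and comments here as assumptions:

// lock is a write-preferring lock built on two uint32 words: writers take a
// ticket from write (the high bit is added back on unlock), and read gates
// readers while a writer is active.
type lock struct {
	read  uint32
	write uint32
}

const highBit = 1 << 31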
Example #21
/*
	Grow hash table procedure
	old hash table (n buckets)	->	new hash table	(n*2 buckets)
	example:
	0    0       0Lock   0       0       0       0
	1    1       1       1Lock   1       1       1
	2    2       2       2       2Lock   2       2
	3    3       3       3       3       3Lock   3
	     4Lock   4Lock   4       4       4       4
	     5Lock   5Lock   5Lock   5       5       5
	     6Lock   6Lock   6Lock   6Lock   6       6
	     7Lock   7Lock   7Lock   7Lock   7Lock   7
*/
func (t *tCache) grow() {
	var (
		i, j int
		val  *TItem
	)

	oldht := *(*[]*tBucket)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&t.t))))
	oldlen := len(oldht)
	newlen := oldlen << 1

	if uint64(newlen) >= math.MaxUint32 {
		// only one grow may run at a time; release the grow lock and bail out
		atomic.StoreUint32(&t.growlock, 0)
		return
	}

	newht := make([]*tBucket, newlen)
	copy(newht, oldht) // safe: buckets are pointers, so existing entries remain reachable
	for i = oldlen; i < newlen; i++ {
		newht[i] = &tBucket{}
		newht[i].Lock()
	}
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&t.t)), unsafe.Pointer(&newht))
	oldht = nil
	// rehash

	j = oldlen
	for i = 0; i < oldlen; i++ {
		itemsold := make([]*TItem, 0, initbucketscap)
		itemsnew := make([]*TItem, 0, initbucketscap)
		newht[i].Lock()
		for _, val = range newht[i].items {
			if t.keyToID([]byte(val.Key)) == uint32(i) {
				itemsold = append(itemsold, val)
			} else {
				itemsnew = append(itemsnew, val)
			}
		}

		newht[j].items = itemsnew
		newht[j].Unlock()
		newht[i].items = itemsold
		newht[i].Unlock()
		j++
	}

	// only one grow may run at a time; release the grow lock
	atomic.StoreUint32(&t.growlock, 0)
}
Example #22
// Data size monitoring depends on the output format of the mongodb command.
// The format is consistent across all currently supported versions (1.8, 2.0
// and 2.2), and we must get the data size information from the mongodb
// command interface.
func (filter *ProxyFilterImpl) MonitorQuotaDataSize() {
	dbhost := filter.mongo.HOST
	port := filter.mongo.PORT
	dbname := filter.mongo.DBNAME
	user := filter.mongo.USER
	pass := filter.mongo.PASS
	quota_data_size := filter.config.QUOTA_DATA_SIZE

	filter.lock.Lock()
	filter.running++
	filter.lock.Unlock()

	var size float64
	for {
		event := <-filter.data_size_channel
		if event == STOP_EVENT {
			break
		}

		if err := startMongoSession(dbhost, port); err != nil {
			logger.Errorf("Failed to connect to %s:%s, [%s].", dbhost, port, err)
			goto Error
		}

		if !readMongodbSize(dbname, user, pass, &size) {
			logger.Errorf("Failed to read database '%s' size.", dbname)
			goto Error
		}

		if size >= float64(quota_data_size)*float64(1024*1024) {
			logger.Error("Data size exceeds quota.")
			atomic.StoreUint32(&filter.mablocked, BLOCKED)
		} else {
			atomic.StoreUint32(&filter.mablocked, UNBLOCKED)
		}

		continue
	Error:
		atomic.StoreUint32(&filter.mablocked, BLOCKED)
	}

	endMongoSession()

	filter.lock.Lock()
	filter.running--
	if filter.running == 0 {
		filter.wait <- STOP_EVENT
	}
	filter.lock.Unlock()
}
Example #23
func main() {
	flag.Parse()
	cfg, _, err := config.Parse(*flagConfig)
	if err != nil {
		Fatalf("%v", err)
	}
	if len(flag.Args()) != 1 {
		Fatalf("usage: syz-crush -config=config.file execution.log")
	}

	Logf(0, "booting test machines...")
	var shutdown uint32
	var wg sync.WaitGroup
	wg.Add(cfg.Count + 1)
	for i := 0; i < cfg.Count; i++ {
		i := i
		go func() {
			defer wg.Done()
			for {
				vmCfg, err := config.CreateVMConfig(cfg, i)
				if atomic.LoadUint32(&shutdown) != 0 {
					break
				}
				if err != nil {
					Fatalf("failed to create VM config: %v", err)
				}
				runInstance(cfg, vmCfg)
				if atomic.LoadUint32(&shutdown) != 0 {
					break
				}
			}
		}()
	}

	go func() {
		c := make(chan os.Signal, 2)
		signal.Notify(c, syscall.SIGINT)
		<-c
		wg.Done()
		atomic.StoreUint32(&shutdown, 1)
		close(vm.Shutdown)
		Logf(-1, "shutting down...")
		<-c
		Fatalf("terminating")
	}()
	wg.Wait()
}
Example #24
func (r *RoundRobinPeerSelector) SetMaxLen(m uint32) {
	if m > math.MaxUint16 {
		log.Panicf("Number of peers %v exceeds the supported maximum %v",
			m, math.MaxUint16)
	}
	atomic.StoreUint32(&r.maxLen, m)
}
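A hypothetical consumer showing the matching read side (the counter field and next method are assumptions, and next requires that SetMaxLen was called with m > 0):

// next returns the next peer index in round-robin order.
func (r *RoundRobinPeerSelector) next() uint32 {
	return atomic.AddUint32(&r.counter, 1) % atomic.LoadUint32(&r.maxLen)
}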
Example #25
func (k *KinesisOutput) Write(metrics []telegraf.Metric) error {
	var sz uint32 = 0
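	// Note: sz never escapes this call, so the atomic operations on it below
	// are defensive only; a plain int counter would behave identically here.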

	if len(metrics) == 0 {
		return nil
	}

	r := []*kinesis.PutRecordsRequestEntry{}

	for _, p := range metrics {
		atomic.AddUint32(&sz, 1)

		metric, _ := FormatMetric(k, p)
		d := kinesis.PutRecordsRequestEntry{
			Data:         []byte(metric),
			PartitionKey: aws.String(k.PartitionKey),
		}
		r = append(r, &d)

		if sz == 500 {
			// Max Messages Per PutRecordRequest is 500
			elapsed := writekinesis(k, r)
			log.Printf("Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed)
			atomic.StoreUint32(&sz, 0)
			r = nil
		}
	}

	writekinesis(k, r)

	return nil
}
Example #26
// Establishes a new socket connection to the 9P server and creates
// a client object for it. Negotiates the dialect and msize for the
// connection. Returns a Clnt object or an error.
func Connect(c net.Conn, msize uint32, dotu bool) (*Clnt, error) {
	clnt := NewClnt(c, msize, dotu)
	ver := "9P2000"
	if clnt.Dotu {
		ver = "9P2000.u"
	}

	clntmsize := atomic.LoadUint32(&clnt.Msize)
	tc := ninep.NewFcall(clntmsize)
	err := ninep.PackTversion(tc, clntmsize, ver)
	if err != nil {
		return nil, err
	}

	rc, err := clnt.Rpc(tc)
	if err != nil {
		return nil, err
	}

	if rc.Msize < atomic.LoadUint32(&clnt.Msize) {
		atomic.StoreUint32(&clnt.Msize, rc.Msize)
	}

	clnt.Dotu = rc.Version == "9P2000.u" && clnt.Dotu
	return clnt, nil
}
Example #27
func (filter *ProxyFilterImpl) StorageMonitor() {
	for {
		// FIXME: fake monitor handler
		atomic.StoreUint32(&filter.blocked, UNBLOCKED)
		time.Sleep(1 * time.Second)
	}
}
Example #28
// Flush writes the content of the buffer to a given resource and resets the
// internal state, i.e. the buffer is empty after a call to Flush.
// Writing is done in a separate goroutine so the call does not block; the
// assemble callback receives the messages of the buffer being flushed.
func (batch *MessageBatch) Flush(assemble AssemblyFunc) {
	if batch.IsEmpty() {
		return // ### return, nothing to do ###
	}

	// Only one flush at a time
	batch.flushing.IncWhenDone()

	// Switch the buffers so writers can go on writing
	flushSet := atomic.SwapUint32(&batch.activeSet, (batch.activeSet&messageBatchIndexMask)^messageBatchIndexMask)

	flushIdx := flushSet >> messageBatchIndexShift
	writerCount := flushSet & messageBatchCountMask
	flushQueue := &batch.queue[flushIdx]
	spin := shared.NewSpinner(shared.SpinPriorityHigh)

	// Wait for remaining writers to finish
	for writerCount != atomic.LoadUint32(&flushQueue.doneCount) {
		spin.Yield()
	}

	// Write data and reset buffer asynchronously
	go shared.DontPanic(func() {
		defer batch.flushing.Done()

		messageCount := shared.MinI(int(writerCount), len(flushQueue.messages))
		assemble(flushQueue.messages[:messageCount])
		atomic.StoreUint32(&flushQueue.doneCount, 0)
		batch.Touch()
	})
}
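The activeSet word packs a buffer index and an in-flight writer count; a sketch of the masks this relies on (the actual values are defined elsewhere in the package, so treat these as assumptions):

const (
	messageBatchIndexShift = 31                          // buffer index lives in the top bit
	messageBatchIndexMask  = 1 << messageBatchIndexShift // selects the index bit
	messageBatchCountMask  = messageBatchIndexMask - 1   // low bits count in-flight writers
)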
Example #29
func TestGoroutineParallelism(t *testing.T) {
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
Example #30
func InitEntityStoreMap(dbrSess *dbr.Session) error {
	if atomic.LoadUint32(&initMapDone.done) == 1 {
		return ErrStoreMapInitialized
	}

	initMapDone.m.Lock()
	defer initMapDone.m.Unlock()
	if initMapDone.done == 0 {
		defer atomic.StoreUint32(&initMapDone.done, 1)

		s, err := TableCollection.Structure(TableIndexEntityStore)
		if err != nil {
			return errgo.Mask(err)
		}
		var ess TableEntityStoreSlice
		_, err = dbrSess.
			Select(s.Columns.FieldNames()...).
			From(s.Name).
			LoadStructs(&ess)
		if err != nil {
			return errgo.Mask(err)
		}

		for _, es := range ess {
			EntityStoreMap.Set(es.EntityTypeID, es.StoreID, es)
		}

		ess = ess[:len(ess)-1] // delete Struct Slice https://code.google.com/p/go-wiki/wiki/SliceTricks
		return nil
	}
	return ErrStoreMapInitialized
}
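initMapDone mirrors the classic sync.Once layout: an atomically read done word guarding a mutex-protected slow path. A sketch of the assumed declaration:

// Assumed shape of initMapDone; done is read atomically on the fast path
// and written while m is held once initialization finishes.
var initMapDone = struct {
	m    sync.Mutex
	done uint32
}{}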