Example #1
func NewWorker(fetchers uint64, tc <-chan Archive, rc chan<- Status) *Worker {
	w := new(Worker)
	w.ID = atomic.AddUint32(&workerID, 1)
	w.Targets, w.Report = tc, rc
	w.Fetchers = make([]*Fetcher, fetchers)
	w.FRequest = make(chan FetchRequest, 30)
	w.FResult = make(chan FetchResult, 30*fetchers)
	for i := range w.Fetchers {
		f := new(Fetcher)
		f.ID = atomic.AddUint32(&fetchID, 1)
		f.Request = w.FRequest
		f.Result = w.FResult
		w.Fetchers[i] = f
		go w.Fetchers[i].fetch()
	}
	go w.Watch()
	go func() {
		for result := range w.FResult {
			if !result.Ok {
				log.Printf("Fetch error: %s", result.Description)
			}
		}
	}()
	return w
}
Example #2
// ServeConn serves HTTP requests from the given connection.
//
// ServeConn returns nil if all requests from c are successfully served.
// It returns a non-nil error otherwise.
//
// Connection c must immediately propagate all the data passed to Write()
// to the client. Otherwise request processing may hang.
//
// ServeConn closes c before returning.
func (s *Server) ServeConn(c net.Conn) error {
	if s.MaxConnsPerIP > 0 {
		pic := wrapPerIPConn(s, c)
		if pic == nil {
			c.Close()
			return ErrPerIPConnLimit
		}
		c = pic
	}

	n := atomic.AddUint32(&s.concurrency, 1)
	if n > uint32(s.getConcurrency()) {
		atomic.AddUint32(&s.concurrency, ^uint32(0))
		c.Close()
		return ErrConcurrencyLimit
	}

	err := s.serveConn(c)

	atomic.AddUint32(&s.concurrency, ^uint32(0))

	if err != errHijacked {
		err1 := c.Close()
		if err == nil {
			err = err1
		}
	} else {
		err = nil
	}
	return err
}
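The ^uint32(0) delta seen above is the standard sync/atomic idiom for decrementing an unsigned counter: AddUint32 takes no signed argument, so the code adds the two's-complement of 1 and lets the addition wrap modulo 2^32. A minimal, self-contained sketch of the idiom (variable names are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var concurrency uint32 = 3
	// Decrement by one: adding 2^32-1 wraps around to 3-1.
	atomic.AddUint32(&concurrency, ^uint32(0))
	fmt.Println(concurrency) // 2
	// General form from the sync/atomic docs: to subtract n, add ^uint32(n-1).
	atomic.AddUint32(&concurrency, ^uint32(2-1))
	fmt.Println(concurrency) // 0
}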
Example #3
func (f *Fixer) updateCounters(chains [][]*x509.Certificate, ferrs []*FixError) {
	atomic.AddUint32(&f.validChainsProduced, uint32(len(chains)))

	var verifyFailed bool
	var fixFailed bool
	for _, ferr := range ferrs {
		switch ferr.Type {
		case VerifyFailed:
			verifyFailed = true
		case FixFailed:
			fixFailed = true
		}
	}
	// No errors --> reconstructed
	// VerifyFailed --> notReconstructed
	// VerifyFailed but no FixFailed --> fixed
	// VerifyFailed and FixFailed --> notFixed
	if verifyFailed {
		atomic.AddUint32(&f.notReconstructed, 1)
		// FixFailed error will only be present if a VerifyFailed error is, as
		// fixChain() is only called if constructChain() fails.
		if fixFailed {
			atomic.AddUint32(&f.notFixed, 1)
			return
		}
		atomic.AddUint32(&f.fixed, 1)
		return
	}
	atomic.AddUint32(&f.reconstructed, 1)
}
Example #4
// GetSeqNum returns a sequence number that is greater than the previous sequence number by 1.
func (m32 *MonoIncSeqNumGenerator32) GetSeqNum() uint32 {
	seq := atomic.AddUint32((*uint32)(m32), 1)
	for seq == 0 {
		seq = atomic.AddUint32((*uint32)(m32), 1)
	}
	return seq
}
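GetSeqNum reserves zero as the "no sequence number" value: when the counter wraps after 2^32 increments, the loop simply increments again to skip it. The (*uint32) conversion only compiles if the generator is a named uint32 type, which is assumed in this runnable sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

// MonoIncSeqNumGenerator32 is assumed to be a named uint32 here; that is
// what makes the (*uint32)(m32) conversion above legal.
type MonoIncSeqNumGenerator32 uint32

func (m32 *MonoIncSeqNumGenerator32) GetSeqNum() uint32 {
	seq := atomic.AddUint32((*uint32)(m32), 1)
	for seq == 0 { // zero is reserved; skip it on wraparound
		seq = atomic.AddUint32((*uint32)(m32), 1)
	}
	return seq
}

func main() {
	var g MonoIncSeqNumGenerator32
	fmt.Println(g.GetSeqNum(), g.GetSeqNum()) // 1 2
}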
Example #5
func (rcv *TCP) handleConnection(conn net.Conn) {
	atomic.AddInt32(&rcv.active, 1)
	defer atomic.AddInt32(&rcv.active, -1)

	defer conn.Close()
	reader := bufio.NewReader(conn)

	for {
		conn.SetReadDeadline(time.Now().Add(2 * time.Minute))

		line, err := reader.ReadBytes('\n')

		if err != nil {
			if err == io.EOF {
				if len(line) > 0 {
					logrus.Warningf("[tcp] Unfinished line: %#v", line)
				}
			} else {
				atomic.AddUint32(&rcv.errors, 1)
				logrus.Error(err)
			}
			break
		}
		if len(line) > 0 { // skip empty lines
			if msg, err := points.ParseText(string(line)); err != nil {
				atomic.AddUint32(&rcv.errors, 1)
				logrus.Info(err)
			} else {
				atomic.AddUint32(&rcv.metricsReceived, 1)
				rcv.out <- msg
			}
		}
	}
}
Example #6
func doTarget(target string, pkg []string) {
	//println("DEBUG ", target, pkg)
	if onlyJS && target != "js" {
		results <- resChan{"Target " + target + " ignored", nil}
		return
	}
	var lastErr error
	exe := "bash"
	_, err := exec.LookPath(exe)
	if err != nil {
		switch exe {
		default:
			panic("error: executable not found: " + exe)
		}
	}
	out := []byte{}
	if target == "all" {
		prms := append([]string{"./testtgoall.sh"}, pkg...)
		out, lastErr = exec.Command(exe, prms...).CombinedOutput()
	} else {
		out, lastErr = exec.Command(exe, "./testtgo.sh", target, pkg[0]).CombinedOutput()
	}
	layout := "%-25s %s"
	for n := range pkg {
		if lastErr != nil {
			//out = append(out, []byte(lastErr.Error())...)
			scores[fmt.Sprintf(layout, pkg[n], target)] = "Fail"
			atomic.AddUint32(&failures, 1)
		} else {
			scores[fmt.Sprintf(layout, pkg[n], target)] = "Pass"
			atomic.AddUint32(&passes, 1)
		}
	}
	results <- resChan{string(out), lastErr}
}
Example #7
func (mux *SimpleMux) getNextSessID() uint64 {
	baseID := atomic.AddUint32(&(mux.nextSessID), 1)
	for baseID == 0 {
		baseID = atomic.AddUint32(&(mux.nextSessID), 1)
	}
	return ((uint64(time.Now().Unix()) << 32) | uint64(baseID))
}
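getNextSessID packs the current Unix timestamp into the high 32 bits and the zero-skipping counter into the low 32 bits, so an ID can only repeat if the counter wraps a full 2^32 cycle within a single second. A hypothetical helper (not part of the original API) that inverts the packing:

// splitSessID recovers the parts of an ID built as (unixSeconds << 32) | counter.
// Illustrative only; SimpleMux does not expose such a function.
func splitSessID(id uint64) (unixSeconds int64, counter uint32) {
	return int64(id >> 32), uint32(id)
}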
Example #8
func TestCache(t *testing.T) {

	iris.ResetDefault()

	expectedBodyStr := "Imagine it as a big message to achieve x20 response performance!"
	var textCounter, htmlCounter uint32

	iris.Get("/text", iris.Cache(func(ctx *iris.Context) {
		atomic.AddUint32(&textCounter, 1)
		ctx.Text(iris.StatusOK, expectedBodyStr)
	}, cacheDuration))

	iris.Get("/html", iris.Cache(func(ctx *iris.Context) {
		atomic.AddUint32(&htmlCounter, 1)
		ctx.HTML(iris.StatusOK, expectedBodyStr)
	}, cacheDuration))

	e := httptest.New(iris.Default, t)

	// test cache on text/plain
	if err := runCacheTest(e, "/text", &textCounter, expectedBodyStr, "text/plain"); err != nil {
		t.Fatal(err)
	}

	// test cache on text/html
	if err := runCacheTest(e, "/html", &htmlCounter, expectedBodyStr, "text/html"); err != nil {
		t.Fatal(err)
	}
}
Example #9
func (pool *streampool) pickupstreams(udp bool) []*upstream {
	pool.waitforalive()

	// pick udp and tcp equally
	pool.RLock()
	defer pool.RUnlock()

	// pick one of each

	switch {
	case udp && pool.udplen > 0:
		rn := int(atomic.AddUint32(&pool.rn, 1) - 1)
		return []*upstream{pool.udpool[rn%pool.udplen]}
	case pool.tcplen > 0 && pool.udplen > 0:
		// pick one of each
		rn := int(atomic.AddUint32(&pool.rn, 1) - 1)
		return []*upstream{
			pool.udpool[rn%pool.udplen],
			pool.tcpool[rn%pool.tcplen],
		}
	case pool.tcplen == 0 || pool.udplen == 0:
		// pick 2 alive upstreams
		rn := int(atomic.AddUint32(&pool.rn, 2) - 2)

		return []*upstream{
			pool.alived[rn%pool.alvlen],
			pool.alived[(rn+1)%pool.alvlen],
		}
	}
	logrus.Warnln("no upstream available for pick")
	return nil
}
Example #10
func loadStats(f io.Reader) {
	s := bufio.NewScanner(f)
	wg := new(sync.WaitGroup)
	tmp := make([][]byte, sNum)
	unkC = 0
	srch := func(b [][]byte) {
		for _, v := range b {
			t := root.search(convertIP(v))
			if t == nil {
				atomic.AddUint32(&unkC, 1)
			} else {
				atomic.AddUint32(&t.count, 1)
			}
		}
		wg.Done()
	}

	i := 0
	for s.Scan() {
		tmp[i] = make([]byte, len(s.Bytes()))
		copy(tmp[i], s.Bytes())
		i++
		if i == sNum {
			wg.Add(1)
			go srch(tmp)
			i, tmp = 0, make([][]byte, sNum)
		}
	}
	if i > 0 {
		wg.Add(1)
		go srch(tmp[:i])
	}
	wg.Wait()
}
Example #11
// save stat
func (p *Whisper) doCheckpoint() {
	updateOperations := atomic.LoadUint32(&p.updateOperations)
	commitedPoints := atomic.LoadUint32(&p.commitedPoints)
	atomic.AddUint32(&p.updateOperations, -updateOperations)
	atomic.AddUint32(&p.commitedPoints, -commitedPoints)

	created := atomic.LoadUint32(&p.created)
	atomic.AddUint32(&p.created, -created)

	logrus.WithFields(logrus.Fields{
		"updateOperations": int(updateOperations),
		"commitedPoints":   int(commitedPoints),
		"created":          int(created),
	}).Info("[persister] doCheckpoint()")

	p.Stat("updateOperations", float64(updateOperations))
	p.Stat("commitedPoints", float64(commitedPoints))
	if updateOperations > 0 {
		p.Stat("pointsPerUpdate", float64(commitedPoints)/float64(updateOperations))
	} else {
		p.Stat("pointsPerUpdate", 0.0)
	}

	p.Stat("created", float64(created))

}
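doCheckpoint drains each counter by loading it and then subtracting the loaded value (unsigned negation wraps, so -n acts as a subtraction) rather than storing zero: increments that land between the Load and the Add survive into the next checkpoint instead of being lost. The stat workers in the examples below use the same pattern. As a hypothetical reusable helper:

// drain atomically takes the current value out of a counter.
// Load-then-subtract keeps increments that race in between the two
// calls; a plain atomic.StoreUint32(c, 0) would silently drop them.
func drain(c *uint32) uint32 {
	n := atomic.LoadUint32(c)
	atomic.AddUint32(c, -n) // wraps: equivalent to subtracting n
	return n
}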
Example #12
func TestSemaphoreMultipleGoroutines(t *testing.T) {
	var done uint32
	sem := NewSemaphore(0)
	sem2 := NewSemaphore(0)
	go func() {
		sem.Wait()
		atomic.AddUint32(&done, 1)
		sem2.Post()
	}()
	go func() {
		time.Sleep(10 * time.Nanosecond)
		atomic.AddUint32(&done, 1)
		sem.Post()
	}()
	go func() {
		time.Sleep(20 * time.Nanosecond)
		atomic.AddUint32(&done, 1)
		sem.Post()
	}()
	sem.Wait()
	go func() {
		time.Sleep(10 * time.Nanosecond)
		atomic.AddUint32(&done, 1)
		sem.Post()
	}()
	sem.Wait()
	sem2.Wait()
	doneVal := atomic.LoadUint32(&done)
	if doneVal != 4 {
		t.Fatalf("sem.Wait did not wait for sem.Posts")
	}
}
Example #13
func (rcv *UDP) statWorker(exit chan bool) {
	ticker := time.NewTicker(rcv.metricInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			metricsReceived := atomic.LoadUint32(&rcv.metricsReceived)
			atomic.AddUint32(&rcv.metricsReceived, -metricsReceived)
			rcv.Stat("udp.metricsReceived", float64(metricsReceived))

			incompleteReceived := atomic.LoadUint32(&rcv.incompleteReceived)
			atomic.AddUint32(&rcv.incompleteReceived, -incompleteReceived)
			rcv.Stat("udp.incompleteReceived", float64(incompleteReceived))

			errors := atomic.LoadUint32(&rcv.errors)
			atomic.AddUint32(&rcv.errors, -errors)
			rcv.Stat("udp.errors", float64(errors))

			logrus.WithFields(logrus.Fields{
				"metricsReceived":    int(metricsReceived),
				"incompleteReceived": int(incompleteReceived),
				"errors":             int(errors),
			}).Info("[udp] doCheckpoint()")

		case <-exit:
			rcv.conn.Close()
			return
		}
	}
}
Example #14
// Get returns an existing connection from the pool or creates a new one.
func (p *ConnPool) Get() (*Conn, error) {
	if p.Closed() {
		return nil, ErrClosed
	}

	atomic.AddUint32(&p.stats.Requests, 1)

	// Fetch first non-idle connection, if available.
	if cn := p.First(); cn != nil {
		atomic.AddUint32(&p.stats.Hits, 1)
		return cn, nil
	}

	// Try to create a new one.
	if p.conns.Reserve() {
		cn, err := p.NewConn()
		if err != nil {
			p.conns.CancelReservation()
			return nil, err
		}
		p.conns.Add(cn)
		return cn, nil
	}

	// Otherwise, wait for the available connection.
	atomic.AddUint32(&p.stats.Waits, 1)
	if cn := p.wait(); cn != nil {
		return cn, nil
	}

	atomic.AddUint32(&p.stats.Timeouts, 1)
	return nil, ErrPoolTimeout
}
Example #15
func (u *urlCache) getURL(url string) ([]byte, error) {
	r, ok := u.cache.get(url)
	if ok {
		atomic.AddUint32(&u.hit, 1)
		return r, nil
	}
	c, err := u.client.Get(url)
	if err != nil {
		atomic.AddUint32(&u.errors, 1)
		return nil, err
	}
	defer c.Body.Close()
	// TODO(katjoyce): Add caching of permanent errors.
	if c.StatusCode != 200 {
		atomic.AddUint32(&u.badStatus, 1)
		return nil, fmt.Errorf("can't deal with status %d", c.StatusCode)
	}
	r, err = ioutil.ReadAll(c.Body)
	if err != nil {
		atomic.AddUint32(&u.readFail, 1)
		return nil, err
	}
	atomic.AddUint32(&u.miss, 1)
	u.cache.set(url, r)
	return r, nil
}
Example #16
func (pc *ProxyCounter) GetList(args *storageproto.GetArgs, reply *storageproto.GetListReply) error {
	if pc.override {
		reply.Status = pc.overrideStatus
		return pc.overrideErr
	}
	byteCount := len(args.Key)
	if args.WantLease {
		atomic.AddUint32(&pc.leaseRequestCount, 1)
	}
	if pc.disableLease {
		args.WantLease = false
	}
	err := pc.srv.Call("StorageRPC.GetList", args, reply)
	for _, s := range reply.Value {
		byteCount += len(s)
	}
	if reply.Lease.Granted {
		if pc.overrideLeaseSeconds > 0 {
			reply.Lease.ValidSeconds = pc.overrideLeaseSeconds
		}
		atomic.AddUint32(&pc.leaseGrantedCount, 1)
	}
	atomic.AddUint32(&pc.rpcCount, 1)
	atomic.AddUint32(&pc.byteCount, uint32(byteCount))
	return err
}
Example #17
// QueueChain adds the given chain to the queue to be posted to the log.
func (l *Logger) QueueChain(chain []*x509.Certificate) {
	if chain == nil {
		return
	}

	atomic.AddUint32(&l.queued, 1)
	// Has a chain for the cert this chain is for already been successfully
	// posted to the log by this Logger?
	h := hash(chain[0]) // Chains are cert -> root
	if l.postCertCache.get(h) {
		atomic.AddUint32(&l.reposted, 1)
		return // Don't post chain for a cert that has already had a chain posted.
	}
	// If we assume all chains for the same cert are equally
	// likely to succeed, then we could mark the cert as posted
	// here. However, bugs might cause a log to refuse one chain
	// and accept another, so try each unique chain.

	// Has this Logger already tried to post this chain?
	h = hashChain(chain)
	if l.postChainCache.get(h) {
		atomic.AddUint32(&l.chainReposted, 1)
		return
	}
	l.postChainCache.set(h, true)

	p := &toPost{chain: chain, retries: 5}
	l.postToLog(p)
}
Example #18
// ServeConn serves HTTP requests from the given connection.
//
// ServeConn returns nil if all requests from c are successfully served.
// It returns a non-nil error otherwise.
//
// Connection c must immediately propagate all the data passed to Write()
// to the client. Otherwise request processing may hang.
//
// ServeConn closes c before returning.
func (s *Server) ServeConn(c net.Conn) error {
	if s.MaxConnsPerIP > 0 {
		pic := wrapPerIPConn(s, c)
		if pic == nil {
			return ErrPerIPConnLimit
		}
		c = pic
	}

	n := atomic.AddUint32(&s.concurrency, 1)
	if n > uint32(s.getConcurrency()) {
		atomic.AddUint32(&s.concurrency, ^uint32(0))
		s.writeFastError(c, StatusServiceUnavailable, "The connection cannot be served because Server.Concurrency limit exceeded")
		c.Close()
		return ErrConcurrencyLimit
	}

	err := s.serveConn(c)

	atomic.AddUint32(&s.concurrency, ^uint32(0))

	if err != errHijacked {
		err1 := c.Close()
		if err == nil {
			err = err1
		}
	} else {
		err = nil
	}
	return err
}
Example #19
func TestCancelAndClose(t *testing.T) {
	cf := &testCFetcher{}
	ccf := CNew(cf)

	var canceled uint32

	done1 := make(chan struct{})
	cancel1 := make(chan struct{})
	go func() {
		_, err := ccf.CFetch(cancel1, "key")
		if err == nil {
			t.Fatalf("Gets nil, wants errors")
		}
		atomic.AddUint32(&canceled, 1)
		close(done1)
	}()

	done2 := make(chan struct{})
	go func() {
		_, err := ccf.CFetch(nil, "key")
		if err == nil {
			t.Fatalf("Gets nil, wants errors")
		}
		atomic.AddUint32(&canceled, 1)
		close(done2)
	}()

	done3 := make(chan struct{})
	go func() {
		_, err := ccf.CFetch(nil, "KEY")
		if err == nil {
			t.Fatalf("Gets nil, wants errors")
		}
		atomic.AddUint32(&canceled, 1)
		close(done3)
	}()

	close(cancel1)
	<-done1
	time.Sleep(10 * time.Millisecond) // Check if done2 isn't canceled
	if got := atomic.LoadUint32(&canceled); got != 1 {
		t.Fatalf("Gets %d canceled, wants 1", got)
	}
	if cf.cnt != 0 {
		t.Fatalf("Gets %d canceled internal calls, wants 0", canceled)
	}

	ccf.Close()
	<-done2
	<-done3
	cf.wg.Wait()
	time.Sleep(10 * time.Millisecond) // Wait for all cancel calls
	if got := atomic.LoadUint32(&canceled); got != 3 {
		t.Fatalf("Gets %d canceled, wants 3", got)
	}
	if cf.cnt != 2 {
		t.Fatalf(`Gets %d canceled internal calls, wants 2 ("key" and "KEY")`, cf.cnt)
	}
}
Example #20
func (arch *Archive) ScanCheckpointsFast(opts *CommandOptions) error {

	if opts.Concurrency == 0 {
		return errors.New("Zero concurrency")
	}

	var errs uint32
	tick := makeTicker(func(_ uint) {
		arch.ReportCheckpointStats()
	})

	var wg sync.WaitGroup
	wg.Add(opts.Concurrency)

	req := make(chan scanCheckpointFastReq)

	cats := Categories()
	go func() {
		for _, cat := range cats {
			for _, pth := range RangePaths(opts.Range) {
				req <- scanCheckpointFastReq{category: cat, pathprefix: pth}
			}
		}
		close(req)
	}()

	for i := 0; i < opts.Concurrency; i++ {
		go func() {
			for {
				r, ok := <-req
				if !ok {
					break
				}
				ch, es := arch.ListCategoryCheckpoints(r.category, r.pathprefix)
				for n := range ch {
					tick <- true
					arch.NoteCheckpointFile(r.category, n, true)
					if opts.Verify {
						atomic.AddUint32(&errs,
							noteError(arch.VerifyCategoryCheckpoint(r.category, n)))
					}
				}
				atomic.AddUint32(&errs, drainErrors(es))
			}
			wg.Done()
		}()
	}

	wg.Wait()
	close(tick)
	log.Printf("Checkpoint files scanned with %d errors", errs)
	arch.ReportCheckpointStats()
	if errs != 0 {
		return fmt.Errorf("%d errors scanning checkpoints", errs)
	}
	return nil
}
Example #21
func (l *Logger) postServer() {
	for {
		c := <-l.toPost
		atomic.AddUint32(&l.active, 1)
		l.postChain(c)
		atomic.AddUint32(&l.active, ^uint32(0))
		l.wg.Done()
	}
}
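postServer brackets each post with an increment on entry and a wraparound decrement on exit, so l.active is a point-in-time gauge of in-flight posts that other goroutines can read with atomic.LoadUint32. The shape of the pattern, as an illustrative sketch:

// track increments a gauge for the duration of work and decrements it
// on return; readers see the number of calls currently in flight.
func track(active *uint32, work func()) {
	atomic.AddUint32(active, 1)
	defer atomic.AddUint32(active, ^uint32(0)) // decrement
	work()
}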
Example #22
// Listen bind port. Receive messages and send to out channel
func (rcv *TCP) Listen(addr *net.TCPAddr) error {
	var err error
	rcv.listener, err = net.ListenTCP("tcp", addr)
	if err != nil {
		return err
	}

	go func() {
		ticker := time.NewTicker(time.Minute)
		defer ticker.Stop()

		for {
			select {
			case <-ticker.C:
				cnt := atomic.LoadUint32(&rcv.metricsReceived)
				atomic.AddUint32(&rcv.metricsReceived, -cnt)
				rcv.Stat("metricsReceived", float64(cnt))

				rcv.Stat("active", float64(atomic.LoadInt32(&rcv.active)))

				errors := atomic.LoadUint32(&rcv.errors)
				atomic.AddUint32(&rcv.errors, -errors)
				rcv.Stat("errors", float64(errors))
			case <-rcv.exit:
				rcv.listener.Close()
				return
			}
		}
	}()

	handler := rcv.handleConnection
	if rcv.isPickle {
		handler = rcv.handlePickle
	}

	go func() {
		defer rcv.listener.Close()

		for {

			conn, err := rcv.listener.Accept()
			if err != nil {
				if strings.Contains(err.Error(), "use of closed network connection") {
					break
				}
				logrus.Warningf("[tcp] Failed to accept connection: %s", err)
				continue
			}

			go handler(conn)
		}

	}()

	return nil
}
Example #23
func store(p *Whisper, values *points.Points) {
	path := filepath.Join(p.rootPath, strings.Replace(values.Metric, ".", "/", -1)+".wsp")

	w, err := whisper.Open(path)
	if err != nil {
		schema := p.schemas.match(values.Metric)
		if schema == nil {
			logrus.Errorf("[persister] No storage schema defined for %s", values.Metric)
			return
		}

		aggr := p.aggregation.match(values.Metric)
		if aggr == nil {
			logrus.Errorf("[persister] No storage aggregation defined for %s", values.Metric)
			return
		}

		logrus.WithFields(logrus.Fields{
			"retention":    schema.retentionStr,
			"schema":       schema.name,
			"aggregation":  aggr.name,
			"xFilesFactor": aggr.xFilesFactor,
			"method":       aggr.aggregationMethodStr,
		}).Debugf("[persister] Creating %s", path)

		if err = os.MkdirAll(filepath.Dir(path), os.ModeDir|os.ModePerm); err != nil {
			logrus.Error(err)
			return
		}

		w, err = whisper.Create(path, schema.retentions, aggr.aggregationMethod, float32(aggr.xFilesFactor))
		if err != nil {
			logrus.Errorf("[persister] Failed to create new whisper file %s: %s", path, err.Error())
			return
		}

		atomic.AddUint32(&p.created, 1)
	}

	points := make([]*whisper.TimeSeriesPoint, len(values.Data))
	for i, r := range values.Data {
		points[i] = &whisper.TimeSeriesPoint{Time: int(r.Timestamp), Value: r.Value}
	}

	atomic.AddUint32(&p.commitedPoints, uint32(len(values.Data)))
	atomic.AddUint32(&p.updateOperations, 1)

	defer w.Close()

	defer func() {
		if r := recover(); r != nil {
			logrus.Errorf("[persister] UpdateMany %s recovered: %s", path, r)
		}
	}()
	w.UpdateMany(points)
}
Example #24
func TestLRUCache_ConcurrentSetGet(t *testing.T) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	seed := time.Now().UnixNano()
	t.Logf("seed=%d", seed)

	const (
		N = 2000000
		M = 4000
		C = 3
	)

	var set, get uint32

	wg := &sync.WaitGroup{}
	c := NewLRUCache(M / 4)
	for ni := uint64(0); ni < C; ni++ {
		r0 := rand.New(rand.NewSource(seed + int64(ni)))
		r1 := rand.New(rand.NewSource(seed + int64(ni) + 1))
		ns := c.GetNamespace(ni)

		wg.Add(2)
		go func(ns Namespace, r *rand.Rand) {
			for i := 0; i < N; i++ {
				x := uint64(r.Int63n(M))
				o := ns.Get(x, func() (int, interface{}) {
					atomic.AddUint32(&set, 1)
					return 1, x
				})
				if v := o.Value().(uint64); v != x {
					t.Errorf("#%d invalid value, got=%d", x, v)
				}
				o.Release()
			}
			wg.Done()
		}(ns, r0)
		go func(ns Namespace, r *rand.Rand) {
			for i := 0; i < N; i++ {
				x := uint64(r.Int63n(M))
				o := ns.Get(x, nil)
				if o != nil {
					atomic.AddUint32(&get, 1)
					if v := o.Value().(uint64); v != x {
						t.Errorf("#%d invalid value, got=%d", x, v)
					}
					o.Release()
				}
			}
			wg.Done()
		}(ns, r1)
	}

	wg.Wait()

	t.Logf("set=%d get=%d", set, get)
}
Example #25
// QueueChain queues the given chain to be fixed wrt the roots of the logger
// contained in fl, and then logged to the Certificate Transparency log
// represented by the logger.  Note: chain is expected to be in the order of
// cert --> root.
func (fl *FixAndLog) QueueChain(chain []*x509.Certificate) {
	if chain != nil {
		if fl.logger.IsPosted(chain[0]) {
			atomic.AddUint32(&fl.alreadyPosted, 1)
			return
		}
		fl.fixer.QueueChain(chain[0], chain, fl.logger.RootCerts())
		atomic.AddUint32(&fl.chainsSent, 1)
	}
}
Example #26
func NewUUID(version byte) (uuid UUID, err error) {

	switch version {
	case UUIDv1:
		seq := atomic.AddUint32(&monotonic_v1, 1)
		now := uint64(time.Now().UnixNano()/100 + 12219292800000)
		binary.BigEndian.PutUint32(uuid[0:4], uint32(now))
		binary.BigEndian.PutUint16(uuid[4:6], uint16(now>>32))
		binary.BigEndian.PutUint16(uuid[6:8], uint16(now>>48))
		binary.BigEndian.PutUint16(uuid[8:10], uint16(seq))
		copy(uuid[10:16], HardWareAddress[0:6])
		uuid[6] = (uuid[6] & 0x0f) | (UUIDv1)
		uuid[8] = (uuid[8] & 0x3f) | (UUID_RFC)
		return

	case UUIDv1MacRand:
		seq := atomic.AddUint32(&monotonic_v1, 1)
		now := uint64(time.Now().UnixNano()/100 + 12219292800000)
		binary.BigEndian.PutUint32(uuid[0:4], uint32(now))
		binary.BigEndian.PutUint16(uuid[4:6], uint16(now>>32))
		binary.BigEndian.PutUint16(uuid[6:8], uint16(now>>48))
		binary.BigEndian.PutUint16(uuid[8:10], uint16(seq))
		_, err = rand.Read(uuid[10:16])
		uuid[6] = (uuid[6] & 0x0f) | (UUIDv1)
		uuid[8] = (uuid[8] & 0x3f) | (UUID_RFC)
		return

	case UUIDv1_timestamp:
		seq := atomic.AddUint32(&monotonic_v1, 1)
		now := uint64(time.Now().UnixNano()/100 + 12219292800000)
		binary.BigEndian.PutUint32(uuid[0:4], uint32(now))
		binary.BigEndian.PutUint16(uuid[4:6], uint16(now>>32))
		binary.BigEndian.PutUint16(uuid[6:8], uint16(now>>48))
		binary.BigEndian.PutUint16(uuid[8:10], uint16(seq))

		uuid[6] = (uuid[6] & 0x0f) | (UUIDv1)
		uuid[8] = (uuid[8] & 0x3f) | (UUID_RFC)
		return

	case UUIDv4:
		_, err = rand.Read(uuid[:])
		if err != nil {
			return
		}
		uuid[6] = (uuid[6] & 0x0f) | (UUIDv4)
		uuid[8] = (uuid[8] & 0x3f) | (UUID_RFC)
		return

	default:
		err = errors.New("cant generate this uuid")
		return
	}

}
Example #27
func (pc *proxyCounter) Delete(args *storagerpc.DeleteArgs, reply *storagerpc.DeleteReply) error {
	if pc.override {
		reply.Status = pc.overrideStatus
		return pc.overrideErr
	}
	byteCount := len(args.Key)
	err := pc.srv.Call("StorageServer.Delete", args, reply)
	atomic.AddUint32(&pc.rpcCount, 1)
	atomic.AddUint32(&pc.byteCount, uint32(byteCount))
	return err
}
Example #28
func (pc *proxyCounter) AppendToList(args *storagerpc.PutArgs, reply *storagerpc.PutReply) error {
	if pc.override {
		reply.Status = pc.overrideStatus
		return pc.overrideErr
	}
	byteCount := len(args.Key) + len(args.Value)
	err := pc.srv.Call("StorageServer.AppendToList", args, reply)
	atomic.AddUint32(&pc.rpcCount, 1)
	atomic.AddUint32(&pc.byteCount, uint32(byteCount))
	return err
}
Example #29
func (pc *ProxyCounter) RemoveFromList(args *storageproto.PutArgs, reply *storageproto.PutReply) error {
	if pc.override {
		reply.Status = pc.overrideStatus
		return pc.overrideErr
	}
	byteCount := len(args.Key) + len(args.Value)
	err := pc.srv.Call("StorageRPC.RemoveFromList", args, reply)
	atomic.AddUint32(&pc.rpcCount, 1)
	atomic.AddUint32(&pc.byteCount, uint32(byteCount))
	return err
}
Example #30
// Get returns buffer with length of n.
func (p *BufferPool) Get(n int) []byte {
	atomic.AddUint32(&p.get, 1)

	if poolNum := p.poolNum(n); poolNum == 0 {
		// Fast path.
		if b, ok := p.pool[0].Get().([]byte); ok {
			switch {
			case cap(b) > n:
				atomic.AddUint32(&p.less, 1)
				return b[:n]
			case cap(b) == n:
				atomic.AddUint32(&p.equal, 1)
				return b[:n]
			default:
				panic("not reached")
			}
		} else {
			atomic.AddUint32(&p.miss, 1)
		}

		return make([]byte, n, p.baseline0)
	} else {
		sizePtr := &p.size[poolNum-1]

		if b, ok := p.pool[poolNum].Get().([]byte); ok {
			switch {
			case cap(b) > n:
				atomic.AddUint32(&p.less, 1)
				return b[:n]
			case cap(b) == n:
				atomic.AddUint32(&p.equal, 1)
				return b[:n]
			default:
				atomic.AddUint32(&p.greater, 1)
				if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
					p.pool[poolNum].Put(b)
				}
			}
		} else {
			atomic.AddUint32(&p.miss, 1)
		}

		if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
			if size == 0 {
				atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
			} else {
				sizeMissPtr := &p.sizeMiss[poolNum-1]
				if atomic.AddUint32(sizeMissPtr, 1) == 20 {
					atomic.StoreUint32(sizePtr, uint32(n))
					atomic.StoreUint32(sizeMissPtr, 0)
				}
			}
			return make([]byte, n)
		} else {
			return make([]byte, n, size)
		}
	}
}
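When a pool class sees its first request, the code above publishes the observed size with CompareAndSwapUint32, so concurrent first requests race and exactly one wins; after that, the twentieth over-size request re-seeds the size via StoreUint32. The first-writer-wins step in isolation (a sketch, assuming zero means "unset"):

// publishOnce installs v only if *p is still unset (zero). Exactly one
// of any concurrent callers succeeds, mirroring the sizePtr CAS above.
func publishOnce(p *uint32, v uint32) bool {
	return atomic.CompareAndSwapUint32(p, 0, v)
}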