Example #1
func newBuffer(size int64, server *Server) (*buffer, error) {
	if size < 0 {
		return nil, bufio.ErrNegativeCount
	}

	if size == 0 {
		size = DefaultBufferSize
	}

	if !powerOfTwo64(size) {
		fmt.Printf("Size must be power of two. Try %d.", roundUpPowerOfTwo64(size))
		return nil, fmt.Errorf("Size must be power of two. Try %d.", roundUpPowerOfTwo64(size))
	}

	return &buffer{
		id:         atomic.AddInt64(&bufcnt, 1),
		readIndex:  int64(0),
		writeIndex: int64(0),
		buf:        make([]*[]byte, size),
		size:       size,
		mask:       size - 1,
		pcond:      sync.NewCond(new(sync.Mutex)),
		ccond:      sync.NewCond(new(sync.Mutex)),
		_server:    server,
		b:          make([]byte, 5),
	}, nil
}
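The power-of-two requirement is what makes the mask field useful: when size is a power of two, size-1 is an all-ones bit mask, so the ring buffer can wrap a monotonically growing index with a cheap bitwise AND instead of a modulo. A minimal, self-contained sketch (not from the original project) demonstrating the equivalence:

package main

import "fmt"

func main() {
	const size = 8 // must be a power of two
	const mask = size - 1

	for i := int64(0); i < 20; i++ {
		// For power-of-two sizes, i & mask equals i % size.
		fmt.Printf("index %2d -> slot %d\n", i, i&mask)
	}
}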
Example #2
func newFD(sysfd int) (*FD, error) {
	// Set sysfd to non-blocking mode
	err := syscall.SetNonblock(sysfd, true)
	if err != nil {
		debugf("FD %03d: NF: SetNonBlock: %s", sysfd, err.Error())
		return nil, err
	}
	// Check if sysfd is select-able
	// NOTE(npat): Is this useful? Can it ever fail?
	var rs fdSet
	var tv syscall.Timeval
	rs.Zero()
	rs.Set(sysfd)
	_, err = uxSelect(sysfd+1, &rs, nil, nil, &tv)
	if err != nil {
		debugf("FD %03d: NF: select(2): %s", sysfd, err.Error())
		return nil, err
	}
	// Initialize FD. We don't generate arbitrary ids; instead we
	// use sysfd as the fdMap id. In effect, fd.id is always ==
	// fd.sysfd. Remember that sysfds can be reused; we must be
	// careful not to allow this to mess things up.
	fd := &FD{id: sysfd, sysfd: sysfd}
	fd.r.cond = sync.NewCond(&fd.r.mu)
	fd.w.cond = sync.NewCond(&fd.w.mu)
	// Add to fdMap
	fdM.AddFD(fd)
	debugf("FD %03d: NF: Ok", fd.id)
	return fd, nil
}
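As for the NOTE above: select(2) can indeed fail here, most obviously with EBADF for an invalid descriptor, and since classic fd_set bitmaps only cover descriptors below FD_SETSIZE (commonly 1024), the probe also catches fds that a select-based poller could never service.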
Example #3
// NewConnection creates a new KCP connection between local and remote.
func NewConnection(conv uint16, writerCloser io.WriteCloser, local *net.UDPAddr, remote *net.UDPAddr, block internet.Authenticator) *Connection {
	log.Info("KCP|Connection: creating connection ", conv)

	conn := new(Connection)
	conn.local = local
	conn.remote = remote
	conn.block = block
	conn.writer = writerCloser
	conn.since = nowMillisec()
	conn.dataInputCond = sync.NewCond(new(sync.Mutex))
	conn.dataOutputCond = sync.NewCond(new(sync.Mutex))

	authWriter := &AuthenticationWriter{
		Authenticator: block,
		Writer:        writerCloser,
	}
	conn.conv = conv
	conn.output = NewSegmentWriter(authWriter)

	conn.mss = authWriter.Mtu() - DataSegmentOverhead
	conn.roundTrip = &RoundTripInfo{
		rto:    100,
		minRtt: effectiveConfig.Tti,
	}
	conn.interval = effectiveConfig.Tti
	conn.receivingWorker = NewReceivingWorker(conn)
	conn.fastresend = 2
	conn.congestionControl = effectiveConfig.Congestion
	conn.sendingWorker = NewSendingWorker(conn)

	go conn.updateTask()

	return conn
}
Example #4
func newFD(sysfd int) (*FD, error) {
	// Set sysfd to non-blocking mode
	err := syscall.SetNonblock(sysfd, true)
	if err != nil {
		debugf("FD xxx: NF: sysfd=%d, err=%v", sysfd, err)
		return nil, err
	}
	// Initialize FD
	fd := &FD{sysfd: sysfd}
	fd.id = fdM.GetID()
	fd.r.cond = sync.NewCond(&fd.r.mu)
	fd.w.cond = sync.NewCond(&fd.w.mu)
	// Add to the epoll set. We may immediately start receiving events
	// after this. They will be dropped since the FD is not yet in
	// fdMap. It's ok. Nobody is waiting on this FD yet, anyway.
	ev := syscall.EpollEvent{
		Events: syscall.EPOLLIN |
			syscall.EPOLLOUT |
			syscall.EPOLLRDHUP |
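			// Editorial note: syscall.EPOLLET is declared as a negative
			// constant in the Linux syscall package; masking with 0xffffffff
			// keeps its low 32 bits so the edge-triggered flag fits the
			// uint32 Events field.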
			(syscall.EPOLLET & 0xffffffff),
		Fd: int32(fd.id)}
	err = syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, fd.sysfd, &ev)
	if err != nil {
		debugf("FD %03d: NF: sysfd=%d, err=%v", fd.id, fd.sysfd, err)
		return nil, err
	}
	// Add to fdMap
	fdM.AddFD(fd)
	debugf("FD %03d: NF: sysfd=%d", fd.id, fd.sysfd)
	return fd, nil
}
Example #5
func newBuffer(size int64) (*buffer, error) {
	if size < 0 {
		return nil, bufio.ErrNegativeCount
	}

	if size == 0 {
		size = defaultBufferSize
	}

	if !powerOfTwo64(size) {
		return nil, fmt.Errorf("Size must be power of two. Try %d.", roundUpPowerOfTwo64(size))
	}

	if size < 2*defaultReadBlockSize {
		return nil, fmt.Errorf("Size must at least be %d. Try %d.", 2*defaultReadBlockSize, 2*defaultReadBlockSize)
	}

	return &buffer{
		id:    atomic.AddInt64(&bufcnt, 1),
		buf:   make([]byte, size),
		size:  size,
		mask:  size - 1,
		pseq:  newSequence(),
		cseq:  newSequence(),
		pcond: sync.NewCond(new(sync.Mutex)),
		ccond: sync.NewCond(new(sync.Mutex)),
		cwait: 0,
		pwait: 0,
	}, nil
}
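Both newBuffer variants on this page depend on two helpers that aren't shown, powerOfTwo64 and roundUpPowerOfTwo64. Hypothetical implementations using standard bit tricks might look like the following; the projects' actual versions may differ:

// powerOfTwo64 reports whether n is a positive power of two.
func powerOfTwo64(n int64) bool {
	return n > 0 && n&(n-1) == 0
}

// roundUpPowerOfTwo64 returns the smallest power of two >= n.
func roundUpPowerOfTwo64(n int64) int64 {
	n--
	n |= n >> 1
	n |= n >> 2
	n |= n >> 4
	n |= n >> 8
	n |= n >> 16
	n |= n >> 32
	n++
	return n
}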
Example #6
File: node.go Project: Mic92/docker
// New returns new Node instance.
func New(c *Config) (*Node, error) {
	if err := os.MkdirAll(c.StateDir, 0700); err != nil {
		return nil, err
	}
	stateFile := filepath.Join(c.StateDir, stateFilename)
	dt, err := ioutil.ReadFile(stateFile)
	var p []api.Peer
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	if err == nil {
		if err := json.Unmarshal(dt, &p); err != nil {
			return nil, err
		}
	}

	n := &Node{
		remotes:              newPersistentRemotes(stateFile, p...),
		role:                 ca.WorkerRole,
		config:               c,
		started:              make(chan struct{}),
		stopped:              make(chan struct{}),
		closed:               make(chan struct{}),
		ready:                make(chan struct{}),
		certificateRequested: make(chan struct{}),
		notifyNodeChange:     make(chan *api.Node, 1),
	}
	n.roleCond = sync.NewCond(n.RLocker())
	n.connCond = sync.NewCond(n.RLocker())
	if err := n.loadCertificates(); err != nil {
		return nil, err
	}
	return n, nil
}
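Note the locker choice here: sync.NewCond accepts any sync.Locker, and both conds are built on n.RLocker(), presumably the read half of an embedded sync.RWMutex, so many goroutines can wait concurrently while holding only read locks. A standalone demo of the same pattern (names are illustrative, not taken from the project):

package main

import "sync"

func main() {
	var mu sync.RWMutex
	cond := sync.NewCond(mu.RLocker()) // Wait releases/reacquires the read lock
	ready := false

	go func() {
		mu.Lock() // writers still take the full write lock
		ready = true
		mu.Unlock()
		cond.Broadcast()
	}()

	mu.RLock()
	for !ready {
		cond.Wait()
	}
	mu.RUnlock()
}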
Example #7
func main() {
	var mtx sync.Mutex
	var cnd *sync.Cond
	var cnds [N]*sync.Cond
	var mtxs [N]sync.Mutex
	cnd = sync.NewCond(&mtx)
	for i := 0; i < N; i++ {
		cnds[i] = sync.NewCond(&mtxs[i])
	}
	for i := 0; i < N; i++ {
		go func(me int, m *sync.Mutex, c1 *sync.Cond, c2 *sync.Cond) {
			fmt.Printf("Hello, world. %d\n", me)
			if me == 0 {
				cnd.Signal()
			}
			for j := 0; j < 10000000; j++ {
				m.Lock()
				c1.Wait()
				m.Unlock()
				c2.Signal()
			}
			if me == N-1 {
				cnd.Signal()
			}
		}(i, &mtxs[i], cnds[i], cnds[(i+1)%N])
	}
	mtx.Lock()
	cnd.Wait()
	mtx.Unlock()
	cnds[0].Signal()
	mtx.Lock()
	cnd.Wait()
	mtx.Unlock()
}
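This benchmark works only because of careful sequencing: a Signal delivered while nobody is parked in Wait is simply lost. The general defense is the guarded-wait idiom, pairing every Wait with a predicate, as in this generic sketch (not part of the benchmark above):

package main

import "sync"

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)
	done := false

	go func() {
		mu.Lock()
		done = true // publish the state change before signaling
		mu.Unlock()
		cond.Signal()
	}()

	mu.Lock()
	for !done { // re-check the predicate; protects against lost or spurious wakeups
		cond.Wait()
	}
	mu.Unlock()
}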
Example #8
// iterator creates a new iterator for a named auxiliary field.
func (a auxIteratorFields) iterator(name string, typ DataType) Iterator {
	for _, f := range a {
		// Skip this field if its name doesn't match, or if a concrete
		// type was requested and the field's type differs.
		if f.name != name || (typ != Unknown && f.typ != typ) {
			continue
		}

		// Create channel iterator by data type.
		switch f.typ {
		case Float:
			itr := &floatChanIterator{cond: sync.NewCond(&sync.Mutex{})}
			f.append(itr)
			return itr
		case Integer:
			itr := &integerChanIterator{cond: sync.NewCond(&sync.Mutex{})}
			f.append(itr)
			return itr
		case String, Tag:
			itr := &stringChanIterator{cond: sync.NewCond(&sync.Mutex{})}
			f.append(itr)
			return itr
		case Boolean:
			itr := &booleanChanIterator{cond: sync.NewCond(&sync.Mutex{})}
			f.append(itr)
			return itr
		default:
			break
		}
	}

	return &nilFloatIterator{}
}
Example #9
func newSession(conn net.Conn, server bool) (*Transport, error) {
	var referenceCounter uint64
	if server {
		referenceCounter = 2
	} else {
		referenceCounter = 1
	}
	session := &Transport{
		receiverChan:     make(chan *channel),
		channelC:         sync.NewCond(new(sync.Mutex)),
		channels:         make(map[uint64]*channel),
		referenceCounter: referenceCounter,
		byteStreamC:      sync.NewCond(new(sync.Mutex)),
		byteStreams:      make(map[uint64]*byteStream),
		netConnC:         sync.NewCond(new(sync.Mutex)),
		netConns:         make(map[byte]map[string]net.Conn),
		networks:         make(map[string]byte),
	}

	spdyConn, spdyErr := spdystream.NewConnection(conn, server)
	if spdyErr != nil {
		return nil, spdyErr
	}
	go spdyConn.Serve(session.newStreamHandler)

	session.conn = spdyConn
	session.handler = session.initializeHandler()

	return session, nil
}
Example #10
// NewMemQueue returns a new wait queue that holds at most max items.
func NewMemQueue(max int) queue.WaitQueue {
	q := &MemQueue{
		max: max,
	}
	q.popCond = sync.NewCond(&q.mu)
	q.pushCond = sync.NewCond(&q.mu)
	return queue.WithChannel(q)
}
Example #11
func NewBuffer(n int) *Buffer {
	b := &Buffer{}
	b.capacity = n
	b.mu = sync.Mutex{}
	b.emptyCond = sync.NewCond(&b.mu)
	b.fullCond = sync.NewCond(&b.mu)
	return b
}
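The emptyCond/fullCond pair in NewBuffer is the classic two-condition bounded buffer. Below is a hedged sketch of the Put/Get protocol such a constructor implies; every field and method beyond those visible in NewBuffer is an assumption for illustration:

package buffer

import "sync"

type Buffer struct {
	mu        sync.Mutex
	emptyCond *sync.Cond // signaled when data arrives
	fullCond  *sync.Cond // signaled when space frees up
	data      []byte
	capacity  int
}

// Put blocks while the buffer is full, then appends one byte.
func (b *Buffer) Put(x byte) {
	b.mu.Lock()
	for len(b.data) == b.capacity {
		b.fullCond.Wait()
	}
	b.data = append(b.data, x)
	b.emptyCond.Signal()
	b.mu.Unlock()
}

// Get blocks while the buffer is empty, then removes one byte.
func (b *Buffer) Get() byte {
	b.mu.Lock()
	for len(b.data) == 0 {
		b.emptyCond.Wait()
	}
	x := b.data[0]
	b.data = b.data[1:]
	b.fullCond.Signal()
	b.mu.Unlock()
	return x
}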
Example #12
func newInstance(v *viper.Viper) *instance {
	var i = instance{
		viper: v,
		slots: v.GetInt("start.parallel"),
	}
	i.slotAvailable = sync.NewCond(&i.m)
	i.taskFinished = sync.NewCond(&i.m)
	return &i
}
Example #13
func newPipe(store buffer) (Reader, Writer) {
	p := &pipe{}
	p.rwait = sync.NewCond(&p.mu)
	p.wwait = sync.NewCond(&p.mu)
	p.store = store
	r := &reader{p}
	w := &writer{p}
	return r, w
}
Example #14
func memPipe() (a, b packetConn) {
	t1 := memTransport{}
	t2 := memTransport{}
	t1.write = &t2
	t2.write = &t1
	t1.Cond = sync.NewCond(&t1.Mutex)
	t2.Cond = sync.NewCond(&t2.Mutex)
	return &t1, &t2
}
Example #15
// init initializes the queue.
func (q *SliceQueue) init(writeslicenum int, sliceelemnum int) {
	q.sliceelemnum = sliceelemnum
	q.writeslicenum = writeslicenum
	q.readwait = sync.NewCond(&sync.Mutex{})
	q.writewait = sync.NewCond(&sync.Mutex{})
	q.readQueue = q.createSlice()
	q.writeQueue = make([]PowerLister, 0)
	q.writeQueue = append(q.writeQueue, q.createSlice())
	q.run = true
}
Example #16
func NewAccess() *Access {
	a := new(Access)
	a.readersWaiting = 0
	a.readersWorking = 0
	a.writersWaiting = 0
	a.writerWorking = false
	a.exclusion = new(sync.Mutex)
	a.readerAccess = sync.NewCond(a.exclusion)
	a.writerAccess = sync.NewCond(a.exclusion)
	return a
}
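NewAccess wires up a hand-rolled readers-writer lock: one mutex guards all the bookkeeping, with a condition variable per waiting class. Below is a hedged sketch of the reader-side protocol such a structure suggests; the method names and the writer-priority policy are assumptions, not the original project's code:

package access

import "sync"

type Access struct {
	readersWaiting int
	readersWorking int
	writersWaiting int
	writerWorking  bool
	exclusion      *sync.Mutex
	readerAccess   *sync.Cond
	writerAccess   *sync.Cond
}

// RLock blocks the caller until no writer is working or waiting.
func (a *Access) RLock() {
	a.exclusion.Lock()
	a.readersWaiting++
	for a.writerWorking || a.writersWaiting > 0 { // writers go first
		a.readerAccess.Wait()
	}
	a.readersWaiting--
	a.readersWorking++
	a.exclusion.Unlock()
}

// RUnlock releases a reader and wakes a writer once the last reader leaves.
func (a *Access) RUnlock() {
	a.exclusion.Lock()
	a.readersWorking--
	if a.readersWorking == 0 {
		a.writerAccess.Signal()
	}
	a.exclusion.Unlock()
}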
Example #17
func (tree *ProcessTree) NewSlaveNode(name string, parent *SlaveNode) *SlaveNode {
	x := &SlaveNode{}
	x.Parent = parent
	x.isBooted = false
	x.Name = name
	var mutex sync.Mutex
	x.bootWait = sync.NewCond(&mutex)
	x.restartWait = sync.NewCond(&mutex)
	x.Features = make(map[string]bool)
	x.ClientCommandPTYFileDescriptor = make(chan int)
	tree.SlavesByName[name] = x
	return x
}
Example #18
func NewNonBlockingReader(reader io.Reader, cap int) *NonBlockingReader {
	r := &NonBlockingReader{}
	r.reader = reader
	r.cap = cap
	r.lock = &sync.Mutex{}
	r.buffer = &bytes.Buffer{}
	r.readyCond = sync.NewCond(r.lock)
	r.unblockedCond = sync.NewCond(r.lock)

	go r.loop()

	return r
}
Example #19
// NewConnection creates a new KCP connection between local and remote.
func NewConnection(conv uint16, writerCloser io.WriteCloser, local *net.UDPAddr, remote *net.UDPAddr, block internet.Authenticator, config *Config) *Connection {
	log.Info("KCP|Connection: creating connection ", conv)

	conn := new(Connection)
	conn.local = local
	conn.remote = remote
	conn.block = block
	conn.writer = writerCloser
	conn.since = nowMillisec()
	conn.dataInputCond = sync.NewCond(new(sync.Mutex))
	conn.dataOutputCond = sync.NewCond(new(sync.Mutex))
	conn.Config = config

	authWriter := &AuthenticationWriter{
		Authenticator: block,
		Writer:        writerCloser,
		Config:        config,
	}
	conn.conv = conv
	conn.output = NewSegmentWriter(authWriter)

	conn.mss = authWriter.Mtu() - DataSegmentOverhead
	conn.roundTrip = &RoundTripInfo{
		rto:    100,
		minRtt: config.Tti.GetValue(),
	}
	conn.interval = config.Tti.GetValue()
	conn.receivingWorker = NewReceivingWorker(conn)
	conn.congestionControl = config.Congestion
	conn.sendingWorker = NewSendingWorker(conn)

	isTerminating := func() bool {
		return conn.State().Is(StateTerminating, StateTerminated)
	}
	isTerminated := func() bool {
		return conn.State() == StateTerminated
	}
	conn.dataUpdater = NewUpdater(
		conn.interval,
		predicate.Not(isTerminating).And(predicate.Any(conn.sendingWorker.UpdateNecessary, conn.receivingWorker.UpdateNecessary)),
		isTerminating,
		conn.updateTask)
	conn.pingUpdater = NewUpdater(
		5000, // 5 seconds
		predicate.Not(isTerminated),
		isTerminated,
		conn.updateTask)
	conn.pingUpdater.WakeUp()

	return conn
}
Example #20
func NewNonBlockingWriter(writer io.Writer, cap int) *NonBlockingWriter {
	w := &NonBlockingWriter{}
	w.writer = writer
	w.cap = cap
	w.lock = &sync.Mutex{}
	w.buffer = &bytes.Buffer{}
	w.readyCond = sync.NewCond(w.lock)
	w.unblockedCond = sync.NewCond(w.lock)
	w.closedChan = make(chan bool, 1)

	go w.loop()

	return w
}
Example #21
func (vcpu *Vcpu) initRunInfo() error {
	// Initialize our structure.
	e := syscall.Errno(C.kvm_run_init(C.int(vcpu.fd), &vcpu.RunInfo.info))
	if e != 0 {
		return e
	}

	// Setup the lock.
	vcpu.RunInfo.lock = &sync.Mutex{}
	vcpu.RunInfo.pause_event = sync.NewCond(vcpu.RunInfo.lock)
	vcpu.RunInfo.resume_event = sync.NewCond(vcpu.RunInfo.lock)

	// We're okay.
	return nil
}
Example #22
func (self *ActiveFileManager) PrepareUpload(fileExtension string, userKey string) string {
	self.Lock()
	defer self.Unlock()

	for {
		fileName := GenerateRandomString()
		if fileExtension != "" {
			fileName += "." + fileExtension
		}

		_, exists := self.activeFiles[fileName]
		if !exists {
			activeFile := &ActiveFile{
				fileName:          fileName,
				currentUpload:     nil,
				readLocker:        nil,
				dataAvailableCond: nil,
				timeout:           nil,
				userKey:           userKey,
				state:             activeFileStateNew,
			}

			activeFile.readLocker = activeFile.RLocker()
			activeFile.dataAvailableCond = sync.NewCond(activeFile.readLocker)
			activeFile.timeout = timeout.New(10*time.Second, func() {
				self.finishActiveFile(activeFile, fileName)
			})
			self.activeFiles[fileName] = activeFile

			return fileName
		}
	}
}
Example #23
func newCache(signers []*signer) cache {
	var mutex sync.Mutex
	return cache{
		signers: signers,
		cond:    sync.NewCond(&mutex),
	}
}
Example #24
func main() {

	pl := new(Pool)
	pl.available = 200
	pl.emptyCond = sync.NewCond(&pl.lock)

	maxrequest := 5000000
	status := make(chan int, 2)
	start := time.Now()

	for i := 1; i <= maxrequest; i++ {

		conn, _ := pl.pull()

		go func(i int, conn *GenConn, pl *Pool) {

			defer pl.push(conn)

			if ret := conn.Call(i); ret == maxrequest {
				status <- 1
			}

		}(i, conn, pl)
	}

	select {
	case <-status:
		fmt.Printf("Executed %v in %v\n", maxrequest, time.Since(start))
	case <-time.After(60e9):
		fmt.Println("Timeout", time.Since(start))
	}
}
Example #25
// NewExecutor creates an Executor and registers a callback on the
// system config.
func NewExecutor(db client.DB, gossip *gossip.Gossip, leaseMgr *LeaseManager, metaRegistry *metric.Registry, stopper *stop.Stopper) *Executor {
	exec := &Executor{
		db:       db,
		reCache:  parser.NewRegexpCache(512),
		leaseMgr: leaseMgr,

		latency: metaRegistry.Latency("sql.latency"),
	}
	exec.systemConfigCond = sync.NewCond(&exec.systemConfigMu)

	gossipUpdateC := gossip.RegisterSystemConfigChannel()
	stopper.RunWorker(func() {
		for {
			select {
			case <-gossipUpdateC:
				cfg := gossip.GetSystemConfig()
				exec.updateSystemConfig(cfg)
			case <-stopper.ShouldStop():
				return
			}
		}
	})

	return exec
}
Example #26
func TestBufferInsertData(t *testing.T) {
	table := []dataChunk{{nil, 1}, {nil, 2}, {nil, 4}}
	b := ioBuffer{table: table, newData: sync.NewCond(&sync.Mutex{})}

	b.insert(nil, 3)
	if len(b.table) != 4 {
		t.Fatal("data was not inserted")
	}
	if b.table[2].order != 3 {
		t.Fatal("wrong table index")
	}

	b.table = []dataChunk{{nil, 1}}
	b.insert(nil, 3)
	if len(b.table) != 2 {
		t.Fatal("data was not inserted, try two")
	}
	if b.table[1].order != 3 {
		t.Fatalf("wrong table index, try two (got %d)", b.table[1].order)
	}

	b.table = []dataChunk{{nil, 4}}
	b.insert(nil, 3)
	if len(b.table) != 2 {
		t.Fatal("data was not inserted, try three")
	}
	if b.table[0].order != 3 {
		t.Fatalf("wrong table index, try three (got %d)", b.table[0].order)
	}
}
Example #27
func TestTransitions(t *testing.T) {
	runtime.GOMAXPROCS(8)

	s := &Session{}
	s.stateChange = sync.NewCond(&s.stateMutex)
	s.stateListeners = map[int]int32{}
	s.state = StateConnecting

	numWaits := int32(1000)
	j := int32(0)
	for i := int32(0); i < numWaits; i++ {
		go func() {
			s.WaitForTransition(StateDisconnected)
			atomic.AddInt32(&j, 1)
		}()
	}
	for s.stateListeners[StateDisconnected] < numWaits {
		time.Sleep(time.Millisecond)
	}
	// Each goroutine needs to get its PC through the cond.Wait() call, so we
	// grab a momentary lock on the mutex to ensure every waiter is parked.
	s.stateMutex.Lock()
	done := make(chan struct{})
	go func() {
		s.Transition(StateDisconnected)
		s.Transition(StateConnecting)
		done <- struct{}{}
	}()
	s.stateMutex.Unlock()
	<-done
	if j != numWaits {
		t.Errorf("failed: %d != %d", j, numWaits)
	}
}
Example #28
func init() {
	for i := 0; i < len(groups); i++ {
		g := &groups[i]
		g.cond = sync.NewCond(&g.lock)
		g.list = list.New()
		main := func() {
			defer func() {
				if x := recover(); x != nil {
					counts.Count("async.panic", 1)
					xlog.ErrLog.Printf("[async]: panic = %v\n%s\n", x, utils.Trace())
				}
			}()
			for {
				var f func()
				g.lock.Lock()
				if e := g.list.Front(); e != nil {
					f = g.list.Remove(e).(func())
				} else {
					g.cond.Wait()
				}
				g.lock.Unlock()
				if f != nil {
					f()
				}
			}
		}
		go func() {
			for {
				main()
			}
		}()
	}
}
Example #29
// NewBroadcaster returns a Broadcaster structure
func NewBroadcaster() *Broadcaster {
	b := &Broadcaster{
		c: make(chan struct{}),
	}
	b.cond = sync.NewCond(b)
	return b
}
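For sync.NewCond(b) to compile, the Broadcaster itself must satisfy sync.Locker. The struct definition isn't shown on this page; embedding a sync.Mutex is the usual way to get there (an assumption, sketched below):

package broadcast

import "sync"

// Broadcaster as implied by NewBroadcaster: embedding sync.Mutex provides
// the Lock/Unlock methods that sync.NewCond(b) requires.
type Broadcaster struct {
	sync.Mutex
	c    chan struct{}
	cond *sync.Cond
}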
Example #30
// newListenerMux listens and wraps accepted connections with TLS after peeking at the protocol
func newListenerMux(listener net.Listener, config *tls.Config) *ListenerMux {
	l := ListenerMux{
		Listener:    listener,
		config:      config,
		cond:        sync.NewCond(&sync.Mutex{}),
		acceptResCh: make(chan ListenerMuxAcceptRes),
	}
	// Start listening, wrap connections with tls when needed
	go func() {
		// Loop for accepting new connections
		for {
			conn, err := l.Listener.Accept()
			if err != nil {
				l.acceptResCh <- ListenerMuxAcceptRes{err: err}
				return
			}
			// Wrap the connection with ConnMux to be able to peek the data in the incoming connection
			// and decide if we need to wrap the connection itself with a TLS or not
			go func(conn net.Conn) {
				connMux := NewConnMux(conn)
				if connMux.PeekProtocol() == "tls" {
					l.acceptResCh <- ListenerMuxAcceptRes{conn: tls.Server(connMux, l.config)}
				} else {
					l.acceptResCh <- ListenerMuxAcceptRes{conn: connMux}
				}
			}(conn)
		}
	}()
	return &l
}