Example #1
// Upload is called to perform the upload.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	if u.currentUploads != nil {
		defer atomic.AddInt32(u.currentUploads, -1)

		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return distribution.Descriptor{}, errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming upload.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return distribution.Descriptor{}, ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}
	}

	if u.simulateRetries != 0 {
		u.simulateRetries--
		return distribution.Descriptor{}, errors.New("simulating retry")
	}

	return distribution.Descriptor{}, nil
}
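This mock isolates a pattern worth noting: atomically increment a shared counter before doing work, compare the returned value against a cap, and balance it with a deferred decrement. A minimal, self-contained sketch of the same guard follows; every name in it is illustrative rather than taken from the example.

package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

const maxConcurrent = 3

var current int32

// doWork allows at most maxConcurrent calls to run at once; extra
// callers get an error instead of queueing.
func doWork() error {
	// The deferred decrement balances the increment below on every
	// return path, including the rejection branch.
	defer atomic.AddInt32(&current, -1)
	if atomic.AddInt32(&current, 1) > maxConcurrent {
		return errors.New("concurrency limit exceeded")
	}
	// ... the real, slow work would happen here ...
	return nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := doWork(); err != nil {
				fmt.Println(err)
			}
		}()
	}
	wg.Wait()
}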
Example #2
// inBulkSet actually processes incoming bulk-set messages; there may be more
// than one of these workers.
func (store *defaultGroupStore) inBulkSet(wg *sync.WaitGroup) {
	for {
		bsm := <-store.bulkSetState.inMsgChan
		if bsm == nil {
			break
		}
		body := bsm.body
		var err error
		ring := store.msgRing.Ring()
		var rightwardPartitionShift uint64
		var bsam *groupBulkSetAckMsg
		var ptimestampbits uint64
		if ring != nil {
			rightwardPartitionShift = 64 - uint64(ring.PartitionBitCount())
			// Only ack if there is someone to ack to.
			if bsm.nodeID() != 0 {
				bsam = store.newOutBulkSetAckMsg()
			}
		}
		for len(body) > _GROUP_BULK_SET_MSG_ENTRY_HEADER_LENGTH {
			keyA := binary.BigEndian.Uint64(body)
			keyB := binary.BigEndian.Uint64(body[8:])
			childKeyA := binary.BigEndian.Uint64(body[16:])
			childKeyB := binary.BigEndian.Uint64(body[24:])
			timestampbits := binary.BigEndian.Uint64(body[32:])
			l := binary.BigEndian.Uint32(body[40:])

			atomic.AddInt32(&store.inBulkSetWrites, 1)
			/*
			   // REMOVEME logging when we get zero-length values.
			   if l == 0 && timestampbits&_TSB_DELETION == 0 {
			       fmt.Printf("REMOVEME inbulkset got a zero-length value, %x %x %x %x %x\n", keyA, keyB, childKeyA, childKeyB, timestampbits)
			   }
			*/
			// Attempt to store everything received...
			// Note that deletions are acted upon as internal requests (work
			// even if writes are disabled due to disk fullness) and new data
			// writes are not.
			ptimestampbits, err = store.write(keyA, keyB, childKeyA, childKeyB, timestampbits, body[_GROUP_BULK_SET_MSG_ENTRY_HEADER_LENGTH:_GROUP_BULK_SET_MSG_ENTRY_HEADER_LENGTH+l], timestampbits&_TSB_DELETION != 0)
			if err != nil {
				atomic.AddInt32(&store.inBulkSetWriteErrors, 1)
			} else if ptimestampbits >= timestampbits {
				atomic.AddInt32(&store.inBulkSetWritesOverridden, 1)
			}
			// But only ack if the write succeeded, there is someone to ack to,
			// and the local node is responsible for the data.
			if err == nil && bsam != nil && ring != nil && ring.Responsible(uint32(keyA>>rightwardPartitionShift)) {
				bsam.add(keyA, keyB, childKeyA, childKeyB, timestampbits)
			}
			body = body[_GROUP_BULK_SET_MSG_ENTRY_HEADER_LENGTH+l:]
		}
		if bsam != nil {
			atomic.AddInt32(&store.outBulkSetAcks, 1)
			store.msgRing.MsgToNode(bsam, bsm.nodeID(), store.bulkSetState.inResponseMsgTimeout)
		}
		store.bulkSetState.inFreeMsgChan <- bsm
	}
	wg.Done()
}
Example #3
func (w *Producer) sendCommandAsync(cmd *Command, doneChan chan *ProducerTransaction,
	args []interface{}) error {
	// keep track of how many outstanding producers we're dealing with
	// in order to later ensure that we clean them all up...
	atomic.AddInt32(&w.concurrentProducers, 1)
	defer atomic.AddInt32(&w.concurrentProducers, -1)

	if atomic.LoadInt32(&w.state) != StateConnected {
		err := w.connect()
		if err != nil {
			return err
		}
	}

	t := &ProducerTransaction{
		cmd:      cmd,
		doneChan: doneChan,
		Args:     args,
	}

	select {
	case w.transactionChan <- t:
	case <-w.exitChan:
		return ErrStopped
	}

	return nil
}
Example #4
// Ensure that multiple tickers can be used together.
func TestMock_Ticker_Multi(t *testing.T) {
	var n int32
	clock := NewMock()

	go func() {
		a := clock.Ticker(1 * time.Microsecond)
		b := clock.Ticker(3 * time.Microsecond)

		for {
			select {
			case <-a.C:
				atomic.AddInt32(&n, 1)
			case <-b.C:
				atomic.AddInt32(&n, 100)
			}
		}
	}()
	gosched()

	// Move clock forward.
	clock.Add(10 * time.Microsecond)
	gosched()
	if atomic.LoadInt32(&n) != 310 {
		t.Fatalf("unexpected: %d", n)
	}
}
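The test reads the counter only through atomic.LoadInt32 because the ticker goroutine keeps writing it with AddInt32; that pairing is what keeps the assertion race-free. Below is a standalone sketch of the same reader/writer pairing, using a real time.Ticker instead of the mock clock (purely illustrative).

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var n int32

	// Writer goroutine: bump the shared counter on every tick.
	ticker := time.NewTicker(time.Millisecond)
	defer ticker.Stop()
	go func() {
		for range ticker.C {
			atomic.AddInt32(&n, 1)
		}
	}()

	time.Sleep(10 * time.Millisecond)

	// Reader: LoadInt32 pairs with AddInt32, keeping this read race-free.
	fmt.Println("ticks so far:", atomic.LoadInt32(&n))
}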
Example #5
func (c *ConcurrentSolver) attrHelper(G *graphs.Graph, removed []bool, tmpMap []int32, flags []uint32, ch chan int, i int, node int, wg *sync.WaitGroup) {
	for _, v0 := range G.Nodes[node].Inc {
		if !removed[v0] {
			flag := G.Nodes[v0].Player == i
			if atomic.CompareAndSwapUint32(&flags[v0], 0, 1) {
				if flag {
					ch <- v0
					atomic.AddInt32(&tmpMap[v0], 1)
				} else {
					adj_counter := 0
					for _, x := range G.Nodes[v0].Adj {
						if !removed[x] {
							adj_counter += 1
						}
					}
					atomic.AddInt32(&tmpMap[v0], int32(adj_counter))
					if adj_counter == 1 {
						ch <- v0
					}
				}
			} else if !flag {
				if atomic.AddInt32(&tmpMap[v0], -1) == 1 {
					ch <- v0
				}
			}
		}
	}
	wg.Done()
}
Example #6
// inBulkSetAck actually processes incoming bulk-set-ack messages; there may be
// more than one of these workers.
func (store *DefaultGroupStore) inBulkSetAck(wg *sync.WaitGroup) {
	for {
		bsam := <-store.bulkSetAckState.inMsgChan
		if bsam == nil {
			break
		}
		ring := store.msgRing.Ring()
		var rightwardPartitionShift uint64
		if ring != nil {
			rightwardPartitionShift = 64 - uint64(ring.PartitionBitCount())
		}
		b := bsam.body
		// The divide-then-multiply drops any trailing partial entry.
		l := len(b) / _GROUP_BULK_SET_ACK_MSG_ENTRY_LENGTH * _GROUP_BULK_SET_ACK_MSG_ENTRY_LENGTH
		for o := 0; o < l; o += _GROUP_BULK_SET_ACK_MSG_ENTRY_LENGTH {
			keyA := binary.BigEndian.Uint64(b[o:])
			if ring != nil && !ring.Responsible(uint32(keyA>>rightwardPartitionShift)) {
				atomic.AddInt32(&store.inBulkSetAckWrites, 1)
				timestampbits := binary.BigEndian.Uint64(b[o+32:]) | _TSB_LOCAL_REMOVAL
				ptimestampbits, err := store.write(keyA, binary.BigEndian.Uint64(b[o+8:]), binary.BigEndian.Uint64(b[o+16:]), binary.BigEndian.Uint64(b[o+24:]), timestampbits, nil, true)
				if err != nil {
					atomic.AddInt32(&store.inBulkSetAckWriteErrors, 1)
				} else if ptimestampbits >= timestampbits {
					atomic.AddInt32(&store.inBulkSetAckWritesOverridden, 1)
				}
			}
		}
		store.bulkSetAckState.inFreeMsgChan <- bsam
	}
	wg.Done()
}
Example #7
func (g *Group) AddWindow(w Window) (err error) {
	if atomic.LoadInt32(&g.count) >= 10 {
		return ErrTooMany
	}
	if g.ids.exists(w.Id()) {
		return ErrNotUnique
	}
	g.ids.add(w.Id())
	atomic.AddInt32(&g.count, 1)
	g.Add(1)
	go func() {
		for ch := range w.Childs() {
			if err := g.AddWindow(ch); err != nil {
				w.SetError(err)
			}
		}
	}()
	go func() {
		<-w.Show()
		g.ids.remove(w.Id())
		atomic.AddInt32(&g.count, -1)
		g.Done()
	}()
	return
}
Example #8
func (store *defaultValueStore) compactionWorker(jobChan chan *valueCompactionJob, controlChan chan struct{}, wg *sync.WaitGroup) {
	for c := range jobChan {
		select {
		case <-controlChan:
			break
		default:
		}
		total, err := valueTOCStat(path.Join(store.pathtoc, c.nametoc), store.stat, store.openReadSeeker)
		if err != nil {
			store.logError("compaction: unable to stat %s because: %v", path.Join(store.pathtoc, c.nametoc), err)
			continue
		}
		// TODO: This 1000 should be in the Config.
		// If total is less than 1000, it'll automatically get compacted.
		if total < 1000 {
			atomic.AddInt32(&store.smallFileCompactions, 1)
		} else {
			toCheck := uint32(total)
			// If there are more than a million entries, we'll just check the
			// first million and extrapolate.
			if toCheck > 1000000 {
				toCheck = 1000000
			}
			if !store.needsCompaction(c.nametoc, c.candidateBlockID, total, toCheck) {
				continue
			}
			atomic.AddInt32(&store.compactions, 1)
		}
		store.compactFile(c.nametoc, c.candidateBlockID, controlChan, "compactionWorker")
	}
	wg.Done()
}
Example #9
func TestRun(t *testing.T) {
	i := int32(0)
	errs := Run(
		func() error {
			atomic.AddInt32(&i, 1)
			return nil
		},
		func() error {
			atomic.AddInt32(&i, 5)
			return nil
		},
	)
	if len(errs) != 0 || i != 6 {
		t.Error("unexpected run")
	}

	testErr := fmt.Errorf("an error")
	i = int32(0)
	errs = Run(
		func() error {
			return testErr
		},
		func() error {
			atomic.AddInt32(&i, 5)
			return nil
		},
	)
	if len(errs) != 1 || errs[0] != testErr || i != 5 {
		t.Error("unexpected run")
	}
}
Example #10
func (t *TCPMsgRing) msgToAddr(msg Msg, addr string, timeout time.Duration) {
	atomic.AddInt32(&t.msgToAddrs, 1)
	msgChan, created := t.msgChanForAddr(addr)
	if created {
		go t.connection(addr, nil, msgChan, true)
	}
	timer := time.NewTimer(timeout)
	select {
	case <-t.controlChan:
		atomic.AddInt32(&t.msgToAddrShutdownDrops, 1)
		timer.Stop()
		msg.Free()
	case msgChan <- msg:
		atomic.AddInt32(&t.msgToAddrQueues, 1)
		timer.Stop()
	case <-timer.C:
		atomic.AddInt32(&t.msgToAddrTimeoutDrops, 1)
		msg.Free()
	}
	// TODO: Uncertain the code block above is better than that below.
	//       Seems reasonable to Stop a timer if it won't be used; but
	//       perhaps that's more expensive than just letting it fire.
	//	select {
	//	case <-t.controlChan:
	//		msg.Free()
	//	case msgChan <- msg:
	//	case <-time.After(timeout):
	//		msg.Free()
	//	}
}
Example #11
// MsgToOtherReplicas queues the message for delivery to all other replicas of
// a partition; the timeout should be considered for queueing, not for actual
// delivery.
//
// If the ring is not bound to a specific node (LocalNode() returns nil) then
// the delivery attempts will be to all replicas.
//
// When the msg has actually been sent or has been discarded due to delivery
// errors or delays, msg.Free() will be called.
func (t *TCPMsgRing) MsgToOtherReplicas(msg Msg, partition uint32, timeout time.Duration) {
	atomic.AddInt32(&t.msgToOtherReplicas, 1)
	ring := t.Ring()
	if ring == nil {
		atomic.AddInt32(&t.msgToOtherReplicasNoRings, 1)
		msg.Free()
		return
	}
	nodes := ring.ResponsibleNodes(partition)
	mmsg := &multiMsg{msg: msg, freerChan: make(chan struct{}, len(nodes))}
	toAddrChan := make(chan struct{}, len(nodes))
	toAddr := func(addr string) {
		t.msgToAddr(mmsg, addr, timeout)
		toAddrChan <- struct{}{}
	}
	var localID uint64
	if localNode := ring.LocalNode(); localNode != nil {
		localID = localNode.ID()
	}
	toAddrs := 0
	for _, node := range nodes {
		if node.ID() != localID {
			go toAddr(node.Address(t.addressIndex))
			toAddrs++
		}
	}
	if toAddrs == 0 {
		msg.Free()
		return
	}
	for i := 0; i < toAddrs; i++ {
		<-toAddrChan
	}
	go mmsg.freer(toAddrs)
}
Example #12
// inBulkSetAck actually processes incoming bulk-set-ack messages; there may be
// more than one of these workers.
func (vs *DefaultValueStore) inBulkSetAck(doneChan chan struct{}) {
	for {
		bsam := <-vs.bulkSetAckState.inMsgChan
		if bsam == nil {
			break
		}
		ring := vs.msgRing.Ring()
		var rightwardPartitionShift uint64
		if ring != nil {
			rightwardPartitionShift = 64 - uint64(ring.PartitionBitCount())
		}
		b := bsam.body
		// The divide-then-multiply drops any trailing partial entry.
		l := len(b) / _BULK_SET_ACK_MSG_ENTRY_LENGTH * _BULK_SET_ACK_MSG_ENTRY_LENGTH
		for o := 0; o < l; o += _BULK_SET_ACK_MSG_ENTRY_LENGTH {
			keyA := binary.BigEndian.Uint64(b[o:])
			if ring != nil && !ring.Responsible(uint32(keyA>>rightwardPartitionShift)) {
				atomic.AddInt32(&vs.inBulkSetAckWrites, 1)
				timestampbits := binary.BigEndian.Uint64(b[o+16:]) | _TSB_LOCAL_REMOVAL
				rtimestampbits, err := vs.write(keyA, binary.BigEndian.Uint64(b[o+8:]), timestampbits, nil)
				if err != nil {
					atomic.AddInt32(&vs.inBulkSetAckWriteErrors, 1)
				} else if rtimestampbits != timestampbits {
					atomic.AddInt32(&vs.inBulkSetAckWritesOverridden, 1)
				}
			}
		}
		vs.bulkSetAckState.inFreeMsgChan <- bsam
	}
	doneChan <- struct{}{}
}
Example #13
func (v *basePE) Start() {
	v.mutex.Lock()
	defer v.mutex.Unlock()

	if v.started {
		// already started
		return
	}
	cs := make(chan bool, v.parallel)
	for i := 0; i < v.parallel; i++ {
		go func(vv *basePE) {
			vv.wg.Add(1)
			atomic.AddInt32(&vv.running, 1)
			cs <- true
			defer func() {
				atomic.AddInt32(&vv.running, -1)
				vv.wg.Done()
			}()
			for vf := range vv.queue {
				vf()
				vv.completed++
			}
		}(v)
	}
	for i := 0; i < v.parallel; i++ {
		<-cs
	}
	v.started = true
}
Example #14
func (r *IndexReaderImpl) decRef() error {
	// only check refcount here (don't call ensureOpen()), so we can
	// still close the reader if it was made invalid by a child:
	if r.refCount <= 0 {
		return errors.New("this IndexReader is closed")
	}

	rc := atomic.AddInt32(&r.refCount, -1)
	if rc == 0 {
		success := false
		defer func() {
			if !success {
				// Put reference back on failure
				atomic.AddInt32(&r.refCount, 1)
			}
		}()
		r.doClose()
		success = true
		r.reportCloseToParentReaders()
		r.notifyReaderClosedListeners()
	} else if rc < 0 {
		panic(fmt.Sprintf("too many decRef calls: refCount is %v after decrement", rc))
	}

	return nil
}
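Here AddInt32 with a delta of -1 doubles as a release operation: the value it returns tells the caller whether the last reference was just dropped. A stripped-down sketch of that reference-counting idea follows; the type and method names are invented for illustration.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// refCounted releases its resources when the last reference is dropped.
type refCounted struct {
	refs   int32
	closed bool
}

func (r *refCounted) incRef() { atomic.AddInt32(&r.refs, 1) }

func (r *refCounted) decRef() {
	switch rc := atomic.AddInt32(&r.refs, -1); {
	case rc == 0:
		r.closed = true // the last holder releases the resources exactly once
	case rc < 0:
		panic(fmt.Sprintf("too many decRef calls: refCount is %v", rc))
	}
}

func main() {
	r := &refCounted{refs: 1}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		r.incRef()
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer r.decRef()
			// ... use r ...
		}()
	}
	wg.Wait()
	r.decRef() // drop the initial reference; this call reaches zero
	fmt.Println("closed:", r.closed)
}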
Example #15
func renderRows(wg *sync.WaitGroup, s32 int32) {
	var y int32
	for y = atomic.AddInt32(&yAt, 1); y < s32; y = atomic.AddInt32(&yAt, 1) {
		fields[y] = renderRow(&y)
	}
	wg.Done()
}
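Used this way, the shared counter becomes a lock-free work dispenser: each worker claims the next row with a single AddInt32 and stops once it runs past the end. A self-contained sketch of the same loop, with a placeholder row count and per-row computation:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	const rows = 100
	results := make([]int32, rows)

	// Start at -1 so the first AddInt32 hands out index 0.
	next := int32(-1)

	var wg sync.WaitGroup
	for w := 0; w < 4; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Each AddInt32 atomically claims one row, so no two
			// workers ever receive the same index.
			for y := atomic.AddInt32(&next, 1); y < rows; y = atomic.AddInt32(&next, 1) {
				results[y] = y * 2 // placeholder for the real per-row work
			}
		}()
	}
	wg.Wait()
	fmt.Println(results[0], results[rows-1])
}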
Example #16
// Download is called to perform the download.
func (d *mockDownloadDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	if d.currentDownloads != nil {
		defer atomic.AddInt32(d.currentDownloads, -1)

		if atomic.AddInt32(d.currentDownloads, 1) > maxDownloadConcurrency {
			return nil, 0, errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming download.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return nil, 0, ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: d.ID(), Action: "Downloading", Current: i, Total: 10})
		}
	}

	if d.simulateRetries != 0 {
		d.simulateRetries--
		return nil, 0, errors.New("simulating retry")
	}

	return d.mockTarStream(), 0, nil
}
Example #17
// queueRoutine captures and provides work.
func (workPool *WorkPool) queueRoutine() {
	for {
		select {
		// Shutdown the QueueRoutine.
		case <-workPool.shutdownQueueChannel:
			writeStdout("Queue", "queueRoutine", "Going Down")
			workPool.shutdownQueueChannel <- "Down"
			return

		// Post work to be processed.
		case queueItem := <-workPool.queueChannel:
			// If the queue is at capacity, don't add it. Adding a delta of 0
			// is used as an atomic read of queuedWork.
			if atomic.AddInt32(&workPool.queuedWork, 0) == workPool.queueCapacity {
				queueItem.resultChannel <- fmt.Errorf("Thread Pool At Capacity")
				continue
			}

			// Increment the queued work count.
			atomic.AddInt32(&workPool.queuedWork, 1)

			// Queue the work for the WorkRoutine to process.
			workPool.workChannel <- queueItem.work

			// Tell the caller the work is queued.
			queueItem.resultChannel <- nil
			break
		}
	}
}
Example #18
func (rs *RemoteServer) serveStream(stream net.Conn) {
	defer stream.Close()
	start := time.Now()
	conn, err := net.Dial("tcp", rs.raddr)
	if err != nil {
		log.Error("connect to remote error:%v", err)
		return
	}
	defer conn.Close()

	atomic.AddInt32(&rs.streamCount, 1)
	streamID := atomic.AddUint64(&rs.streamID, 1)
	readChan := make(chan int64, 1)
	writeChan := make(chan int64, 1)
	var readBytes int64
	var writeBytes int64
	go pipe(stream, conn, readChan)
	go pipe(conn, stream, writeChan)
	for i := 0; i < 2; i++ {
		select {
		case readBytes = <-readChan:
			stream.Close()
			log.Debug("[#%d] read %d bytes", streamID, readBytes)
		case writeBytes = <-writeChan:
			// DON'T call conn.Close; it would trigger an error in pipe.
			// Just close stream and let the conn copy finish normally.
			log.Debug("[#%d] write %d bytes", streamID, writeBytes)
		}
	}
	log.Info("[#%d] r:%d w:%d t:%v c:%d",
		streamID, readBytes, writeBytes, time.Now().Sub(start), atomic.LoadInt32(&rs.streamCount))
	atomic.AddInt32(&rs.streamCount, -1)
}
Example #19
// Handles incoming requests.
func handleRequest(conn net.Conn) {
	defer func() {
		conn.Close()
		atomic.AddInt32(&clients, -1)
	}()

	atomic.AddInt32(&clients, 1)

	// Make a buffer to hold incoming data.
	buf := memPool.Get().([]byte)
	for {
		// Read the incoming connection into the buffer.
		n, err := conn.Read(buf)
		if err != nil {
			if err != io.EOF {
				log.Println("Error reading:", err.Error())
			}

			return
		}
		atomic.AddUint64(&bytesRecved, uint64(n))

		// Send a response back to the person contacting us.
		n, _ = conn.Write([]byte("Message received."))
		atomic.AddUint64(&bytesSent, uint64(n))
	}

}
Example #20
func (sc *SocketCrutch) createClient(ws *websocket.Conn, req *http.Request) {
	id := xid.New().String()

	conn := newConnection(ws, sc)
	session := newSession(conn, id, req, sc)

	atomic.AddInt32(&sc.connectCounter, 1)
	defer atomic.AddInt32(&sc.connectCounter, -1)

	log.Debugf("the client session #%s created, the current number of connections %d", id, sc.connectCounter)

	if sc.connectHandler != nil {
		sc.connectHandler(session)
	}

	// Start reading and writing the goroutines.
	conn.run()
	// Wait disconnect
	conn.wait()

	if sc.disconnectHandler != nil {
		sc.disconnectHandler(session)
	}

	log.Debugf("the client session #%s disconnected, the current number of connections %d", id, sc.connectCounter)
}
Example #21
func (t *Tunnel) transport(conn net.Conn) {
	start := time.Now()
	conn2, err := net.DialTCP("tcp", nil, t.baddr)
	if err != nil {
		log.Print(err)
		return
	}
	connectTime := time.Now().Sub(start)
	start = time.Now()
	cipher := NewCipher(t.cryptoMethod, t.secret)
	readChan := make(chan int64)
	writeChan := make(chan int64)
	var readBytes, writeBytes int64
	atomic.AddInt32(&t.sessionsCount, 1)
	var bconn, fconn *Conn
	if t.clientMode {
		fconn = NewConn(conn, nil, t.pool)
		bconn = NewConn(conn2, cipher, t.pool)
	} else {
		fconn = NewConn(conn, cipher, t.pool)
		bconn = NewConn(conn2, nil, t.pool)
	}
	go t.pipe(bconn, fconn, writeChan)
	go t.pipe(fconn, bconn, readChan)
	writeBytes = <-writeChan
	readBytes = <-readChan
	transferTime := time.Now().Sub(start)
	log.Printf("r:%d w:%d ct:%.3f t:%.3f [#%d]", readBytes, writeBytes,
		connectTime.Seconds(), transferTime.Seconds(), t.sessionsCount)
	atomic.AddInt32(&t.sessionsCount, -1)
}
Example #22
// Upload is called to perform the upload.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
	if u.currentUploads != nil {
		defer atomic.AddInt32(u.currentUploads, -1)

		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return "", errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming upload.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}
	}

	if u.simulateRetries != 0 {
		u.simulateRetries--
		return "", errors.New("simulating retry")
	}

	// For the mock implementation, use SHA256(DiffID) as the returned
	// digest.
	return digest.FromBytes([]byte(u.diffID.String()))
}
Example #23
// addServiceOnPort starts listening for a new service, returning the serviceInfo.
// Pass proxyPort=0 to allocate a random port. The timeout only applies to UDP
// connections, for now.
func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol api.Protocol, proxyPort int, timeout time.Duration) (*serviceInfo, error) {
	sock, err := newProxySocket(protocol, proxier.listenIP, proxyPort)
	if err != nil {
		return nil, err
	}
	_, portStr, err := net.SplitHostPort(sock.Addr().String())
	if err != nil {
		sock.Close()
		return nil, err
	}
	portNum, err := strconv.Atoi(portStr)
	if err != nil {
		sock.Close()
		return nil, err
	}
	si := &serviceInfo{
		proxyPort:           portNum,
		protocol:            protocol,
		socket:              sock,
		timeout:             timeout,
		sessionAffinityType: api.ServiceAffinityNone, // default
		stickyMaxAgeMinutes: 180,                     // TODO: parameterize this in the API.
	}
	proxier.setServiceInfo(service, si)

	glog.V(2).Infof("Proxying for service %q on %s port %d", service, protocol, portNum)
	go func(service proxy.ServicePortName, proxier *Proxier) {
		defer util.HandleCrash()
		atomic.AddInt32(&proxier.numProxyLoops, 1)
		sock.ProxyLoop(service, si, proxier)
		atomic.AddInt32(&proxier.numProxyLoops, -1)
	}(service, proxier)

	return si, nil
}
Example #24
func (s *semaphore) acquire(timeout time.Duration) bool {
	s.lock.Lock()
	observer := atomic.AddInt32(&s.observer, 1)
	if size := cap(s.bus); size < int(observer) {
		oldBus := s.bus
		s.bus = make(chan byte, size<<1)
		for i := 0; i < size; i++ {
			oldBus <- 0 // timeout duration will be accumulated when expanding
		}
	}
	s.lock.Unlock()
	defer atomic.AddInt32(&s.observer, -1)
	for {
		select {
		case x := <-s.bus:
			if x == 0 {
				runtime.Gosched()
			} else {
				return x < 0xff
			}
		case <-time.After(timeout):
			return s.timeoutResult
		}
	}
}
Example #25
func (c *Cache) startHit(h *Hit) {
	atomic.AddInt32(&c.activeCount, 1)
	var arrayTop, arrayFull []Element
	general := (len(h.criteria) == 0)
	for {
		select {
		case r := <-h.in:
			if r.count < 0 {
				atomic.AddInt32(&c.activeCount, -1)
				<-c.wait
				return
			}
			if general {
				genrealRespoce(r, c.sphere, h.filter)
			} else if end := r.offset + r.count; end <= 200 && arrayFull == nil {
				if arrayTop == nil {
					arrayTop = c.sphere.topK(200, h.criteria, h.filter)
				}
				sendRespose(r, arrayTop)
			} else {
				if arrayFull == nil {
					arrayFull = proccess(h.criteria, c.sphere, h.filter)
					log.Printf("make full sort!!!!Request from %d to %d", r.offset, r.offset+r.count)
					arrayTop = nil
				}
				sendRespose(r, arrayTop)
			}
		}
	}
}
Example #26
func runQuery(q *queryIn) {
	if len(q.ptrs) == 0 {
		q.cherr <- fmt.Errorf("At least one pointer is required")
		return
	}

	db, err := dbopen(q.dbname)
	if err != nil {
		log.Printf("Error opening db: %v - %v", q.dbname, err)
		q.cherr <- err
		return
	}
	defer db.Close()

	chunk := int64(time.Duration(q.group) * time.Millisecond)

	infos := []*couchstore.DocInfo{}
	g := int64(0)
	nextg := ""

	err = db.Walk(q.from, func(d *couchstore.Couchstore,
		di *couchstore.DocInfo) error {
		kstr := di.ID()
		var err error
		if q.to != "" && kstr >= q.to {
			err = couchstore.StopIteration
		}

		atomic.AddInt32(&q.totalKeys, 1)

		if kstr >= nextg {
			if len(infos) > 0 {
				atomic.AddInt32(&q.started, 1)
				fetchDocs(q.dbname, g, infos, di,
					q.ptrs, q.reds, q.filters, q.filtervals,
					q.before, q.out)

				infos = make([]*couchstore.DocInfo, 0, len(infos))
			}

			k := parseKey(kstr)
			g = (k / chunk) * chunk
			nextgi := g + chunk
			nextgt := time.Unix(nextgi/1e9, nextgi%1e9).UTC()
			nextg = nextgt.Format(time.RFC3339Nano)
		}
		infos = append(infos, di)

		return err
	})

	if err == nil && len(infos) > 0 {
		atomic.AddInt32(&q.started, 1)
		fetchDocs(q.dbname, g, infos, nil,
			q.ptrs, q.reds, q.filters, q.filtervals,
			q.before, q.out)
	}

	q.cherr <- err
}
Example #27
// AddAsyncHandler adds an AsyncHandler for messages received by this Reader.
//
// See AsyncHandler for details on implementing this interface.
//
// It's ok to start more than one handler simultaneously, they
// are concurrently executed in goroutines.
func (q *Reader) AddAsyncHandler(handler AsyncHandler) {
	atomic.AddInt32(&q.runningHandlers, 1)
	log.Println("starting AsyncHandler go-routine")
	go func() {
		for {
			message, ok := <-q.incomingMessages
			if !ok {
				log.Printf("closing AsyncHandler (after self.incomingMessages closed)")
				if atomic.AddInt32(&q.runningHandlers, -1) == 0 {
					q.ExitChan <- 1
				}
				break
			}

			// message passed the max number of attempts
			// note: unfortunately it's not straightforward to do this after passing to async handler, so we don't.
			if q.MaxAttemptCount > 0 && message.Attempts > q.MaxAttemptCount {
				log.Printf("WARNING: msg attempted %d times. giving up %s %s", message.Attempts, message.Id, message.Body)
				logger, ok := handler.(FailedMessageLogger)
				if ok {
					logger.LogFailedMessage(message.Message)
				}
				message.responseChannel <- &FinishedMessage{message.Id, 0, true}
				continue
			}

			handler.HandleMessage(message.Message, message.responseChannel)
		}
	}()
}
Example #28
func handleConnection(conn *ss.Conn) {
	var host string

	atomic.AddInt32(&connCnt, 1)
	if connCnt-nextLogConnCnt >= 0 {
		// XXX There's no xadd in the atomic package, so it's difficult to log
		// the message only once with low cost. Also note nextLogConnCnt may be
		// added twice for the current peak connection number level.
		log.Printf("Number of client connections reaches %d\n", nextLogConnCnt)
		nextLogConnCnt += logCntDelta
	}

	// function arguments are always evaluated, so surround debug statement
	// with if statement
	if debug {
		debug.Printf("new client %s->%s\n", conn.RemoteAddr().String(), conn.LocalAddr())
	}
	defer func() {
		if debug {
			debug.Printf("closing pipe %s<->%s\n", conn.RemoteAddr(), host)
		}
		atomic.AddInt32(&connCnt, -1)
		conn.Close()
	}()

	host, extra, err := getRequest(conn)
	if err != nil {
		log.Println("error getting request:", err)
		return
	}
	debug.Println("connecting", host)
	remote, err := net.Dial("tcp", host)
	if err != nil {
		if ne, ok := err.(*net.OpError); ok && (ne.Err == syscall.EMFILE || ne.Err == syscall.ENFILE) {
			// log the "too many open files" error:
			// EMFILE means the process reached its open file limit, ENFILE the system-wide limit
			log.Println("dial error:", err)
		} else {
			debug.Println("error connecting to:", host, err)
		}
		return
	}
	defer remote.Close()
	// write extra bytes read from the request
	if extra != nil {
		// debug.Println("getRequest read extra data, writing to remote, len", len(extra))
		if _, err = remote.Write(extra); err != nil {
			debug.Println("write request extra error:", err)
			return
		}
	}
	if debug {
		debug.Printf("piping %s<->%s", conn.RemoteAddr(), host)
	}
	c := make(chan byte, 2)
	go ss.Pipe(conn, remote, c)
	go ss.Pipe(remote, conn, c)
	<-c // close the other connection whenever one connection is closed
	return
}
Example #29
func (rcv *TCP) handleConnection(conn net.Conn) {
	atomic.AddInt32(&rcv.active, 1)
	defer atomic.AddInt32(&rcv.active, -1)

	defer conn.Close()
	reader := bufio.NewReader(conn)

	for {
		conn.SetReadDeadline(time.Now().Add(2 * time.Minute))

		line, err := reader.ReadBytes('\n')

		if err != nil {
			if err == io.EOF {
				if len(line) > 0 {
					logrus.Warningf("[tcp] Unfinished line: %#v", line)
				}
			} else {
				atomic.AddUint32(&rcv.errors, 1)
				logrus.Error(err)
			}
			break
		}
		if len(line) > 0 { // skip empty lines
			if msg, err := points.ParseText(string(line)); err != nil {
				atomic.AddUint32(&rcv.errors, 1)
				logrus.Info(err)
			} else {
				atomic.AddUint32(&rcv.metricsReceived, 1)
				rcv.out <- msg
			}
		}
	}
}
Example #30
//
// the application wants to create a paxos peer.
// the ports of all the paxos peers (including this one)
// are in peers[]. this server's port is peers[me].
//
func Make(peers []string, me int, rpcs *rpc.Server) *Paxos {
	px := &Paxos{}
	px.peers = peers
	px.me = me

	// Your initialization code here.

	if rpcs != nil {
		// caller will create socket &c
		rpcs.Register(px)
	} else {
		rpcs = rpc.NewServer()
		rpcs.Register(px)

		// prepare to receive connections from clients.
		// change "unix" to "tcp" to use over a network.
		os.Remove(peers[me]) // only needed for "unix"
		l, e := net.Listen("unix", peers[me])
		if e != nil {
			log.Fatal("listen error: ", e)
		}
		px.l = l

		// please do not change any of the following code,
		// or do anything to subvert it.

		// create a thread to accept RPC connections
		go func() {
			for px.isdead() == false {
				conn, err := px.l.Accept()
				if err == nil && px.isdead() == false {
					if px.isunreliable() && (rand.Int63()%1000) < 100 {
						// discard the request.
						conn.Close()
					} else if px.isunreliable() && (rand.Int63()%1000) < 200 {
						// process the request but force discard of reply.
						c1 := conn.(*net.UnixConn)
						f, _ := c1.File()
						err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
						if err != nil {
							fmt.Printf("shutdown: %v\n", err)
						}
						atomic.AddInt32(&px.rpcCount, 1)
						go rpcs.ServeConn(conn)
					} else {
						atomic.AddInt32(&px.rpcCount, 1)
						go rpcs.ServeConn(conn)
					}
				} else if err == nil {
					conn.Close()
				}
				if err != nil && px.isdead() == false {
					fmt.Printf("Paxos(%v) accept: %v\n", me, err.Error())
				}
			}
		}()
	}

	return px
}