Example #1
File: rpool.go  Project: intercom/dvara
// Acquire will pull a resource from the pool or create a new one if necessary.
func (p *Pool) Acquire() (io.Closer, error) {
	elapsedTime := stats.BumpTime(p.Stats, "acquire.time")
	defer elapsedTime.End()
	p.manageOnce.Do(p.goManage)
	r := make(chan io.Closer)
	p.acquire <- r
	c := <-r

	// sentinel value indicates the pool is closed
	if c == closedSentinel {
		return nil, errPoolClosed
	}

	// need to allocate a new resource
	if c == newSentinel {
		t := stats.BumpTime(p.Stats, "acquire.new.time")
		c, err := p.New()
		t.End()
		stats.BumpSum(p.Stats, "acquire.new", 1)
		if err != nil {
			stats.BumpSum(p.Stats, "acquire.error.new", 1)
			// discard our assumed checked out resource since we failed to New
			p.discard <- returnResource{resource: newSentinel}
		} else {
			p.new <- c
		}
		return c, err
	}

	// successfully acquired from pool
	return c, nil
}
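A typical call site pairs Acquire with a later Release (or Discard if the resource went bad); otherwise the pool's "out" count never drops and Close blocks forever. The sketch below is illustrative only: the import path, the target address, and the CloseErrorHandler wiring are assumptions based on the fields and methods visible in these examples, not a confirmed dvara API.

package main

import (
	"io"
	"log"
	"net"
	"time"

	"github.com/facebookgo/rpool" // assumed import path; adjust to wherever rpool.go's package lives in your tree
)

func main() {
	pool := rpool.Pool{
		// New dials a fresh resource; here each resource is a TCP connection (illustrative address).
		New: func() (io.Closer, error) {
			return net.Dial("tcp", "127.0.0.1:27017")
		},
		CloseErrorHandler: func(err error) { log.Println("close error:", err) },
		Max:               10,
		MinIdle:           2,
		IdleTimeout:       time.Minute,
		ClosePoolSize:     2,
	}

	c, err := pool.Acquire()
	if err != nil {
		log.Fatal(err)
	}

	// ... use the resource ...
	_ = c.(net.Conn)

	// hand it back; call pool.Discard(c) instead if the resource is no longer usable
	pool.Release(c)

	if err := pool.Close(); err != nil {
		log.Fatal(err)
	}
}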
Example #2
File: rpool.go  Project: wptad/dvara
// Close closes the pool and its resources. It waits until all acquired
// resources are released or discarded. It is an error to call Acquire after
// closing the pool.
func (p *Pool) Close() error {
	defer stats.BumpTime(p.Stats, "shutdown.time").End()
	p.manageOnce.Do(p.goManage)
	r := make(chan error)
	p.close <- r
	return <-r
}

func (s *server) Stop() error {
	s.stopOnce.Do(func() {
		defer stats.BumpTime(s.stats, "stop.time").End()
		stats.BumpSum(s.stats, "stop", 1)

		// first disable keep-alive for new connections
		s.server.SetKeepAlivesEnabled(false)

		// then close the listener so new connections can't come thru
		closeErr := s.listener.Close()
		<-s.serveDone

		// then trigger the background goroutine to stop and wait for it
		stopDone := make(chan struct{})
		s.stop <- stopDone

		// wait for stop
		select {
		case <-stopDone:
		case <-s.clock.After(s.stopTimeout):
			defer stats.BumpTime(s.stats, "kill.time").End()
			stats.BumpSum(s.stats, "kill", 1)

			// stop timed out, wait for kill
			killDone := make(chan struct{})
			s.kill <- killDone
			select {
			case <-killDone:
			case <-s.clock.After(s.killTimeout):
				// kill timed out, give up
				stats.BumpSum(s.stats, "kill.timeout", 1)
			}
		}

		if closeErr != nil && !isUseOfClosedError(closeErr) {
			stats.BumpSum(s.stats, "listener.close.error", 1)
			s.stopErr = closeErr
		}
	})
	return s.stopErr
}
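The shape of Stop above is a two-stage escalation: ask for a graceful stop, and if that does not finish within stopTimeout, escalate to kill; if even the kill times out, give up and only bump a counter. The standalone sketch below shows just that control flow, using plain time.After in place of the injected clock; every name in it is illustrative rather than dvara's.

package main

import (
	"fmt"
	"time"
)

func stopWorker(stop, kill chan chan struct{}, stopTimeout, killTimeout time.Duration) {
	// ask the worker to stop gracefully
	stopDone := make(chan struct{})
	stop <- stopDone

	select {
	case <-stopDone:
		return // graceful stop finished in time
	case <-time.After(stopTimeout):
		// graceful stop timed out, escalate to kill
		killDone := make(chan struct{})
		kill <- killDone
		select {
		case <-killDone:
		case <-time.After(killTimeout):
			// kill timed out as well, give up
			fmt.Println("kill timeout")
		}
	}
}

func main() {
	stop := make(chan chan struct{})
	kill := make(chan chan struct{})

	// a worker that ignores the stop request but honors kill
	go func() {
		<-stop // receive the stop request, never acknowledge it
		done := <-kill
		close(done)
	}()

	stopWorker(stop, kill, 100*time.Millisecond, time.Second)
	fmt.Println("stopped")
}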
Example #4
File: proxy.go  Project: wptad/dvara
func (p *Proxy) clientReadHeader(c net.Conn, timeout time.Duration) (*messageHeader, error) {
	t := stats.BumpTime(p.stats, "client.read.header.time")
	type headerError struct {
		header *messageHeader
		error  error
	}
	resChan := make(chan headerError)

	c.SetReadDeadline(time.Now().Add(timeout))
	go func() {
		h, err := readHeader(c)
		resChan <- headerError{header: h, error: err}
	}()

	closed := false
	var response headerError

	select {
	case response = <-resChan:
		// all good
	case <-p.closed:
		closed = true
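		// shutting down: move the read deadline into the past so the blocked readHeader returns immediately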
		c.SetReadDeadline(timeInPast)
		response = <-resChan
	}

	// Successfully read a header.
	if response.error == nil {
		t.End()
		return response.header, nil
	}

	// Client side disconnected.
	if response.error == io.EOF {
		stats.BumpSum(p.stats, "client.clean.disconnect", 1)
		return nil, errNormalClose
	}

	// We hit our ReadDeadline.
	if ne, ok := response.error.(net.Error); ok && ne.Timeout() {
		if closed {
			stats.BumpSum(p.stats, "client.clean.disconnect", 1)
			return nil, errNormalClose
		}
		return nil, errClientReadTimeout
	}

	// Some other unknown error.
	stats.BumpSum(p.stats, "client.error.disconnect", 1)
	p.Log.Error(response.error)
	return nil, response.error
}
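The pattern worth noting here is how a goroutine blocked in readHeader gets interrupted: when p.closed fires, the read deadline is moved into the past (timeInPast), forcing the pending read to fail with a timeout error that is then treated as a clean close. The self-contained sketch below reproduces that trick with net.Pipe (which supports deadlines since Go 1.10); all names in it are illustrative.

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()

	done := make(chan error, 1)
	go func() {
		buf := make([]byte, 16)
		_, err := server.Read(buf) // blocks: nothing is ever written
		done <- err
	}()

	// later, some shutdown signal fires; unblock the reader
	time.Sleep(50 * time.Millisecond)
	// mirror the proxy's timeInPast: any time already in the past works
	server.SetReadDeadline(time.Unix(1, 0))

	err := <-done
	if ne, ok := err.(net.Error); ok && ne.Timeout() {
		fmt.Println("read unblocked by past deadline:", err)
	} else {
		fmt.Println("unexpected error:", err)
	}
}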
Example #5
File: rpool.go  Project: intercom/dvara
func (p *Pool) manage() {
	klock := p.Clock
	if klock == nil {
		klock = clock.New()
	}

	// set up goroutines to close resources
	closers := make(chan io.Closer)
	var closeWG sync.WaitGroup
	closeWG.Add(int(p.ClosePoolSize))
	for i := uint(0); i < p.ClosePoolSize; i++ {
		go func() {
			defer closeWG.Done()
			for c := range closers {
				t := stats.BumpTime(p.Stats, "close.time")
				stats.BumpSum(p.Stats, "close", 1)
				if err := c.Close(); err != nil {
					stats.BumpSum(p.Stats, "close.error", 1)
					p.CloseErrorHandler(err)
				}
				t.End()
			}
		}()
	}

	// set up a ticker to report various averages every minute. If no Stats
	// implementation is provided, we Stop it so it never ticks.
	statsTicker := klock.Ticker(time.Minute)
	if p.Stats == nil {
		statsTicker.Stop()
	}

	resources := []entry{}
	outResources := map[io.Closer]struct{}{}
	out := uint(0)
	waiting := list.New()
	idleTicker := klock.Ticker(p.IdleTimeout)
	closed := false
	var closeResponse chan error
	for {
		if closed && out == 0 && waiting.Len() == 0 {
			if p.Stats != nil {
				statsTicker.Stop()
			}

			// all waiting acquires are done, all resources have been released.
			// now just wait for all resources to close.
			close(closers)
			closeWG.Wait()

			// close internal channels.
			close(p.acquire)
			close(p.new)
			close(p.release)
			close(p.discard)
			close(p.close)

			// return a response to the original close.
			closeResponse <- nil

			return
		}

		select {
		case r := <-p.acquire:
			// if closed, new acquire calls are rejected
			if closed {
				r <- closedSentinel
				stats.BumpSum(p.Stats, "acquire.error.closed", 1)
				continue
			}

			// acquire from pool
			if cl := len(resources); cl > 0 {
				c := resources[cl-1]
				outResources[c.resource] = struct{}{}
				r <- c.resource
				resources = resources[:cl-1]
				out++
				stats.BumpSum(p.Stats, "acquire.pool", 1)
				continue
			}

			// max resources already in use, need to block & wait
			if out == p.Max {
				waiting.PushBack(r)
				stats.BumpSum(p.Stats, "acquire.waiting", 1)
				continue
			}

			// Make a new resource in the calling goroutine by sending it a
			// newSentinel. We assume it's checked out. Acquire will discard if
			// creating a new resource fails.
			out++
			r <- newSentinel
		case c := <-p.new:
			outResources[c] = struct{}{}
		case rr := <-p.release:
			// ensure we're dealing with a resource acquired thru us
			if _, found := outResources[rr.resource]; !found {
				rr.response <- errWrongPool
				return
			}
			close(rr.response)

			// pass it to someone who's waiting
			if e := waiting.Front(); e != nil {
				r := waiting.Remove(e).(chan io.Closer)
				r <- rr.resource
				continue
			}

			// no longer out
			out--
			delete(outResources, rr.resource)

			// no one is waiting, and we're closed, schedule it to be closed
			if closed {
				closers <- rr.resource
				continue
			}

			// put it back in our pool
			resources = append(resources, entry{resource: rr.resource, use: klock.Now()})
		case rr := <-p.discard:
			// ensure we're dealing with a resource acquired thru us
			if rr.resource != newSentinel { // this happens when new fails
				if _, found := outResources[rr.resource]; !found {
					rr.response <- errWrongPool
					return
				}
				close(rr.response)
				delete(outResources, rr.resource)
				closers <- rr.resource
			}

			// we can make a new one if someone is waiting. no need to decrement out
			// in this case since we assume this new one is checked out. Acquire will
			// discard if creating a new resource fails.
			if e := waiting.Front(); e != nil {
				r := waiting.Remove(e).(chan io.Closer)
				r <- newSentinel
				continue
			}

			// otherwise we lost a resource and don't need a new one right away
			out--
		case now := <-idleTicker.C:
			eligibleOffset := len(resources) - int(p.MinIdle)

			// less than min idle, nothing to do
			if eligibleOffset <= 0 {
				continue
			}

			t := stats.BumpTime(p.Stats, "idle.cleanup.time")

			// cleanup idle resources
			idleLen := 0
			for _, e := range resources[:eligibleOffset] {
				if now.Sub(e.use) < p.IdleTimeout {
					break
				}
				closers <- e.resource
				idleLen++
			}

			// move the remaining resources to the beginning
			resources = resources[:copy(resources, resources[idleLen:])]

			t.End()
			stats.BumpSum(p.Stats, "idle.closed", float64(idleLen))
		case <-statsTicker.C:
			// We can assume if we hit this then p.Stats is not nil
			p.Stats.BumpAvg("waiting", float64(waiting.Len()))
			p.Stats.BumpAvg("idle", float64(len(resources)))
			p.Stats.BumpAvg("out", float64(out))
			p.Stats.BumpAvg("alive", float64(uint(len(resources))+out))
		case r := <-p.close:
			// can't call close if already closing
			if closed {
				r <- errCloseAgain
				continue
			}

			closed = true
			idleTicker.Stop() // stop idle processing

			// close the idle resources; if any are idle, implicitly no one is waiting
			for _, e := range resources {
				closers <- e.resource
			}

			closeResponse = r
		}
	}
}
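manage is the only goroutine that ever touches the pool's state (resources, outResources, out, waiting); Acquire, Release, Discard and Close just exchange messages with it over channels, so no mutex is needed. The miniature sketch below shows the same ownership pattern on a trivial counter; it is illustrative and not part of dvara.

package main

import "fmt"

type counter struct {
	add   chan int
	read  chan chan int
	close chan chan struct{}
}

func newCounter() *counter {
	c := &counter{
		add:   make(chan int),
		read:  make(chan chan int),
		close: make(chan chan struct{}),
	}
	go c.manage()
	return c
}

func (c *counter) manage() {
	total := 0 // state owned exclusively by this goroutine
	for {
		select {
		case n := <-c.add:
			total += n
		case r := <-c.read:
			r <- total
		case r := <-c.close:
			close(r)
			return
		}
	}
}

func main() {
	c := newCounter()
	c.add <- 2
	c.add <- 3

	r := make(chan int)
	c.read <- r
	fmt.Println(<-r) // 5

	done := make(chan struct{})
	c.close <- done
	<-done
}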
Example #6
File: proxy.go  Project: wptad/dvara
// clientServeLoop loops on a single client connected to the proxy and
// dispatches its requests.
func (p *Proxy) clientServeLoop(c net.Conn) {
	remoteIP := c.RemoteAddr().(*net.TCPAddr).IP.String()

	// enforce per-client max connection limit
	if p.maxPerClientConnections.inc(remoteIP) {
		c.Close()
		stats.BumpSum(p.stats, "client.rejected.max.connections", 1)
		p.Log.Errorf("rejecting client connection due to max connections limit: %s", remoteIP)
		return
	}

	// turn on TCP keep-alive and set it to the recommended period of 2 minutes
	// http://docs.mongodb.org/manual/faq/diagnostics/#faq-keepalive
	if conn, ok := c.(*net.TCPConn); ok {
		conn.SetKeepAlivePeriod(2 * time.Minute)
		conn.SetKeepAlive(true)
	}

	c = teeIf(fmt.Sprintf("client %s <=> %s", c.RemoteAddr(), p), c)
	p.Log.Infof("client %s connected to %s", c.RemoteAddr(), p)
	stats.BumpSum(p.stats, "client.connected", 1)
	p.ReplicaSet.ClientsConnected.Inc(1)
	defer func() {
		p.ReplicaSet.ClientsConnected.Dec(1)
		p.Log.Infof("client %s disconnected from %s", c.RemoteAddr(), p)
		p.wg.Done()
		if err := c.Close(); err != nil {
			p.Log.Error(err)
		}
		p.maxPerClientConnections.dec(remoteIP)
	}()

	var lastError LastError
	for {
		h, err := p.idleClientReadHeader(c)
		if err != nil {
			if err != errNormalClose {
				p.Log.Error(err)
			}
			return
		}

		mpt := stats.BumpTime(p.stats, "message.proxy.time")
		serverConn, err := p.getServerConn()
		if err != nil {
			if err != errNormalClose {
				p.Log.Error(err)
			}
			return
		}

		scht := stats.BumpTime(p.stats, "server.conn.held.time")
		for {
			err := p.proxyMessage(h, c, serverConn, &lastError)
			if err != nil {
				p.serverPool.Discard(serverConn)
				p.Log.Error(err)
				stats.BumpSum(p.stats, "message.proxy.error", 1)
				if ne, ok := err.(net.Error); ok && ne.Timeout() {
					stats.BumpSum(p.stats, "message.proxy.timeout", 1)
				}
				if err == errRSChanged {
					go p.ReplicaSet.Restart()
				}
				return
			}

			// One message was proxied, stop its timer.
			mpt.End()

			if !h.OpCode.IsMutation() {
				break
			}

			// If the operation we just performed was a mutation, we always make the
			// follow up request on the same server because it's possibly a getLastErr
			// call which expects this behavior.

			stats.BumpSum(p.stats, "message.with.mutation", 1)
			h, err = p.gleClientReadHeader(c)
			if err != nil {
				// Client did not make _any_ query within the GetLastErrorTimeout.
				// Return the server to the pool and go back to the outer loop.
				if err == errClientReadTimeout {
					break
				}
				// Prevent noise of normal client disconnects, but log if anything else.
				if err != errNormalClose {
					p.Log.Error(err)
				}
				// We need to return our server to the pool (it's still good as far
				// as we know).
				p.serverPool.Release(serverConn)
				return
			}

			// Successfully read message when waiting for the getLastError call.
			mpt = stats.BumpTime(p.stats, "message.proxy.time")
		}
		p.serverPool.Release(serverConn)
		scht.End()
		stats.BumpSum(p.stats, "message.proxy.success", 1)
	}
}
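clientServeLoop relies on maxPerClientConnections.inc returning true when a client IP is already at its connection limit, and on dec in the deferred cleanup to free the slot. That limiter is not shown in these examples; below is one plausible shape for it, offered as an illustrative sketch rather than dvara's actual implementation.

package main

import (
	"fmt"
	"sync"
)

type perClientLimiter struct {
	mu     sync.Mutex
	max    uint
	counts map[string]uint
}

func newPerClientLimiter(max uint) *perClientLimiter {
	return &perClientLimiter{max: max, counts: map[string]uint{}}
}

// inc returns true if the client is already at its limit (i.e. reject the connection).
func (l *perClientLimiter) inc(ip string) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.counts[ip] >= l.max {
		return true
	}
	l.counts[ip]++
	return false
}

// dec releases one connection slot for the client.
func (l *perClientLimiter) dec(ip string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.counts[ip] > 0 {
		l.counts[ip]--
	}
	if l.counts[ip] == 0 {
		delete(l.counts, ip)
	}
}

func main() {
	l := newPerClientLimiter(1)
	fmt.Println(l.inc("10.0.0.1")) // false: first connection accepted
	fmt.Println(l.inc("10.0.0.1")) // true: over the limit, reject
	l.dec("10.0.0.1")
	fmt.Println(l.inc("10.0.0.1")) // false: slot freed, accepted again
}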