Example #1
// Acquire will pull a resource from the pool or create a new one if necessary.
func (p *Pool) Acquire() (io.Closer, error) {
	elapsedTime := stats.BumpTime(p.Stats, "acquire.time")
	defer elapsedTime.End()
	p.manageOnce.Do(p.goManage)
	r := make(chan io.Closer)
	p.acquire <- r
	c := <-r

	// sentinel value indicates the pool is closed
	if c == closedSentinel {
		return nil, errPoolClosed
	}

	// need to allocate a new resource
	if c == newSentinel {
		t := stats.BumpTime(p.Stats, "acquire.new.time")
		c, err := p.New()
		t.End()
		stats.BumpSum(p.Stats, "acquire.new", 1)
		if err != nil {
			stats.BumpSum(p.Stats, "acquire.error.new", 1)
			// discard our assumed checked-out resource since New failed
			p.discard <- returnResource{resource: newSentinel}
		} else {
			p.new <- c
		}
		return c, err
	}

	// successfully acquired from pool
	return c, nil
}
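A minimal caller-side sketch for the pool above. It assumes Acquire is paired with the Release and Discard methods that the proxy's serverPool uses further down in clientServeLoop; the signatures are inferred from those call sites, not shown in this listing.

// useResource is a hedged sketch of typical caller usage: every successful
// Acquire is matched by a Release (healthy resource goes back for reuse) or a
// Discard (broken resource gets scheduled for closing).
func useResource(p *Pool) error {
	c, err := p.Acquire()
	if err != nil {
		return err
	}

	// ... use the io.Closer produced by p.New ...
	// If the resource turned out to be broken we would hand it to p.Discard
	// instead, so the pool schedules it for closing rather than reusing it.

	p.Release(c)
	return nil
}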
Example #2
File: proxy.go Project: wptad/dvara
func (p *Proxy) clientReadHeader(c net.Conn, timeout time.Duration) (*messageHeader, error) {
	t := stats.BumpTime(p.stats, "client.read.header.time")
	type headerError struct {
		header *messageHeader
		error  error
	}
	resChan := make(chan headerError)

	c.SetReadDeadline(time.Now().Add(timeout))
	go func() {
		h, err := readHeader(c)
		resChan <- headerError{header: h, error: err}
	}()

	closed := false
	var response headerError

	select {
	case response = <-resChan:
		// all good
	case <-p.closed:
		closed = true
		c.SetReadDeadline(timeInPast)
		response = <-resChan
	}

	// Successfully read a header.
	if response.error == nil {
		t.End()
		return response.header, nil
	}

	// Client side disconnected.
	if response.error == io.EOF {
		stats.BumpSum(p.stats, "client.clean.disconnect", 1)
		return nil, errNormalClose
	}

	// We hit our ReadDeadline.
	if ne, ok := response.error.(net.Error); ok && ne.Timeout() {
		if closed {
			stats.BumpSum(p.stats, "client.clean.disconnect", 1)
			return nil, errNormalClose
		}
		return nil, errClientReadTimeout
	}

	// Some other unknown error.
	stats.BumpSum(p.stats, "client.error.disconnect", 1)
	p.Log.Error(response.error)
	return nil, response.error
}
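The notable trick above is how the blocked reader goroutine gets unblocked when p.closed fires: setting the read deadline to a time in the past (timeInPast) forces the pending Read to fail with a timeout error, which the select then collects. A self-contained sketch of just that mechanism, using net.Pipe in place of a real client connection:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()

	done := make(chan error, 1)
	go func() {
		buf := make([]byte, 1)
		_, err := server.Read(buf) // blocks: the client never writes
		done <- err
	}()

	// Simulate the "proxy is closing" signal: a deadline in the past makes
	// the pending Read return immediately with a timeout error.
	time.Sleep(50 * time.Millisecond)
	server.SetReadDeadline(time.Now().Add(-time.Hour))

	err := <-done
	if ne, ok := err.(net.Error); ok && ne.Timeout() {
		fmt.Println("read unblocked by past deadline:", err)
	}
}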
Example #3
File: proxy.go Project: wptad/dvara
// proxyMessage proxies a message, possibly its response, and possibly a
// follow-up call.
func (p *Proxy) proxyMessage(
	h *messageHeader,
	client net.Conn,
	server net.Conn,
	lastError *LastError,
) error {

	p.Log.Debugf("proxying message %s from %s for %s", h, client.RemoteAddr(), p)
	deadline := time.Now().Add(p.ReplicaSet.MessageTimeout)
	server.SetDeadline(deadline)
	client.SetDeadline(deadline)

	// OpQuery may need to be transformed and needs special handling in order
	// to make the proxy transparent.
	if h.OpCode == OpQuery {
		stats.BumpSum(p.stats, "message.with.response", 1)
		return p.ReplicaSet.ProxyQuery.Proxy(h, client, server, lastError)
	}

	// Anything besides a getlasterror call (which requires an OpQuery) resets
	// the lastError.
	if lastError.Exists() {
		p.Log.Debug("reset getLastError cache")
		lastError.Reset()
	}

	// For other Ops we proxy the header & raw body over.
	if err := h.WriteTo(server); err != nil {
		p.Log.Error(err)
		return err
	}

	if _, err := io.CopyN(server, client, int64(h.MessageLength-headerLen)); err != nil {
		p.Log.Error(err)
		return err
	}

	// For Ops with responses we proxy the raw response message over.
	if h.OpCode.HasResponse() {
		stats.BumpSum(p.stats, "message.with.response", 1)
		if err := copyMessage(client, server); err != nil {
			p.Log.Error(err)
			return err
		}
	}

	return nil
}
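The relay above forwards the header with WriteTo and then streams the remaining MessageLength-headerLen bytes with io.CopyN, so the body is never buffered in full. A standalone sketch of the same pattern for a generic length-prefixed frame; the 16-byte header with a leading little-endian length matches the MongoDB wire protocol dvara speaks, but the helper below is illustrative and not part of the project.

package wire

import (
	"encoding/binary"
	"io"
)

const headerLen = 16 // MongoDB wire protocol header: four int32 fields

// relayFrame forwards one length-prefixed message from src to dst: read the
// fixed-size header, extract the total message length from its first four
// bytes, write the header, then stream the remaining body bytes.
func relayFrame(dst io.Writer, src io.Reader) error {
	var header [headerLen]byte
	if _, err := io.ReadFull(src, header[:]); err != nil {
		return err
	}
	msgLen := int64(binary.LittleEndian.Uint32(header[:4]))

	if _, err := dst.Write(header[:]); err != nil {
		return err
	}
	_, err := io.CopyN(dst, src, msgLen-headerLen)
	return err
}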
Example #4
File: proxy.go Project: wptad/dvara
func (p *Proxy) gleClientReadHeader(c net.Conn) (*messageHeader, error) {
	h, err := p.clientReadHeader(c, p.ReplicaSet.GetLastErrorTimeout)
	if err == errClientReadTimeout {
		stats.BumpSum(p.stats, "client.gle.timeout", 1)
	}
	return h, err
}
func (s *server) Stop() error {
	s.stopOnce.Do(func() {
		defer stats.BumpTime(s.stats, "stop.time").End()
		stats.BumpSum(s.stats, "stop", 1)

		// first disable keep-alive for new connections
		s.server.SetKeepAlivesEnabled(false)

		// then close the listener so new connections can't come through
		closeErr := s.listener.Close()
		<-s.serveDone

		// then trigger the background goroutine to stop and wait for it
		stopDone := make(chan struct{})
		s.stop <- stopDone

		// wait for stop
		select {
		case <-stopDone:
		case <-s.clock.After(s.stopTimeout):
			defer stats.BumpTime(s.stats, "kill.time").End()
			stats.BumpSum(s.stats, "kill", 1)

			// stop timed out, wait for kill
			killDone := make(chan struct{})
			s.kill <- killDone
			select {
			case <-killDone:
			case <-s.clock.After(s.killTimeout):
				// kill timed out, give up
				stats.BumpSum(s.stats, "kill.timeout", 1)
			}
		}

		if closeErr != nil && !isUseOfClosedError(closeErr) {
			stats.BumpSum(s.stats, "listener.close.error", 1)
			s.stopErr = closeErr
		}
	})
	return s.stopErr
}
// ListenAndServe returns a Server for the given http.Server. It is equivalent
// to ListenAndServe from the standard library, but returns immediately.
// Requests will be accepted in a background goroutine. If the http.Server has
// a non-nil TLSConfig, a TLS-enabled listener will be set up.
func (h HTTP) ListenAndServe(s *http.Server) (Server, error) {
	addr := s.Addr
	if addr == "" {
		if s.TLSConfig == nil {
			addr = ":http"
		} else {
			addr = ":https"
		}
	}
	l, err := net.Listen("tcp", addr)
	if err != nil {
		stats.BumpSum(h.Stats, "listen.error", 1)
		return nil, err
	}
	if s.TLSConfig != nil {
		l = tls.NewListener(l, s.TLSConfig)
	}
	return h.Serve(s, l), nil
}
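A hedged usage sketch combining ListenAndServe with the Stop method shown earlier. The address, handler, and zero-value HTTP are illustrative assumptions; this listing does not show what defaults HTTP.Serve applies.

func runUntilStopped() error {
	httpServer := &http.Server{
		Addr: "127.0.0.1:8080",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "ok")
		}),
	}

	// Stats left nil here; throughout this listing the package-level stats
	// helpers are called with possibly-nil sinks.
	var hd HTTP
	srv, err := hd.ListenAndServe(httpServer)
	if err != nil {
		return err
	}

	// ... serve traffic until it's time to shut down ...

	// Stop disables keep-alives, closes the listener, and waits for the
	// background goroutine, escalating to kill on timeout (see Stop above).
	return srv.Stop()
}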
Example #7
func (p *Pool) manage() {
	klock := p.Clock
	if klock == nil {
		klock = clock.New()
	}

	// setup goroutines to close resources
	closers := make(chan io.Closer)
	var closeWG sync.WaitGroup
	closeWG.Add(int(p.ClosePoolSize))
	for i := uint(0); i < p.ClosePoolSize; i++ {
		go func() {
			defer closeWG.Done()
			for c := range closers {
				t := stats.BumpTime(p.Stats, "close.time")
				stats.BumpSum(p.Stats, "close", 1)
				if err := c.Close(); err != nil {
					stats.BumpSum(p.Stats, "close.error", 1)
					p.CloseErrorHandler(err)
				}
				t.End()
			}
		}()
	}

	// setup a ticker to report various averages every minute. if we don't have a
	// Stats implementation provided, we Stop it so it never ticks.
	statsTicker := klock.Ticker(time.Minute)
	if p.Stats == nil {
		statsTicker.Stop()
	}

	resources := []entry{}
	outResources := map[io.Closer]struct{}{}
	out := uint(0)
	waiting := list.New()
	idleTicker := klock.Ticker(p.IdleTimeout)
	closed := false
	var closeResponse chan error
	for {
		if closed && out == 0 && waiting.Len() == 0 {
			if p.Stats != nil {
				statsTicker.Stop()
			}

			// all waiting acquires are done, all resources have been released.
			// now just wait for all resources to close.
			close(closers)
			closeWG.Wait()

			// close internal channels.
			close(p.acquire)
			close(p.new)
			close(p.release)
			close(p.discard)
			close(p.close)

			// return a response to the original close.
			closeResponse <- nil

			return
		}

		select {
		case r := <-p.acquire:
			// if closed, new acquire calls are rejected
			if closed {
				r <- closedSentinel
				stats.BumpSum(p.Stats, "acquire.error.closed", 1)
				continue
			}

			// acquire from pool
			if cl := len(resources); cl > 0 {
				c := resources[cl-1]
				outResources[c.resource] = struct{}{}
				r <- c.resource
				resources = resources[:cl-1]
				out++
				stats.BumpSum(p.Stats, "acquire.pool", 1)
				continue
			}

			// max resources already in use, need to block & wait
			if out == p.Max {
				waiting.PushBack(r)
				stats.BumpSum(p.Stats, "acquire.waiting", 1)
				continue
			}

			// Make a new resource in the calling goroutine by sending it a
			// newSentinel. We assume it's checked out. Acquire will discard if
			// creating a new resource fails.
			out++
			r <- newSentinel
		case c := <-p.new:
			outResources[c] = struct{}{}
		case rr := <-p.release:
			// ensure we're dealing with a resource acquired through us
			if _, found := outResources[rr.resource]; !found {
				rr.response <- errWrongPool
				return
			}
			close(rr.response)

			// pass it to someone who's waiting
			if e := waiting.Front(); e != nil {
				r := waiting.Remove(e).(chan io.Closer)
				r <- rr.resource
				continue
			}

			// no longer out
			out--
			delete(outResources, rr.resource)

			// no one is waiting, and we're closed, schedule it to be closed
			if closed {
				closers <- rr.resource
				continue
			}

			// put it back in our pool
			resources = append(resources, entry{resource: rr.resource, use: klock.Now()})
		case rr := <-p.discard:
			// ensure we're dealing with a resource acquired through us
			if rr.resource != newSentinel { // this happens when new fails
				if _, found := outResources[rr.resource]; !found {
					rr.response <- errWrongPool
					return
				}
				close(rr.response)
				delete(outResources, rr.resource)
				closers <- rr.resource
			}

			// we can make a new one if someone is waiting. no need to decrement out
			// in this case since we assume this new one is checked out. Acquire will
			// discard if creating a new resource fails.
			if e := waiting.Front(); e != nil {
				r := waiting.Remove(e).(chan io.Closer)
				r <- newSentinel
				continue
			}

			// otherwise we lost a resource and don't need a new one right away
			out--
		case now := <-idleTicker.C:
			eligibleOffset := len(resources) - int(p.MinIdle)

			// less than min idle, nothing to do
			if eligibleOffset <= 0 {
				continue
			}

			t := stats.BumpTime(p.Stats, "idle.cleanup.time")

			// cleanup idle resources
			idleLen := 0
			for _, e := range resources[:eligibleOffset] {
				if now.Sub(e.use) < p.IdleTimeout {
					break
				}
				closers <- e.resource
				idleLen++
			}

			// move the remaining resources to the beginning
			resources = resources[:copy(resources, resources[idleLen:])]

			t.End()
			stats.BumpSum(p.Stats, "idle.closed", float64(idleLen))
		case <-statsTicker.C:
			// We can assume if we hit this then p.Stats is not nil
			p.Stats.BumpAvg("waiting", float64(waiting.Len()))
			p.Stats.BumpAvg("idle", float64(len(resources)))
			p.Stats.BumpAvg("out", float64(out))
			p.Stats.BumpAvg("alive", float64(uint(len(resources))+out))
		case r := <-p.close:
			// can't call close if we're already closing
			if closed {
				r <- errCloseAgain
				continue
			}

			closed = true
			idleTicker.Stop() // stop idle processing

			// close idle resources; if we have any idle, implicitly no one is waiting
			for _, e := range resources {
				closers <- e.resource
			}

			closeResponse = r
		}
	}
}
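The close-worker setup at the top of manage() is a reusable pattern on its own: a fixed number of goroutines drain a channel of io.Closers so a slow Close never blocks the manage loop, and a WaitGroup lets shutdown wait for the drain. A standalone sketch using only the standard library:

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

func main() {
	closers := make(chan io.Closer)
	var closeWG sync.WaitGroup

	const closePoolSize = 2
	closeWG.Add(closePoolSize)
	for i := 0; i < closePoolSize; i++ {
		go func() {
			defer closeWG.Done()
			for c := range closers {
				if err := c.Close(); err != nil {
					fmt.Println("close error:", err)
				}
			}
		}()
	}

	// Feed a few resources to be closed; NopCloser stands in for real conns.
	for i := 0; i < 5; i++ {
		closers <- io.NopCloser(strings.NewReader("resource"))
	}

	// Shutdown: close the channel and wait for the workers to drain it.
	close(closers)
	closeWG.Wait()
	fmt.Println("all resources closed")
}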
Example #8
File: proxy.go Project: wptad/dvara
// clientServeLoop loops on a single client connected to the proxy and
// dispatches its requests.
func (p *Proxy) clientServeLoop(c net.Conn) {
	remoteIP := c.RemoteAddr().(*net.TCPAddr).IP.String()

	// enforce per-client max connection limit
	if p.maxPerClientConnections.inc(remoteIP) {
		c.Close()
		stats.BumpSum(p.stats, "client.rejected.max.connections", 1)
		p.Log.Errorf("rejecting client connection due to max connections limit: %s", remoteIP)
		return
	}

	// turn on TCP keep-alive and set it to the recommended period of 2 minutes
	// http://docs.mongodb.org/manual/faq/diagnostics/#faq-keepalive
	if conn, ok := c.(*net.TCPConn); ok {
		conn.SetKeepAlivePeriod(2 * time.Minute)
		conn.SetKeepAlive(true)
	}

	c = teeIf(fmt.Sprintf("client %s <=> %s", c.RemoteAddr(), p), c)
	p.Log.Infof("client %s connected to %s", c.RemoteAddr(), p)
	stats.BumpSum(p.stats, "client.connected", 1)
	p.ReplicaSet.ClientsConnected.Inc(1)
	defer func() {
		p.ReplicaSet.ClientsConnected.Dec(1)
		p.Log.Infof("client %s disconnected from %s", c.RemoteAddr(), p)
		p.wg.Done()
		if err := c.Close(); err != nil {
			p.Log.Error(err)
		}
		p.maxPerClientConnections.dec(remoteIP)
	}()

	var lastError LastError
	for {
		h, err := p.idleClientReadHeader(c)
		if err != nil {
			if err != errNormalClose {
				p.Log.Error(err)
			}
			return
		}

		mpt := stats.BumpTime(p.stats, "message.proxy.time")
		serverConn, err := p.getServerConn()
		if err != nil {
			if err != errNormalClose {
				p.Log.Error(err)
			}
			return
		}

		scht := stats.BumpTime(p.stats, "server.conn.held.time")
		for {
			err := p.proxyMessage(h, c, serverConn, &lastError)
			if err != nil {
				p.serverPool.Discard(serverConn)
				p.Log.Error(err)
				stats.BumpSum(p.stats, "message.proxy.error", 1)
				if ne, ok := err.(net.Error); ok && ne.Timeout() {
					stats.BumpSum(p.stats, "message.proxy.timeout", 1)
				}
				if err == errRSChanged {
					go p.ReplicaSet.Restart()
				}
				return
			}

			// One message was proxied; stop its timer.
			mpt.End()

			if !h.OpCode.IsMutation() {
				break
			}

			// If the operation we just performed was a mutation, we always make the
			// follow-up request on the same server because it's possibly a
			// getLastError call, which expects this behavior.

			stats.BumpSum(p.stats, "message.with.mutation", 1)
			h, err = p.gleClientReadHeader(c)
			if err != nil {
				// Client did not make _any_ query within the GetLastErrorTimeout.
				// Return the server to the pool and go back to the outer loop.
				if err == errClientReadTimeout {
					break
				}
				// Avoid noise from normal client disconnects, but log anything else.
				if err != errNormalClose {
					p.Log.Error(err)
				}
				// We need to return our server to the pool (it's still good as far
				// as we know).
				p.serverPool.Release(serverConn)
				return
			}

			// Successfully read message when waiting for the getLastError call.
			mpt = stats.BumpTime(p.stats, "message.proxy.time")
		}
		p.serverPool.Release(serverConn)
		scht.End()
		stats.BumpSum(p.stats, "message.proxy.success", 1)
	}
}
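maxPerClientConnections is used above only through its inc and dec calls; its implementation is not part of this listing. The following is a purely hypothetical sketch of a shape that would fit those call sites, where inc returning true means the client is already at its limit and should be rejected.

// perClientLimiter is a hypothetical, mutex-guarded per-IP connection counter;
// it is NOT the project's maxPerClientConnections type, only an illustration.
type perClientLimiter struct {
	mu    sync.Mutex
	max   uint
	count map[string]uint
}

func newPerClientLimiter(max uint) *perClientLimiter {
	return &perClientLimiter{max: max, count: map[string]uint{}}
}

// inc reports true if clientIP is already at the limit (caller rejects the
// connection); otherwise it records one more connection and returns false.
func (m *perClientLimiter) inc(clientIP string) bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.count[clientIP] >= m.max {
		return true
	}
	m.count[clientIP]++
	return false
}

// dec releases the slot held by one connection from clientIP.
func (m *perClientLimiter) dec(clientIP string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.count[clientIP] <= 1 {
		delete(m.count, clientIP)
		return
	}
	m.count[clientIP]--
}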
func (s *server) serve() {
	stats.BumpSum(s.stats, "serve", 1)
	s.serveErr <- s.server.Serve(s.listener)
	close(s.serveDone)
	close(s.serveErr)
}
Example #10
func (s *server) manage() {
	defer func() {
		close(s.new)
		close(s.active)
		close(s.idle)
		close(s.closed)
		close(s.stop)
		close(s.kill)
	}()

	var stopDone chan struct{}

	conns := map[net.Conn]http.ConnState{}
	var countNew, countActive, countIdle float64

	// decConn decrements the count associated with the current state of the
	// given connection.
	decConn := func(c net.Conn) {
		switch conns[c] {
		default:
			panic(fmt.Errorf("unknown existing connection: %s", c))
		case http.StateNew:
			countNew--
		case http.StateActive:
			countActive--
		case http.StateIdle:
			countIdle--
		}
	}

	// setup a ticker to report various values every minute. if we don't have a
	// Stats implementation provided, we Stop it so it never ticks.
	statsTicker := s.clock.Ticker(time.Minute)
	if s.stats == nil {
		statsTicker.Stop()
	}

	for {
		select {
		case <-statsTicker.C:
			// we'll only get here when s.stats is not nil
			s.stats.BumpAvg("http-state.new", countNew)
			s.stats.BumpAvg("http-state.active", countActive)
			s.stats.BumpAvg("http-state.idle", countIdle)
			s.stats.BumpAvg("http-state.total", countNew+countActive+countIdle)
		case c := <-s.new:
			conns[c] = http.StateNew
			countNew++
		case c := <-s.active:
			decConn(c)
			countActive++

			conns[c] = http.StateActive
		case c := <-s.idle:
			decConn(c)
			countIdle++

			conns[c] = http.StateIdle

			// if we're already stopping, close it
			if stopDone != nil {
				c.Close()
			}
		case c := <-s.closed:
			stats.BumpSum(s.stats, "conn.closed", 1)
			decConn(c)
			delete(conns, c)

			// if we're waiting to stop and are all empty, we just closed the last
			// connection and we're done.
			if stopDone != nil && len(conns) == 0 {
				close(stopDone)
				return
			}
		case stopDone = <-s.stop:
			// if we're already all empty, we're already done
			if len(conns) == 0 {
				close(stopDone)
				return
			}

			// close current idle connections right away
			for c, cs := range conns {
				if cs == http.StateIdle {
					c.Close()
				}
			}

			// continue the loop and wait for all the ConnState updates which will
			// eventually close(stopDone) and return from this goroutine.

		case killDone := <-s.kill:
			// force close all connections
			stats.BumpSum(s.stats, "kill.conn.count", float64(len(conns)))
			for c := range conns {
				c.Close()
			}

			// don't block the kill.
			close(killDone)

			// continue the loop and wait for all the ConnState updates; we return
			// from this goroutine once they're all done. Otherwise we'd try to send
			// those ConnState updates on closed channels.

		}
	}
}
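manage() only shows the receiving side of the new, active, idle, and closed channels; the sending side is presumably the standard library's http.Server.ConnState hook, which fires on every connection state change. A hedged sketch of that wiring (the actual hookup is not shown in this listing):

func wireConnState(hs *http.Server, s *server) {
	hs.ConnState = func(c net.Conn, state http.ConnState) {
		switch state {
		case http.StateNew:
			s.new <- c
		case http.StateActive:
			s.active <- c
		case http.StateIdle:
			s.idle <- c
		case http.StateClosed, http.StateHijacked:
			// Treating hijacked like closed is an assumption; manage() above
			// only tracks the new/active/idle states explicitly.
			s.closed <- c
		}
	}
}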