Example #1
// Ensure that the clock's time matches the standard library.
func TestClock_Now(t *testing.T) {
	a := time.Now().Round(time.Second)
	b := clock.New().Now().Round(time.Second)
	if !a.Equal(b) {
		t.Errorf("not equal: %s != %s", a, b)
	}
}
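The test above exercises the real clock; the same package also ships a mock for deterministic tests. A minimal sketch, assuming the package's clock.NewMock and Mock.Add API:

// Advance a mock clock manually instead of sleeping.
func TestClock_Mock_Now(t *testing.T) {
	mock := clock.NewMock()
	start := mock.Now()
	mock.Add(10 * time.Second) // move the mock clock forward 10s
	if got := mock.Now().Sub(start); got != 10*time.Second {
		t.Errorf("expected 10s elapsed, got %s", got)
	}
}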
Example #2
// NewBreakerWithOptions creates a base breaker with the specified backoff, clock, and TripFunc.
func NewBreakerWithOptions(options *Options) *Breaker {
	if options == nil {
		options = &Options{}
	}

	if options.Clock == nil {
		options.Clock = clock.New()
	}

	if options.BackOff == nil {
		b := backoff.NewExponentialBackOff()
		b.InitialInterval = defaultInitialBackOffInterval
		b.Clock = options.Clock
		b.Reset()
		options.BackOff = b
	}

	return &Breaker{
		BackOff:     options.BackOff,
		Clock:       options.Clock,
		ShouldTrip:  options.ShouldTrip,
		nextBackOff: options.BackOff.NextBackOff(),
		counts:      newWindow(DefaultWindowTime, DefaultWindowBuckets),
	}
}
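A hedged usage sketch for the constructor above: only ShouldTrip is supplied, so Clock and BackOff fall back to the defaults shown. The TripFunc signature (func(*Breaker) bool) and the Failures accessor are assumptions about this package, not shown in the excerpt:

func newTrippingBreaker() *Breaker {
	return NewBreakerWithOptions(&Options{
		// Trip once ten failures have been recorded.
		ShouldTrip: func(cb *Breaker) bool {
			return cb.Failures() >= 10
		},
	})
}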
Example #3
// Ensure that the clock's timer can be stopped.
func TestClock_Timer_Stop(t *testing.T) {
	var ok bool
	go func() {
		time.Sleep(10 * time.Millisecond)
		ok = true
	}()

	timer := clock.New().Timer(20 * time.Millisecond)
	timer.Stop()
	select {
	case <-timer.C:
		t.Fatal("unexpected send")
	case <-time.After(30 * time.Millisecond):
	}
}
Example #4
// Ensure that the clock sleeps for the appropriate amount of time.
func TestClock_Sleep(t *testing.T) {
	var ok bool
	go func() {
		time.Sleep(10 * time.Millisecond)
		ok = true
	}()
	go func() {
		time.Sleep(30 * time.Millisecond)
		t.Fatal("too late")
	}()
	gosched()

	clock.New().Sleep(20 * time.Millisecond)
	if !ok {
		t.Fatal("too early")
	}
}
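These tests call a gosched helper that is not shown in the excerpts; upstream it is a short sleep that yields so the background goroutines get a chance to run. A sketch of that assumption:

func gosched() { time.Sleep(1 * time.Millisecond) }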
Example #5
// newWindow creates a new window. windowTime is the time covering the entire
// window. windowBuckets is the number of buckets the window is divided into.
// An example: a 10 second window with 10 buckets will have 10 buckets covering
// 1 second each.
func newWindow(windowTime time.Duration, windowBuckets int) *window {
	buckets := ring.New(windowBuckets)
	for i := 0; i < buckets.Len(); i++ {
		buckets.Value = &bucket{}
		buckets = buckets.Next()
	}

	clock := clock.New()

	bucketTime := time.Duration(windowTime.Nanoseconds() / int64(windowBuckets))
	return &window{
		buckets:    buckets,
		bucketTime: bucketTime,
		clock:      clock,
		lastAccess: clock.Now(),
	}
}
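To make the bucket math concrete: newWindow(10*time.Second, 10) yields a bucketTime of 1s, so an access 2.5s after lastAccess spans two whole buckets. A hypothetical helper (not part of the package) illustrating the arithmetic:

// bucketsToAdvance reports how many whole buckets an elapsed duration covers.
func bucketsToAdvance(elapsed, bucketTime time.Duration) int {
	return int(elapsed / bucketTime) // e.g. 2500ms / 1s => 2
}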
Example #6
// Ensure that the clock ticks correctly.
func TestClock_Tick(t *testing.T) {
	var ok bool
	go func() {
		time.Sleep(10 * time.Millisecond)
		ok = true
	}()
	go func() {
		time.Sleep(50 * time.Millisecond)
		t.Fatal("too late")
	}()
	gosched()

	c := clock.New().Tick(20 * time.Millisecond)
	<-c
	<-c
	if !ok {
		t.Fatal("too early")
	}
}
Example #7
// Ensure that the clock's ticker ticks correctly.
func TestClock_Ticker(t *testing.T) {
	var ok bool
	go func() {
		time.Sleep(100 * time.Millisecond)
		ok = true
	}()
	go func() {
		time.Sleep(200 * time.Millisecond)
		t.Fatal("too late")
	}()
	gosched()

	ticker := clock.New().Ticker(50 * time.Millisecond)
	<-ticker.C
	<-ticker.C
	if !ok {
		t.Fatal("too early")
	}
}
Example #8
// Ensure that the clock's AfterFunc executes at the correct time.
func TestClock_AfterFunc(t *testing.T) {
	var ok bool
	go func() {
		time.Sleep(10 * time.Millisecond)
		ok = true
	}()
	go func() {
		time.Sleep(30 * time.Millisecond)
		t.Fatal("too late")
	}()
	gosched()

	var wg sync.WaitGroup
	wg.Add(1)
	clock.New().AfterFunc(20*time.Millisecond, func() {
		wg.Done()
	})
	wg.Wait()
	if !ok {
		t.Fatal("too early")
	}
}
Example #9
// Serve provides the low-level API which is useful if you're creating your own
// net.Listener.
func (h HTTP) Serve(s *http.Server, l net.Listener) Server {
	stopTimeout := h.StopTimeout
	if stopTimeout == 0 {
		stopTimeout = defaultStopTimeout
	}
	killTimeout := h.KillTimeout
	if killTimeout == 0 {
		killTimeout = defaultKillTimeout
	}
	klock := h.Clock
	if klock == nil {
		klock = clock.New()
	}

	ss := &server{
		stopTimeout:  stopTimeout,
		killTimeout:  killTimeout,
		stats:        h.Stats,
		clock:        klock,
		oldConnState: s.ConnState,
		listener:     l,
		server:       s,
		serveDone:    make(chan struct{}),
		serveErr:     make(chan error, 1),
		new:          make(chan net.Conn),
		active:       make(chan net.Conn),
		idle:         make(chan net.Conn),
		closed:       make(chan net.Conn),
		stop:         make(chan chan struct{}),
		kill:         make(chan chan struct{}),
	}
	s.ConnState = ss.connState
	go ss.manage()
	go ss.serve()
	return ss
}
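A hedged usage sketch for Serve: bring your own net.Listener, then stop the returned Server to drain connections. The Stop method is assumed from this package's Server interface; the address and handler are placeholders:

func serveExample() error {
	l, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		return err
	}
	h := HTTP{StopTimeout: 10 * time.Second, KillTimeout: time.Minute}
	srv := h.Serve(&http.Server{Handler: http.NewServeMux()}, l)
	// ... accept traffic, then shut down gracefully.
	return srv.Stop()
}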
Example #10
func (c *Client) clock() clock.Clock {
	if c.klock == nil {
		return clock.New()
	}
	return c.klock
}
Example #11
func (p *Pool) manage() {
	klock := p.Clock
	if klock == nil {
		klock = clock.New()
	}

	// set up goroutines to close resources
	closers := make(chan io.Closer)
	var closeWG sync.WaitGroup
	closeWG.Add(int(p.ClosePoolSize))
	for i := uint(0); i < p.ClosePoolSize; i++ {
		go func() {
			defer closeWG.Done()
			for c := range closers {
				t := stats.BumpTime(p.Stats, "close.time")
				stats.BumpSum(p.Stats, "close", 1)
				if err := c.Close(); err != nil {
					stats.BumpSum(p.Stats, "close.error", 1)
					p.CloseErrorHandler(err)
				}
				t.End()
			}
		}()
	}

	// Set up a ticker to report various averages every minute. If no Stats
	// implementation was provided, we Stop it so it never ticks.
	statsTicker := klock.Ticker(time.Minute)
	if p.Stats == nil {
		statsTicker.Stop()
	}

	resources := []entry{}
	outResources := map[io.Closer]struct{}{}
	out := uint(0)
	waiting := list.New()
	idleTicker := klock.Ticker(p.IdleTimeout)
	closed := false
	var closeResponse chan error
	for {
		if closed && out == 0 && waiting.Len() == 0 {
			if p.Stats != nil {
				statsTicker.Stop()
			}

			// all waiting acquires are done, all resources have been released.
			// now just wait for all resources to close.
			close(closers)
			closeWG.Wait()

			// close internal channels.
			close(p.acquire)
			close(p.new)
			close(p.release)
			close(p.discard)
			close(p.close)

			// return a response to the original close.
			closeResponse <- nil

			return
		}

		select {
		case r := <-p.acquire:
			// if closed, new acquire calls are rejected
			if closed {
				r <- closedSentinel
				stats.BumpSum(p.Stats, "acquire.error.closed", 1)
				continue
			}

			// acquire from pool
			if cl := len(resources); cl > 0 {
				c := resources[cl-1]
				outResources[c.resource] = struct{}{}
				r <- c.resource
				resources = resources[:cl-1]
				out++
				stats.BumpSum(p.Stats, "acquire.pool", 1)
				continue
			}

			// max resources already in use, need to block & wait
			if out == p.Max {
				waiting.PushBack(r)
				stats.BumpSum(p.Stats, "acquire.waiting", 1)
				continue
			}

			// Make a new resource in the calling goroutine by sending it a
			// newSentinel. We assume it's checked out. Acquire will discard if
			// creating a new resource fails.
			out++
			r <- newSentinel
		case c := <-p.new:
			outResources[c] = struct{}{}
		case rr := <-p.release:
			// ensure we're dealing with a resource acquired through us
			if _, found := outResources[rr.resource]; !found {
				rr.response <- errWrongPool
				return
			}
			close(rr.response)

			// pass it to someone who's waiting
			if e := waiting.Front(); e != nil {
				r := waiting.Remove(e).(chan io.Closer)
				r <- rr.resource
				continue
			}

			// no longer out
			out--
			delete(outResources, rr.resource)

			// no one is waiting, and we're closed, schedule it to be closed
			if closed {
				closers <- rr.resource
				continue
			}

			// put it back in our pool
			resources = append(resources, entry{resource: rr.resource, use: klock.Now()})
		case rr := <-p.discard:
			// ensure we're dealing with a resource acquired through us
			if rr.resource != newSentinel { // this happens when new fails
				if _, found := outResources[rr.resource]; !found {
					rr.response <- errWrongPool
					return
				}
				close(rr.response)
				delete(outResources, rr.resource)
				closers <- rr.resource
			}

			// we can make a new one if someone is waiting. no need to decrement out
			// in this case since we assume this new one is checked out. Acquire will
			// discard if creating a new resource fails.
			if e := waiting.Front(); e != nil {
				r := waiting.Remove(e).(chan io.Closer)
				r <- newSentinel
				continue
			}

			// otherwise we lost a resource and don't need a new one right away
			out--
		case now := <-idleTicker.C:
			eligibleOffset := len(resources) - int(p.MinIdle)

			// less than min idle, nothing to do
			if eligibleOffset <= 0 {
				continue
			}

			t := stats.BumpTime(p.Stats, "idle.cleanup.time")

			// cleanup idle resources
			idleLen := 0
			for _, e := range resources[:eligibleOffset] {
				if now.Sub(e.use) < p.IdleTimeout {
					break
				}
				closers <- e.resource
				idleLen++
			}

			// move the remaining resources to the beginning
			resources = resources[:copy(resources, resources[idleLen:])]

			t.End()
			stats.BumpSum(p.Stats, "idle.closed", float64(idleLen))
		case <-statsTicker.C:
			// If we hit this case, p.Stats is not nil.
			p.Stats.BumpAvg("waiting", float64(waiting.Len()))
			p.Stats.BumpAvg("idle", float64(len(resources)))
			p.Stats.BumpAvg("out", float64(out))
			p.Stats.BumpAvg("alive", float64(uint(len(resources))+out))
		case r := <-p.close:
			// can't call Close if already closing
			if closed {
				r <- errCloseAgain
				continue
			}

			closed = true
			idleTicker.Stop() // stop idle processing

			// close the idle resources; if any exist, implicitly no one is waiting
			for _, e := range resources {
				closers <- e.resource
			}

			closeResponse = r
		}
	}
}
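A hedged usage sketch for the pool managed above, assuming public Acquire and Release wrappers around the acquire and release channels (their exact signatures are not shown in this excerpt):

func useResource(p *Pool) error {
	r, err := p.Acquire() // blocks while Max resources are checked out
	if err != nil {
		return err
	}
	defer p.Release(r) // hands it to a waiter or returns it to the idle pool
	// ... use the resource ...
	return nil
}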
Example #12
func main() {
	// Some parts of apps.go are unable to handle interrupts;
	// this logic ensures we exit on system interrupts.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)
	go func() {
		<-interrupt
		os.Exit(1)
	}()

	e := env{
		Root:        os.Getenv("PARSE_ROOT"),
		Server:      os.Getenv("PARSE_SERVER"),
		ErrorStack:  os.Getenv("PARSE_ERROR_STACK") == "1",
		ParserEmail: os.Getenv("PARSER_EMAIL"),
		Out:         os.Stdout,
		Err:         os.Stderr,
		In:          os.Stdin,
		Exit:        os.Exit,
		Clock:       clock.New(),
	}
	if e.Root == "" {
		cur, err := os.Getwd()
		if err != nil {
			fmt.Fprintf(e.Err, "Failed to get current directory:\n%s\n", err)
			os.Exit(1)
		}
		root := getProjectRoot(&e, cur)
		if isProjectDir(root) {
			e.Root = root
			config, err := configFromDir(root)
			if err != nil {
				fmt.Fprintln(e.Err, err)
				os.Exit(1)
			}
			e.Type = config.getProjectConfig().Type
			if e.ParserEmail == "" {
				e.ParserEmail = config.getProjectConfig().ParserEmail
			}
		} else {
			e.Type = legacyParseFormat
			e.Root = getLegacyProjectRoot(&e, cur)
		}
	}
	if e.Type != legacyParseFormat && e.Type != parseFormat {
		fmt.Fprintf(e.Err, "Unknown project type %d.\n", e.Type)
		os.Exit(1)
	}

	if e.Server == "" {
		e.Server = defaultBaseURL
	}

	apiClient, err := newParseAPIClient(&e)
	if err != nil {
		fmt.Fprintln(e.Err, err)
		os.Exit(1)
	}
	e.ParseAPIClient = apiClient

	var (
		rootCmd *cobra.Command
		command []string
	)
	switch e.Type {
	case legacyParseFormat, parseFormat:
		command, rootCmd = parseRootCmd(&e)
	}

	if len(command) == 0 || command[0] != "update" {
		message, err := checkIfSupported(&e, version, command...)
		if err != nil {
			fmt.Fprintln(e.Err, err)
			os.Exit(1)
		}
		if message != "" {
			fmt.Fprintln(e.Err, message)
		}
	}

	if err := rootCmd.Execute(); err != nil {
		// Error is already printed in Execute()
		os.Exit(1)
	}
}
Example #13
func main() {
	// Some parts of apps_cmd.go are unable to handle interrupts;
	// this logic ensures we exit on system interrupts.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)
	go func() {
		<-interrupt
		os.Exit(1)
	}()

	e := env{
		Root:       os.Getenv("PARSE_ROOT"),
		Server:     os.Getenv("PARSE_SERVER"),
		ErrorStack: os.Getenv("PARSE_ERROR_STACK") == "1",
		Out:        os.Stdout,
		Err:        os.Stderr,
		In:         os.Stdin,
		Exit:       os.Exit,
		Clock:      clock.New(),
	}
	if e.Root == "" {
		cur, err := os.Getwd()
		if err != nil {
			fmt.Fprintf(e.Err, "Failed to get current directory:\n%s\n", err)
			os.Exit(1)
		}
		e.Root = getProjectRoot(&e, cur)
	}
	e.Type = legacy
	if e.Server == "" {
		e.Server = defaultBaseURL
	}
	client, err := newParseClient(&e)
	if err != nil {
		fmt.Fprintln(e.Err, err)
		os.Exit(1)
	}
	e.Client = client

	// autoUpdate is false for all non-production builds, so we never
	// auto-update them. For production builds autoUpdate is true, but
	// we suppress the auto-update if PARSE_NOUPDATE is set.
	if autoUpdate && os.Getenv("PARSE_NOUPDATE") == "" {
		// Perform a best effort update
		updated, err := (&updateCmd{}).updateCLI(&e)
		if err != nil {
			cmd := exec.Command(os.Args[0], "version")
			if err := cmd.Run(); err != nil {
				fmt.Fprintf(e.Out, `parse cli corrupted during update.
Please follow instructions at: 
	https://parse.com/apps/quickstart#cloud_code
to install a new cli.`)
				os.Exit(1)
			}
		}

		if updated {
			// Re-run the command with the updated CLI
			cmd := exec.Cmd{
				Path:   os.Args[0],
				Args:   os.Args,
				Stdin:  os.Stdin,
				Stdout: os.Stdout,
				Stderr: os.Stderr,
			}

			if err := cmd.Run(); err != nil {
				os.Exit(1)
			}
			return
		}
	}

	if err := rootCmd(&e).Execute(); err != nil {
		// Error is already printed in Execute()
		os.Exit(1)
	}
}
Example #14
package circuitbreaker

import (
	"time"

	cb "github.com/andreas/circuitbreaker"
	"github.com/cenkalti/backoff"
	"github.com/facebookgo/clock"
)

var (
	defaultClock = clock.New() // Used for testing
)

type DefaultCircuit struct {
	disabled bool
	circuit  *cb.Breaker
}

func (r *DefaultCircuit) Open() bool {
	if r.disabled {
		return false
	}
	return !r.circuit.Ready()
}

func (r *DefaultCircuit) Result(err error) {
	if err != nil {
		r.circuit.Fail()
	} else {
		r.circuit.Success()
	}
}
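A hedged sketch tying Open and Result together around a guarded call; callWithCircuit and doWork are illustrative names, not part of the package (errors import assumed):

func callWithCircuit(c *DefaultCircuit, doWork func() error) error {
	if c.Open() {
		return errors.New("circuit open, request rejected")
	}
	err := doWork()
	c.Result(err) // record success or failure with the underlying breaker
	return err
}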