Example #1
func (*trySuite) TestExtraResultsAreClosed(c *gc.C) {
	try := parallel.NewTry(0, nil)
	begin := make([]chan struct{}, 4)
	results := make([]*closeResult, len(begin))
	for i := range begin {
		begin[i] = make(chan struct{})
		results[i] = &closeResult{make(chan struct{})}
		i := i // capture the loop variable for the closure
		try.Start(func(<-chan struct{}) (io.Closer, error) {
			<-begin[i]
			return results[i], nil
		})
	}
	begin[0] <- struct{}{}
	val, err := try.Result()
	c.Assert(err, gc.IsNil)
	c.Assert(val, gc.Equals, results[0])

	timeout := time.After(testing.ShortWait)
	for i, r := range results[1:] {
		begin[i+1] <- struct{}{}
		select {
		case <-r.closed:
		case <-timeout:
			c.Fatalf("timed out waiting for close")
		}
	}
	select {
	case <-results[0].closed:
		c.Fatalf("result was inappropriately closed")
	case <-time.After(testing.ShortWait):
	}
}
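Example #1 relies on a closeResult helper that is not reproduced on this page. A minimal sketch consistent with how the test uses it (an io.Closer that signals by closing a channel) might be:

// closeResult is an io.Closer that reports when Close is called
// by closing its channel. Sketch only; the real helper lives in
// the parallel package's test file.
type closeResult struct {
	closed chan struct{}
}

func (r *closeResult) Close() error {
	close(r.closed)
	return nil
}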
Example #2
func (*trySuite) TestMaxParallel(c *gc.C) {
	try := parallel.NewTry(3, nil)
	var (
		mu    sync.Mutex
		count int
		max   int
	)

	for i := 0; i < 10; i++ {
		try.Start(func(<-chan struct{}) (io.Closer, error) {
			mu.Lock()
			if count++; count > max {
				max = count
			}
			c.Check(count, gc.Not(jc.GreaterThan), 3)
			mu.Unlock()
			time.Sleep(20 * time.Millisecond)
			mu.Lock()
			count--
			mu.Unlock()
			return result("hello"), nil
		})
	}
	r, err := try.Result()
	c.Assert(err, gc.IsNil)
	c.Assert(r, gc.Equals, result("hello"))
	mu.Lock()
	defer mu.Unlock()
	c.Assert(max, gc.Equals, 3)
}
Example #3
func (*trySuite) TestOneSuccess(c *gc.C) {
	try := parallel.NewTry(0, nil)
	try.Start(tryFunc(0, result("hello"), nil))
	val, err := try.Result()
	c.Assert(err, gc.IsNil)
	c.Assert(val, gc.Equals, result("hello"))
}
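Several of these tests also depend on two small helpers, result and tryFunc, that are not shown here. A plausible sketch, inferred from how the tests use them (a string-based io.Closer, and a factory returning a try function that sleeps and then yields a fixed value and error):

import (
	"io"
	"time"
)

// result is a trivial io.Closer used as a test value.
type result string

func (result) Close() error { return nil }

// tryFunc builds a try function that waits for the given delay
// and then returns the supplied value and error.
func tryFunc(delay time.Duration, val io.Closer, err error) func(<-chan struct{}) (io.Closer, error) {
	return func(<-chan struct{}) (io.Closer, error) {
		time.Sleep(delay)
		return val, err
	}
}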
Example #4
func (*trySuite) TestOutOfOrderResults(c *gc.C) {
	try := parallel.NewTry(0, nil)
	try.Start(tryFunc(50*time.Millisecond, result("first"), nil))
	try.Start(tryFunc(10*time.Millisecond, result("second"), nil))
	r, err := try.Result()
	c.Assert(err, gc.IsNil)
	c.Assert(r, gc.Equals, result("second"))
}
Example #5
func (*trySuite) TestCloseTwice(c *gc.C) {
	try := parallel.NewTry(0, nil)
	try.Close()
	try.Close()
	val, err := try.Result()
	c.Assert(val, gc.IsNil)
	c.Assert(err, gc.IsNil)
}
Example #6
// waitSSH waits for the instance to be assigned a routable
// address, then waits until we can connect to it via SSH.
//
// waitSSH attempts to connect to all addresses returned by the
// instance in parallel; the first attempt to succeed wins. We
// verify that a private address belongs to the correct machine
// by checking for a file on the machine that contains the
// machine's nonce. The "checkHostScript" is a bash script
// that performs this file check.
func waitSSH(ctx environs.BootstrapContext, interrupted <-chan os.Signal, client ssh.Client, checkHostScript string, inst addresser, timeout config.SSHTimeoutOpts) (addr string, err error) {
	globalTimeout := time.After(timeout.Timeout)
	pollAddresses := time.NewTimer(0)

	// checker checks each address in a loop, in parallel,
	// until one succeeds, the global timeout is reached,
	// or the tomb is killed.
	checker := parallelHostChecker{
		Try:             parallel.NewTry(0, nil),
		client:          client,
		stderr:          ctx.GetStderr(),
		active:          make(map[instance.Address]chan struct{}),
		checkDelay:      timeout.RetryDelay,
		checkHostScript: checkHostScript,
	}
	defer checker.Kill()

	fmt.Fprintln(ctx.GetStderr(), "Waiting for address")
	for {
		select {
		case <-pollAddresses.C:
			pollAddresses.Reset(timeout.AddressesDelay)
			if err := inst.Refresh(); err != nil {
				return "", fmt.Errorf("refreshing addresses: %v", err)
			}
			addresses, err := inst.Addresses()
			if err != nil {
				return "", fmt.Errorf("getting addresses: %v", err)
			}
			checker.UpdateAddresses(addresses)
		case <-globalTimeout:
			checker.Close()
			lastErr := checker.Wait()
			format := "waited for %v "
			args := []interface{}{timeout.Timeout}
			if len(checker.active) == 0 {
				format += "without getting any addresses"
			} else {
				format += "without being able to connect"
			}
			if lastErr != nil && lastErr != parallel.ErrStopped {
				format += ": %v"
				args = append(args, lastErr)
			}
			return "", fmt.Errorf(format, args...)
		case <-interrupted:
			return "", fmt.Errorf("interrupted")
		case <-checker.Dead():
			result, err := checker.Result()
			if err != nil {
				return "", err
			}
			return result.(*hostChecker).addr.Value, nil
		}
	}
}
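UpdateAddresses is defined elsewhere in the project. A sketch of the idea, starting one checker per address not seen before and recording it in the active map so the global-timeout branch can tell whether any addresses ever appeared (the hostChecker type and its loop method are assumptions based on the snippet above):

func (p *parallelHostChecker) UpdateAddresses(addrs []instance.Address) {
	for _, addr := range addrs {
		if _, ok := p.active[addr]; ok {
			continue // already checking this address
		}
		fmt.Fprintf(p.stderr, "Attempting to connect to %s:22\n", addr.Value)
		closed := make(chan struct{})
		hc := &hostChecker{
			addr:            addr,
			client:          p.client,
			checkDelay:      p.checkDelay,
			checkHostScript: p.checkHostScript,
			closed:          closed,
		}
		p.active[addr] = closed
		// loop must match the signature Try.Start expects:
		// func(<-chan struct{}) (io.Closer, error).
		p.Try.Start(hc.loop)
	}
}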
Example #7
func Open(info *Info, opts DialOpts) (*State, error) {
	if len(info.Addrs) == 0 {
		return nil, fmt.Errorf("no API addresses to connect to")
	}
	pool := x509.NewCertPool()
	xcert, err := cert.ParseCert(info.CACert)
	if err != nil {
		return nil, err
	}
	pool.AddCert(xcert)

	// Dial all addresses at reasonable intervals.
	try := parallel.NewTry(0, nil)
	defer try.Kill()
	for _, addr := range info.Addrs {
		err := dialWebsocket(addr, opts, pool, try)
		if err == parallel.ErrStopped {
			break
		}
		if err != nil {
			return nil, err
		}
		select {
		case <-time.After(opts.DialAddressInterval):
		case <-try.Dead():
		}
	}
	try.Close()
	result, err := try.Result()
	if err != nil {
		return nil, err
	}
	conn := result.(*websocket.Conn)
	logger.Infof("connection established to %q", conn.RemoteAddr())

	client := rpc.NewConn(jsoncodec.NewWebsocket(conn), nil)
	client.Start()
	st := &State{
		client:     client,
		conn:       conn,
		addr:       conn.Config().Location.Host,
		serverRoot: "https://" + conn.Config().Location.Host,
		tag:        info.Tag,
		password:   info.Password,
		certPool:   pool,
	}
	if info.Tag != "" || info.Password != "" {
		if err := st.Login(info.Tag, info.Password, info.Nonce); err != nil {
			conn.Close()
			return nil, err
		}
	}
	st.broken = make(chan struct{})
	go st.heartbeatMonitor()
	return st, nil
}
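dialWebsocket is not shown here. A minimal sketch of how it might register one dial attempt with the Try, using the x/net websocket package plus the project's parallel package, and omitting the real dialer's retry handling:

import (
	"crypto/tls"
	"crypto/x509"
	"io"

	"golang.org/x/net/websocket"
)

func dialWebsocket(addr string, opts DialOpts, rootCAs *x509.CertPool, try *parallel.Try) error {
	// "wss" gives a TLS connection verified against the CA pool;
	// the real dialer may need extra TLS settings (e.g. ServerName).
	cfg, err := websocket.NewConfig("wss://"+addr+"/", "http://localhost/")
	if err != nil {
		return err
	}
	cfg.TlsConfig = &tls.Config{RootCAs: rootCAs}
	// Start returns parallel.ErrStopped once the Try has a result,
	// which is why the caller breaks out of its dial loop on that error.
	return try.Start(func(stop <-chan struct{}) (io.Closer, error) {
		conn, err := websocket.DialConfig(cfg)
		if err != nil {
			return nil, err
		}
		return conn, nil
	})
}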
Example #8
func (*trySuite) TestStartReturnsErrorAfterClose(c *gc.C) {
	try := parallel.NewTry(0, nil)
	expectErr := errors.New("foo")
	err := try.Start(tryFunc(0, nil, expectErr))
	c.Assert(err, gc.IsNil)
	try.Close()
	err = try.Start(tryFunc(0, result("goodbye"), nil))
	c.Assert(err, gc.Equals, parallel.ErrClosed)
	// Wait for the first try to deliver its result
	time.Sleep(testing.ShortWait)
	try.Kill()
	err = try.Wait()
	c.Assert(err, gc.Equals, expectErr)
}
Example #9
func (*trySuite) TestTriesAreStopped(c *gc.C) {
	try := parallel.NewTry(0, nil)
	stopped := make(chan struct{})
	try.Start(func(stop <-chan struct{}) (io.Closer, error) {
		<-stop
		stopped <- struct{}{}
		return nil, parallel.ErrStopped
	})
	try.Start(tryFunc(0, result("hello"), nil))
	val, err := try.Result()
	c.Assert(err, gc.IsNil)
	c.Assert(val, gc.Equals, result("hello"))

	select {
	case <-stopped:
	case <-time.After(testing.LongWait):
		c.Fatalf("timed out waiting for stop")
	}
}
Example #10
func (*trySuite) TestOneFailure(c *gc.C) {
	try := parallel.NewTry(0, nil)
	expectErr := errors.New("foo")
	err := try.Start(tryFunc(0, nil, expectErr))
	c.Assert(err, gc.IsNil)
	select {
	case <-try.Dead():
		c.Fatalf("try died before it should")
	case <-time.After(testing.ShortWait):
	}
	try.Close()
	select {
	case <-try.Dead():
	case <-time.After(testing.LongWait):
		c.Fatalf("timed out waiting for Try to complete")
	}
	val, err := try.Result()
	c.Assert(val, gc.IsNil)
	c.Assert(err, gc.Equals, expectErr)
}
Example #11
func (*trySuite) TestAllConcurrent(c *gc.C) {
	try := parallel.NewTry(0, nil)
	started := make(chan chan struct{})
	for i := 0; i < 10; i++ {
		try.Start(func(<-chan struct{}) (io.Closer, error) {
			reply := make(chan struct{})
			started <- reply
			<-reply
			return result("hello"), nil
		})
	}
	timeout := time.After(testing.LongWait)
	for i := 0; i < 10; i++ {
		select {
		case reply := <-started:
			reply <- struct{}{}
		case <-timeout:
			c.Fatalf("timed out")
		}
	}
}
Example #12
func (*trySuite) TestEverything(c *gc.C) {
	try := parallel.NewTry(5, gradedErrorCombine)
	tries := []struct {
		startAt time.Duration
		wait    time.Duration
		val     result
		err     error
	}{{
		wait: 30 * time.Millisecond,
		err:  gradedError(3),
	}, {
		startAt: 10 * time.Millisecond,
		wait:    20 * time.Millisecond,
		val:     result("result 1"),
	}, {
		startAt: 20 * time.Millisecond,
		wait:    10 * time.Millisecond,
		val:     result("result 2"),
	}, {
		startAt: 20 * time.Millisecond,
		wait:    5 * time.Second,
		val:     result("delayed result"),
	}, {
		startAt: 5 * time.Millisecond,
		err:     gradedError(4),
	}}
	for _, t := range tries {
		t := t
		go func() {
			time.Sleep(t.startAt)
			try.Start(tryFunc(t.wait, t.val, t.err))
		}()
	}
	val, err := try.Result()
	if val != result("result 1") && val != result("result 2") {
		c.Errorf(`expected "result 1" or "result 2", got %#v`, val)
	}
	c.Assert(err, gc.IsNil)
}
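gradedError and gradedErrorCombine are test helpers defined elsewhere. A sketch consistent with their use in Examples #12 and #13 (an integer-graded error, and a combiner that keeps whichever error has the higher grade):

import "fmt"

// gradedError ranks an error by an integer grade.
type gradedError int

func (e gradedError) Error() string {
	return fmt.Sprintf("error with importance %d", int(e))
}

// gradedErrorCombine keeps the error with the higher grade.
func gradedErrorCombine(err0, err1 error) error {
	if err0 == nil || err0.(gradedError) < err1.(gradedError) {
		err0 = err1
	}
	return err0
}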
Example #13
func (*trySuite) TestErrorCombine(c *gc.C) {
	// Use maxParallel=1 to guarantee that all errors are processed sequentially.
	try := parallel.NewTry(1, func(err0, err1 error) error {
		if err0 == nil {
			err0 = &multiError{}
		}
		err0.(*multiError).errs = append(err0.(*multiError).errs, int(err1.(gradedError)))
		return err0
	})
	errors := []gradedError{3, 2, 4, 0, 5, 5, 3}
	for _, err := range errors {
		err := err
		try.Start(func(<-chan struct{}) (io.Closer, error) {
			return nil, err
		})
	}
	try.Close()
	val, err := try.Result()
	c.Assert(val, gc.IsNil)
	grades := err.(*multiError).errs
	sort.Ints(grades)
	c.Assert(grades, gc.DeepEquals, []int{0, 2, 3, 3, 4, 5, 5})
}
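The multiError type asserted on above collects every grade passed through the combiner; a minimal sketch matching that use:

import "fmt"

// multiError accumulates the integer grades of combined errors.
type multiError struct {
	errs []int
}

func (e *multiError) Error() string {
	return fmt.Sprintf("%d errors: %v", len(e.errs), e.errs)
}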
Example #14
func (*trySuite) TestStartBlocksForMaxParallel(c *gc.C) {
	try := parallel.NewTry(3, nil)

	started := make(chan struct{})
	begin := make(chan struct{})
	go func() {
		for i := 0; i < 6; i++ {
			err := try.Start(func(<-chan struct{}) (io.Closer, error) {
				<-begin
				return nil, fmt.Errorf("an error")
			})
			started <- struct{}{}
			if i < 5 {
				c.Check(err, gc.IsNil)
			} else {
				c.Check(err, gc.Equals, parallel.ErrClosed)
			}
		}
		close(started)
	}()
	// Check we can start the first three.
	timeout := time.After(testing.LongWait)
	for i := 0; i < 3; i++ {
		select {
		case <-started:
		case <-timeout:
			c.Fatalf("timed out")
		}
	}
	// Check we block when going above maxParallel.
	timeout = time.After(testing.ShortWait)
	select {
	case <-started:
		c.Fatalf("Start did not block")
	case <-timeout:
	}

	// Unblock two attempts.
	begin <- struct{}{}
	begin <- struct{}{}

	// Check we can start another two.
	timeout = time.After(testing.LongWait)
	for i := 0; i < 2; i++ {
		select {
		case <-started:
		case <-timeout:
			c.Fatalf("timed out")
		}
	}

	// Check we block again when going above maxParallel.
	timeout = time.After(testing.ShortWait)
	select {
	case <-started:
		c.Fatalf("Start did not block")
	case <-timeout:
	}

	// Close the Try - the last request should be discarded,
	// unblocking the last remaining Start request.
	try.Close()

	timeout = time.After(testing.LongWait)
	select {
	case <-started:
	case <-timeout:
		c.Fatalf("Start did not unblock after Close")
	}

	// Ensure all checks are completed
	select {
	case _, ok := <-started:
		c.Assert(ok, gc.Equals, false)
	case <-timeout:
		c.Fatalf("Start goroutine did not finish")
	}
}
Example #15
File: api.go Project: jameinel/core
// newAPIFromStore implements the bulk of NewAPIClientFromName
// but is separate for testing purposes.
func newAPIFromStore(envName string, store configstore.Storage, apiOpen apiOpenFunc) (apiState, error) {
	// Try to read the default environment configuration file.
	// If it doesn't exist, we carry on in case
	// there's some environment info for that environment.
	// This enables people to copy environment files
	// into their .juju/environments directory and have
	// them be directly useful with no further configuration changes.
	envs, err := environs.ReadEnvirons("")
	if err == nil {
		if envName == "" {
			envName = envs.Default
		}
		if envName == "" {
			return nil, fmt.Errorf("no default environment found")
		}
	} else if !environs.IsNoEnv(err) {
		return nil, err
	}

	// Try to connect to the API concurrently using two different
	// possible sources of truth for the API endpoint. Our
	// preference is for the API endpoint cached in the API info,
	// because we know that without needing to access any remote
	// provider. However, the addresses stored there may no longer
	// be current (and the network connection may take a very long
	// time to time out) so we also try to connect using information
	// found from the provider. We only start to make that
	// connection after some suitable delay, so that in the
	// hopefully usual case, we will make the connection to the API
	// and never hit the provider. By preference we use provider
	// attributes from the config store, but for backward
	// compatibility reasons, we fall back to information from
	// ReadEnvirons if that does not exist.
	chooseError := func(err0, err1 error) error {
		if err0 == nil {
			return err1
		}
		if errorImportance(err0) < errorImportance(err1) {
			err0, err1 = err1, err0
		}
		logger.Warningf("discarding API open error: %v", err1)
		return err0
	}
	try := parallel.NewTry(0, chooseError)

	info, err := store.ReadInfo(envName)
	if err != nil && !errors.IsNotFound(err) {
		return nil, err
	}
	var delay time.Duration
	if info != nil && len(info.APIEndpoint().Addresses) > 0 {
		logger.Debugf("trying cached API connection settings")
		try.Start(func(stop <-chan struct{}) (io.Closer, error) {
			return apiInfoConnect(store, info, apiOpen, stop)
		})
		// Delay the config connection until we've spent
		// some time trying to connect to the cached info.
		delay = providerConnectDelay
	} else {
		logger.Debugf("no cached API connection settings found")
	}
	try.Start(func(stop <-chan struct{}) (io.Closer, error) {
		cfg, err := getConfig(info, envs, envName)
		if err != nil {
			return nil, err
		}
		return apiConfigConnect(cfg, apiOpen, stop, delay)
	})
	try.Close()
	val0, err := try.Result()
	if err != nil {
		if ierr, ok := err.(*infoConnectError); ok {
			// unwrap the infoConnectError to expose the underlying error:
			err = ierr.error
		}
		return nil, err
	}

	st := val0.(apiState)
	// Even though we are about to update API addresses based on
	// APIHostPorts in cacheChangedAPIAddresses, we first cache the
	// addresses based on the provider lookup. This is because older API
	// servers didn't return their HostPort information on Login, and we
	// still want to cache our connection information to them.
	if cachedInfo, ok := st.(apiStateCachedInfo); ok {
		st = cachedInfo.apiState
		if cachedInfo.cachedInfo != nil && info != nil {
			// Cache the connection settings only if we used the
			// environment config, but any errors are just logged
			// as warnings, because they're not fatal.
			err = cacheAPIInfo(info, cachedInfo.cachedInfo)
			if err != nil {
				logger.Warningf("cannot cache API connection settings: %v", err.Error())
			} else {
				logger.Infof("updated API connection settings cache")
			}
		}
	}
	// Update API addresses if they've changed. Error is non-fatal.
	if localerr := cacheChangedAPIAddresses(info, st); localerr != nil {
		logger.Warningf("cannot failed to cache API addresses: %v", localerr)
	}
	return st, nil
}
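errorImportance, referenced by chooseError above, is defined elsewhere in api.go. A plausible sketch of the ranking (the exact ordering is an assumption): a nil error matters least, a not-found error less than a real connection failure, and a failure against possibly-stale cached addresses less than one against fresh provider-derived addresses.

// errorImportance ranks errors so chooseError can keep the most
// informative one. The ordering below is an assumption.
func errorImportance(err error) int {
	if err == nil {
		return 0
	}
	if errors.IsNotFound(err) {
		// Missing environment info is less interesting than a
		// failure from an actual connection attempt.
		return 1
	}
	if _, ok := err.(*infoConnectError); ok {
		// Errors from possibly-stale cached addresses matter less
		// than errors from freshly discovered addresses.
		return 2
	}
	return 3
}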