func (mc *managersCluster) addAgents(count int) error { var addrs []api.Peer for _, m := range mc.ms { addrs = append(addrs, api.Peer{Addr: m.addr}) } for i := 0; i < count; i++ { asConfig, err := mc.tc.NewNodeConfig(ca.AgentRole) if err != nil { return err } managers := remotes.NewRemotes(addrs...) id := strconv.Itoa(rand.Int()) a, err := agent.New(&agent.Config{ Hostname: "hostname_" + id, Managers: managers, Executor: &NoopExecutor{}, Credentials: asConfig.ClientTLSCreds, }) if err != nil { return err } if err := a.Start(context.Background()); err != nil { return err } mc.agents = append(mc.agents, a) } return nil }
func (n *Node) runAgent(ctx context.Context, db *bolt.DB, creds credentials.TransportCredentials, ready chan<- struct{}) error { select { case <-ctx.Done(): case <-n.remotes.WaitSelect(ctx): } if ctx.Err() != nil { return ctx.Err() } a, err := agent.New(&agent.Config{ Hostname: n.config.Hostname, Managers: n.remotes, Executor: n.config.Executor, DB: db, NotifyNodeChange: n.notifyNodeChange, Credentials: creds, }) if err != nil { return err } if err := a.Start(ctx); err != nil { return err } n.Lock() n.agent = a n.Unlock() defer func() { n.Lock() n.agent = nil n.Unlock() }() go func() { <-a.Ready() close(ready) }() // todo: manually call stop on context cancellation? return a.Err(context.Background()) }
func (mc *managersCluster) addAgents(count int) error { var addrs []api.Peer for _, m := range mc.ms { addrs = append(addrs, api.Peer{Addr: m.addr}) } for i := 0; i < count; i++ { asConfig, err := mc.tc.NewNodeConfig(ca.AgentRole) if err != nil { return err } managers := picker.NewRemotes(addrs...) peer, err := managers.Select() if err != nil { return err } conn, err := grpc.Dial(peer.Addr, grpc.WithPicker(picker.NewPicker(managers)), grpc.WithTransportCredentials(asConfig.ClientTLSCreds)) if err != nil { return err } id := strconv.Itoa(rand.Int()) a, err := agent.New(&agent.Config{ Hostname: "hostname_" + id, Managers: managers, Executor: &NoopExecutor{}, Conn: conn, }) if err != nil { return err } if err := a.Start(context.Background()); err != nil { return err } mc.agents = append(mc.agents, a) } return nil }
// runAgent waits until the node has some path to a manager — either a
// selectable remote address or an incoming control-socket connection —
// then creates and starts the agent. ready is closed once the agent
// reports Ready; the call returns the agent's terminal error.
func (n *Node) runAgent(ctx context.Context, db *bolt.DB, creds credentials.TransportCredentials, ready chan<- struct{}) error {
	// waitCtx bounds only the control-socket listener; it is cancelled
	// as soon as we stop waiting for a peer below.
	waitCtx, waitCancel := context.WithCancel(ctx)
	// NOTE(review): WaitSelect presumably signals when a remote manager
	// becomes selectable — confirm against the remotes implementation.
	remotesCh := n.remotes.WaitSelect(ctx)
	controlCh := n.ListenControlSocket(waitCtx)

waitPeer:
	for {
		select {
		case <-ctx.Done():
			break waitPeer
		case <-remotesCh:
			break waitPeer
		case conn := <-controlCh:
			// Only a non-nil connection counts as a peer dialing in;
			// nil values keep us waiting.
			if conn != nil {
				break waitPeer
			}
		}
	}

	// Stop the control-socket listener now that waiting is over.
	waitCancel()

	// Non-blocking check: distinguish "peer found" from "ctx cancelled",
	// since both break out of the loop above.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	a, err := agent.New(&agent.Config{
		Hostname:         n.config.Hostname,
		ConnBroker:       n.connBroker,
		Executor:         n.config.Executor,
		DB:               db,
		NotifyNodeChange: n.notifyNodeChange,
		Credentials:      creds,
	})
	if err != nil {
		return err
	}
	if err := a.Start(ctx); err != nil {
		return err
	}

	// Publish the running agent on the node; clear it on exit so
	// observers never see a dead agent.
	n.Lock()
	n.agent = a
	n.Unlock()

	defer func() {
		n.Lock()
		n.agent = nil
		n.Unlock()
	}()

	// Close ready once the agent is fully up.
	go func() {
		<-a.Ready()
		close(ready)
	}()

	// todo: manually call stop on context cancellation?
	return a.Err(context.Background())
}