Example 1
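NewDevLogger boots an RFC 5424 syslog server, registers it as DevLoggerService with a 30-second TTL via scale.RegisterServiceLocal, and starts a self keep-alive at half that TTL; if registration fails it shuts the server down again.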
// NewDevLogger returns a new DevLogger.
func NewDevLogger(ownIP string) (*DevLogger, error) {
	if DisableFlag.Get() {
		return nil, nil
	}
	dl := &DevLogger{
		server:  syslog.NewServer(),
		channel: make(syslog.LogPartsChannel),
	}
	dl.server.SetFormat(syslog.RFC5424)
	dl.server.SetHandler(syslog.NewChannelHandler(dl.channel))
	err := dl.server.ListenTCP("0.0.0.0:" + DevLoggerListenPortFlag.Get())
	if err != nil {
		return nil, err
	}
	err = dl.server.Boot()
	if err != nil {
		return nil, err
	}
	go dl.worker()

	// Register service.
	instanceID := InstanceIDFlag.Get()
	serviceAddr := ownIP + ":" + DevLoggerListenPortFlag.Get()
	serviceTTL := 30 * time.Second
	err = scale.RegisterServiceLocal(
		DevLoggerService, instanceID, serviceAddr, serviceTTL)
	if err != nil {
		dl.server.Kill()
		close(dl.channel)
		return nil, err
	}
	dl.serviceSKA = scale.NewServiceSelfKeepAlive(instanceID, serviceTTL/2)

	return dl, nil
}
Example 2
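RegisterThisHost registers the current host address as a service instance, then claims a per-node swarm:// resource to detect a duplicate Lever host on the same node, and returns self keep-alives for both the service registration and the resource session.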
// RegisterThisHost registers the current swarm node against the current
// service.
func RegisterThisHost(
	hostAddr string) (
	serviceSka *scale.SelfKeepAlive, sessionSka *scale.SelfKeepAlive,
	err error) {
	service := leverutil.ServiceFlag.Get()
	instanceID := leverutil.InstanceIDFlag.Get()
	err = scale.RegisterServiceLocal(
		service, instanceID, hostAddr, 30*time.Second)
	if err != nil {
		return nil, nil, err
	}
	node, err := scale.GetOwnNodeName()
	if err != nil {
		return nil, nil, err
	}
	res, success, err := scale.RegisterResourceLocal(
		"swarm://"+node, 30*time.Second)
	if err != nil {
		return nil, nil, err
	}
	if !success {
		return nil, nil, fmt.Errorf("duplicate Lever host on this node")
	}
	return scale.NewServiceSelfKeepAlive(instanceID, 15*time.Second),
		scale.NewSessionSelfKeepAlive(res.GetSessionID(), 15*time.Second),
		nil
}
Example 3
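registerAsService registers a LeverInstance with Consul under its serving ID and proxy address; on failure it logs the error and closes the instance, otherwise it keeps the registration alive every 15 seconds.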
func (instance *LeverInstance) registerAsService(proxyInAddr string) {
	instanceExpiry := InstanceExpiryTimeFlag.Get()
	err := scale.RegisterServiceLocal(
		instance.servingID, instance.instanceID, proxyInAddr, instanceExpiry)
	if err != nil {
		instance.logger.WithFields("err", err).Error(
			"Error registering instance service with Consul")
		instance.Close(true)
		return
	}
	instance.serviceSKA = scale.NewServiceSelfKeepAlive(
		instance.instanceID, 15*time.Second)
}
Example 4
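NewFleetTracker attaches the tracker to the given gRPC server, registers it as FleetTrackerService with a 30-second TTL, and keeps the registration alive at half that TTL.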
// NewFleetTracker returns a new FleetTracker.
func NewFleetTracker(
	grpcServer *grpc.Server, grpcPool *scale.GRPCPool, docker *dockerapi.Client,
	grpcAddr string) (tracker *FleetTracker, err error) {
	tracker = &FleetTracker{
		grpcPool: grpcPool,
		docker:   docker,
		services: make(map[string]*LoadTracker),
	}
	RegisterFleetTrackerServer(grpcServer, tracker)
	instanceID := InstanceIDFlag.Get()
	serviceTTL := 30 * time.Second
	err = scale.RegisterServiceLocal(
		FleetTrackerService, instanceID, grpcAddr, serviceTTL)
	if err != nil {
		return nil, err
	}
	tracker.serviceSKA = scale.NewServiceSelfKeepAlive(instanceID, serviceTTL/2)
	return tracker, nil
}
Example 5
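NewManager registers the ManagerService and a manager resource, then pre-creates the admin environment's network as a workaround for a Docker networking bug.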
// NewManager returns a new Manager.
func NewManager(
	grpcServer *grpc.Server, grpcPool *scale.GRPCPool,
	docker *dockerapi.Client, grpcAddr string, proxyInAddr string) (
	manager *Manager, err error) {
	manager = &Manager{
		grpcPool:     grpcPool,
		docker:       docker,
		proxyInAddr:  proxyInAddr,
		environments: make(map[string]*envEntry),
		servingIDs:   make(map[string]map[string]struct{}),
		logger:       leverutil.GetLogger(PackageName, "Manager"),
	}
	RegisterManagerServer(grpcServer, manager)
	managerInstanceID := InstanceIDFlag.Get()
	serviceTTL := 30 * time.Second
	err = scale.RegisterServiceLocal(
		ManagerService, managerInstanceID, grpcAddr, serviceTTL)
	if err != nil {
		return nil, err
	}
	manager.serviceSKA = scale.NewServiceSelfKeepAlive(
		managerInstanceID, serviceTTL/2)
	err = manager.registerManagerResource(grpcAddr)
	if err != nil {
		return nil, err
	}
	// TODO: This is a workaround for a Docker bug where a network connect
	//       causes active connections to be closed. Creating the admin env's
	//       network at request time would cause that request to fail (and the
	//       client to retry), so it is created ahead of time here.
	adminEntry, err := manager.getEnvironment(core.AdminEnvFlag.Get())
	if err != nil {
		manager.logger.WithFields("err", err).Warning(
			"Admin env workaround failed")
	} else {
		adminEntry.envLock.Unlock()
	}
	return manager, nil
}
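All five examples share the same register-then-keep-alive pattern: call scale.RegisterServiceLocal with a TTL, then start a scale.NewServiceSelfKeepAlive at half that TTL so the registration does not expire while the process is healthy. A minimal sketch of that pattern, using only the calls shown above; the function name and parameters are placeholders, and the scale and time imports are assumed:

func registerWithKeepAlive(
	service, instanceID, addr string) (*scale.SelfKeepAlive, error) {
	serviceTTL := 30 * time.Second
	// Register this instance with the local service registry for one TTL.
	err := scale.RegisterServiceLocal(service, instanceID, addr, serviceTTL)
	if err != nil {
		return nil, err
	}
	// Refresh the registration at half the TTL so it stays alive for as long
	// as the process keeps running.
	return scale.NewServiceSelfKeepAlive(instanceID, serviceTTL/2), nil
}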