Example #1
// AddHost registers a host with serviced. It returns an error if the host
// already exists or if one of the host's IPs is a virtual IP.
func (f *Facade) AddHost(ctx datastore.Context, entity *host.Host) error {
	glog.V(2).Infof("Facade.AddHost: %v", entity)
	exists, err := f.GetHost(ctx, entity.ID)
	if err != nil {
		return err
	}
	if exists != nil {
		return fmt.Errorf("host already exists: %s", entity.ID)
	}

	// only allow hostid of master if SERVICED_REGISTRY is false
	if !docker.UseRegistry() {
		masterHostID, err := utils.HostID()
		if err != nil {
			return fmt.Errorf("unable to retrieve hostid %s: %s", entity.ID, err)
		}

		if entity.ID != masterHostID {
			return fmt.Errorf("SERVICED_REGISTRY is false and hostid %s does not match master %s", entity.ID, masterHostID)
		}
	}

	// validate Pool exists
	pool, err := f.GetResourcePool(ctx, entity.PoolID)
	if err != nil {
		return fmt.Errorf("error verifying pool exists: %v", err)
	}
	if pool == nil {
		return fmt.Errorf("error creating host, pool %s does not exists", entity.PoolID)
	}

	// verify that there are no virtual IPs with the given host IP(s)
	for _, ip := range entity.IPs {
		if exists, err := f.HasIP(ctx, pool.ID, ip.IPAddress); err != nil {
			return fmt.Errorf("error verifying ip %s exists: %v", ip.IPAddress, err)
		} else if exists {
			return fmt.Errorf("pool already has a virtual ip %s", ip.IPAddress)
		}
	}

	ec := newEventCtx()
	err = nil
	// wrap the deferred call in a closure so afterEvent sees the final value of err,
	// not the value it held when defer was evaluated
	defer func() { f.afterEvent(afterHostAdd, ec, entity, err) }()
	if err = f.beforeEvent(beforeHostAdd, ec, entity); err != nil {
		return err
	}

	now := time.Now()
	entity.CreatedAt = now
	entity.UpdatedAt = now

	if err = f.hostStore.Put(ctx, host.HostKey(entity.ID), entity); err != nil {
		return err
	}
	err = zkAPI(f).AddHost(entity)
	return err
}
Example #2
// NewHostAgent creates a new HostAgent from the given AgentOptions
func NewHostAgent(options AgentOptions) (*HostAgent, error) {
	// save off the arguments
	agent := &HostAgent{}
	agent.dockerRegistry = options.DockerRegistry
	agent.poolID = options.PoolID
	agent.master = options.Master
	agent.uiport = options.UIPort
	agent.rpcport = options.RPCPort
	agent.dockerDNS = options.DockerDNS
	agent.varPath = options.VarPath
	agent.mount = options.Mount
	agent.fsType = "rsync"
	agent.mux = options.Mux
	agent.useTLS = options.UseTLS
	agent.maxContainerAge = options.MaxContainerAge
	agent.virtualAddressSubnet = options.VirtualAddressSubnet
	agent.servicedChain = iptables.NewChain("SERVICED")

	dsn := getZkDSN(options.Zookeepers)
	basePath := ""
	zkClient, err := coordclient.New("zookeeper", dsn, basePath, nil)
	if err != nil {
		return nil, err
	}
	agent.zkClient = zkClient

	hostID, err := utils.HostID()
	if err != nil {
		return nil, fmt.Errorf("could not get host ID: %v", err)
	}
	agent.hostID = hostID
	agent.currentServices = make(map[string]*exec.Cmd)

	agent.proxyRegistry = proxy.NewDefaultProxyRegistry()
	return agent, err

	/* FIXME: this should work here

	addr, err := net.ResolveTCPAddr("tcp", processForwarderAddr)
	if err != nil {
		return nil, err
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return nil, err
	}

	sio := shell.NewProcessForwarderServer(proxyOptions.servicedEndpoint)
	sio.Handle("/", http.FileServer(http.Dir("/serviced/www/")))
	go http.Serve(listener, sio)
	c := &ControllerP{
		processForwarderListener: listener,
	}
	*/

}
Example #3
// serviced service logs { SERVICEID | SERVICENAME | DOCKERID | POOL/...PARENTNAME.../SERVICENAME/INSTANCE }
func (c *ServicedCli) cmdServiceLogs(ctx *cli.Context) error {
	// verify args
	args := ctx.Args()
	if len(args) < 1 {
		if !ctx.Bool("help") {
			fmt.Fprintf(os.Stderr, "Incorrect Usage.\n\n")
		}
		cli.ShowSubcommandHelp(ctx)
		return nil
	}

	rs, err := c.searchForRunningService(args[0])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return err
	}

	// docker logs on remote host if service is running on remote
	myHostID, err := utils.HostID()
	if err != nil {
		return err
	}

	if rs.HostID != myHostID {
		hosts, err := c.driver.GetHosts()
		if err != nil {
			return err
		}
		hostmap := make(map[string]host.Host)
		for _, host := range hosts {
			hostmap[host.ID] = host
		}

		cmd := []string{"/usr/bin/ssh", "-t", hostmap[rs.HostID].IPAddr, "--", "serviced", "--endpoint", api.GetOptionsRPCEndpoint(), "service", "logs", args[0]}
		if len(args) > 1 {
			cmd = append(cmd, args[1:]...)
		}

		glog.V(1).Infof("outputting remote logs with: %s\n", cmd)
		return syscall.Exec(cmd[0], cmd[0:], os.Environ())
	}

	// docker logs on local host if service is running locally
	var argv []string
	if len(args) > 2 {
		argv = args[2:]
	}

	if err := dockerclient.Logs(rs.DockerID, argv); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return err
	}

	return nil
}
Example #4
// Attach runs an arbitrary shell command in a running service container
func (a *api) Attach(config AttachConfig) error {
	if hostID, err := utils.HostID(); err != nil {
		return err
	} else if hostID == config.Running.HostID {
		var command []string
		if config.Command != "" {
			command = append([]string{config.Command}, config.Args...)
		} else {
			command = append([]string{}, "/bin/bash")
		}

		return utils.AttachAndExec(config.Running.DockerID, command)
	}

	return fmt.Errorf("container does not reside locally on host")
}
Example #5
// NewStatsReporter creates a new StatsReporter and kicks off the reporting goroutine.
func NewStatsReporter(destination string, interval time.Duration, conn coordclient.Connection) (*StatsReporter, error) {
	hostID, err := utils.HostID()
	if err != nil {
		glog.Errorf("Could not determine host ID.")
		return nil, err
	}
	if conn == nil {
		glog.Errorf("conn cannot be nil")
		return nil, fmt.Errorf("conn cannot be nil")
	}
	sr := StatsReporter{
		destination:         destination,
		closeChannel:        make(chan bool),
		conn:                conn,
		containerRegistries: make(map[registryKey]metrics.Registry),
		hostID:              hostID,
	}

	sr.hostRegistry = metrics.NewRegistry()
	go sr.report(interval)
	return &sr, nil
}
Example #6
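// PostMetric posts a single metric sample, tagged with this host's ID, to the control plane stats endpoint.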
func (a *api) PostMetric(metricName string, metricValue string) (string, error) {
	url := fmt.Sprintf("http://%s/api/metrics/store", options.HostStats)
	timeStamp := time.Now().Unix()
	hostId, err := utils.HostID()
	if err != nil {
		glog.Errorf("Error getting host id, error: %s", err)
		return "", err
	}

	samples := make([]stats.Sample, 1)
	samples[0] = stats.Sample{
		Metric:    metricName,
		Value:     metricValue,
		Timestamp: timeStamp,
		Tags:      map[string]string{"controlplane_host_id": hostId},
	}

	if err := stats.Post(url, samples); err != nil {
		glog.Errorf("could not post stats: %s", err)
		return "", err
	}
	return "Posted metric", nil
}
Example #7
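// startAgent sets up the TCP mux, determines this host's ID and resource pool, connects to
// zookeeper and waits for the storage leader, then starts the host agent and its RPC and stats services.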
func (d *daemon) startAgent() error {
	muxListener, err := createMuxListener()
	if err != nil {
		return err
	}
	mux, err := proxy.NewTCPMux(muxListener)
	if err != nil {
		return err
	}

	agentIP := options.OutboundIP
	if agentIP == "" {
		var err error
		agentIP, err = utils.GetIPAddress()
		if err != nil {
			glog.Fatalf("Failed to acquire ip address: %s", err)
		}
	}

	rpcPort := "0"
	parts := strings.Split(options.Listen, ":")
	if len(parts) > 1 {
		rpcPort = parts[1]
	}

	thisHost, err := host.Build(agentIP, rpcPort, "unknown")
	if err != nil {
		panic(err)
	}

	myHostID, err := utils.HostID()
	if err != nil {
		return fmt.Errorf("HostID failed: %v", err)
	} else if err := validation.ValidHostID(myHostID); err != nil {
		glog.Errorf("invalid hostid: %s", myHostID)
	}

	go func() {
		var poolID string
		for {
			poolID = func() string {
				glog.Infof("Trying to discover my pool...")
				var myHost *host.Host
				masterClient, err := master.NewClient(d.servicedEndpoint)
				if err != nil {
					glog.Errorf("master.NewClient failed (endpoint %+v) : %v", d.servicedEndpoint, err)
					return ""
				}
				defer masterClient.Close()
				myHost, err = masterClient.GetHost(myHostID)
				if err != nil {
					glog.Warningf("masterClient.GetHost %v failed: %v (has this host been added?)", myHostID, err)
					return ""
				}
				poolID = myHost.PoolID
				glog.Infof(" My PoolID: %v", poolID)
				//send updated host info
				updatedHost, err := host.UpdateHostInfo(*myHost)
				if err != nil {
					glog.Infof("Could not send updated host information: %v", err)
					return poolID
				}
				err = masterClient.UpdateHost(updatedHost)
				if err != nil {
					glog.Warningf("Could not update host information: %v", err)
					return poolID
				}
				glog.V(2).Infof("Sent updated host info %#v", updatedHost)
				return poolID
			}()
			if poolID != "" {
				break
			}
			select {
			case <-d.shutdown:
				return
			case <-time.After(5 * time.Second):
				continue
			}
		}

		thisHost.PoolID = poolID

		basePoolPath := "/pools/" + poolID
		dsn := coordzk.NewDSN(options.Zookeepers, time.Second*15).String()
		glog.Infof("zookeeper dsn: %s", dsn)
		zClient, err := coordclient.New("zookeeper", dsn, basePoolPath, nil)
		if err != nil {
			glog.Errorf("failed create a new coordclient: %v", err)
		}
		zzk.InitializeLocalClient(zClient)

		poolBasedConn, err := zzk.GetLocalConnection(zzk.GeneratePoolPath(poolID))
		if err != nil {
			glog.Errorf("Error in getting a connection based on pool %v: %v", poolID, err)
		}

		if options.NFSClient != "0" {
			nfsClient, err := storage.NewClient(thisHost, path.Join(options.VarPath, "volumes"))
			if err != nil {
				glog.Fatalf("could not create an NFS client: %s", err)
			}

			go func() {
				<-d.shutdown
				glog.Infof("shutting down storage client")
				nfsClient.Close()
			}()

			//loop and log waiting for Storage Leader
			nfsDone := make(chan struct{})
			go func() {
				defer close(nfsDone)
				nfsClient.Wait()
			}()
			//wait indefinitely(?) for storage to work before starting
			glog.Info("Waiting for Storage Leader")
			nfsUp := false
			for !nfsUp {
				select {
				case <-nfsDone:
					nfsUp = true
					glog.Info("Found Storage Leader")
					break
				case <-time.After(time.Second * 30):
					glog.Info("Waiting for Storage Leader, will not be available for running services. ")
					continue
				}
			}
		} else {
			glog.Info("NFS Client disabled")
		}

		agentOptions := node.AgentOptions{
			PoolID:               thisHost.PoolID,
			Master:               options.Endpoint,
			UIPort:               options.UIPort,
			RPCPort:              options.RPCPort,
			DockerDNS:            options.DockerDNS,
			VarPath:              options.VarPath,
			Mount:                options.Mount,
			FSType:               options.FSType,
			Zookeepers:           options.Zookeepers,
			Mux:                  mux,
			UseTLS:               options.TLS,
			DockerRegistry:       dockerRegistry,
			MaxContainerAge:      time.Duration(int(time.Second) * options.MaxContainerAge),
			VirtualAddressSubnet: options.VirtualAddressSubnet,
		}
		// creates a zClient that is not pool based!
		hostAgent, err := node.NewHostAgent(agentOptions)
		if err != nil {
			glog.Fatalf("could not create host agent: %v", err)
		}
		d.hostAgent = hostAgent

		d.waitGroup.Add(1)
		go func() {
			hostAgent.Start(d.shutdown)
			glog.Info("Host Agent has shutdown")
			d.waitGroup.Done()
		}()

		// register the API
		glog.V(0).Infoln("registering ControlPlaneAgent service")
		if err = d.rpcServer.RegisterName("ControlPlaneAgent", hostAgent); err != nil {
			glog.Fatalf("could not register ControlPlaneAgent RPC server: %v", err)
		}

		if options.ReportStats {
			statsdest := fmt.Sprintf("http://%s/api/metrics/store", options.HostStats)
			statsduration := time.Duration(options.StatsPeriod) * time.Second
			glog.V(1).Infoln("Staring container statistics reporter")
			statsReporter, err := stats.NewStatsReporter(statsdest, statsduration, poolBasedConn)
			if err != nil {
				glog.Errorf("Error kicking off stats reporter %v", err)
			} else {
				go func() {
					defer statsReporter.Close()
					<-d.shutdown
				}()
			}
		}
	}()

	glog.Infof("agent start staticips: %v [%d]", d.staticIPs, len(d.staticIPs))
	if err = d.rpcServer.RegisterName("Agent", agent.NewServer(d.staticIPs)); err != nil {
		glog.Fatalf("could not register Agent RPC server: %v", err)
	}
	if err != nil {
		glog.Fatalf("Could not start ControlPlane agent: %v", err)
	}

	// TODO: Integrate this server into the rpc server, or something.
	// Currently its only use is for command execution.
	go func() {
		sio := shell.NewProcessExecutorServer(options.Endpoint, dockerRegistry)
		http.ListenAndServe(":50000", sio)
	}()

	return nil
}
Example #8
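// run validates the host ID and docker version, starts the RPC server, the docker registry proxy,
// and the master and/or agent services, then blocks until a signal triggers shutdown.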
func (d *daemon) run() (err error) {
	if d.hostID, err = utils.HostID(); err != nil {
		glog.Fatalf("Could not get host ID: %s", err)
	} else if err := validation.ValidHostID(d.hostID); err != nil {
		glog.Errorf("invalid hostid: %s", d.hostID)
	}

	if currentDockerVersion, err := node.GetDockerVersion(); err != nil {
		glog.Fatalf("Could not get docker version: %s", err)
	} else if minDockerVersion.Compare(currentDockerVersion.Client) < 0 {
		glog.Fatalf("serviced requires docker >= %s", minDockerVersion)
	}

	if _, ok := volume.Registered(options.FSType); !ok {
		glog.Fatalf("no driver registered for %s", options.FSType)
	}

	d.startRPC()
	d.startDockerRegistryProxy()

	if options.Master {
		d.startISVCS()
		if err := d.startMaster(); err != nil {
			glog.Fatal(err)
		}
	}

	if options.Agent {
		if err := d.startAgent(); err != nil {
			glog.Fatal(err)
		}
	}

	signalC := make(chan os.Signal, 10)
	signal.Notify(signalC, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
	sig := <-signalC
	glog.Info("Shutting down due to interrupt")
	close(d.shutdown)

	done := make(chan struct{})
	go func() {
		defer close(done)
		glog.Info("Stopping sub-processes")
		d.waitGroup.Wait()
		glog.Info("Sub-processes have stopped")
	}()

	select {
	case <-done:
		defer glog.Info("Shutdown")
	case <-time.After(60 * time.Second):
		defer glog.Infof("Timeout waiting for shutdown")
	}

	zzk.ShutdownConnections()

	if options.Master {
		switch sig {
		case syscall.SIGHUP:
			glog.Infof("Not shutting down isvcs")
			command := os.Args
			glog.Infof("Reloading by calling syscall.exec for command: %+v\n", command)
			syscall.Exec(command[0], command[0:], os.Environ())
		default:
			d.stopISVCS()
		}
	}

	return nil
}