func editService(service *dao.Service, editor string) error { serviceJson, err := json.MarshalIndent(service, " ", " ") if err != nil { glog.Fatalf("Problem marshaling service object: %s", err) } var reader io.Reader if terminal.IsTerminal(syscall.Stdin) { editorPath, err := findEditor(editor) if err != nil { fmt.Printf("%s\n", err) return err } f, err := ioutil.TempFile("", fmt.Sprintf("serviced_edit_%s_", service.Id)) if err != nil { glog.Fatalf("Could not write tempfile: %s", err) } defer f.Close() defer os.Remove(f.Name()) _, err = f.Write(serviceJson) if err != nil { glog.Fatalf("Problem writing service json to file: %s", err) } editorCmd := exec.Command(editorPath, f.Name()) editorCmd.Stdout = os.Stdout editorCmd.Stdin = os.Stdin editorCmd.Stderr = os.Stderr err = editorCmd.Run() if err != nil { glog.Fatal("Editor command returned error: %s", err) } _, err = f.Seek(0, 0) if err != nil { glog.Fatal("Could not seek to begining of tempfile: %s", err) } reader = f } else { _, err = os.Stdout.Write(serviceJson) if err != nil { glog.Fatal("Could not write service to terminal's stdout: %s", err) } reader = os.Stdin } serviceJson, err = ioutil.ReadAll(reader) if err != nil { glog.Fatal("Could not read tempfile back in: %s", err) } err = json.Unmarshal(serviceJson, &service) if err != nil { glog.Fatal("Could not parse json: %s", err) } return nil }
// listenAndproxy listens, locally, on the prxy's specified Port. For each // incoming connection a goroutine running the prxy method is created. func (p *proxy) listenAndproxy() { connections := make(chan net.Conn) go func(lsocket net.Listener, conns chan net.Conn) { for { conn, err := lsocket.Accept() if err != nil { glog.Fatal("Error (net.Accept): ", err) } conns <- conn } }(p.listener, connections) i := 0 for { select { case conn := <-connections: if len(p.addresses) == 0 { glog.Warningf("No remote services available for prxying %v", p) conn.Close() continue } i++ // round robin connections to list of addresses glog.V(1).Infof("choosing address from %v", p.addresses) go p.prxy(conn, p.addresses[i%len(p.addresses)]) case p.addresses = <-p.newAddresses: case errc := <-p.closing: p.listener.Close() errc <- nil return } } }
func init() { var err error command := `cd /opt/zenoss && exec supervisord -n -c /opt/zenoss/etc/supervisor.conf` opentsdbPortBinding := portBinding{ HostIp: "0.0.0.0", HostIpOverride: "SERVICED_ISVC_OPENTSDB_PORT_4242_HOSTIP", HostPort: 4242, } metricConsumerPortBinding := portBinding{ HostIp: "0.0.0.0", HostIpOverride: "", // metric-consumer should always be open HostPort: 8443, } metricConsumerAdminPortBinding := portBinding{ HostIp: "127.0.0.1", HostIpOverride: "SERVICED_ISVC_OPENTSDB_PORT_58443_HOSTIP", HostPort: 58443, } centralQueryPortBinding := portBinding{ HostIp: "127.0.0.1", HostIpOverride: "SERVICED_ISVC_OPENTSDB_PORT_8888_HOSTIP", HostPort: 8888, } centralQueryAdminPortBinding := portBinding{ HostIp: "127.0.0.1", HostIpOverride: "SERVICED_ISVC_OPENTSDB_PORT_58888_HOSTIP", HostPort: 58888, } hbasePortBinding := portBinding{ HostIp: "127.0.0.1", HostIpOverride: "SERVICED_ISVC_OPENTSDB_PORT_9090_HOSTIP", HostPort: 9090, } opentsdb, err = NewIService( IServiceDefinition{ Name: "opentsdb", Repo: IMAGE_REPO, Tag: IMAGE_TAG, Command: func() string { return command }, PortBindings: []portBinding{ opentsdbPortBinding, metricConsumerPortBinding, metricConsumerAdminPortBinding, centralQueryPortBinding, centralQueryAdminPortBinding, hbasePortBinding}, Volumes: map[string]string{"hbase": "/opt/zenoss/var/hbase"}, }) if err != nil { glog.Fatal("Error initializing opentsdb container: %s", err) } }
func init() { var err error command := "supervisord -n -c /opt/celery/etc/supervisor.conf" celery, err = NewIService( IServiceDefinition{ Name: "celery", Repo: IMAGE_REPO, Tag: IMAGE_TAG, Command: func() string { return command }, PortBindings: []portBinding{}, Volumes: map[string]string{"celery": "/opt/celery/var"}, }) if err != nil { glog.Fatal("Error initializing celery container: %s", err) } }
func init() { var err error logstash, err = NewContainer( ContainerDescription{ Name: "logstash", Repo: IMAGE_REPO, Tag: IMAGE_TAG, Command: "java -jar /opt/logstash/logstash-1.3.2-flatjar.jar agent -f /usr/local/serviced/resources/logstash/logstash.conf -- web", Ports: []int{5043, 9292}, Volumes: map[string]string{}, Notify: notifyLogstashConfigChange, }) if err != nil { glog.Fatal("Error initializing logstash_master container: %s", err) } }
func init() { var err error defaultHealthCheck := healthCheckDefinition{ healthCheck: zkHealthCheck, Interval: DEFAULT_HEALTHCHECK_INTERVAL, Timeout: DEFAULT_HEALTHCHECK_TIMEOUT, } Zookeeper.HealthChecks = map[string]healthCheckDefinition{ DEFAULT_HEALTHCHECK_NAME: defaultHealthCheck, } zookeeper, err = NewIService(Zookeeper) if err != nil { glog.Fatal("Error initializing zookeeper container: %s", err) } }
func init() { var err error elasticsearch, err = NewContainer( ContainerDescription{ Name: "elasticsearch", Repo: IMAGE_REPO, Tag: IMAGE_TAG, Command: `/opt/elasticsearch-0.90.9/bin/elasticsearch -f`, Ports: []int{9200}, Volumes: map[string]string{"data": "/opt/elasticsearch-0.90.9/data"}, HealthCheck: elasticsearchHealthCheck, }, ) if err != nil { glog.Fatal("Error initializing zookeeper container: %s", err) } }
// Starts the agent or master services on this host func (a *api) StartServer() error { glog.Infof("StartServer: %v (%d)", options.StaticIPs, len(options.StaticIPs)) if len(options.CPUProfile) > 0 { f, err := os.Create(options.CPUProfile) if err != nil { glog.Fatal(err) } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } d, err := newDaemon(options.Endpoint, options.StaticIPs, options.MasterPoolID) if err != nil { return err } return d.run() }
func init() { var err error zookeeper, err = NewContainer( ContainerDescription{ Name: "zookeeper", Repo: IMAGE_REPO, Tag: IMAGE_TAG, Command: "/opt/zookeeper-3.4.5/bin/zkServer.sh start-foreground", Ports: []int{2181, 12181}, Volumes: map[string]string{"data": "/tmp"}, HealthCheck: zkHealthCheck, }) if err != nil { glog.Fatal("Error initializing zookeeper container: %s", err) } }
func init() { var err error command := "/opt/logstash-1.4.2/bin/logstash agent -f /usr/local/serviced/resources/logstash/logstash.conf" localFilePortBinding := portBinding{ HostIp: "0.0.0.0", HostIpOverride: "", // logstash should always be open HostPort: 5042, } lumberJackPortBinding := portBinding{ HostIp: "0.0.0.0", HostIpOverride: "", // lumberjack should always be open HostPort: 5043, } webserverPortBinding := portBinding{ HostIp: "127.0.0.1", HostIpOverride: "SERVICED_ISVC_LOGSTASH_PORT_9292_HOSTIP", HostPort: 9292, } logstash, err = NewIService( IServiceDefinition{ Name: "logstash", Repo: IMAGE_REPO, Tag: IMAGE_TAG, Command: func() string { return command }, PortBindings: []portBinding{ localFilePortBinding, lumberJackPortBinding, webserverPortBinding}, Volumes: map[string]string{}, Notify: notifyLogstashConfigChange, Links: []string{"serviced-isvcs_elasticsearch-logstash:elasticsearch"}, StartGroup: 1, }) if err != nil { glog.Fatal("Error initializing logstash_master container: %s", err) } }
func init() { var serviceName string var err error serviceName = "elasticsearch-serviced" elasticsearch_servicedPortBinding := portBinding{ HostIp: "127.0.0.1", HostIpOverride: "SERVICED_ISVC_ELASTICSEARCH_SERVICED_PORT_9200_HOSTIP", HostPort: 9200, } defaultHealthCheck := healthCheckDefinition{ healthCheck: elasticsearchHealthCheck(9200), Interval: DEFAULT_HEALTHCHECK_INTERVAL, Timeout: DEFAULT_HEALTHCHECK_TIMEOUT, } healthChecks := map[string]healthCheckDefinition{ DEFAULT_HEALTHCHECK_NAME: defaultHealthCheck, } elasticsearch_serviced, err = NewIService( IServiceDefinition{ Name: serviceName, Repo: IMAGE_REPO, Tag: IMAGE_TAG, Command: func() string { return "" }, PortBindings: []portBinding{elasticsearch_servicedPortBinding}, Volumes: map[string]string{"data": "/opt/elasticsearch-0.90.9/data"}, Configuration: make(map[string]interface{}), HealthChecks: healthChecks, HostNetwork: false, }, ) if err != nil { glog.Fatal("Error initializing elasticsearch container: %s", err) } elasticsearch_serviced.Command = func() string { clusterArg := "" if clusterName, ok := elasticsearch_serviced.Configuration["cluster"]; ok { clusterArg = fmt.Sprintf(" -Des.cluster.name=%s ", clusterName) } return fmt.Sprintf(`/opt/elasticsearch-0.90.9/bin/elasticsearch -f -Des.node.name=%s %s`, elasticsearch_serviced.Name, clusterArg) } serviceName = "elasticsearch-logstash" elasticsearch_logstashPortBinding := portBinding{ HostIp: "127.0.0.1", HostIpOverride: "SERVICED_ISVC_ELASTICSEARCH_LOGSTASH_PORT_9100_HOSTIP", HostPort: 9100, } logStashHealthCheck := defaultHealthCheck logStashHealthCheck.healthCheck = elasticsearchHealthCheck(9100) healthChecks = map[string]healthCheckDefinition{ DEFAULT_HEALTHCHECK_NAME: logStashHealthCheck, } elasticsearch_logstash, err = NewIService( IServiceDefinition{ Name: serviceName, Repo: IMAGE_REPO, Tag: IMAGE_TAG, Command: func() string { return "" }, PortBindings: []portBinding{elasticsearch_logstashPortBinding}, Volumes: map[string]string{"data": 
"/opt/elasticsearch-1.3.1/data"}, Configuration: make(map[string]interface{}), HealthChecks: healthChecks, HostNetwork: false, }, ) if err != nil { glog.Fatal("Error initializing elasticsearch container: %s", err) } envPerService[serviceName]["ES_JAVA_OPTS"] = "-Xmx4g" elasticsearch_logstash.Command = func() string { clusterArg := "" if clusterName, ok := elasticsearch_logstash.Configuration["cluster"]; ok { clusterArg = fmt.Sprintf(" -Des.cluster.name=%s ", clusterName) } return fmt.Sprintf(`/opt/elasticsearch-1.3.1/bin/elasticsearch -Des.node.name=%s %s`, elasticsearch_logstash.Name, clusterArg) } }
// Start a service instance and update the CP with the state. func (a *HostAgent) startService(conn *zk.Conn, procFinished chan<- int, ssStats *zk.Stat, service *dao.Service, serviceState *dao.ServiceState) (bool, error) { glog.V(2).Infof("About to start service %s with name %s", service.Id, service.Name) client, err := NewControlClient(a.master) if err != nil { glog.Errorf("Could not start ControlPlane client %v", err) return false, err } defer client.Close() //get this service's tenantId for env injection var tenantId string err = client.GetTenantId(service.Id, &tenantId) if err != nil { glog.Errorf("Failed getting tenantId for service: %s, %s", service.Id, err) } portOps := "" if service.Endpoints != nil { glog.V(1).Info("Endpoints for service: ", service.Endpoints) for _, endpoint := range service.Endpoints { if endpoint.Purpose == "export" { // only expose remote endpoints portOps += fmt.Sprintf(" -p %d", endpoint.PortNumber) } } } volumeOpts := "" if len(tenantId) == 0 && len(service.Volumes) > 0 { // FIXME: find a better way of handling this error condition glog.Fatalf("Could not get tenant ID and need to mount a volume, service state: %s, service id: %s", serviceState.Id, service.Id) } for _, volume := range service.Volumes { btrfsVolume, err := getSubvolume(a.varPath, service.PoolId, tenantId) if err != nil { glog.Fatal("Could not create subvolume: %s", err) } else { resourcePath := path.Join(btrfsVolume.Dir(), volume.ResourcePath) if err = os.MkdirAll(resourcePath, 0770); err != nil { glog.Fatal("Could not create resource path: %s, %s", resourcePath, err) } if err := createVolumeDir(resourcePath, volume.ContainerPath, service.ImageId, volume.Owner, volume.Permission); err != nil { glog.Fatalf("Error creating resource path: %v", err) } volumeOpts += fmt.Sprintf(" -v %s:%s", resourcePath, volume.ContainerPath) } } dir, binary, err := ExecPath() if err != nil { glog.Errorf("Error getting exec path: %v", err) return false, err } volumeBinding := 
fmt.Sprintf("%s:/serviced", dir) if err := injectContext(service, client); err != nil { glog.Errorf("Error injecting context: %s", err) return false, err } // config files configFiles := "" for filename, config := range service.ConfigFiles { prefix := fmt.Sprintf("cp_%s_%s_", service.Id, strings.Replace(filename, "/", "__", -1)) f, err := writeConfFile(prefix, service.Id, filename, config.Content) if err != nil { return false, err } fileChowned := chownConfFile(f, service.Id, filename, config.Owner) if fileChowned == false { continue } // everything worked! configFiles += fmt.Sprintf(" -v %s:%s ", f.Name(), filename) } // if this container is going to produce any logs, bind mount the following files: // logstash-forwarder, sslCertificate, sslKey, logstash-forwarder conf // FIX ME: consider moving this functionality to its own function... logstashForwarderMount := "" if len(service.LogConfigs) > 0 { logstashForwarderLogConf := ` { "paths": [ "%s" ], "fields": { "type": "%s" } }` logstashForwarderLogConf = fmt.Sprintf(logstashForwarderLogConf, service.LogConfigs[0].Path, service.LogConfigs[0].Type) for _, logConfig := range service.LogConfigs[1:] { logstashForwarderLogConf = logstashForwarderLogConf + `, { "paths": [ "%s" ], "fields": { "type": "%s" } }` logstashForwarderLogConf = fmt.Sprintf(logstashForwarderLogConf, logConfig.Path, logConfig.Type) } containerDefaultGatewayAndLogstashForwarderPort := "172.17.42.1:5043" // ********************************************************************************************* // ***** FIX ME the following 3 variables are defined in serviced/proxy.go as well! 
************ containerLogstashForwarderDir := "/usr/local/serviced/resources/logstash" containerLogstashForwarderBinaryPath := containerLogstashForwarderDir + "/logstash-forwarder" containerLogstashForwarderConfPath := containerLogstashForwarderDir + "/logstash-forwarder.conf" // ********************************************************************************************* containerSSLCertificatePath := containerLogstashForwarderDir + "/logstash-forwarder.crt" containerSSLKeyPath := containerLogstashForwarderDir + "/logstash-forwarder.key" logstashForwarderShipperConf := ` { "network": { "servers": [ "%s" ], "ssl certificate": "%s", "ssl key": "%s", "ssl ca": "%s", "timeout": 15 }, "files": [ %s ] }` logstashForwarderShipperConf = fmt.Sprintf(logstashForwarderShipperConf, containerDefaultGatewayAndLogstashForwarderPort, containerSSLCertificatePath, containerSSLKeyPath, containerSSLCertificatePath, logstashForwarderLogConf) filename := service.Name + "_logstash_forwarder_conf" prefix := fmt.Sprintf("cp_%s_%s_", service.Id, strings.Replace(filename, "/", "__", -1)) f, err := writeConfFile(prefix, service.Id, filename, logstashForwarderShipperConf) if err != nil { return false, err } logstashPath := resourcesDir() + "/logstash" hostLogstashForwarderPath := logstashPath + "/logstash-forwarder" hostLogstashForwarderConfPath := f.Name() hostSSLCertificatePath := logstashPath + "/logstash-forwarder.crt" hostSSLKeyPath := logstashPath + "/logstash-forwarder.key" logstashForwarderBinaryMount := " -v " + hostLogstashForwarderPath + ":" + containerLogstashForwarderBinaryPath logstashForwarderConfFileMount := " -v " + hostLogstashForwarderConfPath + ":" + containerLogstashForwarderConfPath sslCertificateMount := " -v " + hostSSLCertificatePath + ":" + containerSSLCertificatePath sslKeyMount := " -v " + hostSSLKeyPath + ":" + containerSSLKeyPath logstashForwarderMount = logstashForwarderBinaryMount + sslCertificateMount + sslKeyMount + logstashForwarderConfFileMount } // add 
arguments to mount requested directory (if requested) requestedMount := "" for _, bindMountString := range a.mount { splitMount := strings.Split(bindMountString, ":") if len(splitMount) == 3 { requestedImage := splitMount[0] hostPath := splitMount[1] containerPath := splitMount[2] if requestedImage == service.ImageId { requestedMount += " -v " + hostPath + ":" + containerPath } } else { glog.Warningf("Could not bind mount the following: %s", bindMountString) } } // add arguments for environment variables environmentVariables := "-e CONTROLPLANE=1" environmentVariables = environmentVariables + " -e CONTROLPLANE_SERVICE_ID=" + service.Id environmentVariables = environmentVariables + " -e CONTROLPLANE_TENANT_ID=" + tenantId environmentVariables = environmentVariables + " -e CONTROLPLANE_CONSUMER_WS=ws://localhost:8444/ws/metrics/store" environmentVariables = environmentVariables + " -e CONTROLPLANE_CONSUMER_URL=http://localhost:8444/ws/metrics/store" proxyCmd := fmt.Sprintf("/serviced/%s proxy %s '%s'", binary, service.Id, service.Startup) // 01 02 03 04 05 06 07 08 09 10 01 02 03 04 05 06 07 08 09 10 cmdString := fmt.Sprintf("docker run %s -rm -name=%s %s -v %s %s %s %s %s %s %s", portOps, serviceState.Id, environmentVariables, volumeBinding, requestedMount, logstashForwarderMount, volumeOpts, configFiles, service.ImageId, proxyCmd) glog.V(0).Infof("Starting: %s", cmdString) a.dockerTerminate(serviceState.Id) a.dockerRemove(serviceState.Id) cmd := exec.Command("bash", "-c", cmdString) go a.waitForProcessToDie(conn, cmd, procFinished, serviceState) glog.V(2).Info("Process started in goroutine") return true, nil }
// run is the daemon main loop: validate the host and docker environment,
// start the RPC server and docker registry proxy, launch the master and/or
// agent subsystems per options, then block until a signal arrives and
// perform an orderly shutdown. On SIGHUP a master re-execs itself without
// stopping the isvcs; on SIGINT/SIGTERM a master stops them.
func (d *daemon) run() (err error) {
	// Host ID failure is fatal; an ID that fails validation is only logged.
	if d.hostID, err = utils.HostID(); err != nil {
		glog.Fatalf("Could not get host ID: %s", err)
	} else if err := validation.ValidHostID(d.hostID); err != nil {
		glog.Errorf("invalid hostid: %s", d.hostID)
	}

	// Refuse to run against a docker older than the supported minimum.
	if currentDockerVersion, err := node.GetDockerVersion(); err != nil {
		glog.Fatalf("Could not get docker version: %s", err)
	} else if minDockerVersion.Compare(currentDockerVersion.Client) < 0 {
		glog.Fatalf("serviced requires docker >= %s", minDockerVersion)
	}

	// The configured storage driver must have been registered at startup.
	if _, ok := volume.Registered(options.FSType); !ok {
		glog.Fatalf("no driver registered for %s", options.FSType)
	}

	d.startRPC()
	d.startDockerRegistryProxy()

	if options.Master {
		// Internal services (isvcs) only run on the master.
		d.startISVCS()
		if err := d.startMaster(); err != nil {
			glog.Fatal(err)
		}
	}
	if options.Agent {
		if err := d.startAgent(); err != nil {
			glog.Fatal(err)
		}
	}

	// Block here until asked to shut down. The buffered channel prevents
	// the signal package from dropping signals delivered in a burst.
	signalC := make(chan os.Signal, 10)
	signal.Notify(signalC, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
	sig := <-signalC
	glog.Info("Shutting down due to interrupt")
	close(d.shutdown)

	// Wait for sub-processes to stop, but cap the wait at 60 seconds so a
	// wedged subsystem cannot hang shutdown forever.
	done := make(chan struct{})
	go func() {
		defer close(done)
		glog.Info("Stopping sub-processes")
		d.waitGroup.Wait()
		glog.Info("Sub-processes have stopped")
	}()
	select {
	case <-done:
		defer glog.Info("Shutdown")
	case <-time.After(60 * time.Second):
		defer glog.Infof("Timeout waiting for shutdown")
	}

	zzk.ShutdownConnections()

	if options.Master {
		switch sig {
		case syscall.SIGHUP:
			// Re-exec the same binary with the same args/environment; the
			// isvcs keep running across the exec.
			// NOTE(review): syscall.Exec's error is ignored — if the exec
			// fails, execution falls through to the return below.
			glog.Infof("Not shutting down isvcs")
			command := os.Args
			glog.Infof("Reloading by calling syscall.exec for command: %+v\n", command)
			syscall.Exec(command[0], command[0:], os.Environ())
		default:
			d.stopISVCS()
		}
	}
	return nil
}