func Init() { var volumesDir string if user, err := user.Current(); err == nil { volumesDir = fmt.Sprintf("/tmp/serviced-%s/var/isvcs", user.Username) } else { volumesDir = "/tmp/serviced/var/isvcs" } Mgr = NewManager("unix:///var/run/docker.sock", imagesDir(), volumesDir) if err := Mgr.Register(elasticsearch); err != nil { glog.Fatalf("%s", err) } if err := Mgr.Register(zookeeper); err != nil { glog.Fatalf("%s", err) } if err := Mgr.Register(logstash); err != nil { glog.Fatalf("%s", err) } if err := Mgr.Register(opentsdb); err != nil { glog.Fatalf("%s", err) } if err := Mgr.Register(celery); err != nil { glog.Fatalf("%s", err) } }
func (d *daemon) startRPC() { if options.DebugPort > 0 { go func() { if err := http.ListenAndServe(fmt.Sprintf(":%d", options.DebugPort), nil); err != nil { glog.Errorf("Unable to bind to debug port %s. Is another instance running?", err) return } }() } listener, err := net.Listen("tcp", options.Listen) if err != nil { glog.Fatalf("Unable to bind to port %s. Is another instance running?") } rpcutils.SetDialTimeout(options.RPCDialTimeout) d.rpcServer.HandleHTTP(rpc.DefaultRPCPath, rpc.DefaultDebugPath) glog.V(0).Infof("Listening on %s", listener.Addr().String()) go func() { for { conn, err := listener.Accept() if err != nil { glog.Fatalf("Error accepting connections: %s", err) } go d.rpcServer.ServeCodec(jsonrpc.NewServerCodec(conn)) } }() }
func editService(service *dao.Service, editor string) error { serviceJson, err := json.MarshalIndent(service, " ", " ") if err != nil { glog.Fatalf("Problem marshaling service object: %s", err) } var reader io.Reader if terminal.IsTerminal(syscall.Stdin) { editorPath, err := findEditor(editor) if err != nil { fmt.Printf("%s\n", err) return err } f, err := ioutil.TempFile("", fmt.Sprintf("serviced_edit_%s_", service.Id)) if err != nil { glog.Fatalf("Could not write tempfile: %s", err) } defer f.Close() defer os.Remove(f.Name()) _, err = f.Write(serviceJson) if err != nil { glog.Fatalf("Problem writing service json to file: %s", err) } editorCmd := exec.Command(editorPath, f.Name()) editorCmd.Stdout = os.Stdout editorCmd.Stdin = os.Stdin editorCmd.Stderr = os.Stderr err = editorCmd.Run() if err != nil { glog.Fatal("Editor command returned error: %s", err) } _, err = f.Seek(0, 0) if err != nil { glog.Fatal("Could not seek to begining of tempfile: %s", err) } reader = f } else { _, err = os.Stdout.Write(serviceJson) if err != nil { glog.Fatal("Could not write service to terminal's stdout: %s", err) } reader = os.Stdin } serviceJson, err = ioutil.ReadAll(reader) if err != nil { glog.Fatal("Could not read tempfile back in: %s", err) } err = json.Unmarshal(serviceJson, &service) if err != nil { glog.Fatal("Could not parse json: %s", err) } return nil }
func (d *daemon) startISVCS() { isvcs.Init() isvcs.Mgr.SetVolumesDir(path.Join(options.VarPath, "isvcs")) if err := isvcs.Mgr.SetConfigurationOption("elasticsearch-serviced", "cluster", d.getEsClusterName("elasticsearch-serviced")); err != nil { glog.Fatalf("Could not set es-serviced option: %s", err) } if err := isvcs.Mgr.SetConfigurationOption("elasticsearch-logstash", "cluster", d.getEsClusterName("elasticsearch-logstash")); err != nil { glog.Fatalf("Could not set es-logstash option: %s", err) } if err := d.initISVCS(); err != nil { glog.Fatalf("Could not start isvcs: %s", err) } }
//SetUpSuite is run before the tests to ensure elastic, zookeeper etc. are running. func (dt *DaoTest) SetUpSuite(c *C) { docker.SetUseRegistry(true) dt.Port = 9202 isvcs.Init() isvcs.Mgr.SetVolumesDir("/tmp/serviced-test") esServicedClusterName, _ := utils.NewUUID36() if err := isvcs.Mgr.SetConfigurationOption("elasticsearch-serviced", "cluster", esServicedClusterName); err != nil { c.Fatalf("Could not set elasticsearch-serviced clustername: %s", err) } esLogstashClusterName, _ := utils.NewUUID36() if err := isvcs.Mgr.SetConfigurationOption("elasticsearch-logstash", "cluster", esLogstashClusterName); err != nil { c.Fatalf("Could not set elasticsearch-logstash clustername: %s", err) } isvcs.Mgr.Wipe() if err := isvcs.Mgr.Start(); err != nil { c.Fatalf("Could not start es container: %s", err) } dt.MappingsFile = "controlplane.json" dt.FacadeTest.SetUpSuite(c) dsn := coordzk.NewDSN([]string{"127.0.0.1:2181"}, time.Second*15).String() glog.Infof("zookeeper dsn: %s", dsn) zClient, err := coordclient.New("zookeeper", dsn, "", nil) if err != nil { glog.Fatalf("Could not start es container: %s", err) } zzk.InitializeLocalClient(zClient) dt.zkConn, err = zzk.GetLocalConnection("/") if err != nil { c.Fatalf("could not get zk connection %v", err) } dt.Dao, err = NewControlSvc("localhost", int(dt.Port), dt.Facade, "/tmp", "rsync", 4979, time.Minute*5, "localhost:5000", MockStorageDriver{}) if err != nil { glog.Fatalf("Could not start es container: %s", err) } else { for i := 0; i < 10; i += 1 { id := strconv.Itoa(i) dt.Dao.RemoveService(id, &unused) } for i := 100; i < 110; i += 1 { id := strconv.Itoa(i) dt.Dao.RemoveService(id, &unused) } } }
func resourcesDir() string { path, err := filepath.EvalSymlinks(localDir("resources")) if err != nil { glog.Fatalf("Could not evaluate %s, not following symlinks: %s", localDir("resources"), err) } return path }
// GetLocalConnection acquires a connection from the local zookeeper client func GetLocalConnection(path string) (client.Connection, error) { localclient, ok := manager[local] if !ok || localclient.Client == nil { glog.Fatalf("zClient has not been initialized!") } return localclient.GetConnection(path) }
func init() { var err error dockerPortBinding := portBinding{ HostIp: "0.0.0.0", HostIpOverride: "", // docker registry should always be open HostPort: registryPort, } defaultHealthCheck := healthCheckDefinition{ healthCheck: registryHealthCheck, Interval: DEFAULT_HEALTHCHECK_INTERVAL, Timeout: DEFAULT_HEALTHCHECK_TIMEOUT, } healthChecks := map[string]healthCheckDefinition{ DEFAULT_HEALTHCHECK_NAME: defaultHealthCheck, } command := `DOCKER_REGISTRY_CONFIG=/docker-registry/config/config_sample.yml SETTINGS_FLAVOR=serviced exec docker-registry` dockerRegistry, err = NewIService( IServiceDefinition{ Name: "docker-registry", Repo: IMAGE_REPO, Tag: IMAGE_TAG, Command: func() string { return command }, PortBindings: []portBinding{dockerPortBinding}, Volumes: map[string]string{"registry": "/tmp/registry"}, HealthChecks: healthChecks, }, ) if err != nil { glog.Fatalf("Error initializing docker-registry container: %s", err) } }
// serviced docker sync func (c *ServicedCli) cmdRegistrySync(ctx *cli.Context) { err := c.driver.RegistrySync() if err != nil { glog.Fatalf("error syncing docker images to local registry: %s", err) } }
// elasticsearchHealthCheck() determines if elasticsearch is healthy func elasticsearchHealthCheck() error { start := time.Now() lastError := time.Now() minUptime := time.Second * 2 timeout := time.Second * 30 schemaFile := localDir("resources/controlplane.json") for { if healthResponse, err := cluster.Health(true); err == nil && (healthResponse.Status == "green" || healthResponse.Status == "yellow") { if buffer, err := os.Open(schemaFile); err != nil { glog.Fatalf("problem reading %s", err) return err } else { http.Post("http://localhost:9200/controlplane", "application/json", buffer) buffer.Close() } } else { lastError = time.Now() glog.V(2).Infof("Still trying to connect to elastic: %v: %s", err, healthResponse) } if time.Since(lastError) > minUptime { break } if time.Since(start) > timeout { return fmt.Errorf("Could not startup elastic search container.") } time.Sleep(time.Millisecond * 1000) } glog.Info("elasticsearch container started, browser at http://localhost:9200/_plugin/head/") return nil }
func (c *ServicedCli) cmdSquash(ctx *cli.Context) { imageName := "" baseLayer := "" newName := "" args := ctx.Args() switch len(ctx.Args()) { case 3: newName = args[2] fallthrough case 2: baseLayer = args[1] fallthrough case 1: imageName = args[0] break default: cli.ShowCommandHelp(ctx, "squash") return } imageID, err := c.driver.Squash(imageName, baseLayer, newName, ctx.String("tempdir")) if err != nil { glog.Fatalf("error squashing: %s", err) } fmt.Println(imageID) }
func (cli *ServicedCli) CmdEditService(args ...string) error { cmd := Subcmd("edit-service", "[SERVICE_ID]", "edit a service") var editor string cmd.StringVar(&editor, "editor", os.Getenv("EDITOR"), "editor to use to edit service definition, also controlled by $EDITOR var") if err := cmd.Parse(args); err != nil { cmd.Usage() return nil } if len(cmd.Args()) != 1 { cmd.Usage() return nil } client := getClient() var service dao.Service err := client.GetService(cmd.Arg(0), &service) if err != nil { glog.Fatalf("Could not get service %s: %v", cmd.Arg(0), err) } err = editService(&service, editor) var unused int err = client.UpdateService(service, &unused) return err }
// Anytime the available service definitions are modified // we need to restart the logstash container so it can write out // its new filter set. // This method depends on the elasticsearch container being up and running. func reloadLogstashContainerImpl(ctx datastore.Context, f *Facade) error { templates, err := f.GetServiceTemplates(ctx) if err != nil { glog.Fatalf("Could not write logstash configuration: %s", err) } if err := writeLogstashConfiguration(templates); err != nil { glog.Fatalf("Could not write logstash configuration: %s", err) return err } glog.V(2).Info("Starting logstash container") if err := isvcs.Mgr.Notify("restart logstash"); err != nil { glog.Fatalf("Could not start logstash container: %s", err) return err } return nil }
// Print the list of available services. func (cli *ServicedCli) CmdServices(args ...string) error { cmd := Subcmd("services", "[CMD]", "Show services") var verbose bool cmd.BoolVar(&verbose, "verbose", false, "Show JSON representation for each service") var raw bool cmd.BoolVar(&raw, "raw", false, "Don't show the header line") var ascii bool if os.Getenv("SERVICED_ASCII") == "1" { ascii = true } cmd.BoolVar(&ascii, "ascii", ascii, "use ascii characters for service tree (env SERVICED_ASCII=1 will default to ascii)") if err := cmd.Parse(args); err != nil { return nil } if ascii { tree_charset = tree_ascii } controlPlane := getClient() var services []*dao.Service err := controlPlane.GetServices(&empty, &services) if err != nil { glog.Fatalf("Could not get services: %v", err) } if verbose == false { svcMap := &svcStub{} svcMap.value = &dao.Service{} generateSvcMap("", services, svcMap) svcMap.treePrint(raw) } else { servicesJson, err := json.MarshalIndent(services, " ", " ") if err != nil { glog.Fatalf("Problem marshaling services object: %s", err) } fmt.Printf("%s\n", servicesJson) } return err }
// Exec dials the remote process server derived from f.addr and returns a
// ProcessInstance whose stdin/stdout/stderr channels are wired to the
// remote socket.io session. Parse or dial failures are fatal.
func (f *Forwarder) Exec(cfg *ProcessConfig) *ProcessInstance {
	// TODO: make me more extensible
	urlAddr, err := url.Parse(f.addr)
	if err != nil {
		glog.Fatalf("Not a valid path: %s (%v)", f.addr, err)
	}

	// Strip any port from the forwarder host and talk to the fixed
	// process-server port 50000.
	host := fmt.Sprintf("http://%s:50000/", strings.Split(urlAddr.Host, ":")[0])

	// Dial the remote ProcessServer
	client, err := socketio.Dial(host)
	if err != nil {
		glog.Fatalf("Unable to contact remote process server: %v", err)
	}

	client.On("connect", func(ns *socketio.NameSpace) {
		// A pre-existing session value under PROCESSKEY means we reconnected
		// to a stale process; only a fresh session may start the process.
		if ns.Session.Values[PROCESSKEY] == nil {
			ns.Emit("process", cfg)
		} else {
			glog.Fatalf("Trying to connect to a stale process!")
		}
	})

	ns := client.Of("")

	// Buffered byte channels carry the interactive streams; Result is
	// unbuffered so completion is observed synchronously.
	proc := &ProcessInstance{
		Stdin:  make(chan byte, 1024),
		Stdout: make(chan byte, 1024),
		Stderr: make(chan byte, 1024),
		Result: make(chan Result),
	}

	client.On("disconnect", func(ns *socketio.NameSpace) {
		glog.Infof("Disconnected!")
		proc.Disconnect()
		proc.Close()
	})

	// Pump the remote session in both directions, then run the client loop.
	go proc.ReadResponse(ns)
	go proc.WriteRequest(ns)
	go client.Run()

	return proc
}
func (d *daemon) getEsClusterName(Type string) string { filename := path.Join(options.VarPath, "isvcs", Type+".clustername") clusterName := "" data, err := ioutil.ReadFile(filename) if err != nil || len(data) <= 0 { clusterName, err = utils.NewUUID36() if err != nil { glog.Fatalf("could not generate uuid: %s", err) } if err := os.MkdirAll(path.Dir(filename), 0770); err != nil { glog.Fatalf("could not create dir %s: %s", path.Dir(filename), err) } if err := ioutil.WriteFile(filename, []byte(clusterName), 0600); err != nil { glog.Fatalf("could not write clustername to %s: %s", filename, err) } } else { clusterName = strings.TrimSpace(string(data)) } return clusterName }
func TestBackup_parseTarInfo(t *testing.T) { output := bytes.NewBufferString(` drwxr-xr-x root/root 0 2014-08-22 13:04 ./ drwxr-xr-x root/root 0 2014-08-22 13:04 ./images/ -rw-r--r-- root/root 1801870336 2014-08-22 13:04 ./images/2.tar -rw-r--r-- root/root 520972800 2014-08-22 13:04 ./images/0.tar `) tparse := func(ts string) time.Time { result, err := time.Parse("2006-01-02 15:04", ts) if err != nil { t.Fatalf("Could not parse time: %s", err) } return result } expected := []tarinfo{ {"drwxr-xr-x", "root", "root", 0, tparse("2014-08-22 13:04"), "./"}, {"drwxr-xr-x", "root", "root", 0, tparse("2014-08-22 13:04"), "./images/"}, {"-rw-r--r--", "root", "root", 1801870336, tparse("2014-08-22 13:04"), "./images/2.tar"}, {"-rw-r--r--", "root", "root", 520972800, tparse("2014-08-22 13:04"), "./images/0.tar"}, } tf, err := new(tarfile).init(output.Bytes()) if err != nil { glog.Fatalf("Could not parse: %s", err) } actual := []tarinfo(*tf) if len(expected) != len(actual) { t.Fatalf("Mismatch (Expected: %v) (Actual: %v)", expected, actual) } for i := range expected { if expected[i].Permission != actual[i].Permission { t.Errorf("Mismatch (Expected: %v) (Actual: %v)", expected[i], actual[i]) } else if expected[i].Owner != actual[i].Owner { t.Errorf("Mismatch (Expected: %v) (Actual: %v)", expected[i], actual[i]) } else if expected[i].Group != actual[i].Group { t.Errorf("Mismatch (Expected: %v) (Actual: %v)", expected[i], actual[i]) } else if expected[i].Size != actual[i].Size { t.Errorf("Mismatch (Expected: %v) (Actual: %v)", expected[i], actual[i]) } else if !expected[i].Timestamp.Equal(actual[i].Timestamp) { t.Errorf("Mismatch (Expected: %v) (Actual: %v)", expected[i], actual[i]) } else if strings.TrimSpace(expected[i].Filename) != actual[i].Filename { t.Errorf("Mismatch (Expected: \"%v\") (Actual: \"%v\")", expected[i].Filename, actual[i].Filename) } } }
// Deploy a service template into the given pool func (cli *ServicedCli) CmdDeployTemplate(args ...string) error { cmd := Subcmd("deploy-template", "[OPTIONS] TEMPLATE_ID POOL_ID DEPLOYMENT_ID", "Deploy TEMPLATE_ID into POOL_ID with a new id DEPLOYMENT_ID") if err := cmd.Parse(args); err != nil { return err } deployreq := dao.ServiceTemplateDeploymentRequest{cmd.Arg(1), cmd.Arg(0), cmd.Arg(2)} var unused int if err := getClient().DeployTemplate(deployreq, &unused); err != nil { glog.Fatalf("Could not deploy service template: %v", err) } fmt.Println("OK") return nil }
// List the service templates associated with the control plane. func (cli *ServicedCli) CmdTemplates(args ...string) error { cmd := Subcmd("templates", "[OPTIONS]", "List templates") var verbose bool cmd.BoolVar(&verbose, "verbose", false, "Show JSON representation for each template") var raw bool cmd.BoolVar(&raw, "raw", false, "Don't show header line") if err := cmd.Parse(args); err != nil { return err } c := getClient() var serviceTemplates map[string]*dao.ServiceTemplate var unused int err := c.GetServiceTemplates(unused, &serviceTemplates) if err != nil { glog.Fatalf("Could not get list of templates: %s", err) } if verbose == false { outfmt := "%-36s %-16s %-32.32s\n" if raw == false { fmt.Printf("%-36s %-16s %-32s\n", "TEMPLATE ID", "NAME", "DESCRIPTION") } else { outfmt = "%s|%s|%s\n" } for id, t := range serviceTemplates { fmt.Printf(outfmt, id, t.Name, t.Description) } } else { for id, template := range serviceTemplates { if t, err := json.MarshalIndent(template, " ", " "); err == nil { if verbose { fmt.Printf("%s: %s\n", id, t) } } } } return err }
func init() { isvcs.Init() isvcs.Mgr.SetVolumesDir("/tmp/serviced-test") isvcs.Mgr.Wipe() controlPlaneDao, err = NewControlSvc("localhost", 9200, addresses) if err != nil { glog.Fatalf("Could not start es container: %s", err) } else { for i := 0; i < 10; i += 1 { id := strconv.Itoa(i) controlPlaneDao.RemoveService(id, &unused) } for i := 100; i < 110; i += 1 { id := strconv.Itoa(i) controlPlaneDao.RemoveService(id, &unused) } } }
// Remove a service template associated with the control plane. func (cli *ServicedCli) CmdRemoveTemplate(args ...string) error { cmd := Subcmd("remove-template", "[OPTIONS]", "Remove a service template") if err := cmd.Parse(args); err != nil { return err } if len(cmd.Args()) != 1 { cmd.Usage() return nil } var unused int if err := getClient().RemoveServiceTemplate(cmd.Arg(0), &unused); err != nil { glog.Fatalf("Could not remove service template: %v", err) } fmt.Println("OK") return nil }
func (c *Controller) checkPrereqs(prereqsPassed chan bool, rpcDead chan struct{}) error { if len(c.prereqs) == 0 { glog.Infof("No prereqs to pass.") prereqsPassed <- true return nil } healthCheckInterval := time.Tick(1 * time.Second) for { select { case <-rpcDead: glog.Fatalf("Exiting, RPC server has gone away") case <-healthCheckInterval: failedAny := false for _, script := range c.prereqs { glog.Infof("Running prereq command: %s", script.Script) cmd := exec.Command("sh", "-c", script.Script) err := cmd.Run() if err != nil { msg := fmt.Sprintf("Not starting service yet, waiting on prereq: %s", script.Name) glog.Warning(msg) fmt.Fprintln(os.Stderr, msg) failedAny = true break } else { glog.Infof("Passed prereq [%s].", script.Name) } } if !failedAny { glog.Infof("Passed all prereqs.") prereqsPassed <- true return nil } } } return nil }
// cmdResetRegistry resets the local docker registry; failure is fatal for
// this CLI invocation.
func (c *ServicedCli) cmdResetRegistry(ctx *cli.Context) {
	err := c.driver.ResetRegistry()
	if err != nil {
		glog.Fatalf("error while resetting the registry: %s", err)
	}
}
// create builds the docker container definition for this internal service —
// image, command, networking, port bindings, links, volume binds, and
// environment — and asks the docker layer to create the container.
// It does not start the container.
func (svc *IService) create() (*docker.Container, error) {
	var config dockerclient.Config
	cd := &docker.ContainerDefinition{
		dockerclient.CreateContainerOptions{Name: svc.name(), Config: &config},
		dockerclient.HostConfig{},
	}

	config.Image = commons.JoinRepoTag(svc.Repo, svc.Tag)
	config.Cmd = []string{"/bin/sh", "-c", svc.Command()}

	// NOTE: USE WITH CARE!
	// Enabling host networking for an isvc may expose ports
	// of the isvcs to access outside of the serviced host, potentially
	// compromising security.
	if svc.HostNetwork {
		cd.NetworkMode = "host"
		glog.Warningf("Host networking enabled for isvc %s", svc.Name)
	}

	// attach all exported ports
	if svc.PortBindings != nil && len(svc.PortBindings) > 0 {
		config.ExposedPorts = make(map[dockerclient.Port]struct{})
		cd.PortBindings = make(map[dockerclient.Port][]dockerclient.PortBinding)
		for _, binding := range svc.PortBindings {
			port := dockerclient.Port(fmt.Sprintf("%d", binding.HostPort))
			config.ExposedPorts[port] = struct{}{}
			portBinding := dockerclient.PortBinding{
				HostIp:   getHostIp(binding),
				HostPort: port.Port(),
			}
			cd.PortBindings[port] = append(cd.PortBindings[port], portBinding)
		}
	}
	glog.V(1).Infof("Bindings for %s = %v", svc.Name, cd.PortBindings)

	// copy any links to other isvcs
	if svc.Links != nil && len(svc.Links) > 0 {
		// To use a link, the source container must be instantiated already, so
		// the service using a link can't be in the first start group.
		//
		// FIXME: Other sanity checks we could add - make sure that the source
		// container is not in the same group or a later group
		if svc.StartGroup == 0 {
			glog.Fatalf("isvc %s can not use docker Links with StartGroup=0", svc.Name)
		}
		cd.Links = make([]string, len(svc.Links))
		copy(cd.Links, svc.Links)
		glog.V(1).Infof("Links for %s = %v", svc.Name, cd.Links)
	}

	// attach all exported volumes
	config.Volumes = make(map[string]struct{})
	cd.Binds = []string{}

	// service-specific volumes; host-side directories are created on demand
	if svc.Volumes != nil && len(svc.Volumes) > 0 {
		for src, dest := range svc.Volumes {
			hostpath := svc.getResourcePath(src)
			if exists, _ := isDir(hostpath); !exists {
				if err := os.MkdirAll(hostpath, 0777); err != nil {
					glog.Errorf("could not create %s on host: %s", hostpath, err)
					return nil, err
				}
			}
			cd.Binds = append(cd.Binds, fmt.Sprintf("%s:%s", hostpath, dest))
			config.Volumes[dest] = struct{}{}
		}
	}

	// global volumes shared by all isvcs; missing sources are skipped, not created
	if isvcsVolumes != nil && len(isvcsVolumes) > 0 {
		for src, dest := range isvcsVolumes {
			if exists, _ := isDir(src); !exists {
				glog.Warningf("Could not mount source %s: path does not exist", src)
				continue
			}
			cd.Binds = append(cd.Binds, fmt.Sprintf("%s:%s", src, dest))
			config.Volumes[dest] = struct{}{}
		}
	}

	// attach environment variables
	for key, val := range envPerService[svc.Name] {
		config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, val))
	}

	return docker.NewContainer(cd, false, 5*time.Second, nil, nil)
}
// init aborts startup when the address-assignment mapping failed to build.
// mappingError is presumably set by a package-level initializer elsewhere
// in this package — not visible in this chunk.
func init() {
	if mappingError != nil {
		glog.Fatalf("error creating addressassignment: %v", mappingError)
	}
}
// configureContainer creates and populates two structures, a docker client Config and a docker client HostConfig structure
// that are used to create and start a container respectively. The information used to populate the structures is pulled from
// the service, serviceState, and conn values that are passed into configureContainer.
func configureContainer(a *HostAgent, client *ControlClient, svc *service.Service, serviceState *servicestate.ServiceState, virtualAddressSubnet string) (*dockerclient.Config, *dockerclient.HostConfig, error) {
	cfg := &dockerclient.Config{}
	hcfg := &dockerclient.HostConfig{}

	//get this service's tenantId for volume mapping
	var tenantID string
	err := client.GetTenantId(svc.ID, &tenantID)
	if err != nil {
		glog.Errorf("Failed getting tenantID for service: %s, %s", svc.ID, err)
		return nil, nil, err
	}

	// get the system user
	unused := 0
	systemUser := user.User{}
	err = client.GetSystemUser(unused, &systemUser)
	if err != nil {
		glog.Errorf("Unable to get system user account for agent %s", err)
		return nil, nil, err
	}
	glog.V(1).Infof("System User %v", systemUser)

	cfg.Image = svc.ImageID

	// get the endpoints
	cfg.ExposedPorts = make(map[dockerclient.Port]struct{})
	hcfg.PortBindings = make(map[dockerclient.Port][]dockerclient.PortBinding)

	if svc.Endpoints != nil {
		glog.V(1).Info("Endpoints for service: ", svc.Endpoints)
		for _, endpoint := range svc.Endpoints {
			if endpoint.Purpose == "export" { // only expose remote endpoints
				var port uint16
				port = endpoint.PortNumber
				// A PortTemplate, when present, overrides the static port
				// number; template failures fall back to PortNumber.
				if endpoint.PortTemplate != "" {
					t := template.Must(template.New("PortTemplate").Funcs(funcmap).Parse(endpoint.PortTemplate))
					b := bytes.Buffer{}
					err := t.Execute(&b, serviceState)
					if err == nil {
						j, err := strconv.Atoi(b.String())
						if err != nil {
							glog.Errorf("%+v", err)
						} else if j > 0 {
							port = uint16(j)
						}
					}
				}
				var p string
				switch endpoint.Protocol {
				case commons.UDP:
					p = fmt.Sprintf("%d/%s", port, "udp")
				default:
					p = fmt.Sprintf("%d/%s", port, "tcp")
				}
				cfg.ExposedPorts[dockerclient.Port(p)] = struct{}{}
				hcfg.PortBindings[dockerclient.Port(p)] = append(hcfg.PortBindings[dockerclient.Port(p)], dockerclient.PortBinding{})
			}
		}
	}

	if len(tenantID) == 0 && len(svc.Volumes) > 0 {
		// FIXME: find a better way of handling this error condition
		glog.Fatalf("Could not get tenant ID and need to mount a volume, service state: %s, service id: %s", serviceState.ID, svc.ID)
	}

	// Make sure the image exists locally.
	if _, err = docker.FindImage(svc.ImageID, true); err != nil {
		glog.Errorf("can't find docker image %s: %s", svc.ImageID, err)
		return nil, nil, err
	}

	cfg.Volumes = make(map[string]struct{})
	hcfg.Binds = []string{}

	if err := injectContext(svc, serviceState, client); err != nil {
		glog.Errorf("Error injecting context: %s", err)
		return nil, nil, err
	}

	// DFS volumes declared by the service (empty type means dfs).
	for _, volume := range svc.Volumes {
		if volume.Type != "" && volume.Type != "dfs" {
			continue
		}
		resourcePath, err := a.setupVolume(tenantID, svc, volume)
		if err != nil {
			return nil, nil, err
		}
		binding := fmt.Sprintf("%s:%s", resourcePath, volume.ContainerPath)
		cfg.Volumes[strings.Split(binding, ":")[1]] = struct{}{}
		hcfg.Binds = append(hcfg.Binds, strings.TrimSpace(binding))
	}

	// Mount the serviced binary directory so the container can run the proxy.
	dir, binary, err := ExecPath()
	if err != nil {
		glog.Errorf("Error getting exec path: %v", err)
		return nil, nil, err
	}
	volumeBinding := fmt.Sprintf("%s:/serviced", dir)
	cfg.Volumes[strings.Split(volumeBinding, ":")[1]] = struct{}{}
	hcfg.Binds = append(hcfg.Binds, strings.TrimSpace(volumeBinding))

	// bind mount everything we need for logstash-forwarder
	if len(svc.LogConfigs) != 0 {
		const LOGSTASH_CONTAINER_DIRECTORY = "/usr/local/serviced/resources/logstash"
		logstashPath := utils.ResourcesDir() + "/logstash"
		binding := fmt.Sprintf("%s:%s", logstashPath, LOGSTASH_CONTAINER_DIRECTORY)
		cfg.Volumes[LOGSTASH_CONTAINER_DIRECTORY] = struct{}{}
		hcfg.Binds = append(hcfg.Binds, binding)
		glog.V(1).Infof("added logstash bind mount: %s", binding)
	}

	// specify temporary volume paths for docker to create
	tmpVolumes := []string{"/tmp"}
	for _, volume := range svc.Volumes {
		if volume.Type == "tmp" {
			tmpVolumes = append(tmpVolumes, volume.ContainerPath)
		}
	}
	for _, path := range tmpVolumes {
		cfg.Volumes[path] = struct{}{}
		glog.V(4).Infof("added temporary docker container path: %s", path)
	}

	// add arguments to mount requested directory (if requested)
	// each agent mount spec is "image,hostPath[,containerPath]".
	glog.V(2).Infof("Checking Mount options for service %#v", svc)
	for _, bindMountString := range a.mount {
		glog.V(2).Infof("bindmount is %#v", bindMountString)
		splitMount := strings.Split(bindMountString, ",")
		numMountArgs := len(splitMount)
		if numMountArgs == 2 || numMountArgs == 3 {
			requestedImage := splitMount[0]
			glog.V(2).Infof("mount requestedImage %#v", requestedImage)
			hostPath := splitMount[1]
			glog.V(2).Infof("mount hostPath %#v", hostPath)
			// assume the container path is going to be the same as the host path
			containerPath := hostPath
			// if the container path is provided, use it
			if numMountArgs > 2 {
				containerPath = splitMount[2]
			}
			glog.V(2).Infof("mount containerPath %#v", containerPath)

			// insert tenantId into requestedImage - see facade.DeployService
			matchedRequestedImage := false
			if requestedImage == "*" {
				matchedRequestedImage = true
			} else {
				imageID, err := commons.ParseImageID(requestedImage)
				if err != nil {
					glog.Errorf("error parsing imageid %v: %v", requestedImage, err)
					continue
				}
				svcImageID, err := commons.ParseImageID(svc.ImageID)
				if err != nil {
					glog.Errorf("error parsing service imageid %v; %v", svc.ImageID, err)
					continue
				}
				glog.V(2).Infof("mount checking %#v and %#v ", imageID, svcImageID)
				matchedRequestedImage = (imageID.Repo == svcImageID.Repo)
			}

			if matchedRequestedImage {
				binding := fmt.Sprintf("%s:%s", hostPath, containerPath)
				cfg.Volumes[strings.Split(binding, ":")[1]] = struct{}{}
				hcfg.Binds = append(hcfg.Binds, strings.TrimSpace(binding))
			}
		} else {
			glog.Warningf("Could not bind mount the following: %s", bindMountString)
		}
	}

	// Get host IP
	ips, err := utils.GetIPv4Addresses()
	if err != nil {
		glog.Errorf("Error getting host IP addresses: %v", err)
		return nil, nil, err
	}

	// add arguments for environment variables
	cfg.Env = append([]string{},
		fmt.Sprintf("CONTROLPLANE_SYSTEM_USER=%s", systemUser.Name),
		fmt.Sprintf("CONTROLPLANE_SYSTEM_PASSWORD=%s", systemUser.Password),
		fmt.Sprintf("CONTROLPLANE_HOST_IPS='%s'", strings.Join(ips, " ")),
		fmt.Sprintf("SERVICED_VIRTUAL_ADDRESS_SUBNET=%s", virtualAddressSubnet),
		fmt.Sprintf("SERVICED_IS_SERVICE_SHELL=false"),
		fmt.Sprintf("SERVICED_NOREGISTRY=%s", os.Getenv("SERVICED_NOREGISTRY")),
		fmt.Sprintf("SERVICED_SERVICE_IMAGE=%s", svc.ImageID),
		fmt.Sprintf("SERVICED_MAX_RPC_CLIENTS=1"),
		fmt.Sprintf("SERVICED_RPC_PORT=%s", a.rpcport),
		fmt.Sprintf("TZ=%s", os.Getenv("TZ")))

	// add dns values to setup
	for _, addr := range a.dockerDNS {
		_addr := strings.TrimSpace(addr)
		if len(_addr) > 0 {
			cfg.Dns = append(cfg.Dns, addr)
		}
	}

	// Add hostname if set
	if svc.Hostname != "" {
		cfg.Hostname = svc.Hostname
	}

	// The container entrypoint is the serviced proxy wrapping the service's
	// own startup command.
	cfg.Cmd = append([]string{},
		fmt.Sprintf("/serviced/%s", binary),
		"service",
		"proxy",
		svc.ID,
		strconv.Itoa(serviceState.InstanceID),
		svc.Startup)

	if svc.Privileged {
		hcfg.Privileged = true
	}

	// Memory and CpuShares should never be negative
	if svc.MemoryLimit < 0 {
		cfg.Memory = 0
	} else {
		cfg.Memory = svc.MemoryLimit
	}
	if svc.CPUShares < 0 {
		cfg.CpuShares = 0
	} else {
		cfg.CpuShares = svc.CPUShares
	}

	return cfg, hcfg, nil
}
func (dt *DaoTest) TestStoppingParentStopsChildren(t *C) { svc := service.Service{ ID: "ParentServiceID", Name: "ParentService", Startup: "/usr/bin/ping -c localhost", Description: "Ping a remote host a fixed number of times", Instances: 1, InstanceLimits: domain.MinMax{1, 1, 1}, ImageID: "test/pinger", PoolID: "default", DeploymentID: "deployment_id", DesiredState: int(service.SVCRun), Launch: "auto", Endpoints: []service.ServiceEndpoint{}, CreatedAt: time.Now(), UpdatedAt: time.Now(), } childService1 := service.Service{ ID: "childService1", Name: "childservice1", Launch: "auto", PoolID: "default", DeploymentID: "deployment_id", Startup: "/bin/sh -c \"while true; do echo hello world 10; sleep 3; done\"", ParentServiceID: "ParentServiceID", } childService2 := service.Service{ ID: "childService2", Name: "childservice2", Launch: "auto", PoolID: "default", DeploymentID: "deployment_id", Startup: "/bin/sh -c \"while true; do echo date 10; sleep 3; done\"", ParentServiceID: "ParentServiceID", } // add a service with a subservice id := "ParentServiceID" var err error if err = dt.Dao.AddService(svc, &id); err != nil { glog.Fatalf("Failed Loading Parent Service Service: %+v, %s", svc, err) } childService1Id := "childService1" childService2Id := "childService2" if err = dt.Dao.AddService(childService1, &childService1Id); err != nil { glog.Fatalf("Failed Loading Child Service 1: %+v, %s", childService1, err) } if err = dt.Dao.AddService(childService2, &childService2Id); err != nil { glog.Fatalf("Failed Loading Child Service 2: %+v, %s", childService2, err) } // start the service var affected int if err = dt.Dao.StartService(dao.ScheduleServiceRequest{id, true}, &affected); err != nil { glog.Fatalf("Unable to stop parent service: %+v, %s", svc, err) } // stop the parent if err = dt.Dao.StopService(dao.ScheduleServiceRequest{id, true}, &affected); err != nil { glog.Fatalf("Unable to stop parent service: %+v, %s", svc, err) } // verify the children have all stopped var services 
[]service.Service var serviceRequest dao.ServiceRequest err = dt.Dao.GetServices(serviceRequest, &services) for _, subService := range services { if subService.DesiredState == int(service.SVCRun) && subService.ParentServiceID == id { t.Errorf("Was expecting child services to be stopped %v", subService) } } }
// init aborts startup when the service mapping failed to build.
// mappingError is presumably set by a package-level initializer elsewhere
// in this package — not visible in this chunk.
func init() {
	if mappingError != nil {
		glog.Fatalf("error creating service mapping: %v", mappingError)
	}
}
// RunShell runs a predefined service shell command via the service definition func (a *api) RunShell(config ShellConfig) error { client, err := a.connectDAO() if err != nil { return err } svc, err := a.GetService(config.ServiceID) if err != nil { return err } getSvc := func(svcID string) (service.Service, error) { s := service.Service{} err := client.GetService(svcID, &s) return s, err } findChild := func(svcID, childName string) (service.Service, error) { s := service.Service{} err := client.FindChildService(dao.FindChildRequest{svcID, childName}, &s) return s, err } if err := svc.EvaluateRunsTemplate(getSvc, findChild); err != nil { fmt.Errorf("error evaluating service:%s Runs:%+v error:%s", svc.ID, svc.Runs, err) } command, ok := svc.Runs[config.Command] if !ok { return fmt.Errorf("command not found for service") } mounts, err := buildMounts(config.ServicedEndpoint, config.ServiceID, config.Mounts) if err != nil { return err } quotedArgs := utils.ShellQuoteArgs(config.Args) command = strings.Join([]string{command, quotedArgs}, " ") asUser := "******" if config.Username != "" && config.Username != "root" { asUser = fmt.Sprintf("su - %s -c ", config.Username) } cfg := shell.ProcessConfig{ ServiceID: config.ServiceID, IsTTY: config.IsTTY, SaveAs: config.SaveAs, Mount: mounts, Command: asUser + utils.ShellQuoteArg(command), LogToStderr: config.LogToStderr, } cfg.LogStash.Enable = config.LogStash.Enable cfg.LogStash.SettleTime, err = time.ParseDuration(config.LogStash.SettleTime) if err != nil { return err } cfg.LogStash.IdleFlushTime, err = time.ParseDuration(config.LogStash.IdleFlushTime) if err != nil { return err } // TODO: change me to use sockets cmd, err := shell.StartDocker(&cfg, options.Endpoint) if err != nil { return fmt.Errorf("failed to connect to service: %s", err) } cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err = cmd.Run() if _, ok := utils.GetExitStatus(err); !ok { glog.Fatalf("abnormal termination from shell command: %s", 
err) } dockercli, err := a.connectDocker() if err != nil { glog.Fatalf("unable to connect to the docker service: %s", err) } exitcode, err := dockercli.WaitContainer(config.SaveAs) if err != nil { glog.Fatalf("failure waiting for container: %s", err) } container, err := dockercli.InspectContainer(config.SaveAs) if err != nil { glog.Fatalf("cannot acquire information about container: %s (%s)", config.SaveAs, err) } glog.V(2).Infof("Container ID: %s", container.ID) switch exitcode { case 0: // Commit the container label := "" glog.V(0).Infof("Committing container") if err := client.Commit(container.ID, &label); err != nil { glog.Fatalf("Error committing container: %s (%s)", container.ID, err) } var layers = 0 if err := client.ImageLayerCount(container.Image, &layers); err != nil { glog.Errorf("Counting layers for image %s", svc.ImageID) } if layers > layer.WARN_LAYER_COUNT { glog.Warningf("Image '%s' number of layers (%d) approaching maximum (%d). Please squash image layers.", svc.ImageID, layers, layer.MAX_LAYER_COUNT) } default: // Delete the container if err := dockercli.StopContainer(container.ID, 10); err != nil { glog.Fatalf("failed to stop container: %s (%s)", container.ID, err) } else if err := dockercli.RemoveContainer(dockerclient.RemoveContainerOptions{ID: container.ID}); err != nil { glog.Fatalf("failed to remove container: %s (%s)", container.ID, err) } return fmt.Errorf("Command returned non-zero exit code %d. Container not commited.", exitcode) } return nil }
// init aborts startup when the svcconfigfile mapping failed to build.
// mappingError is presumably set by a package-level initializer elsewhere
// in this package — not visible in this chunk.
func init() {
	if mappingError != nil {
		glog.Fatalf("error creating svcconfigfile mapping: %v", mappingError)
	}
}