// GetServiceBindMounts returns the service bindmounts
func (a *HostAgent) GetServiceBindMounts(serviceID string, bindmounts *map[string]string) error {
	glog.V(4).Infof("ControlPlaneAgent.GetServiceBindMounts(serviceID:%s)", serviceID)
	*bindmounts = make(map[string]string)

	var tenantID string
	if err := a.GetTenantId(serviceID, &tenantID); err != nil {
		return err
	}

	var service service.Service
	if err := a.GetService(serviceID, &service); err != nil {
		return err
	}

	response := map[string]string{}
	for _, volume := range service.Volumes {
		if volume.Type != "" && volume.Type != "dfs" {
			continue
		}

		resourcePath, err := a.setupVolume(tenantID, &service, volume)
		if err != nil {
			return err
		}

		glog.V(4).Infof("retrieved bindmount resourcePath:%s containerPath:%s", resourcePath, volume.ContainerPath)
		response[resourcePath] = volume.ContainerPath
	}
	*bindmounts = response

	return nil
}
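// Caller sketch (hypothetical, not from the source): like the other agent
// RPCs here, GetServiceBindMounts returns its result through an out
// parameter, so the caller passes a pointer to an unallocated map. The
// helper name and service ID are illustrative.
func printBindMounts(a *HostAgent, serviceID string) error {
	var mounts map[string]string
	if err := a.GetServiceBindMounts(serviceID, &mounts); err != nil {
		return err
	}
	for resourcePath, containerPath := range mounts {
		fmt.Printf("%s -> %s\n", resourcePath, containerPath)
	}
	return nil
}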
func restGetAllRunning(w *rest.ResponseWriter, r *rest.Request, client *node.ControlClient) {
	var services []dao.RunningService
	err := client.GetRunningServices(&empty, &services)
	if err != nil {
		glog.Errorf("Could not get services: %v", err)
		restServerError(w, err)
		return
	}
	if services == nil {
		glog.V(3).Info("Services was nil, returning empty list instead")
		services = []dao.RunningService{}
	}
	for ii, rsvc := range services {
		var svc service.Service
		if err := client.GetService(rsvc.ServiceID, &svc); err != nil {
			glog.Errorf("Could not get service %s: %v", rsvc.ServiceID, err)
			restServerError(w, err)
			// return here so we don't write a second response below
			return
		}
		fillBuiltinMetrics(&svc)
		services[ii].MonitoringProfile = svc.MonitoringProfile
	}
	services = append(services, getIRS()...)
	glog.V(2).Infof("Return %d running services", len(services))
	w.WriteJson(&services)
}
// Snapshots returns the current snapshots on the volume (sorted by date)
func (c *BtrfsConn) Snapshots() ([]string, error) {
	c.Lock()
	defer c.Unlock()

	glog.V(2).Infof("listing snapshots of volume:%v and c.name:%s", c.root, c.name)
	output, err := runcmd(c.sudoer, "subvolume", "list", "-s", c.root)
	if err != nil {
		glog.Errorf("Could not list subvolumes of %s: %s", c.root, err)
		return nil, err
	}

	var files []os.FileInfo
	for _, line := range strings.Split(string(output), "\n") {
		glog.V(2).Infof("line: %s", line)
		if parts := strings.Split(line, "path"); len(parts) == 2 {
			label := strings.TrimSpace(parts[1])
			label = strings.TrimPrefix(label, "volumes/")
			glog.V(2).Infof("looking for tenant:%s in label:'%s'", c.name, label)
			if strings.HasPrefix(label, c.name+"_") {
				file, err := os.Stat(filepath.Join(c.root, label))
				if err != nil {
					glog.Errorf("Could not stat snapshot %s: %s", label, err)
					return nil, err
				}
				files = append(files, file)
				glog.V(2).Infof("found snapshot:%s", label)
			}
		}
	}
	return volume.FileInfoSlice(files).Labels(), nil
}
func Lead(dao dao.ControlPlane, conn *zk.Conn, zkEvent <-chan zk.Event) {
	shutdownMode := false
	for {
		if shutdownMode {
			glog.V(1).Info("Shutdown mode encountered.")
			break
		}
		time.Sleep(time.Second)
		func() error {
			select {
			case evt := <-zkEvent:
				// shut this thing down
				shutdownMode = true
				glog.V(0).Info("Got a zkevent, leaving lead: ", evt)
				return nil
			default:
				glog.V(0).Info("Processing leader duties")
				// pass through
			}
			watchServices(dao, conn)
			return nil
		}()
	}
}
// elasticsearchHealthCheck() determines if elasticsearch is healthy
func elasticsearchHealthCheck(port int) HealthCheckFunction {
	return func(halt <-chan struct{}) error {
		lastError := time.Now()
		minUptime := time.Second * 2
		baseUrl := fmt.Sprintf("http://localhost:%d", port)

		for {
			healthResponse, err := getElasticHealth(baseUrl)
			if err == nil && (healthResponse.Status == "green" || healthResponse.Status == "yellow") {
				break
			} else {
				lastError = time.Now()
				glog.V(1).Infof("Still trying to connect to elasticsearch at %s: %v: %s", baseUrl, err, healthResponse.Status)
			}
			if time.Since(lastError) > minUptime {
				break
			}

			select {
			case <-halt:
				glog.V(1).Infof("Quit healthcheck for elasticsearch at %s", baseUrl)
				return nil
			default:
				time.Sleep(time.Second)
			}
		}
		glog.V(1).Infof("elasticsearch running, browser at %s/_plugin/head/", baseUrl)
		return nil
	}
}
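// Usage sketch (assumption: HealthCheckFunction is declared elsewhere in
// this package as func(halt <-chan struct{}) error). A caller can bound a
// check like the one above by closing the halt channel after a deadline;
// runHealthCheckWithTimeout is a hypothetical helper, not from the source.
func runHealthCheckWithTimeout(check HealthCheckFunction, timeout time.Duration) error {
	halt := make(chan struct{})
	done := make(chan error, 1)
	go func() { done <- check(halt) }()
	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		close(halt) // signal the check to stop retrying
		return fmt.Errorf("health check did not pass within %s", timeout)
	}
}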
// addInstance creates a new service state and host instance
func addInstance(conn client.Connection, state ss.ServiceState) error {
	glog.V(2).Infof("Adding instance %+v", state)
	// check the object
	if err := state.ValidEntity(); err != nil {
		glog.Errorf("Could not validate service state %+v: %s", state, err)
		return err
	}

	// CC-1050: we need to trigger the scheduler in case we only have a
	// partial create.
	svclock := newStateLock(conn, state.ServiceID)
	if err := svclock.Lock(); err != nil {
		glog.Errorf("Could not set lock on service %s: %s", state.ServiceID, err)
		return err
	}
	defer svclock.Unlock()

	lock := newInstanceLock(conn, state.ID)
	if err := lock.Lock(); err != nil {
		glog.Errorf("Could not set lock for service instance %s for service %s on host %s: %s", state.ID, state.ServiceID, state.HostID, err)
		return err
	}
	glog.V(2).Infof("Acquired lock for instance %s", state.ID)
	defer lock.Unlock()

	var err error
	defer func() {
		// roll back any nodes that were created before a failure
		if err != nil {
			conn.Delete(hostpath(state.HostID, state.ID))
			conn.Delete(servicepath(state.ServiceID, state.ID))
			rmInstanceLock(conn, state.ID)
		}
	}()

	// Create node on the service
	spath := servicepath(state.ServiceID, state.ID)
	snode := &ServiceStateNode{ServiceState: &state}
	if err = conn.Create(spath, snode); err != nil {
		glog.Errorf("Could not create service state %s for service %s: %s", state.ID, state.ServiceID, err)
		return err
	} else if err = conn.Set(spath, snode); err != nil {
		glog.Errorf("Could not set service state %s for node %+v: %s", state.ID, snode, err)
		return err
	}

	// Create node on the host
	hpath := hostpath(state.HostID, state.ID)
	hnode := NewHostState(&state)
	glog.V(2).Infof("Host node: %+v", hnode)
	if err = conn.Create(hpath, hnode); err != nil {
		glog.Errorf("Could not create host state %s for host %s: %s", state.ID, state.HostID, err)
		return err
	} else if err = conn.Set(hpath, hnode); err != nil {
		glog.Errorf("Could not set host state %s for node %+v: %s", state.ID, hnode, err)
		return err
	}

	glog.V(2).Infof("Releasing lock for instance %s", state.ID)
	return nil
}
func purgeOldsessionTs() {
	// use a closure to facilitate safe locking regardless of when the purge function returns
	doPurge := func() {
		sessionsLock.Lock()
		defer sessionsLock.Unlock()

		// check the map length under the lock to avoid a data race
		if len(sessions) == 0 {
			return
		}

		glog.V(1).Info("Searching for expired sessions")
		cutoff := time.Now().UTC().Unix() - int64((30 * time.Minute).Seconds())
		toDel := []string{}
		for key, value := range sessions {
			if value.access.UTC().Unix() < cutoff {
				toDel = append(toDel, key)
			}
		}
		for _, key := range toDel {
			glog.V(0).Infof("Deleting session %s (exceeded max age)", key)
			delete(sessions, key)
		}
	}

	for {
		time.Sleep(time.Second * 60)
		doPurge()
	}
}
//setItem sets the node at the given key in the registry and returns the path of the node in the registry
func (r *registryType) setItem(conn client.Connection, key string, nodeID string, node client.Node) (string, error) {
	if err := r.ensureKey(conn, key); err != nil {
		return "", err
	}

	//TODO: make ephemeral
	path := r.getPath(key, nodeID)

	exists, err := zzk.PathExists(conn, path)
	if err != nil {
		return "", err
	}

	if exists {
		glog.V(3).Infof("Set to %s: %#v", path, node)
		epn := EndpointNode{}
		if err := conn.Get(path, &epn); err != nil {
			return "", err
		}
		node.SetVersion(epn.Version())
		if err := conn.Set(path, node); err != nil {
			return "", err
		}
	} else {
		addPath, err := r.addItem(conn, key, nodeID, node)
		if err != nil {
			return "", err
		}
		path = addPath
		glog.V(3).Infof("Add to %s: %#v", path, node)
	}
	return path, nil
}
func watch(conn client.Connection, path string, cancel <-chan bool, processChildren ProcessChildrenFunc, errorHandler WatchError) error {
	exists, err := zzk.PathExists(conn, path)
	if err != nil {
		return err
	}
	if !exists {
		return client.ErrNoNode
	}

	for {
		glog.V(1).Infof("watching children at path: %s", path)
		nodeIDs, event, err := conn.ChildrenW(path)
		glog.V(1).Infof("child watch for path %s returned: %#v", path, nodeIDs)
		if err != nil {
			glog.Errorf("Could not watch %s: %s", path, err)
			defer errorHandler(path, err)
			return err
		}
		processChildren(conn, path, nodeIDs...)
		select {
		case ev := <-event:
			glog.V(1).Infof("watch event %+v at path: %s", ev, path)
		case <-cancel:
			glog.V(1).Infof("watch cancel at path: %s", path)
			return nil
		}
	}
}
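// Usage sketch (hypothetical): watch expects a ProcessChildrenFunc that is
// invoked with the current child IDs whenever the child list changes,
// matching the call processChildren(conn, path, nodeIDs...) above. A
// minimal logging-only callback and invocation might look like this; the
// path and helper names are illustrative.
func logChildren(conn client.Connection, path string, nodeIDs ...string) {
	glog.Infof("%s now has %d children: %v", path, len(nodeIDs), nodeIDs)
}

// cancel := make(chan bool)
// watch(conn, "/services", cancel, logChildren, func(path string, err error) {
// 	glog.Errorf("watch on %s failed: %s", path, err)
// })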
func (this *ControlPlaneDao) GetServiceLogs(serviceID string, logs *string) error {
	glog.V(3).Info("ControlPlaneDao.GetServiceLogs serviceID=", serviceID)
	var serviceStates []servicestate.ServiceState
	if err := this.GetServiceStates(serviceID, &serviceStates); err != nil {
		glog.Errorf("ControlPlaneDao.GetServiceLogs failed: %v", err)
		return err
	}
	if len(serviceStates) == 0 {
		glog.V(1).Info("Unable to find any running services for service:", serviceID)
		return nil
	}

	serviceState := serviceStates[0]
	endpoint := fmt.Sprintf("%s:%d", serviceState.HostIP, this.rpcPort)
	agentClient, err := agent.NewClient(endpoint)
	if err != nil {
		glog.Errorf("could not create client to %s: %s", endpoint, err)
		return err
	}
	defer agentClient.Close()

	mylogs, err := agentClient.GetDockerLogs(serviceState.DockerID)
	if err != nil {
		glog.Errorf("could not get docker logs from agent client: %s", err)
		return err
	}
	*logs = mylogs
	return nil
}
func (sr StatsReporter) postStats(stats []byte) error {
	statsreq, err := http.NewRequest("POST", sr.destination, bytes.NewBuffer(stats))
	if err != nil {
		glog.V(3).Info("Couldn't create stats request: ", err)
		return err
	}
	statsreq.Header["User-Agent"] = []string{"Zenoss Metric Publisher"}
	statsreq.Header["Content-Type"] = []string{"application/json"}

	if glog.V(4) {
		glog.Info(string(stats))
	}

	resp, reqerr := http.DefaultClient.Do(statsreq)
	if reqerr != nil {
		glog.V(3).Info("Couldn't post stats: ", reqerr)
		return reqerr
	}
	// close the body on every path, not just on success
	defer resp.Body.Close()

	if !strings.Contains(resp.Status, "200") {
		glog.V(3).Info("Non-success: ", resp.Status)
		return fmt.Errorf("couldn't post stats: %s", resp.Status)
	}
	return nil
}
//ValidEntity validates Host fields
func (h *Host) ValidEntity() error {
	glog.V(4).Info("Validating host")

	//if err := validation.ValidHostID(entity.ID); err != nil {
	//	return fmt.Errorf("invalid hostid:'%s' for host Name:'%s' IP:%s", entity.ID, entity.Name, entity.IPAddr)
	//}

	trimmedID := strings.TrimSpace(h.ID)
	violations := validation.NewValidationError()
	violations.Add(validation.NotEmpty("Host.ID", h.ID))
	violations.Add(validation.ValidHostID(h.ID))
	violations.Add(validation.StringsEqual(h.ID, trimmedID, "leading and trailing spaces not allowed for host id"))
	violations.Add(validation.ValidPort(h.RPCPort))
	violations.Add(validation.NotEmpty("Host.PoolID", h.PoolID))
	violations.Add(validation.IsIP(h.IPAddr))

	//TODO: what should we be validating here? It doesn't seem to work for
	glog.V(4).Infof("Validating IPAddr %v for host %s", h.IPAddr, h.ID)
	ipAddr, err := net.ResolveIPAddr("ip4", h.IPAddr)
	if err != nil {
		glog.Errorf("Could not resolve: %s to an ip4 address: %v", h.IPAddr, err)
		violations.Add(err)
	} else if ipAddr.IP.IsLoopback() {
		glog.Errorf("Can not use %s as host address because it is a loopback address", h.IPAddr)
		violations.Add(errors.New("host ip can not be a loopback address"))
	}

	if len(violations.Errors) > 0 {
		return violations
	}
	return nil
}
// cmdScriptRun serviced script run filename
func (c *ServicedCli) cmdScriptRun(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 1 {
		if !ctx.Bool("help") {
			fmt.Fprintf(os.Stderr, "Incorrect Usage.\n\n")
		}
		cli.ShowSubcommandHelp(ctx)
		return
	}

	var svc *service.Service
	if svcID := ctx.String("service"); svcID != "" {
		//verify service or translate to ID
		var err error
		svc, err = c.searchForService(svcID)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			c.exit(1)
			return
		}
		if svc == nil {
			fmt.Fprintf(os.Stderr, "service %s not found\n", svcID)
			c.exit(1)
			return
		}
	}

	fileName := args[0]
	config := &script.Config{}
	if svc != nil {
		config.ServiceID = svc.ID
	}

	// exec unix script command to log output
	if isWithin := os.Getenv("IS_WITHIN_UNIX_SCRIPT"); isWithin != "TRUE" {
		os.Setenv("IS_WITHIN_UNIX_SCRIPT", "TRUE") // prevent inception problem

		// DO NOT EXIT ON ANY ERRORS - continue without logging
		logdir := utils.ServicedLogDir()
		if userrec, err := user.Current(); err != nil {
			fmt.Fprintf(os.Stderr, "Unable to retrieve userid to log output: %s", err)
		} else {
			logfile := time.Now().Format(fmt.Sprintf("%s/script-2006-01-02-150405-%s.log", logdir, userrec.Username))

			// unix exec ourselves
			cmd := []string{"/usr/bin/script", "--append", "--return", "--flush", "-c", strings.Join(os.Args, " "), logfile}
			fmt.Fprintf(os.Stderr, "Logging to logfile: %s\n", logfile)
			glog.V(1).Infof("syscall.exec unix script with command: %+v", cmd)
			if err := syscall.Exec(cmd[0], cmd[0:], os.Environ()); err != nil {
				fmt.Fprintf(os.Stderr, "Unable to log output with command:%+v err:%s\n", cmd, err)
			}
		}
	}

	glog.V(1).Infof("runScript filename:%s %+v\n", fileName, config)
	runScript(c, ctx, fileName, config)
}
func MonitorResourcePool(shutdown <-chan interface{}, conn client.Connection, poolID string) <-chan *pool.ResourcePool {
	monitor := make(chan *pool.ResourcePool)
	go func() {
		defer close(monitor)
		if err := zzk.Ready(shutdown, conn, poolpath(poolID)); err != nil {
			glog.V(2).Infof("Could not watch pool %s: %s", poolID, err)
			return
		}
		for {
			var node PoolNode
			event, err := conn.GetW(poolpath(poolID), &node)
			if err != nil {
				glog.V(2).Infof("Could not get pool %s: %s", poolID, err)
				return
			}
			select {
			case monitor <- node.ResourcePool:
			case <-shutdown:
				return
			}
			select {
			case <-event:
			case <-shutdown:
				return
			}
		}
	}()
	return monitor
}
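// Consumer sketch (hypothetical): MonitorResourcePool yields the pool each
// time its ZooKeeper node changes and closes the channel when the watch
// ends, so a consumer can simply range over it. The helper name is
// illustrative, not from the source.
func watchPoolChanges(shutdown chan interface{}, conn client.Connection, poolID string) {
	for p := range MonitorResourcePool(shutdown, conn, poolID) {
		glog.Infof("pool %s updated: %+v", poolID, p)
	}
	glog.Infof("stopped monitoring pool %s", poolID)
}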
func (a *HostAgent) processChildrenAndWait(conn *zk.Conn) bool {
	processing := make(map[string]chan int)
	ssDone := make(chan stateResult, 25)

	hostPath := zzk.HostPath(a.hostId)

	for {
		children, _, zkEvent, err := conn.ChildrenW(hostPath)
		if err != nil {
			glog.V(0).Infoln("Unable to read children, retrying.")
			time.Sleep(3 * time.Second)
			return true
		}
		a.startMissingChildren(conn, children, processing, ssDone)

		select {
		case errc := <-a.closing:
			glog.V(1).Info("Agent received interrupt")
			err = waitForSsNodes(processing, ssDone)
			errc <- err
			return false
		case ssResult := <-ssDone:
			glog.V(1).Infof("Goroutine finished %s", ssResult.id)
			delete(processing, ssResult.id)
		case evt := <-zkEvent:
			glog.V(1).Info("Agent event: ", evt)
		}
	}
}
func (f *Facade) fillServiceConfigs(ctx datastore.Context, svc *service.Service) error {
	glog.V(3).Infof("fillServiceConfigs for %s", svc.ID)
	tenantID, servicePath, err := f.getTenantIDAndPath(ctx, *svc)
	if err != nil {
		return err
	}
	glog.V(3).Infof("service %v; tenantid=%s; path=%s", svc.ID, tenantID, servicePath)

	configStore := serviceconfigfile.NewStore()
	existingConfs, err := configStore.GetConfigFiles(ctx, tenantID, servicePath)
	if err != nil {
		return err
	}

	//found confs are the modified confs for this service
	foundConfs := make(map[string]*servicedefinition.ConfigFile)
	for _, svcConfig := range existingConfs {
		foundConfs[svcConfig.ConfFile.Filename] = &svcConfig.ConfFile
	}

	//replace with stored service config only if it is an existing config
	for name, conf := range foundConfs {
		if _, found := svc.ConfigFiles[name]; found {
			svc.ConfigFiles[name] = *conf
		}
	}
	return nil
}
func NewControlSvc(hostName string, port int, facade *facade.Facade, varpath, fsType string, rpcPort int, maxdfstimeout time.Duration, dockerRegistry string, networkDriver storage.StorageDriver) (*ControlPlaneDao, error) {
	glog.V(2).Info("calling NewControlSvc()")
	defer glog.V(2).Info("leaving NewControlSvc()")

	s, err := NewControlPlaneDao(hostName, port, rpcPort)
	if err != nil {
		return nil, err
	}

	//Used to bridge old to new
	s.facade = facade
	s.varpath = varpath
	s.fsType = fsType

	// create the account credentials
	if err = createSystemUser(s); err != nil {
		return nil, err
	}

	dfs, err := dfs.NewDistributedFilesystem(fsType, varpath, dockerRegistry, facade, maxdfstimeout, networkDriver)
	if err != nil {
		return nil, err
	}
	s.dfs = dfs

	return s, nil
}
func (d *daemon) addTemplates() {
	root := utils.LocalDir("templates")
	glog.V(1).Infof("Adding templates from %s", root)
	// Don't block startup for this. It's merely a convenience.
	go func() {
		err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info == nil || !strings.HasSuffix(info.Name(), ".json") {
				return nil
			}
			if info.IsDir() {
				return filepath.SkipDir
			}
			var reader io.ReadCloser
			if reader, err = os.Open(path); err != nil {
				glog.Warningf("Unable to open template %s", path)
				return nil
			}
			defer reader.Close()
			st := servicetemplate.ServiceTemplate{}
			if err := json.NewDecoder(reader).Decode(&st); err != nil {
				glog.Warningf("Unable to parse template file %s", path)
				return nil
			}
			glog.V(1).Infof("Adding service template %s", path)
			d.facade.AddServiceTemplate(d.dsContext, st)
			return nil
		})
		if err != nil {
			glog.Warningf("Not loading templates from %s: %s", root, err)
		}
	}()
}
// RemoveAddressAssignment removes an AddressAssignment by id
func (f *Facade) RemoveAddressAssignment(ctx datastore.Context, id string) error {
	store := addressassignment.NewStore()
	key := addressassignment.Key(id)

	var assignment addressassignment.AddressAssignment
	if err := store.Get(ctx, key, &assignment); err != nil {
		return err
	}

	if err := store.Delete(ctx, key); err != nil {
		return err
	}

	var svc *service.Service
	var err error
	if svc, err = f.GetService(ctx, assignment.ServiceID); err != nil {
		glog.V(2).Infof("Facade.GetService service=%+v err=%s", assignment.ServiceID, err)
		return err
	}

	if err := f.updateService(ctx, svc); err != nil {
		glog.V(2).Infof("Facade.updateService service=%+v err=%s", assignment.ServiceID, err)
		return err
	}

	return nil
}
// UpdateRemoteMonitorFile is used by remote clients to write a tiny file to the DFS volume at the given cycle
func UpdateRemoteMonitorFile(localPath string, writeInterval time.Duration, ipAddr string, shutdown <-chan interface{}) {
	monitorPath := path.Join(localPath, monitorSubDir)
	remoteFile := path.Join(localPath, monitorSubDir, ipAddr)
	glog.Infof("updating DFS volume monitor file %s at write interval: %s", remoteFile, writeInterval)

	for {
		glog.V(2).Infof("checking DFS monitor path %s", monitorPath)
		_, err := os.Stat(monitorPath)
		if err != nil {
			glog.V(2).Infof("unable to stat DFS monitor path: %s %s", monitorPath, err)
			if err := os.MkdirAll(monitorPath, 0755); err != nil {
				glog.Warningf("unable to create DFS volume monitor path %s: %s", monitorPath, err)
			} else {
				glog.Infof("created DFS volume monitor path %s", monitorPath)
			}
		}

		glog.V(2).Infof("writing DFS file %s", remoteFile)
		if err := ioutil.WriteFile(remoteFile, []byte(ipAddr), 0600); err != nil {
			glog.Warningf("unable to write DFS file %s: %s", remoteFile, err)
		}

		// wait for next cycle or shutdown
		select {
		case <-time.After(writeInterval):
		case <-shutdown:
			glog.Infof("no longer writing remote monitor status for DFS volume %s to %s", localPath, remoteFile)
			return
		}
	}
}
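// Startup sketch (hypothetical): the writer above runs until shutdown is
// closed, so it is typically launched once per host as a goroutine. The
// helper name and 30-second interval are illustrative, not from the source.
func startMonitorWriter(dfsPath, hostIP string) chan interface{} {
	shutdown := make(chan interface{})
	go UpdateRemoteMonitorFile(dfsPath, 30*time.Second, hostIP, shutdown)
	return shutdown // close this channel to stop the writer
}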
// a health check for zookeeper
func zkHealthCheck(halt <-chan struct{}) error {
	lastError := time.Now()
	minUptime := time.Second * 2
	zookeepers := []string{"127.0.0.1:2181"}

	for {
		if conn, _, err := zk.Connect(zookeepers, time.Second*10); err == nil {
			conn.Close()
		} else {
			// conn may be nil when Connect fails, so don't touch it here
			glog.V(1).Infof("Could not connect to zookeeper: %s", err)
			lastError = time.Now()
		}
		// make sure that service has been good for at least minUptime
		if time.Since(lastError) > minUptime {
			break
		}

		select {
		case <-halt:
			glog.V(1).Infof("Quit healthcheck for zookeeper")
			return nil
		default:
			time.Sleep(time.Second)
		}
	}
	glog.V(1).Info("zookeeper running, browser at http://localhost:12181/exhibitor/v1/ui/index.html")
	return nil
}
// ImportImage creates a new image in the local repository from a file system archive.
func ImportImage(repotag, filename string) error {
	dc, err := dockerclient.NewClient(dockerep)
	if err != nil {
		return err
	}
	glog.V(1).Infof("importing image %s from %s", repotag, filename)
	f, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer f.Close()

	iid, err := commons.ParseImageID(repotag)
	if err != nil {
		return err
	}

	opts := dockerclient.ImportImageOptions{
		Repository:  iid.BaseName(),
		Source:      "-",
		InputStream: f,
		Tag:         iid.Tag,
	}

	if err = dc.ImportImage(opts); err != nil {
		glog.V(1).Infof("unable to import %s: %v", repotag, err)
		return err
	}
	return nil
}
func updateServiceInstances(cpDao dao.ControlPlane, conn *zk.Conn, service *dao.Service, serviceStates []*dao.ServiceState) error {
	// pick service instances to start
	if len(serviceStates) < service.Instances {
		instancesToStart := service.Instances - len(serviceStates)
		glog.V(2).Infof("updateServiceInstances wants to start %d instances", instancesToStart)
		var poolHosts []*dao.PoolHost
		err := cpDao.GetHostsForResourcePool(service.PoolId, &poolHosts)
		if err != nil {
			glog.Errorf("Leader unable to acquire hosts for pool %s: %v", service.PoolId, err)
			return err
		}
		if len(poolHosts) == 0 {
			glog.Warningf("Pool %s has no hosts", service.PoolId)
			return nil
		}
		return startServiceInstances(conn, service, poolHosts, instancesToStart)
	} else if len(serviceStates) > service.Instances {
		instancesToKill := len(serviceStates) - service.Instances
		glog.V(2).Infof("updateServiceInstances wants to kill %d instances", instancesToKill)
		shutdownServiceInstances(conn, serviceStates, instancesToKill)
	}
	return nil
}
func (ed *elasticDriver) Initialize(timeout time.Duration) error {
	quit := make(chan int)
	healthy := make(chan int)

	go ed.checkHealth(quit, healthy)

	select {
	case <-healthy:
		glog.V(4).Infof("Got response from Elastic")
	case <-time.After(timeout):
		return errors.New("timed out waiting for response from Elastic")
	}

	if err := ed.postIndex(); err != nil {
		return err
	}
	if err := ed.postMappings(); err != nil {
		return err
	}

	// postMappings and postIndex affect es health
	go ed.checkHealth(quit, healthy)

	select {
	case <-healthy:
		glog.V(4).Infof("Got response from Elastic")
	case <-time.After(timeout):
		return errors.New("timed out waiting for response from Elastic")
	}

	return nil
}
func (p *ProcessInstance) ReadResponse(ns *socketio.NameSpace) {
	ns.On("stdout", func(n *socketio.NameSpace, stdout string) {
		glog.V(4).Infof("Process received stdout: %s", stdout)
		if p.closed {
			glog.Warningf("connection closed; cannot write stdout: %s", stdout)
		} else {
			for _, b := range []byte(stdout) {
				p.Stdout <- b
			}
		}
	})

	ns.On("stderr", func(n *socketio.NameSpace, stderr string) {
		glog.V(4).Infof("Process received stderr: %s", stderr)
		if p.closed {
			glog.Warningf("connection closed; cannot write stderr: %s", stderr)
		} else {
			for _, b := range []byte(stderr) {
				p.Stderr <- b
			}
		}
	})

	ns.On("result", func(n *socketio.NameSpace, result Result) {
		glog.V(0).Infof("Process received result: %v", result)
		p.Result <- result
	})

	glog.V(0).Info("Hooked up outgoing events!")
}
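// Consumer sketch (hypothetical): stdout and stderr arrive one byte at a
// time on the instance's channels, so a reader drains them in a goroutine.
// This assumes p.Stdout and p.Stderr are chan byte, as the writes above
// imply; the helper name is illustrative.
func streamBytes(ch <-chan byte, w io.Writer) {
	for b := range ch {
		w.Write([]byte{b}) // forward each byte as it arrives
	}
}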
//restUpdatePool updates a resource pool. Request input is pool.ResourcePool
func restUpdatePool(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext) {
	poolID, err := url.QueryUnescape(r.PathParam("poolId"))
	if err != nil {
		restBadRequest(w, err)
		return
	}

	var payload pool.ResourcePool
	err = r.DecodeJsonPayload(&payload)
	if err != nil {
		glog.V(1).Info("Could not decode pool payload: ", err)
		restBadRequest(w, err)
		return
	}

	client, err := ctx.getMasterClient()
	if err != nil {
		restServerError(w, err)
		return
	}

	err = client.UpdateResourcePool(payload)
	if err != nil {
		glog.Error("Unable to update pool: ", err)
		restServerError(w, err)
		return
	}

	glog.V(1).Info("Updated pool ", poolID)
	w.WriteJson(&simpleResponse{"Updated resource pool", poolLinks(poolID)})
}
func restDeployAppTemplate(w *rest.ResponseWriter, r *rest.Request, client *node.ControlClient) {
	var payload dao.ServiceTemplateDeploymentRequest
	err := r.DecodeJsonPayload(&payload)
	if err != nil {
		glog.V(1).Info("Could not decode deployment payload: ", err)
		restBadRequest(w, err)
		return
	}

	var tenantID string
	err = client.DeployTemplate(payload, &tenantID)
	if err != nil {
		glog.Error("Could not deploy template: ", err)
		restServerError(w, err)
		return
	}
	glog.V(0).Info("Deployed template ", payload)

	// automatic IP assignment
	assignmentRequest := dao.AssignmentRequest{tenantID, "", true}
	if err := client.AssignIPs(assignmentRequest, nil); err != nil {
		glog.Errorf("Could not automatically assign IPs: %v", err)
		restServerError(w, err)
		return
	}
	glog.Infof("Automatically assigned IP addresses to service: %v", tenantID)
	// end of automatic IP assignment

	w.WriteJson(&simpleResponse{tenantID, servicesLinks()})
}
//restGetHostsForResourcePool gets all Hosts in a resource pool. response is [dao.PoolHost]
func restGetHostsForResourcePool(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext) {
	poolHosts := make([]*dao.PoolHost, 0)
	poolID, err := url.QueryUnescape(r.PathParam("poolId"))
	if err != nil {
		glog.V(1).Infof("Unable to acquire pool ID: %v", err)
		restBadRequest(w, err)
		return
	}

	client, err := ctx.getMasterClient()
	if err != nil {
		restServerError(w, err)
		return
	}

	hosts, err := client.FindHostsInPool(poolID)
	if err != nil {
		glog.Errorf("Could not get hosts: %v", err)
		restServerError(w, err)
		return
	}
	for _, host := range hosts {
		ph := dao.PoolHost{
			HostID: host.ID,
			PoolID: poolID,
			HostIP: host.IPAddr,
		}
		poolHosts = append(poolHosts, &ph)
	}
	glog.V(2).Infof("Returning %d hosts for pool %s", len(poolHosts), poolID)
	w.WriteJson(&poolHosts)
}
func evalSnapshot(r *runner, n node) error {
	glog.V(0).Info("performing snapshot")

	if r.snapshot == nil {
		return fmt.Errorf("no snapshot function provided for %s", SNAPSHOT)
	}
	if r.restore == nil {
		return fmt.Errorf("no restore function provided for %s", SNAPSHOT)
	}

	tID, found := r.env["TENANT_ID"]
	if !found {
		return fmt.Errorf("no service tenant id specified for %s", SNAPSHOT)
	}

	mySnapshotID, err := r.snapshot(tID)
	if err != nil {
		return err
	}
	r.snapshotID = mySnapshotID //keep track of the latest snapshot to roll back to
	glog.V(0).Infof("snapshot id: %s", mySnapshotID)

	exitFunc := func(failed bool) {
		if failed && r.snapshotID == mySnapshotID {
			glog.Infof("restoring snapshot %s", mySnapshotID)
			if err := r.restore(mySnapshotID, true); err != nil {
				glog.Errorf("failed restoring snapshot %s: %v", mySnapshotID, err)
			}
		}
	}
	r.addExitFunction(exitFunc)

	return nil
}
func (mux *TCPMux) acceptor(listener net.Listener, closing chan chan struct{}) {
	defer func() {
		close(mux.connections)
	}()
	for {
		conn, err := listener.Accept() // accept on the listener passed in
		if err != nil {
			if strings.Contains(err.Error(), "too many open files") {
				glog.Warningf("error accepting connections, retrying in 50 ms: %s", err)
				select {
				case <-closing:
					glog.V(5).Info("shutting down acceptor")
					return
				case <-time.After(time.Millisecond * 50):
					continue
				}
			}
			glog.Errorf("shutting down acceptor: %s", err)
			return
		}
		glog.V(5).Infof("accepted connection: %v", conn)
		select {
		case <-closing:
			glog.V(5).Info("shutting down acceptor")
			conn.Close()
			return
		case mux.connections <- conn:
		}
	}
}
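// Dispatch sketch (hypothetical): the acceptor hands connections off on
// mux.connections and closes that channel on shutdown, so a dispatcher can
// range over it until it drains. mux.handleConnection is an assumed
// handler, not from the source.
func (mux *TCPMux) dispatch() {
	for conn := range mux.connections {
		go mux.handleConnection(conn) // one goroutine per accepted connection
	}
}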