func (r *runner) evalNodes(nodes []node, stop <-chan struct{}) error {
	failed := true
	defer func() {
		glog.Infof("Executing exit functions")
		for _, ef := range r.exitFunctions {
			ef(failed)
		}
	}()

	for i, n := range nodes {
		if f, found := cmdEval[n.cmd]; found {
			glog.Infof("executing step %d: %s", i, n.line)
			if err := f(r, n); err != nil {
				glog.Errorf("error executing step %d: %s: %s", i, n.cmd, err)
				return err
			}
		} else {
			glog.Infof("skipping step %d unknown function: %s", i, n.line)
		}
		select {
		case <-stop:
			glog.Infof("Received signal, stopping script evaluation")
			return fmt.Errorf("received stop signal, error executing step %d: %s", i, n.cmd)
		default:
		}
	}
	failed = false
	return nil
}
func (d *daemon) stopISVCS() {
	glog.Infof("Shutting down isvcs")
	if err := isvcs.Mgr.Stop(); err != nil {
		glog.Errorf("Error while stopping isvcs: %s", err)
	}
	glog.Infof("isvcs shut down")
}
func (c *Controller) kickOffHealthChecks(healthExit chan struct{}) {
	client, err := node.NewLBClient(c.options.ServicedEndpoint)
	if err != nil {
		glog.Errorf("Could not create a client to endpoint: %s, %s", c.options.ServicedEndpoint, err)
		return
	}
	defer client.Close()

	var healthChecks map[string]domain.HealthCheck
	instanceID, err := strconv.Atoi(c.options.Service.InstanceID)
	if err != nil {
		glog.Errorf("Invalid instance from instanceID:%s", c.options.Service.InstanceID)
		return
	}
	err = client.GetHealthCheck(node.HealthCheckRequest{c.options.Service.ID, instanceID}, &healthChecks)
	if err != nil {
		glog.Errorf("Error getting health checks: %s", err)
		return
	}
	for key, mapping := range healthChecks {
		glog.Infof("Kicking off health check %s.", key)
		glog.Infof("Setting up health check: %s", mapping.Script)
		timeout := mapping.Timeout
		if timeout == 0 {
			timeout = time.Second * 30
		}
		go c.handleHealthCheck(key, mapping.Script, mapping.Interval, timeout, healthExit)
	}
}
// UpdateRemoteMonitorFile is used by remote clients to write a tiny file to the
// DFS volume at the given write interval
func UpdateRemoteMonitorFile(localPath string, writeInterval time.Duration, ipAddr string, shutdown <-chan interface{}) {
	monitorPath := path.Join(localPath, monitorSubDir)
	remoteFile := path.Join(localPath, monitorSubDir, ipAddr)
	glog.Infof("updating DFS volume monitor file %s at write interval: %s", remoteFile, writeInterval)

	for {
		glog.V(2).Infof("checking DFS monitor path %s", monitorPath)
		_, err := os.Stat(monitorPath)
		if err != nil {
			glog.V(2).Infof("unable to stat DFS monitor path: %s %s", monitorPath, err)
			if err := os.MkdirAll(monitorPath, 0755); err != nil {
				glog.Warningf("unable to create DFS volume monitor path %s: %s", monitorPath, err)
			} else {
				glog.Infof("created DFS volume monitor path %s", monitorPath)
			}
		}

		glog.V(2).Infof("writing DFS file %s", remoteFile)
		if err := ioutil.WriteFile(remoteFile, []byte(ipAddr), 0600); err != nil {
			glog.Warningf("unable to write DFS file %s: %s", remoteFile, err)
		}

		// wait for next cycle or shutdown
		select {
		case <-time.After(writeInterval):
		case <-shutdown:
			glog.Infof("no longer writing remote monitor status for DFS volume %s to %s", localPath, remoteFile)
			return
		}
	}
}
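// A minimal usage sketch (paths and addresses here are illustrative, not from
// this file): a remote host would typically run the monitor writer in its own
// goroutine and close the shutdown channel when the agent stops.
//
//	shutdown := make(chan interface{})
//	go UpdateRemoteMonitorFile("/opt/serviced/var/volumes/tenant", 30*time.Second, "10.0.0.12", shutdown)
//	// ... later, on agent shutdown:
//	close(shutdown)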
func (viface *vif) redirectCommand(from, to, protocol string) error {
	glog.Infof("Trying to set up redirect %s:%s->:%s %s", viface.hostname, from, to, protocol)
	for _, chain := range []string{"OUTPUT", "PREROUTING"} {
		command := []string{
			"iptables",
			"-t", "nat",
			"-A", chain,
			"-d", viface.ip,
			"-p", protocol,
			"--dport", from,
			"-j", "REDIRECT",
			"--to-ports", to,
		}
		c := exec.Command(command[0], command[1:]...)
		c.Stdout = os.Stdout
		c.Stderr = os.Stdout
		if err := c.Run(); err != nil {
			glog.Errorf("Unable to set up redirect %s:%s->:%s %s command:%+v", viface.hostname, from, to, protocol, command)
			return err
		}
	}

	glog.Infof("AddToEtcHosts(%s, %s)", viface.hostname, viface.ip)
	err := node.AddToEtcHosts(viface.hostname, viface.ip)
	if err != nil {
		glog.Errorf("Unable to add %s %s to /etc/hosts", viface.ip, viface.hostname)
		return err
	}
	return nil
}
func (f *Facade) RestoreIPs(ctx datastore.Context, svc service.Service) error {
	for _, ep := range svc.Endpoints {
		if ep.AddressAssignment.IPAddr != "" {
			if assign, err := f.FindAssignmentByServiceEndpoint(ctx, svc.ID, ep.Name); err != nil {
				glog.Errorf("Could not look up address assignment %s for service %s (%s): %s", ep.Name, svc.Name, svc.ID, err)
				return err
			} else if assign == nil || !assign.EqualIP(ep.AddressAssignment) {
				ip, err := f.getManualAssignment(ctx, svc.PoolID, ep.AddressAssignment.IPAddr, ep.AddressConfig.Port)
				if err != nil {
					glog.Warningf("Could not assign ip (%s) to endpoint %s for service %s (%s): %s", ep.AddressAssignment.IPAddr, ep.Name, svc.Name, svc.ID, err)
					continue
				}
				assign = &addressassignment.AddressAssignment{
					AssignmentType: ip.Type,
					HostID:         ip.HostID,
					PoolID:         svc.PoolID,
					IPAddr:         ip.IP,
					Port:           ep.AddressConfig.Port,
					ServiceID:      svc.ID,
					EndpointName:   ep.Name,
				}
				if _, err := f.assign(ctx, *assign); err != nil {
					glog.Errorf("Could not restore address assignment for %s of service %s at %s:%d: %s", assign.EndpointName, assign.ServiceID, assign.IPAddr, assign.Port, err)
					return err
				}
				glog.Infof("Restored address assignment for endpoint %s of service %s at %s:%d", assign.EndpointName, assign.ServiceID, assign.IPAddr, assign.Port)
			} else {
				glog.Infof("Endpoint %s for service %s (%s) already assigned; skipping", ep.Name, svc.Name, svc.ID)
			}
		}
	}
	return nil
}
func (s *S) TestEvaluateEndpointTemplate(t *C) {
	err := createSvcs(s.store, s.ctx)
	t.Assert(err, IsNil)

	for _, testcase := range endpoint_testcases {
		if len(testcase.service.Endpoints) > 0 {
			glog.Infof("Service.Endpoint[0].Application: %s", testcase.service.Endpoints[0].Application)
			oldApp := testcase.service.Endpoints[0].Application
			err = testcase.service.EvaluateEndpointTemplates(s.getSVC, s.findChild)
			glog.Infof("Service.Endpoint[0].Application: %s, error=%s", testcase.service.Endpoints[0].Application, err)

			result := testcase.service.Endpoints[0].Application
			if result != testcase.expected {
				t.Errorf("Expecting \"%s\" got \"%s\"\n", testcase.expected, result)
			}
			if testcase.service.Endpoints[0].ApplicationTemplate != oldApp {
				t.Errorf("Expecting \"%s\" got \"%s\"\n", oldApp, testcase.service.Endpoints[0].ApplicationTemplate)
			}

			glog.Infof("Evaluate ServiceEndpoints a second time")
			err = testcase.service.EvaluateEndpointTemplates(s.getSVC, s.findChild)
			result = testcase.service.Endpoints[0].Application
			if result != testcase.expected {
				t.Errorf("Expecting \"%s\" got \"%s\"\n", testcase.expected, result)
			}
			if testcase.service.Endpoints[0].ApplicationTemplate != oldApp {
				t.Errorf("Expecting \"%s\" got \"%s\"\n", oldApp, testcase.service.Endpoints[0].ApplicationTemplate)
			}
		}
	}
}
func start(shutdown <-chan interface{}, conn client.Connection, listeners ...Listener) {
	var count int
	done := make(chan int)
	defer func() {
		glog.Infof("Shutting down %d child listeners", len(listeners))
		for count > 0 {
			count -= <-done
		}
	}()

	_shutdown := make(chan interface{})
	defer close(_shutdown)
	for i := range listeners {
		count++
		go func(l Listener) {
			defer func() { done <- 1 }()
			Listen(_shutdown, make(chan error, 1), conn, l)
			glog.Infof("Listener at %s exited", l.GetPath())
		}(listeners[i])
	}

	select {
	case i := <-done:
		glog.Warningf("Listener exited prematurely, stopping all listeners")
		count -= i
	case <-shutdown:
		glog.Infof("Received signal to shutdown")
	}
}
func removeHostIPs(hostId string) {
	hostIPs := dao.HostIPs{}
	err := controlPlaneDao.GetHostIPs(hostId, &hostIPs)
	glog.Infof("Getting HostIPs id: %v: %v, %v", hostIPs.Id, hostIPs, err)
	if err == nil && hostIPs.Id != "" {
		x, err := deleteHostIPs(hostIPs.Id)
		glog.Infof("Deleting HostIPs %v: %v: %v", hostIPs, err, x)
	}
}
//func TestPutGetDelete(t *testing.T) {
func (s *S) TestPutGetDelete(t *C) {
	esdriver := s.Driver()
	//	driver, err := getConnection()
	//	if err != nil {
	//		t.Fatalf("Error initializing driver: %v", err)
	//	}
	conn, err := esdriver.GetConnection()
	if err != nil {
		t.Fatalf("Error getting connection: %v", err)
	}

	k := datastore.NewKey("tweet", "1")
	tweet := map[string]string{
		"user":      "kimchy",
		"post_date": "2009-11-15T14:12:12",
		"message":   "trying out Elasticsearch",
	}
	tweetJSON, err := json.Marshal(tweet)

	err = conn.Put(k, datastore.NewJSONMessage(tweetJSON, 0))
	if err != nil {
		t.Errorf("%v", err)
	}

	//Get tweet
	raw, err := conn.Get(k)
	if err != nil {
		t.Fatalf("Unexpected: %v", err)
	}
	glog.Infof("raw is %v", string(raw.Bytes()))
	var tweetMap map[string]string
	json.Unmarshal(raw.Bytes(), &tweetMap)
	glog.Infof("tweet is %v", tweetMap)

	if tweetMap["user"] != "kimchy" {
		t.Errorf("Expected kimchy, found %s", tweetMap["user"])
	}

	//Delete tweet
	err = conn.Delete(k)
	if err != nil {
		t.Errorf("Unexpected delete error: %v", err)
	}

	//test not found
	raw, err = conn.Get(k)
	if raw != nil {
		t.Errorf("Expected nil return;")
	}
	if err == nil {
		t.Error("Expected error, not nil")
	} else if !datastore.IsErrNoSuchEntity(err) {
		glog.Infof("type is %s", reflect.ValueOf(err))
		t.Fatalf("Unexpected: %v", err)
	}
}
// Rollback rolls back the volume to the given snapshot
func (c *BtrfsConn) Rollback(label string) error {
	if exists, err := c.snapshotExists(label); err != nil || !exists {
		if err != nil {
			return err
		}
		return fmt.Errorf("snapshot %s does not exist", label)
	}

	c.Lock()
	defer c.Unlock()

	vd := path.Join(c.root, c.name)
	dirp, err := volume.IsDir(vd)
	if err != nil {
		return err
	}

	glog.Infof("starting rollback of snapshot %s", label)
	start := time.Now()
	if dirp {
		timeout := getEnvMinDuration("SERVICED_BTRFS_ROLLBACK_TIMEOUT", 300, 120)
		glog.Infof("rollback using env var SERVICED_BTRFS_ROLLBACK_TIMEOUT:%s", timeout)

		for {
			cmd := []string{"subvolume", "delete", vd}
			output, deleteError := runcmd(c.sudoer, cmd...)
			if deleteError == nil {
				break
			}

			now := time.Now()
			if now.Sub(start) > timeout {
				glog.Errorf("rollback of snapshot %s failed - btrfs subvolume deletes took %s for cmd:%s", label, timeout, cmd)
				return deleteError
			} else if strings.Contains(string(output), "Device or resource busy") {
				waitTime := time.Duration(5 * time.Second)
				glog.Warningf("retrying rollback subvolume delete in %s - unable to run cmd:%s output:%s error:%s", waitTime, cmd, string(output), deleteError)
				time.Sleep(waitTime)
			} else {
				return deleteError
			}
		}
	}

	cmd := []string{"subvolume", "snapshot", c.SnapshotPath(label), vd}
	_, err = runcmd(c.sudoer, cmd...)
	if err != nil {
		glog.Errorf("rollback of snapshot %s failed for cmd:%s", label, cmd)
	} else {
		duration := time.Now().Sub(start)
		glog.Infof("rollback of snapshot %s took %s", label, duration)
	}
	return err
}
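// getEnvMinDuration is called above but not defined in this section. A minimal
// sketch of one plausible implementation, assuming the second and third
// arguments are the default and minimum values in seconds:
//
//	func getEnvMinDuration(name string, defaultSecs, minSecs int64) time.Duration {
//		secs := defaultSecs
//		if v := os.Getenv(name); v != "" {
//			if parsed, err := strconv.ParseInt(v, 10, 64); err != nil {
//				glog.Warningf("ignoring invalid %s=%s: %s", name, v, err)
//			} else {
//				secs = parsed
//			}
//		}
//		if secs < minSecs {
//			secs = minSecs // enforce the floor so rollbacks get a sane retry window
//		}
//		return time.Duration(secs) * time.Second
//	}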
// Snapshot performs a writable snapshot on the subvolume
func (c *RsyncConn) Snapshot(label string) (err error) {
	c.Lock()
	defer c.Unlock()
	dest := c.SnapshotPath(label)
	if exists, err := volume.IsDir(dest); exists || err != nil {
		if exists {
			return fmt.Errorf("snapshot %s already exists", label)
		}
		return err
	}

	exe, err := exec.LookPath("rsync")
	if err != nil {
		return err
	}
	argv := []string{"-a", c.Path() + "/", dest + "/"}
	glog.Infof("Performing snapshot rsync command: %s %s", exe, argv)

	var output []byte
	for i := 0; i < 3; i++ {
		rsync := exec.Command(exe, argv...)
		done := make(chan interface{})
		go func() {
			defer close(done)
			output, err = rsync.CombinedOutput()
		}()

		select {
		case <-time.After(c.timeout):
			glog.V(2).Infof("Received signal to kill rsync")
			rsync.Process.Kill()
			<-done
		case <-done:
		}
		if err == nil {
			return nil
		}
		if exitStatus, ok := utils.GetExitStatus(err); !ok || exitStatus != 24 {
			glog.Errorf("Could not perform rsync: %s", string(output))
			return err
		}
		glog.Infof("trying snapshot again: %s", label)
	}

	if exitStatus, _ := utils.GetExitStatus(err); exitStatus == 24 {
		glog.Warningf("snapshot completed with errors: Partial transfer due to vanished source files")
		return nil
	}
	glog.Errorf("Could not perform rsync: %s", string(output))
	return err
}
func (a *HostAgent) UnbindVirtualIP(virtualIP *pool.VirtualIP) error {
	glog.Infof("Removing: %v", virtualIP.IP)

	binaryNetmask := net.IPMask(net.ParseIP(virtualIP.Netmask).To4())
	cidr, _ := binaryNetmask.Size()

	// sudo ip addr del 192.168.0.10/24 dev eth0
	if err := exec.Command("ip", "addr", "del", virtualIP.IP+"/"+strconv.Itoa(cidr), "dev", virtualIP.BindInterface).Run(); err != nil {
		return fmt.Errorf("Problem with removing virtual interface %+v: %v", virtualIP, err)
	}
	glog.Infof("Removed virtual interface: %+v", virtualIP)
	return nil
}
func (s *S) TestEvaluateStartupTemplate(t *C) {
	err := createSvcs(s.store, s.ctx)
	t.Assert(err, IsNil)
	for _, testcase := range startup_testcases {
		glog.Infof("Service.Startup before: %s", testcase.service.Startup)
		err = testcase.service.EvaluateStartupTemplate(s.getSVC, s.findChild, 0)
		t.Assert(err, IsNil)
		glog.Infof("Service.Startup after: %s, error=%s", testcase.service.Startup, err)
		result := testcase.service.Startup
		if result != testcase.expected {
			t.Errorf("Expecting \"%s\" got \"%s\"\n", testcase.expected, result)
		}
	}
}
func (d *NFSDriver) Mount(remotePath, localPath string, timeout time.Duration) error {
	glog.Infof("Mounting %s -> %s", remotePath, localPath)
	cmd := commandFactory("mount.nfs4", "-o", "intr", remotePath, localPath)
	errC := make(chan error, 1)
	go func() {
		output, err := cmd.CombinedOutput()
		glog.V(1).Infof("Mounting %s -> %s: %s (%s)", remotePath, localPath, string(output), err)
		if exitCode, ok := utils.GetExitStatus(err); exitCode == 32 || !ok {
			errC <- fmt.Errorf("%s (%s)", string(output), err)
		} else {
			errC <- nil
		}
	}()

	select {
	case <-time.After(timeout):
		err := fmt.Errorf("timeout waiting for nfs mount")
		if execCmd, ok := cmd.(*exec.Cmd); ok {
			execCmd.Process.Kill()
		}
		return err
	case err := <-errC:
		return err
	}
}
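// commandFactory is used above as a seam so tests can stub out command
// execution; it is not defined in this section. A minimal sketch of one
// plausible shape (the interface name and default implementation are
// assumptions; note the type assertion to *exec.Cmd above still works because
// the default factory returns one):
//
//	type command interface {
//		CombinedOutput() ([]byte, error)
//	}
//
//	var commandFactory = func(name string, args ...string) command {
//		return exec.Command(name, args...)
//	}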
func (svc *IService) setHealthStatus(result error, currentTime int64) {
	if len(svc.healthStatuses) == 0 {
		return
	}

	svc.lock.Lock()
	defer svc.lock.Unlock()
	if healthStatus, found := svc.healthStatuses[DEFAULT_HEALTHCHECK_NAME]; found {
		if result == nil {
			if healthStatus.Status != "passed" && healthStatus.Status != "unknown" {
				glog.Infof("Health status for %s returned to 'passed'", svc.Name)
			}
			healthStatus.Status = "passed"
			healthStatus.Failure = ""
		} else {
			healthStatus.Status = "failed"
			healthStatus.Failure = result.Error()
		}
		healthStatus.Timestamp = currentTime
		if healthStatus.StartedAt == 0 {
			healthStatus.StartedAt = currentTime
		}
	} else {
		glog.Errorf("isvc %s does not have the default health check %s", svc.Name, DEFAULT_HEALTHCHECK_NAME)
	}
}
func (svc *IService) doHealthChecks(halt <-chan struct{}) {
	if len(svc.HealthChecks) == 0 {
		return
	}

	var found bool
	var checkDefinition healthCheckDefinition
	if checkDefinition, found = svc.HealthChecks[DEFAULT_HEALTHCHECK_NAME]; !found {
		glog.Warningf("Default healthcheck %q not found for isvc %s", DEFAULT_HEALTHCHECK_NAME, svc.Name)
		return
	}

	timer := time.Tick(checkDefinition.Interval)
	for {
		select {
		case <-halt:
			glog.Infof("Stopped healthchecks for %s", svc.Name)
			return
		case currentTime := <-timer:
			err := svc.runCheckOrTimeout(checkDefinition)
			svc.setHealthStatus(err, currentTime.Unix())
			if err != nil {
				glog.Errorf("Healthcheck for isvc %s failed: %s", svc.Name, err)
			}
		}
	}
}
// startupHealthcheck runs the default healthcheck (if any) and returns the result.
// If the healthcheck fails, this method sleeps 1 second and then repeats the
// healthcheck, continuing that sleep/retry pattern until the healthcheck
// succeeds or 2 minutes have elapsed.
//
// An error is returned if no healthcheck succeeds in the 2 minute interval;
// otherwise nil is returned.
func (svc *IService) startupHealthcheck() <-chan error {
	err := make(chan error, 1)
	go func() {
		var result error
		if len(svc.HealthChecks) > 0 {
			checkDefinition, found := svc.HealthChecks[DEFAULT_HEALTHCHECK_NAME]
			if !found {
				glog.Warningf("Default healthcheck %q not found for isvc %s", DEFAULT_HEALTHCHECK_NAME, svc.Name)
				err <- nil
				return
			}

			startCheck := time.Now()
			for {
				currentTime := time.Now()
				result = svc.runCheckOrTimeout(checkDefinition)
				svc.setHealthStatus(result, currentTime.Unix())
				if result == nil || time.Since(startCheck).Seconds() > WAIT_FOR_INITIAL_HEALTHCHECK.Seconds() {
					break
				}

				glog.Infof("waiting for %s to start, checking health status again in 1 second", svc.Name)
				time.Sleep(time.Second)
			}
			err <- result
		} else {
			svc.setHealthStatus(nil, time.Now().Unix())
			err <- nil
		}
	}()
	return err
}
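// A caller would typically block on the returned channel before declaring the
// isvc started, e.g. (sketch, not from this file):
//
//	if err := <-svc.startupHealthcheck(); err != nil {
//		glog.Errorf("isvc %s failed its startup healthcheck: %s", svc.Name, err)
//		return err
//	}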
func restDeployAppTemplate(w *rest.ResponseWriter, r *rest.Request, client *node.ControlClient) {
	var payload dao.ServiceTemplateDeploymentRequest
	err := r.DecodeJsonPayload(&payload)
	if err != nil {
		glog.V(1).Info("Could not decode deployment payload: ", err)
		restBadRequest(w, err)
		return
	}
	var tenantID string
	err = client.DeployTemplate(payload, &tenantID)
	if err != nil {
		glog.Error("Could not deploy template: ", err)
		restServerError(w, err)
		return
	}
	glog.V(0).Info("Deployed template ", payload)

	assignmentRequest := dao.AssignmentRequest{tenantID, "", true}
	if err := client.AssignIPs(assignmentRequest, nil); err != nil {
		glog.Errorf("Could not automatically assign IPs: %v", err)
		return
	}
	glog.Infof("Automatically assigned IP addresses to service: %v", tenantID)
	// end of automatic IP assignment

	w.WriteJson(&simpleResponse{tenantID, servicesLinks()})
}
func (ed *elasticDriver) AddMappingsFile(path string) error {
	glog.Infof("AddMappingsFile %v", path)
	bytes, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	glog.V(4).Infof("AddMappingsFile: content %v", string(bytes))

	type mapFile struct {
		Mappings map[string]map[string]interface{}
		Settings map[string]interface{}
	}
	var allMappings mapFile
	err = json.Unmarshal(bytes, &allMappings)
	if err != nil {
		return err
	}
	for key, val := range allMappings.Settings {
		ed.settings[key] = val
	}
	for key, mapping := range allMappings.Mappings {
		var rawMapping = make(map[string]map[string]interface{})
		rawMapping[key] = mapping
		if value, err := newMapping(rawMapping); err != nil {
			glog.Errorf("%v; could not create mapping from: %v", err, rawMapping)
			return err
		} else {
			ed.AddMapping(value)
		}
	}
	return nil
}
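// Given the mapFile shape above, a mappings file would look something like
// this (index and field names are purely illustrative):
//
//	{
//	  "settings": { "number_of_shards": 1 },
//	  "mappings": {
//	    "host": {
//	      "properties": {
//	        "Name": { "type": "string" }
//	      }
//	    }
//	  }
//	}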
func Test_Build(t *testing.T) {
	ip, err := utils.GetIPAddress()
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	empty := make([]string, 0)
	host, err := Build("", "65535", "test_pool", empty...)
	glog.Infof("build error %v", err)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	if err = host.ValidEntity(); err != nil {
		t.Errorf("Validation failed %v", err)
	}
	if len(host.IPs) != 1 {
		t.Errorf("Unexpected result %v", host.IPs)
	}
	if host.IPAddr != ip {
		t.Errorf("Expected ip %v, got %v", ip, host.IPAddr)
	}
	if host.IPs[0].IPAddress != ip {
		t.Errorf("Expected ip %v, got %v", ip, host.IPs)
	}
}
func (dt *DaoTest) TestDao_VhostRegistrySet(t *C) {
	vr, err := registry.VHostRegistry(dt.zkConn)
	t.Assert(err, IsNil)

	// TODO: add tests for ephemeral nodes and remove vr.SetEphemeral(false)
	vr.SetEphemeral(false)

	vep := registry.VhostEndpoint{}
	vep.EndpointName = "epn_test"
	vep.ServiceID = "svc_id"
	vep.HostIP = "testip"
	path, err := vr.SetItem(dt.zkConn, "testKey", vep)
	t.Assert(err, IsNil)
	t.Assert(path, Not(Equals), 0)

	var newVep *registry.VhostEndpoint
	newVep, err = vr.GetItem(dt.zkConn, path)
	t.Assert(err, IsNil)
	t.Assert(vep, NotNil)
	//remove version for equals
	newVep.SetVersion(nil)
	t.Assert(vep, Equals, *newVep)

	//test double add
	glog.Infof("%+v", vep)
	path, err = vr.SetItem(dt.zkConn, "testKey", vep)
	t.Assert(err, IsNil)
}
// Snapshots returns the current snapshots on the volume
func (v *BtrfsVolume) Snapshots() (labels []string, err error) {
	labels = make([]string, 0)
	glog.Info("about to execute subvolume list command")
	if output, err := BtrfsCmd("subvolume", "list", "-apucr", v.baseDir).CombinedOutput(); err != nil {
		glog.Errorf("got an error with subvolume list: %s", string(output))
		return labels, err
	} else {
		glog.Infof("btrfs subvolume list, baseDir: %s", v.baseDir)
		prefixedName := v.name + "_"
		for _, line := range strings.Split(string(output), "\n") {
			glog.Infof("btrfs subvolume list: %s", line)
			fields := strings.Fields(line)
			for i, field := range fields {
				if field == "path" {
					fstree := fields[i+1]
					parts := strings.Split(fstree, "/")
					label := parts[len(parts)-1]
					if strings.HasPrefix(label, prefixedName) {
						labels = append(labels, label)
						break
					}
				}
			}
		}
	}
	return labels, err
}
// chownConfFile sets the owner and permissions for a file
func chownConfFile(filename, owner, permissions string) error {
	runCommand := func(exe, arg, filename string) error {
		command := exec.Command(exe, arg, filename)
		output, err := command.CombinedOutput()
		if err != nil {
			glog.Errorf("Error running command:'%v' output: %s error: %s\n", command, output, err)
			return err
		}
		glog.Infof("Successfully ran command:'%v' output: %s\n", command, output)
		return nil
	}

	if owner != "" {
		if err := runCommand("chown", owner, filename); err != nil {
			return err
		}
	}
	if permissions != "" {
		if err := runCommand("chmod", permissions, filename); err != nil {
			return err
		}
	}
	return nil
}
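// Example usage (hypothetical values): hand a config file to a service account
// and restrict it to owner read/write:
//
//	if err := chownConfFile("/etc/my.cnf", "mysql:mysql", "600"); err != nil {
//		return err
//	}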
// TestEvaluateActionsTemplate makes sure that the Actions templates can be
// parsed and evaluated correctly.
func (s *S) TestEvaluateActionsTemplate(t *C) {
	err := createSvcs(s.store, s.ctx)
	t.Assert(err, IsNil)
	for _, testcase := range startup_testcases {
		glog.Infof("Service.Actions before: %s", testcase.service.Actions)
		err = testcase.service.EvaluateActionsTemplate(s.getSVC, s.findChild, 0)
		glog.Infof("Service.Actions after: %v, error=%v", testcase.service.Actions, err)
		for key, result := range testcase.service.Actions {
			expected := fmt.Sprintf("%s %s", testcase.service.Name, key)
			if result != expected {
				t.Errorf("Expecting \"%s\" got \"%s\"\n", expected, result)
			}
			glog.Infof("Expecting \"%s\" got \"%s\"\n", expected, result)
		}
	}
}
func evalSvcRestart(r *runner, n node) error {
	if r.svcFromPath == nil {
		return fmt.Errorf("no service id lookup function for %s", SVC_RESTART)
	}
	if r.svcRestart == nil {
		return fmt.Errorf("no service restart function for %s", SVC_RESTART)
	}

	svcPath := n.args[0]
	tenantID, found := r.env["TENANT_ID"]
	if !found {
		return fmt.Errorf("no service tenant id specified for %s", SVC_RESTART)
	}

	svcID, err := r.svcFromPath(tenantID, svcPath)
	if err != nil {
		return err
	}
	if svcID == "" {
		return fmt.Errorf("no service id found for %s", svcPath)
	}

	recursive := false
	if len(n.args) > 1 {
		recursive = true
	}

	glog.Infof("restarting service %s %s", svcPath, svcID)
	if err := r.svcRestart(svcID, recursive); err != nil {
		return err
	}
	return nil
}
func (s *scheduler) startRemote(cancel <-chan struct{}, remote, local client.Connection) <-chan interface{} {
	var (
		shutdown = make(chan interface{})
		done     = make(chan interface{})
	)

	// wait to receive a cancel or a done signal and shutdown
	go func() {
		defer close(shutdown)
		select {
		case <-cancel:
		case <-done:
		}
	}()

	// start the listeners and wait for shutdown or for something to break
	go func() {
		defer close(done)
		glog.Infof("Remote connection established; synchronizing")
		zzk.Start(shutdown, remote, nil, s.getPoolSynchronizer(), s.getEndpointSynchronizer(local))
		glog.Warningf("Running in disconnected mode")
	}()

	// indicate when the listeners are finished
	return done
}
func evalSnapshot(r *runner, n node) error {
	glog.V(0).Info("performing snapshot")

	if r.snapshot == nil {
		return fmt.Errorf("no snapshot function provided for %s", SNAPSHOT)
	}
	if r.restore == nil {
		return fmt.Errorf("no restore function provided for %s", SNAPSHOT)
	}

	tID, found := r.env["TENANT_ID"]
	if !found {
		return fmt.Errorf("no service tenant id specified for %s", SNAPSHOT)
	}

	mySnapshotID, err := r.snapshot(tID)
	if err != nil {
		return err
	}
	r.snapshotID = mySnapshotID // keep track of the latest snapshot to roll back to
	glog.V(0).Infof("snapshot id: %s", mySnapshotID)

	exitFunc := func(failed bool) {
		if failed && r.snapshotID == mySnapshotID {
			glog.Infof("restoring snapshot %s", mySnapshotID)
			if err := r.restore(mySnapshotID, true); err != nil {
				glog.Errorf("failed restoring snapshot %s: %v", mySnapshotID, err)
			}
		}
	}
	r.addExitFunction(exitFunc)
	return nil
}
// loop maintains the state of the container; it handles requests to start() &
// stop() containers as well as detecting container failures.
func (c *Container) loop() {
	var exitChan chan error
	var cmd *exec.Cmd

	for {
		select {
		case req := <-c.ops:
			switch req.op {
			case containerOpStop:
				glog.Infof("containerOpStop(): %s", c.Name)
				if exitChan == nil {
					req.response <- ErrNotRunning
					continue
				}
				oldCmd := cmd
				cmd = nil
				exitChan = nil        // setting exitChan to nil disables reading from it in the select
				oldCmd.Process.Kill() // kill the docker run() wrapper
				c.stop()              // stop the container if it's not already stopped
				c.rm()                // remove the container if it's not already gone
				req.response <- nil
			case containerOpStart:
				glog.Infof("containerOpStart(): %s", c.Name)
				if cmd != nil {
					req.response <- ErrRunning
					continue
				}
				c.stop()                // stop the container, if it's not stopped
				c.rm()                  // remove it if it was not already removed
				cmd, exitChan = c.run() // run the actual container
				if c.HealthCheck != nil {
					req.response <- c.HealthCheck() // run the HealthCheck if it exists
				} else {
					req.response <- nil
				}
			}
		case exitErr := <-exitChan:
			glog.Errorf("Unexpected failure of %s, got %s", c.Name, exitErr)
			c.stop()                // stop the container, if it's not stopped
			c.rm()                  // remove it if it was not already removed
			cmd, exitChan = c.run() // run the actual container
		}
	}
}
// ServiceUse will tag a new image (imageName) in a given registry for a given
// tenant as latest, making sure to push changes to the registry.
func ServiceUse(serviceID string, imageName string, registry string, noOp bool) (string, error) {
	// If noOp is true, replace the 'real' functions that talk to Docker with
	// no-op functions (for dry-run purposes)
	pullImage := PullImage
	findImage := FindImage
	tagImage := TagImage
	if noOp {
		pullImage = noOpPullImage
		findImage = noOpFindImage
		tagImage = noOpTagImage
	}

	// imageName is the new image to pull, e.g. "zenoss/resmgr-unstable:1.2.3.4"
	glog.V(0).Infof("preparing to use image: %s", imageName)
	imageID, err := commons.ParseImageID(imageName)
	if err != nil {
		return "", err
	}
	if imageID.Tag == "" {
		imageID.Tag = "latest"
	}
	glog.Infof("pulling image %s, this may take a while...", imageID)
	if err := pullImage(imageID.String()); err != nil {
		glog.Warningf("unable to pull image %s", imageID)
	}

	// verify the image has been pulled
	img, err := findImage(imageID.String(), false)
	if err != nil {
		err = fmt.Errorf("could not look up image %s: %s. Check your docker login and retry service deployment.", imageID, err)
		return "", err
	}

	// tag the image as latest in the registry
	var newTag *commons.ImageID
	newTag, err = commons.RenameImageID(registry, serviceID, imageID.String(), "latest")
	if err != nil {
		return "", err
	}
	glog.Infof("tagging image %s to %s", imageName, newTag)
	if _, err = tagImage(img, newTag.String()); err != nil {
		glog.Errorf("could not tag image: %s (%v)", imageName, err)
		return "", err
	}
	return newTag.String(), nil
}
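// Example call (all values are illustrative, not from this file): tag a newly
// pulled image as latest for a tenant in a local registry, with noOp disabled:
//
//	tag, err := ServiceUse("tenant-id", "zenoss/resmgr-unstable:1.2.3.4", "localhost:5000", false)
//	if err != nil {
//		return err
//	}
//	glog.Infof("service now uses %s", tag)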