func (j *PatchEnvironmentRequest) Execute(resp jobs.Response) {
	if err := j.Write(true); err != nil {
		resp.Failure(ErrEnvironmentUpdateFailed)
		return
	}
	resp.Success(jobs.ResponseOk)
}
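// Execute lists the container units known to systemd, skipping units whose
// definitions are no longer loadable ("not-found" or "masked"), and returns
// them sorted as structured data.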
func (j *ListContainersRequest) Execute(resp jobs.Response) {
	r := &ListContainersResponse{make(ContainerUnitResponses, 0)}

	if err := unitsMatching(reContainerUnits, func(name string, unit *dbus.UnitStatus) {
		if unit.LoadState == "not-found" || unit.LoadState == "masked" {
			return
		}
		r.Containers = append(r.Containers, ContainerUnitResponse{
			UnitResponse{
				name,
				unit.ActiveState,
				unit.SubState,
			},
			unit.LoadState,
			unit.JobType,
			"",
		})
	}); err != nil {
		log.Printf("list_units: Unable to list units from systemd: %v", err)
		resp.Failure(ErrListContainersFailed)
		return
	}

	r.Sort()
	resp.SuccessWithData(jobs.ResponseOk, r)
}
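// Execute creates each requested key and applies the requested permissions to
// it. Individual failures are collected rather than aborting the whole job, so
// a partial success still reports every key that could not be completed.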
func (j *CreateKeysRequest) Execute(resp jobs.Response) {
	failedKeys := []KeyFailure{}
	for i := range j.Keys {
		key := j.Keys[i]
		locator, err := key.Create()
		if err != nil {
			failedKeys = append(failedKeys, KeyFailure{i, &key, err})
			continue
		}
		for k := range j.Permissions {
			if err := j.Permissions[k].Create(locator); err != nil {
				// record the failure but keep attempting the remaining permissions
				failedKeys = append(failedKeys, KeyFailure{i, &key, err})
				continue
			}
		}
	}

	if len(failedKeys) > 0 {
		data := make([]KeyStructuredFailure, len(failedKeys))
		for i := range failedKeys {
			data[i] = KeyStructuredFailure{failedKeys[i].Index, failedKeys[i].Reason.Error()}
			log.Printf("Failure %d: %+v", failedKeys[i].Index, failedKeys[i].Reason)
		}
		resp.Failure(jobs.StructuredJobError{jobs.SimpleError{jobs.ResponseError, "Not all keys were completed"}, data})
	} else {
		resp.Success(jobs.ResponseOk)
	}
}
func (j AddAliasRequest) Execute(resp jobs.Response) {
	value, ok := router.GlobalRoutes[j.Frontend]
	if ok {
		router.AddAlias(j.Alias, value.Name)
	} else {
		router.CreateFrontend(j.Frontend, j.Alias)
	}
	resp.Success(jobs.ResponseOk)
}
func (j CreateFrontendRequest) Execute(resp jobs.Response) {
	value, ok := router.GlobalRoutes[j.Frontend]
	if !ok {
		router.CreateFrontend(j.Frontend, j.Alias)
	} else {
		log.Printf("Error: Frontend %s already exists.", value.Name)
	}
	resp.Success(jobs.ResponseOk)
}
func (j *LinkContainersRequest) Execute(resp jobs.Response) {
	for i := range j.Links {
		if errw := j.Links[i].NetworkLinks.Write(j.Links[i].Id.NetworkLinksPathFor(), false); errw != nil {
			resp.Failure(ErrLinkContainersFailed)
			return
		}
	}
	resp.Success(jobs.ResponseOk)
}
func (j *ContainerPortsRequest) Execute(resp jobs.Response) {
	portPairs, err := containers.GetExistingPorts(j.Id)
	if err != nil {
		log.Printf("job_container_ports_log: Unable to find unit: %s\n", err.Error())
		resp.Failure(ErrContainerNotFound)
		return
	}

	resp.SuccessWithData(jobs.ResponseAccepted, ContainerPortsResponse{portPairs})
}
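// Execute streams journal logs for the container unit to the client. The
// 30-second timer bounds how long the journal is followed before the stream
// is closed.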
func (j *ContainerLogRequest) Execute(resp jobs.Response) {
	if _, err := os.Stat(j.Id.UnitPathFor()); err != nil {
		resp.Failure(ErrContainerNotFound)
		return
	}

	w := resp.SuccessWithWrite(jobs.ResponseOk, true, false)
	err := systemd.WriteLogsTo(w, j.Id.UnitNameFor(), 30, time.After(30*time.Second))
	if err != nil {
		log.Printf("job_container_log: Unable to fetch journal logs: %s\n", err.Error())
	}
}
func (j GetRoutesRequest) Execute(resp jobs.Response) {
	// router.ReadRoutes()
	var out string
	if j.Frontend == "*" {
		out = router.PrintRoutes()
	} else {
		out = router.PrintFrontendRoutes(j.Frontend)
	}
	fmt.Println(out)
	resp.SuccessWithData(jobs.ResponseOk, &out)
	// fmt.Fprintf(w, out)
}
func (j *ContainerStatusRequest) Execute(resp jobs.Response) {
	if _, err := os.Stat(j.Id.UnitPathFor()); err != nil {
		//log.Printf("container_status: Can't stat unit: %v", err)
		resp.Failure(ErrContainerNotFound)
		return
	}
	w := resp.SuccessWithWrite(jobs.ResponseOk, true, false)
	err := systemd.WriteStatusTo(w, j.Id.UnitNameFor())
	if err != nil {
		log.Printf("container_status: Unable to fetch container status logs: %s\n", err.Error())
	}
}
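// Execute lists the build units known to systemd as sorted structured data.
// Note that a listing failure currently reuses ErrListContainersFailed rather
// than a build-specific error.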
func (j *ListBuildsRequest) Execute(resp jobs.Response) {
	r := ListBuildsResponse{make(UnitResponses, 0)}

	if err := unitsMatching(reBuildUnits, func(name string, unit *dbus.UnitStatus) {
		r.Builds = append(r.Builds, UnitResponse{
			name,
			unit.ActiveState,
			unit.SubState,
		})
	}); err != nil {
		log.Printf("list_units: Unable to list units from systemd: %v", err)
		resp.Failure(ErrListContainersFailed)
		return
	}

	sort.Sort(r.Builds)
	resp.SuccessWithData(jobs.ResponseOk, r)
}
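// Execute removes frontends that no longer have a backend and persists the
// remaining backends to disk; any backend write failure fails the whole
// request.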
func (j UpdateFrontendRequest) Execute(resp jobs.Response) {
	// detach empty frontends
	for _, frontend := range j.Frontends {
		if frontend.BackendId == "" {
			frontend.Remove()
		}
	}

	errs := []backendError{}
	for _, backend := range j.Backends {
		if err := utils.WriteToPathExclusive(backend.Id.BackendPathFor(), 0554, backend); err != nil {
			errs = append(errs, backendError{backend.Id, err})
		}
	}
	if len(errs) != 0 {
		log.Printf("Unable to persist some backends: %+v", errs)
		resp.Failure(ErrBackendWriteFailed)
		return
	}
	resp.Success(jobs.ResponseOk)
}
func (j *PutEnvironmentRequest) Execute(resp jobs.Response) {
	if err := j.Fetch(100 * 1024); err != nil {
		resp.Failure(ErrEnvironmentUpdateFailed)
		return
	}
	if err := j.Write(false); err != nil {
		resp.Failure(ErrEnvironmentUpdateFailed)
		return
	}
	resp.Success(jobs.ResponseOk)
}
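// Execute streams stored content to the client. Only environment files are
// currently handled; unrecognized content types fall through without writing
// a response.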
func (j *ContentRequest) Execute(resp jobs.Response) {
	switch j.Type {
	case ContentTypeEnvironment:
		id, errr := containers.NewIdentifier(j.Locator)
		if errr != nil {
			resp.Failure(jobs.SimpleError{jobs.ResponseInvalidRequest, fmt.Sprintf("Invalid environment identifier: %s", errr.Error())})
			return
		}
		file, erro := os.Open(id.EnvironmentPathFor())
		if erro != nil {
			resp.Failure(ErrEnvironmentNotFound)
			return
		}
		defer file.Close()
		w := resp.SuccessWithWrite(jobs.ResponseOk, false, false)
		if _, err := io.Copy(w, file); err != nil {
			log.Printf("job_content: Unable to write environment file: %+v", err)
			return
		}
	}
}
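// Execute connects to the Docker daemon over the socket named in the request
// and writes the first repo tag of each image to the response stream.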
func (j *ListImagesRequest) Execute(resp jobs.Response) {
	// TODO: config item for docker port
	dockerClient, err := docker.NewClient(j.DockerSocket)
	if err != nil {
		log.Printf("job_list_images: Couldn't connect to docker: %+v", err)
		resp.Failure(ErrListImagesFailed)
		return
	}
	imgs, err := dockerClient.ListImages(false)
	if err != nil {
		log.Printf("job_list_images: Couldn't list images: %+v", err)
		resp.Failure(ErrListImagesFailed)
		return
	}
	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
	for _, img := range imgs {
		// an image may have no repo tags; skip it rather than index past the end
		if len(img.RepoTags) == 0 {
			continue
		}
		fmt.Fprintf(w, "%+v\n", img.RepoTags[0])
	}
}
func (j DeleteFrontendRequest) Execute(resp jobs.Response) {
	router.DeleteFrontend(j.Frontend)
	resp.Success(jobs.ResponseOk)
}
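// Execute writes a new versioned systemd unit definition for the container,
// reserving external ports and persisting the environment and network links
// before atomically swapping the new definition into place. The base unit
// path is held under an exclusive lock for the duration so concurrent
// installs of the same container cannot interleave.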
func (req *InstallContainerRequest) Execute(resp jobs.Response) {
	id := req.Id
	unitName := id.UnitNameFor()
	unitPath := id.UnitPathFor()
	unitVersionPath := id.VersionedUnitPathFor(req.RequestIdentifier.String())

	socketUnitName := id.SocketUnitNameFor()
	socketUnitPath := id.SocketUnitPathFor()
	var socketActivationType string
	if req.SocketActivation {
		socketActivationType = "enabled"
		if !req.SkipSocketProxy {
			socketActivationType = "proxied"
		}
	}

	// attempt to download the environment if it is remote
	env := req.Environment
	if env != nil {
		if err := env.Fetch(100 * 1024); err != nil {
			resp.Failure(ErrContainerCreateFailed)
			return
		}
		if env.Empty() {
			env = nil
		}
	}

	// open and lock the base path (to prevent simultaneous updates)
	state, exists, err := utils.OpenFileExclusive(unitPath, 0664)
	if err != nil {
		log.Print("install_container: Unable to lock unit file: ", err)
		resp.Failure(ErrContainerCreateFailed)
		return
	}
	defer state.Close()

	// write a new file to disk that describes the new service
	unit, err := utils.CreateFileExclusive(unitVersionPath, 0664)
	if err != nil {
		log.Print("install_container: Unable to open unit file definition: ", err)
		resp.Failure(ErrContainerCreateFailed)
		return
	}
	defer unit.Close()

	// if this is an existing container, read the currently reserved ports
	existingPorts := port.PortPairs{}
	if exists {
		existingPorts, err = containers.GetExistingPorts(id)
		if err != nil {
			if _, ok := err.(*os.PathError); !ok {
				log.Print("install_container: Unable to read existing ports from file: ", err)
				resp.Failure(ErrContainerCreateFailed)
				return
			}
		}
	}

	// allocate and reserve ports for this container
	reserved, erra := port.AtomicReserveExternalPorts(unitVersionPath, req.Ports, existingPorts)
	if erra != nil {
		log.Printf("install_container: Unable to reserve external ports: %+v", erra)
		resp.Failure(ErrContainerCreateFailedPortsReserved)
		return
	}
	if len(reserved) > 0 {
		resp.WritePendingSuccess(PendingPortMappingName, reserved)
	}

	var portSpec string
	if req.Simple && len(reserved) == 0 {
		portSpec = "-P"
	} else {
		portSpec = dockerPortSpec(reserved)
	}

	// write the environment to disk
	var environmentPath string
	if env != nil {
		if errw := env.Write(false); errw != nil {
			resp.Failure(ErrContainerCreateFailed)
			return
		}
		environmentPath = env.Id.EnvironmentPathFor()
	}

	// write the network links (if any) to disk
	if req.NetworkLinks != nil {
		if errw := req.NetworkLinks.Write(id.NetworkLinksPathFor(), false); errw != nil {
			resp.Failure(ErrContainerCreateFailed)
			return
		}
	}

	slice := "container-small"

	// write the definition unit file
	args := csystemd.ContainerUnit{
		Id:       id,
		Image:    req.Image,
		PortSpec: portSpec,
		Slice:    slice + ".slice",
		Isolate:  req.Isolate,

		ReqId: req.RequestIdentifier.String(),

		HomeDir:         id.HomePath(),
		RunDir:          id.RunPathFor(),
		EnvironmentPath: environmentPath,
		ExecutablePath:  filepath.Join("/", "usr", "bin", "gear"),
		IncludePath:     "",

		PortPairs:            reserved,
		SocketUnitName:       socketUnitName,
		SocketActivationType: socketActivationType,

		DockerFeatures: config.SystemDockerFeatures,
	}

	var templateName string
	switch {
	case req.SocketActivation:
		templateName = "SOCKETACTIVATED"
	case config.SystemDockerFeatures.ForegroundRun:
		templateName = "FOREGROUND"
	default:
		templateName = "SIMPLE"
	}

	if erre := csystemd.ContainerUnitTemplate.ExecuteTemplate(unit, templateName, args); erre != nil {
		log.Printf("install_container: Unable to output template: %+v", erre)
		resp.Failure(ErrContainerCreateFailed)
		defer os.Remove(unitVersionPath)
		return
	}
	if err := unit.Close(); err != nil {
		log.Printf("install_container: Unable to finish writing unit: %+v", err)
		resp.Failure(ErrContainerCreateFailed)
		defer os.Remove(unitVersionPath)
		return
	}

	// swap the new definition with the old one
	if err := utils.AtomicReplaceLink(unitVersionPath, unitPath); err != nil {
		log.Printf("install_container: Failed to activate new unit: %+v", err)
		resp.Failure(ErrContainerCreateFailed)
		return
	}
	state.Close()

	// write whether this container should be started on next boot
	if req.Started {
		if errs := csystemd.SetUnitStartOnBoot(id, true); errs != nil {
			log.Print("install_container: Unable to write container boot link: ", errs)
			resp.Failure(ErrContainerCreateFailed)
			return
		}
	}

	// Generate the socket file and ignore failures
	paths := []string{unitPath}
	if req.SocketActivation {
		if err := writeSocketUnit(socketUnitPath, &args); err == nil {
			paths = []string{unitPath, socketUnitPath}
		}
	}

	if err := systemd.EnableAndReloadUnit(systemd.Connection(), unitName, paths...); err != nil {
		log.Printf("install_container: Could not enable container %s (%v): %v", unitName, paths, err)
		resp.Failure(ErrContainerCreateFailed)
		return
	}

	if req.Started {
		if req.SocketActivation {
			// Start the socket file, not the service and ignore failures
			if err := systemd.Connection().StartUnitJob(socketUnitName, "replace"); err != nil {
				log.Printf("install_container: Could not start container socket %s: %v", socketUnitName, err)
				resp.Failure(ErrContainerCreateFailed)
				return
			}
		} else {
			if err := systemd.Connection().StartUnitJob(unitName, "replace"); err != nil {
				log.Printf("install_container: Could not start container %s: %v", unitName, err)
				resp.Failure(ErrContainerCreateFailed)
				return
			}
		}
	}

	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
	if req.Started {
		fmt.Fprintf(w, "Container %s is starting\n", id)
	} else {
		fmt.Fprintf(w, "Container %s is installed\n", id)
	}
}
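// Execute creates a git repository by launching a transient systemd unit that
// runs "gear init-repo", then follows the unit's journal output until the
// unit leaves the "running" substate or a 10-second timeout expires.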
func (j CreateRepositoryRequest) Execute(resp jobs.Response) {
	unitName := fmt.Sprintf("job-create-repo-%s.service", j.RequestId.String())
	path := j.Id.HomePath()

	if _, err := os.Stat(path); err == nil || !os.IsNotExist(err) {
		resp.Failure(ErrRepositoryAlreadyExists)
		return
	}

	conn, errc := systemd.NewConnection()
	if errc != nil {
		log.Print("create_repository:", errc)
		return
	}

	if err := conn.Subscribe(); err != nil {
		log.Print("create_repository:", err)
		return
	}
	defer conn.Unsubscribe()

	// make subscription global for efficiency
	changes, errch := conn.SubscribeUnitsCustom(1*time.Second, 2,
		func(s1 *dbus.UnitStatus, s2 *dbus.UnitStatus) bool {
			return true
		},
		func(unit string) bool {
			return unit != unitName
		})

	stdout, err := systemd.ProcessLogsForUnit(unitName)
	if err != nil {
		stdout = utils.EmptyReader
		log.Printf("create_repository: Unable to fetch build logs: %+v", err)
	}
	defer stdout.Close()

	startCmd := []string{
		filepath.Join("/", "usr", "bin", "gear"),
		"init-repo",
		string(j.Id),
	}
	if j.CloneUrl != "" {
		startCmd = append(startCmd, j.CloneUrl)
	}

	status, err := conn.StartTransientUnit(
		unitName,
		"fail",
		dbus.PropExecStart(startCmd, true),
		dbus.PropDescription(fmt.Sprintf("Create repository %s", j.Id)),
		dbus.PropRemainAfterExit(true),
		dbus.PropSlice("githost.slice"),
	)
	if err != nil {
		log.Printf("create_repository: Could not start unit %s: %s", unitName, systemd.SprintSystemdError(err))
		resp.Failure(ErrRepositoryCreateFailed)
		return
	} else if status != "done" {
		log.Printf("create_repository: Unit did not return 'done'")
		resp.Failure(ErrRepositoryCreateFailed)
		return
	}

	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
	go io.Copy(w, stdout)

wait:
	for {
		select {
		case c := <-changes:
			if changed, ok := c[unitName]; ok {
				if changed.SubState != "running" {
					fmt.Fprintf(w, "Repository created successfully\n")
					break wait
				}
			}
		case err := <-errch:
			fmt.Fprintf(w, "Error %+v\n", err)
		case <-time.After(10 * time.Second):
			log.Print("create_repository:", "timeout")
			break wait
		}
	}

	stdout.Close()
}
func (j GitArchiveContentRequest) Execute(resp jobs.Response) {
	w := resp.SuccessWithWrite(jobs.ResponseOk, false, false)
	if err := writeGitRepositoryArchive(w, j.RepositoryId.RepositoryPathFor(), j.Ref); err != nil {
		log.Printf("job_content: Invalid git repository stream: %v", err)
	}
}
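// ExecuteRemote runs a job against a remote agent over HTTP, streaming the
// marshalled request body through a pipe and translating the response status
// back into the local jobs.Response protocol: 202 streams raw output, 204
// carries header-only data, other 2xx responses carry JSON data, and anything
// else is reported as a failure.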
func (h *HttpTransport) ExecuteRemote(baseUrl *url.URL, job RemoteExecutable, res jobs.Response) error {
	reader, writer := io.Pipe()
	httpreq, errn := http.NewRequest(job.HttpMethod(), baseUrl.String(), reader)
	if errn != nil {
		return errn
	}

	id := job.MarshalRequestIdentifier()
	if len(id) == 0 {
		id = jobs.NewRequestIdentifier()
	}

	query := &url.Values{}
	job.MarshalUrlQuery(query)

	req := httpreq
	req.Header.Set("X-Request-Id", id.String())
	req.Header.Set("If-Match", "api="+ApiVersion())
	req.Header.Set("Content-Type", "application/json")
	//TODO: introduce API version per job
	//TODO: content request signing for GETs
	req.URL.Path = job.HttpPath()
	req.URL.RawQuery = query.Encode()
	go func() {
		if err := job.MarshalHttpRequestBody(writer); err != nil {
			log.Printf("http_remote: Error when writing to http: %v", err)
			writer.CloseWithError(err)
		} else {
			writer.Close()
		}
	}()
	resp, err := h.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	isJson := resp.Header.Get("Content-Type") == "application/json"

	switch code := resp.StatusCode; {
	case code == 202:
		if isJson {
			return errors.New("Decoding of streaming JSON has not been implemented")
		}
		data, err := job.UnmarshalHttpResponse(resp.Header, nil, ResponseTable)
		if err != nil {
			return err
		}
		if pending, ok := data.(map[string]interface{}); ok {
			for k := range pending {
				res.WritePendingSuccess(k, pending[k])
			}
		}
		w := res.SuccessWithWrite(jobs.ResponseOk, false, false)
		if _, err := io.Copy(w, resp.Body); err != nil {
			return err
		}
	case code == 204:
		data, err := job.UnmarshalHttpResponse(resp.Header, nil, ResponseTable)
		if err != nil {
			return err
		}
		if pending, ok := data.(map[string]interface{}); ok {
			for k := range pending {
				res.WritePendingSuccess(k, pending[k])
			}
		}
		res.Success(jobs.ResponseOk)
	case code >= 200 && code < 300:
		if !isJson {
			return fmt.Errorf("remote: Response with %d status code had content type %s (should be application/json)", code, resp.Header.Get("Content-Type"))
		}
		data, err := job.UnmarshalHttpResponse(nil, resp.Body, ResponseJson)
		if err != nil {
			return err
		}
		res.SuccessWithData(jobs.ResponseOk, data)
	default:
		if isJson {
			decoder := json.NewDecoder(resp.Body)
			data := httpFailureResponse{}
			if err := decoder.Decode(&data); err != nil {
				return err
			}
			res.Failure(jobs.SimpleError{jobs.ResponseError, data.Message})
			return nil
		}
		io.Copy(os.Stderr, resp.Body)
		res.Failure(jobs.SimpleError{jobs.ResponseError, "Unable to decode response."})
	}
	return nil
}
func (j DeleteRouteRequest) Execute(resp jobs.Response) {
	// TODO
	resp.Success(jobs.ResponseOk)
}
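// Execute runs an sti (source-to-image) build as a transient systemd unit.
// If the gear binary is not installed locally, the build falls back to
// running sti inside the builder image via docker. When the client requested
// a streamed result, unit state changes and journal output are relayed until
// the build leaves the "running" substate or the watch times out.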
func (j *BuildImageRequest) Execute(resp jobs.Response) {
	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)

	fmt.Fprintf(w, "Processing build-image request:\n")
	// TODO: download source, add bind-mount

	unitName := containers.JobIdentifier(j.Name).UnitNameForBuild()
	unitDescription := fmt.Sprintf("Builder for %s", j.Tag)

	stdout, err := systemd.ProcessLogsForUnit(unitName)
	if err != nil {
		stdout = utils.EmptyReader
		log.Printf("job_build_image: Unable to fetch build logs: %s, %+v", err.Error(), err)
	}
	defer stdout.Close()

	conn, errc := systemd.NewConnection()
	if errc != nil {
		log.Print("job_build_image:", errc)
		fmt.Fprintf(w, "Unable to watch start status: %v\n", errc)
		return
	}

	if err := conn.Subscribe(); err != nil {
		log.Print("job_build_image:", err)
		fmt.Fprintf(w, "Unable to watch start status: %v\n", err)
		return
	}
	defer conn.Unsubscribe()

	// make subscription global for efficiency
	var (
		changes <-chan map[string]*dbus.UnitStatus
		errch   <-chan error
	)
	if resp.StreamResult() {
		changes, errch = conn.SubscribeUnitsCustom(1*time.Second, 2,
			func(s1 *dbus.UnitStatus, s2 *dbus.UnitStatus) bool {
				return true
			},
			func(unit string) bool {
				return unit != unitName
			})
	}

	fmt.Fprintf(w, "Running sti build unit: %s\n", unitName)
	log.Printf("build_image: Running build %s", unitName)

	var startCmd []string

	if _, err := os.Stat(gearBinaryPath); err != nil {
		log.Println("gear executable is not installed on system; using sti builder image")
		startCmd = []string{
			"/usr/bin/docker", "run",
			"-rm",
			"-v", "/run/docker.sock:/run/docker.sock",
			"-t", buildImage,
			"sti", "build", j.Source, j.BaseImage, j.Tag,
			"-U", "unix:///run/docker.sock",
		}
	} else {
		startCmd = []string{
			gearBinaryPath, "build", j.Source, j.BaseImage, j.Tag,
		}
	}

	if j.RuntimeImage != "" {
		startCmd = append(startCmd, "--runtime-image")
		startCmd = append(startCmd, j.RuntimeImage)
	}

	if j.Clean {
		startCmd = append(startCmd, "--clean")
	}

	if j.Verbose {
		startCmd = append(startCmd, "--debug")
	}

	if j.CallbackUrl != "" {
		startCmd = append(startCmd, "--callbackUrl="+j.CallbackUrl)
	}

	log.Printf("build_image: Will execute %v", startCmd)
	status, err := systemd.Connection().StartTransientUnit(
		unitName,
		"fail",
		dbus.PropExecStart(startCmd, true),
		dbus.PropDescription(unitDescription),
		dbus.PropRemainAfterExit(true),
		dbus.PropSlice("container-small.slice"),
	)
	if err != nil {
		errType := reflect.TypeOf(err)
		fmt.Fprintf(w, "Unable to start build container for this image due to (%s): %s\n", errType, err.Error())
		return
	} else if status != "done" {
		fmt.Fprintf(w, "Build did not complete successfully: %s\n", status)
	} else {
		fmt.Fprintf(w, "Sti build is running\n")
	}

	if resp.StreamResult() {
		go io.Copy(w, stdout)

	wait:
		for {
			select {
			case c := <-changes:
				if changed, ok := c[unitName]; ok {
					if changed.SubState != "running" {
						fmt.Fprintf(w, "Build completed\n")
						break wait
					}
				}
			case err := <-errch:
				fmt.Fprintf(w, "Error %+v\n", err)
			case <-time.After(25 * time.Second):
				log.Print("job_build_image:", "timeout")
				break wait
			}
		}
	}
}
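// Execute runs an arbitrary image as a transient systemd unit. When the
// client requested a streamed result, journal output is relayed and the job
// waits for the unit to leave the "running" substate (or a one-minute
// timeout); otherwise success is reported as soon as the start job is queued.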
func (j *runContainer) Execute(resp jobs.Response) {
	command := j.UnitCommand()
	unitName := containers.JobIdentifier(j.Name).UnitNameFor()
	unitDescription := fmt.Sprintf("Execute image '%s': %s %s", j.Image, j.Command, strings.Join(command, " "))

	var (
		stdout  io.ReadCloser
		changes <-chan map[string]*dbus.UnitStatus
		errch   <-chan error
	)

	if resp.StreamResult() {
		r, err := systemd.ProcessLogsForUnit(unitName)
		if err != nil {
			r = utils.EmptyReader
			log.Printf("run_container: Unable to fetch container run logs: %s, %+v", err.Error(), err)
		}
		defer r.Close()

		conn, errc := systemd.NewConnection()
		if errc != nil {
			log.Print("run_container:", errc)
			return
		}

		if err := conn.Subscribe(); err != nil {
			log.Print("run_container:", err)
			return
		}
		defer conn.Unsubscribe()

		// make subscription global for efficiency
		c, ech := conn.SubscribeUnitsCustom(1*time.Second, 2,
			func(s1 *dbus.UnitStatus, s2 *dbus.UnitStatus) bool {
				return true
			},
			func(unit string) bool {
				return unit != unitName
			})
		stdout = r
		changes = c
		errch = ech
	}

	log.Printf("run_container: Running container %s", unitName)
	status, err := systemd.Connection().StartTransientUnit(
		unitName,
		"fail",
		dbus.PropExecStart(command, true),
		dbus.PropDescription(unitDescription),
		dbus.PropRemainAfterExit(true),
		dbus.PropSlice("container.slice"),
	)
	switch {
	case err != nil:
		errType := reflect.TypeOf(err)
		resp.Failure(jobs.SimpleError{jobs.ResponseError, fmt.Sprintf("Unable to start container execution due to (%s): %s", errType, err.Error())})
		return
	case status != "done":
		resp.Failure(jobs.SimpleError{jobs.ResponseError, fmt.Sprintf("Start did not complete successfully: %s", status)})
		return
	case stdout == nil:
		resp.Success(jobs.ResponseOk)
		return
	}

	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
	go io.Copy(w, stdout)

wait:
	for {
		select {
		case c := <-changes:
			if changed, ok := c[unitName]; ok {
				if changed.SubState != "running" {
					break wait
				}
			}
		case err := <-errch:
			fmt.Fprintf(w, "Error %+v\n", err)
		case <-time.After(1 * time.Minute):
			log.Print("run_container:", "timeout")
			break wait
		}
	}
	stdout.Close()
}
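// Execute stops a container unit, first short-circuiting if the container is
// already stopped or was changed too recently (rate limiting). The stop job
// and the journal stream run concurrently; whichever finishes first (or a
// 15-second timeout) ends the request.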
func (j *StoppedContainerStateRequest) Execute(resp jobs.Response) {
	unitName := j.Id.UnitNameFor()

	inState, tooSoon := inStateOrTooSoon(j.Id, unitName, false, false, rateLimitChanges)
	if inState {
		w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
		fmt.Fprintf(w, "Container %s is stopped\n", j.Id)
		return
	}
	if tooSoon {
		resp.Failure(ErrStopRequestThrottled)
		return
	}

	if errs := csystemd.SetUnitStartOnBoot(j.Id, false); errs != nil {
		log.Print("alter_container_state: Unable to persist whether the unit is started on boot: ", errs)
		resp.Failure(ErrContainerStopFailed)
		return
	}

	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)

	done := make(chan time.Time)
	ioerr := make(chan error)
	go func() {
		ioerr <- systemd.WriteLogsTo(w, unitName, 0, done)
	}()

	joberr := make(chan error)
	go func() {
		status, err := systemd.Connection().StopUnit(unitName, "replace")
		if err == nil && status != "done" {
			err = fmt.Errorf("Job status 'done' != %s", status)
		}
		joberr <- err
	}()

	var err error
	select {
	case err = <-ioerr:
		log.Printf("alter_container_state: Client hung up")
		close(ioerr)
	case err = <-joberr:
		log.Printf("alter_container_state: Stop job done")
	case <-time.After(15 * time.Second):
		log.Printf("alter_container_state: Timeout waiting for stop completion")
	}
	close(done)

	// wait for the log stream to drain before writing the final status
	select {
	case <-ioerr:
	}

	switch {
	case systemd.IsNoSuchUnit(err):
		if _, err := os.Stat(j.Id.UnitPathFor()); err == nil {
			fmt.Fprintf(w, "Container %s is stopped\n", j.Id)
		} else {
			fmt.Fprintf(w, "No such container %s\n", j.Id)
		}
	case err != nil:
		fmt.Fprintf(w, "Could not stop container: %s\n", err.Error())
	default:
		fmt.Fprintf(w, "Container %s is stopped\n", j.Id)
	}
}
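// Execute starts a container unit, first short-circuiting if the container is
// already started or was changed too recently (rate limiting), then persists
// the start-on-boot flag before enabling and starting the unit.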
func (j *StartedContainerStateRequest) Execute(resp jobs.Response) {
	unitName := j.Id.UnitNameFor()
	unitPath := j.Id.UnitPathFor()

	inState, tooSoon := inStateOrTooSoon(j.Id, unitName, true, false, rateLimitChanges)
	if inState {
		w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
		fmt.Fprintf(w, "Container %s starting\n", j.Id)
		return
	}
	if tooSoon {
		resp.Failure(ErrStartRequestThrottled)
		return
	}

	if errs := csystemd.SetUnitStartOnBoot(j.Id, true); errs != nil {
		log.Print("alter_container_state: Unable to persist whether the unit is started on boot: ", errs)
		resp.Failure(ErrContainerStartFailed)
		return
	}

	if err := systemd.EnableAndReloadUnit(systemd.Connection(), unitName, unitPath); err != nil {
		if systemd.IsNoSuchUnit(err) || systemd.IsFileNotFound(err) {
			resp.Failure(ErrContainerNotFound)
			return
		}
		log.Printf("alter_container_state: Could not enable container %s: %v", unitName, err)
		resp.Failure(ErrContainerStartFailed)
		return
	}

	if err := systemd.Connection().StartUnitJob(unitName, "replace"); err != nil {
		log.Printf("alter_container_state: Could not start container %s: %v", unitName, err)
		resp.Failure(ErrContainerStartFailed)
		return
	}

	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
	fmt.Fprintf(w, "Container %s starting\n", j.Id)
}
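// Execute removes a container: it queues a stop job, releases any reserved
// external ports, and deletes the unit, socket, network-link, definition,
// run, and home paths. Cleanup errors after the main unit file is removed are
// logged but do not fail the request.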
func (j *DeleteContainerRequest) Execute(resp jobs.Response) {
	unitName := j.Id.UnitNameFor()
	unitPath := j.Id.UnitPathFor()
	unitDefinitionsPath := j.Id.VersionedUnitsPathFor()
	idleFlagPath := j.Id.IdleUnitPathFor()
	socketUnitPath := j.Id.SocketUnitPathFor()
	homeDirPath := j.Id.BaseHomePath()
	runDirPath := j.Id.RunPathFor()
	networkLinksPath := j.Id.NetworkLinksPathFor()

	_, err := systemd.Connection().GetUnitProperties(unitName)
	switch {
	case systemd.IsNoSuchUnit(err):
		resp.Success(jobs.ResponseOk)
		return
	case err != nil:
		resp.Failure(ErrDeleteContainerFailed)
		return
	}

	if err := systemd.Connection().StopUnitJob(unitName, "fail"); err != nil {
		log.Printf("delete_container: Unable to queue stop unit job: %v", err)
	}

	ports, err := containers.GetExistingPorts(j.Id)
	if err != nil {
		if !os.IsNotExist(err) {
			log.Printf("delete_container: Unable to read existing port definitions: %v", err)
		}
		ports = port.PortPairs{}
	}

	if err := port.ReleaseExternalPorts(ports); err != nil {
		log.Printf("delete_container: Unable to release ports: %v", err)
	}

	if err := os.Remove(unitPath); err != nil && !os.IsNotExist(err) {
		resp.Failure(ErrDeleteContainerFailed)
		return
	}

	if err := os.Remove(idleFlagPath); err != nil && !os.IsNotExist(err) {
		resp.Failure(ErrDeleteContainerFailed)
		return
	}

	if err := j.Id.SetUnitStartOnBoot(false); err != nil {
		log.Printf("delete_container: Unable to clear unit boot state: %v", err)
	}

	if err := os.Remove(socketUnitPath); err != nil && !os.IsNotExist(err) {
		log.Printf("delete_container: Unable to remove socket unit path: %v", err)
	}

	if err := os.Remove(networkLinksPath); err != nil && !os.IsNotExist(err) {
		log.Printf("delete_container: Unable to remove network links file: %v", err)
	}

	if err := os.RemoveAll(unitDefinitionsPath); err != nil {
		log.Printf("delete_container: Unable to remove definitions for container: %v", err)
	}

	if err := os.RemoveAll(filepath.Dir(runDirPath)); err != nil {
		log.Printf("delete_container: Unable to remove run directory: %v", err)
	}

	if err := os.RemoveAll(filepath.Dir(homeDirPath)); err != nil {
		log.Printf("delete_container: Unable to remove home directory: %v", err)
	}

	if _, err := systemd.Connection().DisableUnitFiles([]string{unitPath, socketUnitPath}, false); err != nil {
		log.Printf("delete_container: Some units have not been disabled: %v", err)
	}

	if err := systemd.Connection().Reload(); err != nil {
		log.Printf("delete_container: Unable to reload systemd after deleting units: %v", err)
	}

	resp.Success(jobs.ResponseOk)
}
func (j AddRouteRequest) Execute(resp jobs.Response) {
	router.AddRoute(j.Frontend, j.FrontendPath, j.BackendPath, j.Protocols, j.Endpoints)
	resp.Success(jobs.ResponseOk)
}
func (p *purgeContainers) Execute(res jobs.Response) {
	Clean()
	res.Success(jobs.ResponseOk)
}