func (j *CreateKeysRequest) Execute(resp jobs.Response) { failedKeys := []KeyFailure{} for i := range j.Keys { key := j.Keys[i] locator, err := key.Create() if err != nil { failedKeys = append(failedKeys, KeyFailure{i, &key, err}) continue } for k := range j.Permissions { if err := j.Permissions[k].Create(locator); err != nil { failedKeys = append(failedKeys, KeyFailure{i, &key, err}) continue } } } if len(failedKeys) > 0 { data := make([]KeyStructuredFailure, len(failedKeys)) for i := range failedKeys { data[i] = KeyStructuredFailure{failedKeys[i].Index, failedKeys[i].Reason.Error()} log.Printf("Failure %d: %+v", failedKeys[i].Index, failedKeys[i].Reason) } resp.Failure(jobs.StructuredJobError{jobs.SimpleError{jobs.ResponseError, "Not all keys were completed"}, data}) } else { resp.Success(jobs.ResponseOk) } }
func (j *PatchEnvironmentRequest) Execute(resp jobs.Response) { if err := j.Write(true); err != nil { resp.Failure(ErrEnvironmentUpdateFailed) return } resp.Success(jobs.ResponseOk) }
func (j AddAliasRequest) Execute(resp jobs.Response) { value, ok := router.GlobalRoutes[j.Frontend] if ok { router.AddAlias(j.Alias, value.Name) } else { router.CreateFrontend(j.Frontend, j.Alias) } resp.Success(jobs.ResponseOk) }
func (j CreateFrontendRequest) Execute(resp jobs.Response) { value, ok := router.GlobalRoutes[j.Frontend] if !ok { router.CreateFrontend(j.Frontend, j.Alias) } else { log.Printf("Error : Frontend %s already exists.", value.Name) } resp.Success(jobs.ResponseOk) }
func (j *LinkContainersRequest) Execute(resp jobs.Response) { for i := range j.Links { if errw := j.Links[i].NetworkLinks.Write(j.Links[i].Id.NetworkLinksPathFor(), false); errw != nil { resp.Failure(ErrLinkContainersFailed) return } } resp.Success(jobs.ResponseOk) }
func (j *PutEnvironmentRequest) Execute(resp jobs.Response) { if err := j.Fetch(100 * 1024); err != nil { resp.Failure(ErrEnvironmentUpdateFailed) return } if err := j.Write(false); err != nil { resp.Failure(ErrEnvironmentUpdateFailed) return } resp.Success(jobs.ResponseOk) }
func (j UpdateFrontendRequest) Execute(resp jobs.Response) { // detach empty frontends for _, frontend := range j.Frontends { if frontend.BackendId == "" { frontend.Remove() } } errs := []backendError{} for _, backend := range j.Backends { if err := utils.WriteToPathExclusive(backend.Id.BackendPathFor(), 0554, backend); err != nil { errs = append(errs, backendError{backend.Id, err}) } } if len(errs) != 0 { log.Printf("Unable to persist some backends: %+v", errs) resp.Failure(ErrBackendWriteFailed) return } resp.Success(jobs.ResponseOk) }
func (h *HttpTransport) ExecuteRemote(baseUrl *url.URL, job RemoteExecutable, res jobs.Response) error { reader, writer := io.Pipe() httpreq, errn := http.NewRequest(job.HttpMethod(), baseUrl.String(), reader) if errn != nil { return errn } id := job.MarshalRequestIdentifier() if len(id) == 0 { id = jobs.NewRequestIdentifier() } query := &url.Values{} job.MarshalUrlQuery(query) req := httpreq req.Header.Set("X-Request-Id", id.String()) req.Header.Set("If-Match", "api="+ApiVersion()) req.Header.Set("Content-Type", "application/json") //TODO: introduce API version per job //TODO: content request signing for GETs req.URL.Path = job.HttpPath() req.URL.RawQuery = query.Encode() go func() { if err := job.MarshalHttpRequestBody(writer); err != nil { log.Printf("http_remote: Error when writing to http: %v", err) writer.CloseWithError(err) } else { writer.Close() } }() resp, err := h.client.Do(req) if err != nil { return err } defer resp.Body.Close() isJson := resp.Header.Get("Content-Type") == "application/json" switch code := resp.StatusCode; { case code == 202: if isJson { return errors.New("Decoding of streaming JSON has not been implemented") } data, err := job.UnmarshalHttpResponse(resp.Header, nil, ResponseTable) if err != nil { return err } if pending, ok := data.(map[string]interface{}); ok { for k := range pending { res.WritePendingSuccess(k, pending[k]) } } w := res.SuccessWithWrite(jobs.ResponseOk, false, false) if _, err := io.Copy(w, resp.Body); err != nil { return err } case code == 204: data, err := job.UnmarshalHttpResponse(resp.Header, nil, ResponseTable) if err != nil { return err } if pending, ok := data.(map[string]interface{}); ok { for k := range pending { res.WritePendingSuccess(k, pending[k]) } } res.Success(jobs.ResponseOk) case code >= 200 && code < 300: if !isJson { return errors.New(fmt.Sprintf("remote: Response with %d status code had content type %s (should be application/json)", code, resp.Header.Get("Content-Type"))) } data, err := 
job.UnmarshalHttpResponse(nil, resp.Body, ResponseJson) if err != nil { return err } res.SuccessWithData(jobs.ResponseOk, data) default: if isJson { decoder := json.NewDecoder(resp.Body) data := httpFailureResponse{} if err := decoder.Decode(&data); err != nil { return err } res.Failure(jobs.SimpleError{jobs.ResponseError, data.Message}) return nil } io.Copy(os.Stderr, resp.Body) res.Failure(jobs.SimpleError{jobs.ResponseError, "Unable to decode response."}) } return nil }
// Execute starts the container image as a transient systemd unit. When the
// caller wants streamed output, it additionally subscribes to unit status
// changes over dbus and relays the unit's logs until the unit stops running
// or a one-minute timeout elapses.
func (j *runContainer) Execute(resp jobs.Response) {
	command := j.UnitCommand()
	unitName := containers.JobIdentifier(j.Name).UnitNameFor()
	unitDescription := fmt.Sprintf("Execute image '%s': %s %s", j.Image, j.Command, strings.Join(command, " "))

	// Populated only when streaming; a nil stdout later selects the
	// non-streaming success path.
	var (
		stdout  io.ReadCloser
		changes <-chan map[string]*dbus.UnitStatus
		errch   <-chan error
	)

	if resp.StreamResult() {
		// Best-effort log reader: fall back to an empty reader so streaming
		// still works even when logs cannot be fetched.
		r, err := systemd.ProcessLogsForUnit(unitName)
		if err != nil {
			r = utils.EmptyReader
			log.Printf("run_container: Unable to fetch container run logs: %s, %+v", err.Error(), err)
		}
		defer r.Close()

		// NOTE(review): on connection/subscribe failure this returns without
		// writing anything to resp — confirm callers tolerate an unanswered
		// response here.
		conn, errc := systemd.NewConnection()
		if errc != nil {
			log.Print("run_container:", errc)
			return
		}

		if err := conn.Subscribe(); err != nil {
			log.Print("run_container:", err)
			return
		}
		defer conn.Unsubscribe()

		// make subscription global for efficiency
		// Subscribe before starting the unit so no state change is missed;
		// the filter drops every unit except ours.
		c, ech := conn.SubscribeUnitsCustom(1*time.Second, 2,
			func(s1 *dbus.UnitStatus, s2 *dbus.UnitStatus) bool {
				return true
			},
			func(unit string) bool {
				return unit != unitName
			})
		stdout = r
		changes = c
		errch = ech
	}

	log.Printf("run_container: Running container %s", unitName)

	// "fail" mode: the start job fails if a conflicting job is queued.
	status, err := systemd.Connection().StartTransientUnit(
		unitName,
		"fail",
		dbus.PropExecStart(command, true),
		dbus.PropDescription(unitDescription),
		dbus.PropRemainAfterExit(true),
		dbus.PropSlice("container.slice"),
	)

	switch {
	case err != nil:
		errType := reflect.TypeOf(err)
		resp.Failure(jobs.SimpleError{jobs.ResponseError, fmt.Sprintf("Unable to start container execution due to (%s): %s", errType, err.Error())})
		return
	case status != "done":
		resp.Failure(jobs.SimpleError{jobs.ResponseError, fmt.Sprintf("Start did not complete successfully: %s", status)})
		return
	case stdout == nil:
		// Non-streaming request: the start succeeded, nothing to relay.
		resp.Success(jobs.ResponseOk)
		return
	}

	// Streaming path: pipe unit logs to the response while watching for the
	// unit to leave the "running" substate.
	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
	go io.Copy(w, stdout)

wait:
	for {
		select {
		case c := <-changes:
			if changed, ok := c[unitName]; ok {
				if changed.SubState != "running" {
					break wait
				}
			}
		case err := <-errch:
			fmt.Fprintf(w, "Error %+v\n", err)
		case <-time.After(1 * time.Minute):
			// Safety net: stop streaming if the unit never reports a state change.
			log.Print("run_container:", "timeout")
			break wait
		}
	}
	stdout.Close()
}
func (j *DeleteContainerRequest) Execute(resp jobs.Response) { unitName := j.Id.UnitNameFor() unitPath := j.Id.UnitPathFor() unitDefinitionsPath := j.Id.VersionedUnitsPathFor() idleFlagPath := j.Id.IdleUnitPathFor() socketUnitPath := j.Id.SocketUnitPathFor() homeDirPath := j.Id.BaseHomePath() runDirPath := j.Id.RunPathFor() networkLinksPath := j.Id.NetworkLinksPathFor() _, err := systemd.Connection().GetUnitProperties(unitName) switch { case systemd.IsNoSuchUnit(err): resp.Success(jobs.ResponseOk) return case err != nil: resp.Failure(ErrDeleteContainerFailed) return } if err := systemd.Connection().StopUnitJob(unitName, "fail"); err != nil { log.Printf("delete_container: Unable to queue stop unit job: %v", err) } ports, err := containers.GetExistingPorts(j.Id) if err != nil { if !os.IsNotExist(err) { log.Printf("delete_container: Unable to read existing port definitions: %v", err) } ports = port.PortPairs{} } if err := port.ReleaseExternalPorts(ports); err != nil { log.Printf("delete_container: Unable to release ports: %v", err) } if err := os.Remove(unitPath); err != nil && !os.IsNotExist(err) { resp.Failure(ErrDeleteContainerFailed) return } if err := os.Remove(idleFlagPath); err != nil && !os.IsNotExist(err) { resp.Failure(ErrDeleteContainerFailed) return } if err := j.Id.SetUnitStartOnBoot(false); err != nil { log.Printf("delete_container: Unable to clear unit boot state: %v", err) } if err := os.Remove(socketUnitPath); err != nil && !os.IsNotExist(err) { log.Printf("delete_container: Unable to remove socket unit path: %v", err) } if err := os.Remove(networkLinksPath); err != nil && !os.IsNotExist(err) { log.Printf("delete_container: Unable to remove network links file: %v", err) } if err := os.RemoveAll(unitDefinitionsPath); err != nil { log.Printf("delete_container: Unable to remove definitions for container: %v", err) } if err := os.RemoveAll(filepath.Dir(runDirPath)); err != nil { log.Printf("delete_container: Unable to remove run directory: %v", err) } if 
err := os.RemoveAll(filepath.Dir(homeDirPath)); err != nil { log.Printf("delete_container: Unable to remove home directory: %v", err) } if _, err := systemd.Connection().DisableUnitFiles([]string{unitPath, socketUnitPath}, false); err != nil { log.Printf("delete_container: Some units have not been disabled: %v", err) } if err := systemd.Connection().Reload(); err != nil { log.Printf("delete_container: Some units have not been disabled: %v", err) } resp.Success(jobs.ResponseOk) }
func (p *purgeContainers) Execute(res jobs.Response) { Clean() res.Success(jobs.ResponseOk) }
func (j DeleteFrontendRequest) Execute(resp jobs.Response) { router.DeleteFrontend(j.Frontend) resp.Success(jobs.ResponseOk) }
// Execute is a stub: route deletion is not implemented yet.
// NOTE(review): the job reports success even though nothing is deleted —
// callers cannot distinguish this from a real deletion.
func (j DeleteRouteRequest) Execute(resp jobs.Response) {
	// TODO
	resp.Success(jobs.ResponseOk)
}
func (j AddRouteRequest) Execute(resp jobs.Response) { router.AddRoute(j.Frontend, j.FrontendPath, j.BackendPath, j.Protocols, j.Endpoints) resp.Success(jobs.ResponseOk) }