func (j *CreateKeysRequest) Execute(resp jobs.JobResponse) {
	failedKeys := []KeyFailure{}
	for i := range j.Keys {
		key := j.Keys[i]
		locator, err := key.Create()
		if err != nil {
			failedKeys = append(failedKeys, KeyFailure{i, &key, err})
			continue
		}

		for k := range j.Permissions {
			if err := j.Permissions[k].Create(locator); err != nil {
				failedKeys = append(failedKeys, KeyFailure{i, &key, err})
				continue
			}
		}
	}

	if len(failedKeys) > 0 {
		data := make([]KeyStructuredFailure, len(failedKeys))
		for i := range failedKeys {
			data[i] = KeyStructuredFailure{failedKeys[i].Index, failedKeys[i].Reason.Error()}
			log.Printf("Failure %d: %+v", failedKeys[i].Index, failedKeys[i].Reason)
		}
		resp.Failure(jobs.StructuredJobError{jobs.SimpleJobError{jobs.JobResponseError, "Not all keys were completed"}, data})
	} else {
		resp.Success(jobs.JobResponseOk)
	}
}
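
// The handler above relies on two small failure-carrying types. The shapes
// below are a minimal sketch inferred from the call sites in Execute (Index,
// Reason, and a structured message); the key element type, the exact field
// names, and the JSON tags are assumptions, and the real definitions live
// elsewhere in the package.
type KeyFailure struct {
	Index  int      // position of the failing key in j.Keys
	Key    *KeyData // assumed element type of j.Keys
	Reason error    // error from key.Create() or a permission Create(locator)
}

type KeyStructuredFailure struct {
	Index   int    `json:"index"`
	Message string `json:"message"`
}
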
func (j UpdateFrontendRequest) Execute(resp jobs.JobResponse) {
	// detach empty frontends
	for _, frontend := range j.Frontends {
		if frontend.BackendId == "" {
			frontend.Remove()
		}
	}

	errs := []backendError{}
	for _, backend := range j.Backends {
		if err := utils.WriteToPathExclusive(backend.Id.BackendPathFor(), 0554, backend); err != nil {
			errs = append(errs, backendError{backend.Id, err})
		}
	}
	if len(errs) != 0 {
		log.Printf("Unable to persist some backends: %+v", errs)
		resp.Failure(ErrBackendWriteFailed)
		return
	}

	resp.Success(jobs.JobResponseOk)
}
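
// backendError pairs a backend identifier with the error that prevented it
// from being persisted. This two-field layout is a sketch inferred from the
// composite literal backendError{backend.Id, err} above; the field names and
// the identifier type are assumptions, and the real definition lives
// elsewhere in the package.
type backendError struct {
	Id  BackendIdentifier // assumed type of backend.Id
	Err error
}
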
func (j CreateRepositoryRequest) Execute(resp jobs.JobResponse) {
	unitName := fmt.Sprintf("job-create-repo-%s.service", j.RequestId.String())
	path := j.Id.HomePath()

	if _, err := os.Stat(path); err == nil || !os.IsNotExist(err) {
		resp.Failure(ErrRepositoryAlreadyExists)
		return
	}

	conn, errc := systemd.NewConnection()
	if errc != nil {
		log.Print("create_repository:", errc)
		return
	}

	if err := conn.Subscribe(); err != nil {
		log.Print("create_repository:", err)
		return
	}
	defer conn.Unsubscribe()

	// make subscription global for efficiency
	changes, errch := conn.SubscribeUnitsCustom(1*time.Second, 2,
		func(s1 *dbus.UnitStatus, s2 *dbus.UnitStatus) bool {
			return true
		},
		func(unit string) bool {
			return unit != unitName
		})

	stdout, err := systemd.ProcessLogsForUnit(unitName)
	if err != nil {
		stdout = utils.EmptyReader
		log.Printf("create_repository: Unable to fetch build logs: %+v", err)
	}
	defer stdout.Close()

	startCmd := []string{
		filepath.Join("/", "usr", "bin", "gear"),
		"init-repo",
		string(j.Id),
	}
	if j.CloneUrl != "" {
		startCmd = append(startCmd, j.CloneUrl)
	}

	status, err := conn.StartTransientUnit(
		unitName,
		"fail",
		dbus.PropExecStart(startCmd, true),
		dbus.PropDescription(fmt.Sprintf("Create repository %s", j.Id)),
		dbus.PropRemainAfterExit(true),
		dbus.PropSlice("githost.slice"),
	)
	if err != nil {
		log.Printf("create_repository: Could not start unit %s: %s", unitName, systemd.SprintSystemdError(err))
		resp.Failure(ErrRepositoryCreateFailed)
		return
	} else if status != "done" {
		log.Printf("create_repository: Unit did not return 'done'")
		resp.Failure(ErrRepositoryCreateFailed)
		return
	}

	w := resp.SuccessWithWrite(jobs.JobResponseAccepted, true, false)
	go io.Copy(w, stdout)

wait:
	for {
		select {
		case c := <-changes:
			if changed, ok := c[unitName]; ok {
				if changed.SubState != "running" {
					fmt.Fprintf(w, "Repository created successfully\n")
					break wait
				}
			}
		case err := <-errch:
			fmt.Fprintf(w, "Error %+v\n", err)
		case <-time.After(10 * time.Second):
			log.Print("create_repository:", "timeout")
			break wait
		}
	}

	stdout.Close()
}
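
// For reference, the transient unit started above is roughly equivalent to
// running the command below by hand, except that systemd additionally
// provides journal logging (consumed via ProcessLogsForUnit), RemainAfterExit
// bookkeeping for the status check, and cgroup placement in githost.slice:
//
//	/usr/bin/gear init-repo <repository-id> [<clone-url>]
//
// The optional clone URL is only appended when j.CloneUrl is non-empty.
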
func (h *HttpDispatcher) Dispatch(job RemoteExecutable, res jobs.JobResponse) error {
	reader, writer := io.Pipe()
	httpreq, errn := http.NewRequest(job.HttpMethod(), h.locator.BaseURL().String(), reader)
	if errn != nil {
		return errn
	}

	id := job.MarshalRequestIdentifier()
	if len(id) == 0 {
		id = jobs.NewRequestIdentifier()
	}

	query := &url.Values{}
	job.MarshalUrlQuery(query)

	req := httpreq
	req.Header.Set("X-Request-Id", id.String())
	req.Header.Set("If-Match", "api="+ApiVersion())
	req.Header.Set("Content-Type", "application/json")
	//TODO: introduce API version per job
	//TODO: content request signing for GETs
	req.URL.Path = job.HttpPath()
	req.URL.RawQuery = query.Encode()

	go func() {
		if err := job.MarshalHttpRequestBody(writer); err != nil {
			h.log.Printf("remote: Error when writing to http: %v", err)
			writer.CloseWithError(err)
		} else {
			writer.Close()
		}
	}()

	resp, err := h.client.Do(req)
	if err != nil {
		h.log.Printf("Failed: %v", err)
		return err
	}
	defer resp.Body.Close()

	isJson := resp.Header.Get("Content-Type") == "application/json"

	switch code := resp.StatusCode; {
	case code == 202:
		if isJson {
			return errors.New("Decoding of streaming JSON has not been implemented")
		}
		data, err := job.UnmarshalHttpResponse(resp.Header, nil, ResponseTable)
		if err != nil {
			return err
		}
		if pending, ok := data.(map[string]interface{}); ok {
			for k := range pending {
				res.WritePendingSuccess(k, pending[k])
			}
		}
		w := res.SuccessWithWrite(jobs.JobResponseOk, false, false)
		if _, err := io.Copy(w, resp.Body); err != nil {
			return err
		}
	case code == 204:
		data, err := job.UnmarshalHttpResponse(resp.Header, nil, ResponseTable)
		if err != nil {
			return err
		}
		if pending, ok := data.(map[string]interface{}); ok {
			for k := range pending {
				res.WritePendingSuccess(k, pending[k])
			}
		}
		res.Success(jobs.JobResponseOk)
	case code >= 200 && code < 300:
		if !isJson {
			return fmt.Errorf("remote: Response with %d status code had content type %s (should be application/json)", code, resp.Header.Get("Content-Type"))
		}
		data, err := job.UnmarshalHttpResponse(nil, resp.Body, ResponseJson)
		if err != nil {
			return err
		}
		res.SuccessWithData(jobs.JobResponseOk, data)
	default:
		if isJson {
			decoder := json.NewDecoder(resp.Body)
			data := httpFailureResponse{}
			if err := decoder.Decode(&data); err != nil {
				return err
			}
			res.Failure(jobs.SimpleJobError{jobs.JobResponseError, data.Message})
			return nil
		}
		io.Copy(os.Stderr, resp.Body)
		res.Failure(jobs.SimpleJobError{jobs.JobResponseError, "Unable to decode response."})
	}
	return nil
}
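
// Summary of the status-code handling in Dispatch: 202 streams the raw
// response body through the job response writer, 204 reports success from
// headers alone, any other 2xx decodes a JSON body into structured data, and
// everything else is surfaced to the caller as a job failure (decoding a JSON
// error message when the server sent one).
//
// httpFailureResponse is only read for its Message in that last branch; the
// sketch below is inferred from data.Message, the JSON tag is an assumption,
// and the real definition lives elsewhere in this package.
type httpFailureResponse struct {
	Message string `json:"message"`
}
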