// EnterState transits the state machine from the current state to the state given in the parameter.
// It returns the next state the state machine should transit to.
func (sm *SM) EnterState(s string) (string, error) {
	log.Debugf("Job id: %d, transiting from State: %s, to State: %s", sm.JobID, sm.CurrentState, s)
	targets := sm.Transitions[sm.CurrentState]
	_, exist := targets[s]
	_, isForced := sm.ForcedStates[s]
	if !exist && !isForced {
		return "", fmt.Errorf("job id: %d, transition from %s to %s does not exist", sm.JobID, sm.CurrentState, s)
	}
	exitHandler, ok := sm.Handlers[sm.CurrentState]
	if ok {
		if err := exitHandler.Exit(); err != nil {
			return "", err
		}
	} else {
		log.Debugf("Job id: %d, no handler found for state: %s, skip", sm.JobID, sm.CurrentState)
	}
	enterHandler, ok := sm.Handlers[s]
	var next = models.JobContinue
	var err error
	if ok {
		if next, err = enterHandler.Enter(); err != nil {
			return "", err
		}
	} else {
		log.Debugf("Job id: %d, no handler found for state: %s, skip", sm.JobID, s)
	}
	sm.PreviousState = sm.CurrentState
	sm.CurrentState = s
	log.Debugf("Job id: %d, transition succeeded, current state: %s", sm.JobID, s)
	return next, nil
}
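// Inferred from the calls above for readability (the actual definition is not included in this
// collection, so both the interface name and the doc comments are assumptions): the handlers
// stored in sm.Handlers must expose at least the following shape.
type StateHandler interface {
	// Enter returns the next state the state machine should transit to,
	// e.g. models.JobContinue to follow the single outgoing transition.
	Enter() (string, error)
	// Exit is called before the state machine leaves the current state.
	Exit() error
}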
func filterEvents(notification *models.Notification) ([]*models.Event, error) {
	events := []*models.Event{}

	for _, event := range notification.Events {
		log.Debugf("receive an event: \n----ID: %s \n----target: %s:%s \n----digest: %s \n----action: %s \n----mediatype: %s \n----user-agent: %s",
			event.ID, event.Target.Repository, event.Target.Tag, event.Target.Digest, event.Action, event.Target.MediaType, event.Request.UserAgent)

		isManifest, err := regexp.MatchString(manifestPattern, event.Target.MediaType)
		if err != nil {
			log.Errorf("failed to match the media type against pattern: %v", err)
			continue
		}
		if !isManifest {
			continue
		}

		// copy the loop variable so that the appended pointers do not all alias the same event
		event := event

		// pull and push manifest by docker-client or vic
		if (strings.HasPrefix(event.Request.UserAgent, "docker") || strings.HasPrefix(event.Request.UserAgent, vicPrefix)) &&
			(event.Action == "pull" || event.Action == "push") {
			events = append(events, &event)
			log.Debugf("add event to collect: %s", event.ID)
			continue
		}

		// push manifest by the job service (harbor-registry-client)
		if strings.ToLower(strings.TrimSpace(event.Request.UserAgent)) == "harbor-registry-client" && event.Action == "push" {
			events = append(events, &event)
			log.Debugf("add event to collect: %s", event.ID)
			continue
		}
	}

	return events, nil
}
// Start kicks off the state machine to transit from the current state to s, and moves on.
// When the next state returned by a handler is "_continue" it will look up the transition map,
// and it will enter the error state if there is more than one possible next state in that case.
func (sm *SM) Start(s string) {
	n, err := sm.EnterState(s)
	log.Debugf("Job id: %d, next state from handler: %s", sm.JobID, n)
	for len(n) > 0 && err == nil {
		if d := sm.getDesiredState(); len(d) > 0 {
			log.Debugf("Job id: %d. Desired state: %s, will ignore the next state from handler", sm.JobID, d)
			n = d
			sm.setDesiredState("")
			continue
		}
		if n == models.JobContinue && len(sm.Transitions[sm.CurrentState]) == 1 {
			for n = range sm.Transitions[sm.CurrentState] {
				break
			}
			log.Debugf("Job id: %d, Continue to state: %s", sm.JobID, n)
			continue
		}
		if n == models.JobContinue && len(sm.Transitions[sm.CurrentState]) != 1 {
			log.Errorf("Job id: %d, next state is continue but there are %d possible next states in transition table", sm.JobID, len(sm.Transitions[sm.CurrentState]))
			err = fmt.Errorf("Unable to continue")
			break
		}
		n, err = sm.EnterState(n)
		log.Debugf("Job id: %d, next state from handler: %s", sm.JobID, n)
	}
	if err != nil {
		log.Warningf("Job id: %d, the state machine will enter the error state due to error: %v", sm.JobID, err)
		sm.EnterState(models.JobError)
	}
}
// Get handles GET request, it checks the http header for user credentials,
// parses service and scope based on the docker registry v2 standard,
// checks the permissions against the local DB and generates a JWT token.
func (h *Handler) Get() {
	var uid, password, username string
	request := h.Ctx.Request
	service := h.GetString("service")
	scopes := h.GetStrings("scope")
	access := GetResourceActions(scopes)
	log.Infof("request url: %v", request.URL.String())

	if svc_utils.VerifySecret(request) {
		log.Debugf("Will grant all access as this request is from job service with legal secret.")
		username = "******"
	} else {
		uid, password, _ = request.BasicAuth()
		log.Debugf("uid for logging: %s", uid)
		user := authenticate(uid, password)
		if user == nil {
			log.Warningf("login request with invalid credentials in token service, uid: %s", uid)
			if len(scopes) == 0 {
				h.CustomAbort(http.StatusUnauthorized, "")
			}
		} else {
			username = user.Username
		}
		log.Debugf("username for filtering access: %s.", username)
		for _, a := range access {
			FilterAccess(username, a)
		}
	}
	h.serveToken(username, service, access)
}
// Post handles POST request, and records audit log or refreshes cache based on event.
func (n *NotificationHandler) Post() {
	var notification models.Notification
	err := json.Unmarshal(n.Ctx.Input.CopyBody(1<<32), &notification)
	if err != nil {
		log.Errorf("failed to decode notification: %v", err)
		return
	}

	events, err := filterEvents(&notification)
	if err != nil {
		log.Errorf("failed to filter events: %v", err)
		return
	}

	for _, event := range events {
		repository := event.Target.Repository
		project, _ := utils.ParseRepository(repository)
		tag := event.Target.Tag
		action := event.Action
		user := event.Actor.Name
		if len(user) == 0 {
			user = "******"
		}

		go func() {
			if err := dao.AccessLog(user, project, repository, tag, action); err != nil {
				log.Errorf("failed to add access log: %v", err)
			}
		}()

		if action == "push" {
			go func() {
				exist := dao.RepositoryExists(repository)
				if exist {
					return
				}
				log.Debugf("Add repository %s into DB.", repository)
				repoRecord := models.RepoRecord{Name: repository, OwnerName: user, ProjectName: project}
				if err := dao.AddRepository(repoRecord); err != nil {
					log.Errorf("Error happens when adding repository: %v", err)
				}
				if err := cache.RefreshCatalogCache(); err != nil {
					log.Errorf("failed to refresh cache: %v", err)
				}
			}()
			go api.TriggerReplicationByRepository(repository, []string{tag}, models.RepOpTransfer)
		}

		if action == "pull" {
			go func() {
				log.Debugf("Increase the repository %s pull count.", repository)
				if err := dao.IncreasePullCount(repository); err != nil {
					log.Errorf("Error happens when increasing the pull count of repository %s: %v", repository, err)
				}
			}()
		}
	}
}
// StopJobs accepts a list of jobs and will try to stop them if any of them is being executed by a worker.
func (wp *workerPool) StopJobs(jobs []int64) {
	log.Debugf("Workers working on jobs: %v will be stopped", jobs)
	for _, id := range jobs {
		for _, w := range wp.workerList {
			if w.SM.JobID == id {
				log.Debugf("found a worker whose job ID is %d, will try to stop it", id)
				w.SM.Stop(id)
			}
		}
	}
}
// Stop will set the desired state to "stopped" so that when the next transition happens the state
// machine will stop handling the current job and the worker can release itself back to the worker pool.
func (sm *SM) Stop(id int64) {
	log.Debugf("Trying to stop the job: %d", id)
	sm.lock.Lock()
	defer sm.lock.Unlock()
	// need to check whether the state machine has switched to another job
	if id == sm.JobID {
		sm.desiredState = models.JobStopped
		log.Debugf("Desired state of job %d is set to stopped", id)
	} else {
		log.Debugf("State machine has switched to job %d, so the action to stop job %d will be ignored", sm.JobID, id)
	}
}
func (rj *ReplicationJob) addJob(repo string, policyID int64, operation string, tags ...string) error {
	j := models.RepJob{
		Repository: repo,
		PolicyID:   policyID,
		Operation:  operation,
		TagList:    tags,
	}
	log.Debugf("Creating job for repo: %s, policy: %d", repo, policyID)
	id, err := dao.AddRepJob(j)
	if err != nil {
		return err
	}
	log.Debugf("Send job to scheduler, job id: %d", id)
	job.Schedule(id)
	return nil
}
func resumeJobs() {
	log.Debugf("Trying to resume halted jobs...")
	err := dao.ResetRunningJobs()
	if err != nil {
		log.Warningf("Failed to reset all running jobs to pending, error: %v", err)
	}
	jobs, err := dao.GetRepJobByStatus(models.JobPending, models.JobRetrying)
	if err == nil {
		for _, j := range jobs {
			log.Debugf("Resuming job: %d", j.ID)
			job.Schedule(j.ID)
		}
	} else {
		log.Warningf("Failed to get jobs to resume, error: %v", err)
	}
}
// GetResourceActions parses the "scope" parameters of a docker registry v2 token request into resource actions.
func GetResourceActions(scopes []string) []*token.ResourceActions {
	log.Debugf("scopes: %+v", scopes)
	var res []*token.ResourceActions
	for _, s := range scopes {
		if s == "" {
			continue
		}
		items := strings.Split(s, ":")
		length := len(items)

		typee := items[0]

		name := ""
		if length > 1 {
			name = items[1]
		}

		actions := []string{}
		if length > 2 {
			actions = strings.Split(items[2], ",")
		}

		res = append(res, &token.ResourceActions{
			Type:    typee,
			Name:    name,
			Actions: actions,
		})
	}
	return res
}
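// A hedged illustration (not part of the original source; the scope value is made up for the
// example): with the splitting logic above, a Docker Registry v2 scope string of the form
// "type:name:actions" decomposes as follows. Assumes the standard fmt and strings packages.
func exampleScopeParsing() {
	scope := "repository:library/ubuntu:pull,push"
	items := strings.Split(scope, ":")
	fmt.Println(items[0])                     // "repository"     -> ResourceActions.Type
	fmt.Println(items[1])                     // "library/ubuntu" -> ResourceActions.Name
	fmt.Println(strings.Split(items[2], ",")) // [pull push]      -> ResourceActions.Actions
}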
// Post creates replication jobs according to the policy.
func (rj *ReplicationJob) Post() {
	var data ReplicationReq
	rj.DecodeJSONReq(&data)
	log.Debugf("data: %+v", data)
	p, err := dao.GetRepPolicy(data.PolicyID)
	if err != nil {
		log.Errorf("Failed to get policy, error: %v", err)
		rj.RenderError(http.StatusInternalServerError, fmt.Sprintf("Failed to get policy, id: %d", data.PolicyID))
		return
	}
	if p == nil {
		log.Errorf("Policy not found, id: %d", data.PolicyID)
		rj.RenderError(http.StatusNotFound, fmt.Sprintf("Policy not found, id: %d", data.PolicyID))
		return
	}
	if len(data.Repo) == 0 { // sync all repositories
		repoList, err := getRepoList(p.ProjectID)
		if err != nil {
			log.Errorf("Failed to get repository list, project id: %d, error: %v", p.ProjectID, err)
			rj.RenderError(http.StatusInternalServerError, err.Error())
			return
		}
		log.Debugf("repo list: %v", repoList)
		for _, repo := range repoList {
			err := rj.addJob(repo, data.PolicyID, models.RepOpTransfer)
			if err != nil {
				log.Errorf("Failed to insert job record, error: %v", err)
				rj.RenderError(http.StatusInternalServerError, err.Error())
				return
			}
		}
	} else { // sync a single repository
		var op string
		if len(data.Operation) > 0 {
			op = data.Operation
		} else {
			op = models.RepOpTransfer
		}
		err := rj.addJob(data.Repo, data.PolicyID, op, data.TagList...)
		if err != nil {
			log.Errorf("Failed to insert job record, error: %v", err)
			rj.RenderError(http.StatusInternalServerError, err.Error())
			return
		}
	}
}
// Start runs a loop in which the worker gets a job ID from its channel and handles it.
func (w *Worker) Start() {
	go func() {
		for {
			WorkerPool.workerChan <- w
			select {
			case jobID := <-w.RepJobs:
				log.Debugf("worker: %d, will handle job: %d", w.ID, jobID)
				w.handleRepJob(jobID)
			case q := <-w.quit:
				if q {
					log.Debugf("worker: %d, will stop.", w.ID)
					return
				}
			}
		}
	}()
}
// InitWorkerPool creates workers according to the configuration.
func InitWorkerPool() {
	WorkerPool = &workerPool{
		workerChan: make(chan *Worker, config.MaxJobWorkers()),
		workerList: make([]*Worker, 0, config.MaxJobWorkers()),
	}
	for i := 0; i < config.MaxJobWorkers(); i++ {
		worker := NewWorker(i)
		WorkerPool.workerList = append(WorkerPool.workerList, worker)
		worker.Start()
		log.Debugf("worker %d started", worker.ID)
	}
}
// Dispatch will listen to the jobQueue of job service and try to pick a free worker from the worker pool and assign the job to it.
func Dispatch() {
	for {
		select {
		case job := <-jobQueue:
			go func(jobID int64) {
				log.Debugf("Trying to dispatch job: %d", jobID)
				worker := <-WorkerPool.workerChan
				worker.RepJobs <- jobID
			}(job)
		}
	}
}
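// A self-contained sketch (not part of the original source; all names here are illustrative) of
// the worker-pool idiom used by Start, InitWorkerPool and Dispatch above: each worker announces
// itself on a shared channel of free workers before waiting for a job, and the dispatcher pairs
// the next job ID with whichever worker it can pull off that channel.
func workerPoolSketch() {
	type sketchWorker struct {
		id   int
		jobs chan int64
	}

	const numWorkers = 3
	free := make(chan *sketchWorker, numWorkers)
	queue := make(chan int64)
	done := make(chan int64, 5) // buffered so workers never block when reporting completion

	for i := 0; i < numWorkers; i++ {
		w := &sketchWorker{id: i, jobs: make(chan int64)}
		go func() {
			for {
				free <- w         // announce availability, like WorkerPool.workerChan <- w
				jobID := <-w.jobs // block until the dispatcher hands over a job
				done <- jobID     // "handle" the job
			}
		}()
	}

	// dispatcher: take the next job, wait for a free worker, hand the job over
	go func() {
		for jobID := range queue {
			w := <-free
			w.jobs <- jobID
		}
	}()

	for jobID := int64(1); jobID <= 5; jobID++ {
		queue <- jobID
	}
	close(queue) // let the dispatcher exit once the queue drains
	for i := 0; i < 5; i++ {
		<-done // wait until all five jobs have been handled
	}
}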
// Login authenticates user credentials based on setting.
func Login(m models.AuthModel) (*models.User, error) {
	var authMode = config.AuthMode()
	if authMode == "" || m.Principal == "admin" {
		authMode = "db_auth"
	}
	log.Debug("Current AUTH_MODE is ", authMode)

	authenticator, ok := registry[authMode]
	if !ok {
		return nil, fmt.Errorf("Unrecognized auth_mode: %s", authMode)
	}
	if lock.IsLocked(m.Principal) {
		log.Debugf("%s is locked due to login failure, login failed", m.Principal)
		return nil, nil
	}
	user, err := authenticator.Authenticate(m)
	if user == nil && err == nil {
		log.Debugf("Login failed, locking %s, and sleep for %v", m.Principal, frozenTime)
		lock.Lock(m.Principal)
		time.Sleep(frozenTime)
	}
	return user, err
}
// RoundTrip ...
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
	for _, modifier := range t.modifiers {
		if err := modifier.Modify(req); err != nil {
			return nil, err
		}
	}

	resp, err := t.transport.RoundTrip(req)
	if err != nil {
		return nil, err
	}

	log.Debugf("%d | %s %s", resp.StatusCode, req.Method, req.URL.String())

	return resp, err
}
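// A hedged, self-contained sketch of the same decorator pattern (the type and field names here
// are illustrative and are not Harbor's Transport API, whose definition is not shown above):
// wrap an inner http.RoundTripper, let each modifier adjust the outgoing request, then delegate.
type headerModifier struct {
	key, value string
}

func (m *headerModifier) Modify(req *http.Request) error {
	req.Header.Set(m.key, m.value)
	return nil
}

type sketchTransport struct {
	inner     http.RoundTripper
	modifiers []*headerModifier
}

func (t *sketchTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	for _, m := range t.modifiers {
		if err := m.Modify(req); err != nil {
			return nil, err
		}
	}
	return t.inner.RoundTrip(req)
}

// Usage under those assumptions: plug the transport into a plain http.Client.
//
//	client := &http.Client{
//		Transport: &sketchTransport{
//			inner:     http.DefaultTransport,
//			modifiers: []*headerModifier{{key: "Authorization", value: "Bearer <token>"}},
//		},
//	}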
func (w *Worker) handleRepJob(id int64) {
	err := w.SM.Reset(id)
	if err != nil {
		log.Errorf("Worker %d, failed to re-initialize statemachine for job: %d, error: %v", w.ID, id, err)
		err2 := dao.UpdateRepJobStatus(id, models.JobError)
		if err2 != nil {
			log.Errorf("Failed to update job status to ERROR, job: %d, error:%v", id, err2)
		}
		return
	}
	if w.SM.Parms.Enabled == 0 {
		log.Debugf("The policy of job:%d is disabled, will cancel the job", id)
		_ = dao.UpdateRepJobStatus(id, models.JobCanceled)
		w.SM.Logger.Info("The job has been canceled")
	} else {
		w.SM.Start(models.JobRunning)
	}
}
// GetPaginationParams ...
func (b *BaseAPI) GetPaginationParams() (page, pageSize int64) {
	page, err := b.GetInt64("page", 1)
	if err != nil || page <= 0 {
		b.CustomAbort(http.StatusBadRequest, "invalid page")
	}

	pageSize, err = b.GetInt64("page_size", defaultPageSize)
	if err != nil || pageSize <= 0 {
		b.CustomAbort(http.StatusBadRequest, "invalid page_size")
	}

	if pageSize > maxPageSize {
		// log the requested value before clamping it, otherwise both numbers print the same
		log.Debugf("the parameter page_size %d exceeds the max %d, set it to max", pageSize, maxPageSize)
		pageSize = maxPageSize
	}

	return page, pageSize
}
// getRepoList calls the API exposed by the UI to get the repository list of the project.
func getRepoList(projectID int64) ([]string, error) {
	repositories := []string{}

	client := &http.Client{}
	uiURL := config.LocalUIURL()
	next := "/api/repositories?project_id=" + strconv.Itoa(int(projectID))

	for len(next) != 0 {
		req, err := http.NewRequest("GET", uiURL+next, nil)
		if err != nil {
			return repositories, err
		}

		req.AddCookie(&http.Cookie{Name: models.UISecretCookie, Value: config.UISecret()})

		resp, err := client.Do(req)
		if err != nil {
			return repositories, err
		}
		defer resp.Body.Close()

		if resp.StatusCode != http.StatusOK {
			dump, _ := httputil.DumpResponse(resp, true)
			log.Debugf("response: %q", dump)
			return repositories, fmt.Errorf("Unexpected status code when getting repository list: %d", resp.StatusCode)
		}

		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return repositories, err
		}

		var list []string
		if err = json.Unmarshal(body, &list); err != nil {
			return repositories, err
		}
		repositories = append(repositories, list...)

		links := u.ParseLink(resp.Header.Get(http.CanonicalHeaderKey("link")))
		next = links.Next()
	}

	return repositories, nil
}
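// A hedged sketch (not Harbor's utils.ParseLink, whose source is not shown in this collection) of
// how the rel="next" URL can be extracted from a simple RFC 5988 Link header such as
// `</api/repositories?project_id=1&page=2>; rel="next"`. Assumes the standard strings package.
func nextLinkSketch(header string) string {
	for _, part := range strings.Split(header, ",") {
		sections := strings.Split(part, ";")
		if len(sections) < 2 {
			continue
		}
		url := strings.Trim(strings.TrimSpace(sections[0]), "<>")
		for _, param := range sections[1:] {
			if strings.TrimSpace(param) == `rel="next"` {
				return url
			}
		}
	}
	return ""
}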
// Forward sets up the layout and template for rendering a page.
func (b *BaseController) Forward(title, templateName string) {
	b.Layout = filepath.Join(prefixNg, "layout.htm")
	b.TplName = filepath.Join(prefixNg, templateName)
	b.Data["Title"] = b.Tr(title)
	b.LayoutSections = make(map[string]string)
	b.LayoutSections["HeaderInclude"] = filepath.Join(prefixNg, viewPath, "header-include.htm")
	if b.UseCompressedJS {
		b.LayoutSections["HeaderScriptInclude"] = filepath.Join(prefixNg, viewPath, "script-min-include.htm")
	} else {
		b.LayoutSections["HeaderScriptInclude"] = filepath.Join(prefixNg, viewPath, "script-include.htm")
	}
	log.Debugf("Loaded HeaderScriptInclude file: %s", b.LayoutSections["HeaderScriptInclude"])
	b.LayoutSections["FooterInclude"] = filepath.Join(prefixNg, viewPath, "footer-include.htm")
	b.LayoutSections["HeaderContent"] = filepath.Join(prefixNg, viewPath, "header-content.htm")
	b.LayoutSections["FooterContent"] = filepath.Join(prefixNg, viewPath, "footer-content.htm")
}
// Reschedule is called by the state machine to retry a job.
func Reschedule(jobID int64) {
	log.Debugf("Job %d will be rescheduled in 5 minutes", jobID)
	time.Sleep(5 * time.Minute)
	log.Debugf("Rescheduling job %d", jobID)
	Schedule(jobID)
}
func init() {
	maxWorkersEnv := os.Getenv("MAX_JOB_WORKERS")
	maxWorkers64, err := strconv.ParseInt(maxWorkersEnv, 10, 32)
	maxJobWorkers = int(maxWorkers64)
	if err != nil {
		log.Warningf("Failed to parse max workers setting, error: %v, the default value: %d will be used", err, defaultMaxWorkers)
		maxJobWorkers = defaultMaxWorkers
	}

	localRegURL = os.Getenv("REGISTRY_URL")
	if len(localRegURL) == 0 {
		localRegURL = "http://registry:5000"
	}

	localUIURL = os.Getenv("UI_URL")
	if len(localUIURL) == 0 {
		localUIURL = "http://ui"
	}

	logDir = os.Getenv("LOG_DIR")
	if len(logDir) == 0 {
		logDir = "/var/log"
	}

	f, err := os.Open(logDir)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	finfo, err := f.Stat()
	if err != nil {
		panic(err)
	}
	if !finfo.IsDir() {
		panic(fmt.Sprintf("%s is not a directory", logDir))
	}

	uiSecret = os.Getenv("UI_SECRET")
	if len(uiSecret) == 0 {
		panic("UI Secret is not set")
	}

	verifyRemoteCert = os.Getenv("VERIFY_REMOTE_CERT")
	if len(verifyRemoteCert) == 0 {
		verifyRemoteCert = "on"
	}

	configPath := os.Getenv("CONFIG_PATH")
	if len(configPath) != 0 {
		log.Infof("Config path: %s", configPath)
		beego.LoadAppConfig("ini", configPath)
	}

	secretKey = os.Getenv("SECRET_KEY")
	if len(secretKey) != 16 {
		panic("The length of secretkey has to be 16 characters!")
	}

	log.Debugf("config: maxJobWorkers: %d", maxJobWorkers)
	log.Debugf("config: localUIURL: %s", localUIURL)
	log.Debugf("config: localRegURL: %s", localRegURL)
	log.Debugf("config: verifyRemoteCert: %s", verifyRemoteCert)
	log.Debugf("config: logDir: %s", logDir)
	log.Debugf("config: uiSecret: ******")
}
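// A small helper, sketched here for illustration only (it is not part of the original source),
// that captures the repeated "read an environment variable, fall back to a default" pattern used
// by init above.
func getEnvOrDefault(key, def string) string {
	if v := os.Getenv(key); len(v) > 0 {
		return v
	}
	return def
}

// Example usage under that assumption: localRegURL = getEnvOrDefault("REGISTRY_URL", "http://registry:5000")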
// SyncRegistry syncs the repositories of the registry with the database.
func SyncRegistry() error {
	log.Debugf("Start syncing repositories from registry to DB... ")

	reposInRegistry, err := catalog()
	if err != nil {
		log.Error(err)
		return err
	}

	var repoRecordsInDB []models.RepoRecord
	repoRecordsInDB, err = dao.GetAllRepositories()
	if err != nil {
		log.Errorf("error occurred while getting all repositories. %v", err)
		return err
	}

	var reposInDB []string
	for _, repoRecordInDB := range repoRecordsInDB {
		reposInDB = append(reposInDB, repoRecordInDB.Name)
	}

	var reposToAdd []string
	var reposToDel []string
	reposToAdd, reposToDel, err = diffRepos(reposInRegistry, reposInDB)
	if err != nil {
		return err
	}

	if len(reposToAdd) > 0 {
		log.Debugf("Start adding repositories into DB... ")
		for _, repoToAdd := range reposToAdd {
			project, _ := utils.ParseRepository(repoToAdd)
			user, err := dao.GetAccessLogCreator(repoToAdd)
			if err != nil {
				log.Errorf("Error happens when getting the repository owner from access log: %v", err)
			}
			if len(user) == 0 {
				user = "******"
			}
			pullCount, err := dao.CountPull(repoToAdd)
			if err != nil {
				log.Errorf("Error happens when counting pull count from access log: %v", err)
			}
			repoRecord := models.RepoRecord{Name: repoToAdd, OwnerName: user, ProjectName: project, PullCount: pullCount}
			if err := dao.AddRepository(repoRecord); err != nil {
				log.Errorf("Error happens when adding the missing repository: %v", err)
			} else {
				log.Debugf("Add repository: %s success.", repoToAdd)
			}
		}
	}

	if len(reposToDel) > 0 {
		log.Debugf("Start deleting repositories from DB... ")
		for _, repoToDel := range reposToDel {
			if err := dao.DeleteRepository(repoToDel); err != nil {
				log.Errorf("Error happens when deleting the repository: %v", err)
			} else {
				log.Debugf("Delete repository: %s success.", repoToDel)
			}
		}
	}

	log.Debugf("Sync repositories from registry to DB is done.")
	return nil
}
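// A hedged sketch of the set-difference step: diffRepos itself is not shown in this collection,
// but given how its results are used above, its core logic is presumably equivalent to the
// following (the name and signature here are illustrative).
func diffReposSketch(inRegistry, inDB []string) (toAdd, toDel []string) {
	registrySet := make(map[string]struct{}, len(inRegistry))
	for _, repo := range inRegistry {
		registrySet[repo] = struct{}{}
	}
	dbSet := make(map[string]struct{}, len(inDB))
	for _, repo := range inDB {
		dbSet[repo] = struct{}{}
	}
	// in the registry but not in the DB -> add to the DB
	for _, repo := range inRegistry {
		if _, ok := dbSet[repo]; !ok {
			toAdd = append(toAdd, repo)
		}
	}
	// in the DB but no longer in the registry -> delete from the DB
	for _, repo := range inDB {
		if _, ok := registrySet[repo]; !ok {
			toDel = append(toDel, repo)
		}
	}
	return toAdd, toDel
}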