func filterEvents(notification *models.Notification) ([]*models.Event, error) {
	events := []*models.Event{}

	for _, event := range notification.Events {
		log.Debugf("received an event: ID-%s, target-%s:%s, digest-%s, action-%s",
			event.ID, event.Target.Repository, event.Target.Tag,
			event.Target.Digest, event.Action)

		isManifest, err := regexp.MatchString(manifestPattern, event.Target.MediaType)
		if err != nil {
			log.Errorf("failed to match the media type against pattern: %v", err)
			continue
		}

		if !isManifest {
			continue
		}

		//pull and push manifest by docker-client
		if strings.HasPrefix(event.Request.UserAgent, "docker") &&
			(event.Action == "pull" || event.Action == "push") {
			event := event // copy the loop variable so the appended pointer does not alias across iterations
			events = append(events, &event)
			log.Debugf("add event to collect: %s", event.ID)
			continue
		}

		//push manifest by the job service (harbor-registry-client)
		if strings.ToLower(strings.TrimSpace(event.Request.UserAgent)) == "harbor-registry-client" &&
			event.Action == "push" {
			event := event // copy the loop variable so the appended pointer does not alias across iterations
			events = append(events, &event)
			log.Debugf("add event to collect: %s", event.ID)
			continue
		}
	}

	return events, nil
}
// EnterState transits the state machine from the current state to the state given in the parameter.
// It returns the next state the state machine should transit to.
func (sm *SM) EnterState(s string) (string, error) {
	log.Debugf("Job id: %d, transiting from State: %s, to State: %s", sm.JobID, sm.CurrentState, s)
	targets := sm.Transitions[sm.CurrentState]
	_, exist := targets[s]
	_, isForced := sm.ForcedStates[s]
	if !exist && !isForced {
		return "", fmt.Errorf("Job id: %d, transition from %s to %s does not exist", sm.JobID, sm.CurrentState, s)
	}
	exitHandler, ok := sm.Handlers[sm.CurrentState]
	if ok {
		if err := exitHandler.Exit(); err != nil {
			return "", err
		}
	} else {
		log.Debugf("Job id: %d, no handler found for state: %s, skip", sm.JobID, sm.CurrentState)
	}
	enterHandler, ok := sm.Handlers[s]
	var next = models.JobContinue
	var err error
	if ok {
		if next, err = enterHandler.Enter(); err != nil {
			return "", err
		}
	} else {
		log.Debugf("Job id: %d, no handler found for state: %s, skip", sm.JobID, s)
	}
	sm.PreviousState = sm.CurrentState
	sm.CurrentState = s
	log.Debugf("Job id: %d, transition succeeded, current state: %s", sm.JobID, s)
	return next, nil
}
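// Illustrative sketch (not part of the original code): the minimal shape a state handler
// needs so EnterState can call it. Only the Enter/Exit signatures are taken from the usage
// above; the struct name and its behavior are assumptions for demonstration.
type exampleHandler struct {
	jobID int64
}

// Enter is called when the state machine enters the state this handler is bound to.
// Returning models.JobContinue lets Start follow the single outgoing transition.
func (h exampleHandler) Enter() (string, error) {
	log.Debugf("Job id: %d, example handler entered", h.jobID)
	return models.JobContinue, nil
}

// Exit is called when the state machine leaves the state; returning an error aborts the transition.
func (h exampleHandler) Exit() error {
	return nil
}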
// Start kicks off the state machine to transit from the current state to s, and moves on.
// When the next state returned by a handler is "_continue" it looks up the transition map,
// and enters the error state if there is not exactly one possible next state.
func (sm *SM) Start(s string) {
	n, err := sm.EnterState(s)
	log.Debugf("Job id: %d, next state from handler: %s", sm.JobID, n)
	for len(n) > 0 && err == nil {
		if d := sm.getDesiredState(); len(d) > 0 {
			log.Debugf("Job id: %d. Desired state: %s, will ignore the next state from handler", sm.JobID, d)
			n = d
			sm.setDesiredState("")
			continue
		}
		if n == models.JobContinue && len(sm.Transitions[sm.CurrentState]) == 1 {
			for n = range sm.Transitions[sm.CurrentState] {
				break
			}
			log.Debugf("Job id: %d, Continue to state: %s", sm.JobID, n)
			continue
		}
		if n == models.JobContinue && len(sm.Transitions[sm.CurrentState]) != 1 {
			log.Errorf("Job id: %d, next state is continue but there are %d possible next states in transition table", sm.JobID, len(sm.Transitions[sm.CurrentState]))
			err = fmt.Errorf("unable to continue")
			break
		}
		n, err = sm.EnterState(n)
		log.Debugf("Job id: %d, next state from handler: %s", sm.JobID, n)
	}
	if err != nil {
		log.Warningf("Job id: %d, the state machine will enter the error state due to error: %v", sm.JobID, err)
		sm.EnterState(models.JobError)
	}
}
// StopJobs accepts a list of jobs and will try to stop them if any of them is being executed by a worker.
func (wp *workerPool) StopJobs(jobs []int64) {
	log.Debugf("Workers working on jobs: %v will be stopped", jobs)
	for _, id := range jobs {
		for _, w := range wp.workerList {
			if w.SM.JobID == id {
				log.Debugf("found a worker whose job ID is %d, will try to stop it", id)
				w.SM.Stop(id)
			}
		}
	}
}
// Stop sets the desired state to "stopped" so that on the next transition the state machine
// stops handling the current job and the worker can release itself back to the worker pool.
func (sm *SM) Stop(id int64) {
	log.Debugf("Trying to stop the job: %d", id)
	sm.lock.Lock()
	defer sm.lock.Unlock()
	//need to check if the sm has switched to another job
	if id == sm.JobID {
		sm.desiredState = models.JobStopped
		log.Debugf("Desired state of job %d is set to stopped", id)
	} else {
		log.Debugf("State machine has switched to job %d, so the action to stop job %d will be ignored", sm.JobID, id)
	}
}
// GetResourceActions ...
func GetResourceActions(scopes []string) []*token.ResourceActions {
	log.Debugf("scopes: %+v", scopes)
	var res []*token.ResourceActions
	for _, s := range scopes {
		if s == "" {
			continue
		}
		items := strings.Split(s, ":")
		length := len(items)

		typee := items[0]

		name := ""
		if length > 1 {
			name = items[1]
		}

		actions := []string{}
		if length > 2 {
			actions = strings.Split(items[2], ",")
		}

		res = append(res, &token.ResourceActions{
			Type:    typee,
			Name:    name,
			Actions: actions,
		})
	}
	return res
}
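// Usage sketch (illustrative, not part of the original code): a docker registry v2 scope
// such as "repository:library/ubuntu:pull,push" is split on ":" into type, name and a
// comma-separated action list, so the call below yields one ResourceActions with
// Type "repository", Name "library/ubuntu" and Actions ["pull" "push"].
// The function name and the sample scope are made up for demonstration.
func exampleGetResourceActions() {
	for _, ra := range GetResourceActions([]string{"repository:library/ubuntu:pull,push"}) {
		log.Debugf("type: %s, name: %s, actions: %v", ra.Type, ra.Name, ra.Actions)
	}
}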
// Get handles the GET request. It checks the http header for user credentials,
// parses service and scope based on the docker registry v2 standard,
// checks the permissions against the local DB and generates a jwt token.
func (h *Handler) Get() {
	var username, password string
	request := h.Ctx.Request
	service := h.GetString("service")
	scopes := h.GetStrings("scope")
	access := GetResourceActions(scopes)
	log.Infof("request url: %v", request.URL.String())

	if svc_utils.VerifySecret(request) {
		log.Debugf("Will grant all access as this request is from job service with legal secret.")
		username = "******"
	} else {
		username, password, _ = request.BasicAuth()
		authenticated := authenticate(username, password)
		if len(scopes) == 0 && !authenticated {
			log.Info("login request with invalid credentials")
			h.CustomAbort(http.StatusUnauthorized, "")
		}
		for _, a := range access {
			FilterAccess(username, authenticated, a)
		}
	}
	h.serveToken(username, service, access)
}
func resumeJobs() {
	log.Debugf("Trying to resume halted jobs...")
	err := dao.ResetRunningJobs()
	if err != nil {
		log.Warningf("Failed to reset all running jobs to pending, error: %v", err)
	}
	jobs, err := dao.GetRepJobByStatus(models.JobPending, models.JobRetrying)
	if err == nil {
		for _, j := range jobs {
			log.Debugf("Resuming job: %d", j.ID)
			job.Schedule(j.ID)
		}
	} else {
		log.Warningf("Failed to get jobs to resume, error: %v", err)
	}
}
//InitDB initializes the database
func InitDB() {
	orm.RegisterDriver("mysql", orm.DRMySQL)
	addr := os.Getenv("MYSQL_HOST")
	port := os.Getenv("MYSQL_PORT")
	username := os.Getenv("MYSQL_USR")
	password := os.Getenv("MYSQL_PWD")
	log.Debugf("db url: %s:%s, db user: %s", addr, port, username)
	dbStr := username + ":" + password + "@tcp(" + addr + ":" + port + ")/registry"

	ch := make(chan int, 1)
	go func() {
		var err error
		var c net.Conn
		for {
			c, err = net.DialTimeout("tcp", addr+":"+port, 20*time.Second)
			if err == nil {
				c.Close()
				ch <- 1
				break // stop dialing once the DB is reachable
			}
			log.Errorf("failed to connect to db, retry after 2 seconds: %v", err)
			time.Sleep(2 * time.Second)
		}
	}()
	select {
	case <-ch:
	case <-time.After(60 * time.Second):
		panic("Failed to connect to DB after 60 seconds")
	}

	err := orm.RegisterDataBase("default", "mysql", dbStr)
	if err != nil {
		panic(err)
	}
}
// ParseChallengeFromResponse ...
func ParseChallengeFromResponse(resp *http.Response) []au.Challenge {
	challenges := au.ResponseChallenges(resp)
	log.Debugf("challenges: %v", challenges)
	return challenges
}
func (rj *ReplicationJob) addJob(repo string, policyID int64, operation string, tags ...string) error {
	j := models.RepJob{
		Repository: repo,
		PolicyID:   policyID,
		Operation:  operation,
		TagList:    tags,
	}
	log.Debugf("Creating job for repo: %s, policy: %d", repo, policyID)
	id, err := dao.AddRepJob(j)
	if err != nil {
		return err
	}
	log.Debugf("Send job to scheduler, job id: %d", id)
	job.Schedule(id)
	return nil
}
// Post creates replication jobs according to the policy.
func (rj *ReplicationJob) Post() {
	var data ReplicationReq
	rj.DecodeJSONReq(&data)
	log.Debugf("data: %+v", data)
	p, err := dao.GetRepPolicy(data.PolicyID)
	if err != nil {
		log.Errorf("Failed to get policy, error: %v", err)
		rj.RenderError(http.StatusInternalServerError, fmt.Sprintf("Failed to get policy, id: %d", data.PolicyID))
		return
	}
	if p == nil {
		log.Errorf("Policy not found, id: %d", data.PolicyID)
		rj.RenderError(http.StatusNotFound, fmt.Sprintf("Policy not found, id: %d", data.PolicyID))
		return
	}
	if len(data.Repo) == 0 { // sync all repositories
		repoList, err := getRepoList(p.ProjectID)
		if err != nil {
			log.Errorf("Failed to get repository list, project id: %d, error: %v", p.ProjectID, err)
			rj.RenderError(http.StatusInternalServerError, err.Error())
			return
		}
		log.Debugf("repo list: %v", repoList)
		for _, repo := range repoList {
			err := rj.addJob(repo, data.PolicyID, models.RepOpTransfer)
			if err != nil {
				log.Errorf("Failed to insert job record, error: %v", err)
				rj.RenderError(http.StatusInternalServerError, err.Error())
				return
			}
		}
	} else { // sync a single repository
		var op string
		if len(data.Operation) > 0 {
			op = data.Operation
		} else {
			op = models.RepOpTransfer
		}
		err := rj.addJob(data.Repo, data.PolicyID, op, data.TagList...)
		if err != nil {
			log.Errorf("Failed to insert job record, error: %v", err)
			rj.RenderError(http.StatusInternalServerError, err.Error())
			return
		}
	}
}
// Start starts a loop in which the worker gets job IDs from its channel and handles them.
func (w *Worker) Start() {
	go func() {
		for {
			WorkerPool.workerChan <- w
			select {
			case jobID := <-w.RepJobs:
				log.Debugf("worker: %d, will handle job: %d", w.ID, jobID)
				w.handleRepJob(jobID)
			case q := <-w.quit:
				if q {
					log.Debugf("worker: %d, will stop.", w.ID)
					return
				}
			}
		}
	}()
}
// InitWorkerPool creates workers according to the configuration.
func InitWorkerPool() {
	WorkerPool = &workerPool{
		workerChan: make(chan *Worker, config.MaxJobWorkers()),
		workerList: make([]*Worker, 0, config.MaxJobWorkers()),
	}
	for i := 0; i < config.MaxJobWorkers(); i++ {
		worker := NewWorker(i)
		WorkerPool.workerList = append(WorkerPool.workerList, worker)
		worker.Start()
		log.Debugf("worker %d started", worker.ID)
	}
}
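// Startup sketch (illustrative, not the original bootstrap code): the worker pool has to be
// created before Dispatch runs, because Dispatch hands jobs to workers via WorkerPool.workerChan,
// which only exists after InitWorkerPool. The function name below is made up for demonstration.
func exampleStartWorkers() {
	InitWorkerPool()
	go Dispatch()
}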
// Dispatch listens to the jobQueue of the job service, picks a free worker from the worker pool
// and assigns the job to it.
func Dispatch() {
	for {
		select {
		case job := <-jobQueue:
			go func(jobID int64) {
				log.Debugf("Trying to dispatch job: %d", jobID)
				worker := <-WorkerPool.workerChan
				worker.RepJobs <- jobID
			}(job)
		}
	}
}
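// Schedule is called throughout these excerpts but its body is not shown. A minimal sketch
// consistent with how Dispatch consumes jobQueue is given below; the name "scheduleSketch"
// and its body are assumptions for illustration, not the original implementation of Schedule.
func scheduleSketch(jobID int64) {
	log.Debugf("Queuing job %d for dispatching", jobID)
	jobQueue <- jobID
}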
func init() {
	maxWorkersEnv := os.Getenv("MAX_JOB_WORKERS")
	maxWorkers64, err := strconv.ParseInt(maxWorkersEnv, 10, 32)
	maxJobWorkers = int(maxWorkers64)
	if err != nil {
		log.Warningf("Failed to parse max workers setting, error: %v, the default value: %d will be used", err, defaultMaxWorkers)
		maxJobWorkers = defaultMaxWorkers
	}

	localRegURL = os.Getenv("REGISTRY_URL")
	if len(localRegURL) == 0 {
		localRegURL = "http://registry:5000"
	}

	localUIURL = os.Getenv("UI_URL")
	if len(localUIURL) == 0 {
		localUIURL = "http://ui"
	}

	logDir = os.Getenv("LOG_DIR")
	if len(logDir) == 0 {
		logDir = "/var/log"
	}

	f, err := os.Open(logDir)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	finfo, err := f.Stat()
	if err != nil {
		panic(err)
	}
	if !finfo.IsDir() {
		panic(fmt.Sprintf("%s is not a directory", logDir))
	}

	uiSecret = os.Getenv("UI_SECRET")
	if len(uiSecret) == 0 {
		panic("UI Secret is not set")
	}

	verifyRemoteCert = os.Getenv("VERIFY_REMOTE_CERT")
	if len(verifyRemoteCert) == 0 {
		verifyRemoteCert = "on"
	}

	configPath := os.Getenv("CONFIG_PATH")
	if len(configPath) != 0 {
		log.Infof("Config path: %s", configPath)
		beego.LoadAppConfig("ini", configPath)
	}

	log.Debugf("config: maxJobWorkers: %d", maxJobWorkers)
	log.Debugf("config: localUIURL: %s", localUIURL)
	log.Debugf("config: localRegURL: %s", localRegURL)
	log.Debugf("config: verifyRemoteCert: %s", verifyRemoteCert)
	log.Debugf("config: logDir: %s", logDir)
	log.Debugf("config: uiSecret: ******")
}
// AuthorizeRequest adds an authorization header, which contains a token, before the request is sent.
func (t *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
	var scopes []*scope
	var token string

	hasFrom := false
	from := req.URL.Query().Get("from")
	if len(from) != 0 {
		s := &scope{
			Type:    "repository",
			Name:    from,
			Actions: []string{"pull"},
		}
		scopes = append(scopes, s)
		// do not cache the token if "from" appears
		hasFrom = true
	}
	scopes = append(scopes, t.scope)

	expired := true
	cachedToken, cachedExpiredIn, cachedIssuedAt := t.getCachedToken()
	if len(cachedToken) != 0 && cachedExpiredIn != 0 && cachedIssuedAt != nil {
		expired = cachedIssuedAt.Add(time.Duration(cachedExpiredIn) * time.Second).Before(time.Now().UTC())
	}

	if expired || hasFrom {
		scopeStrs := []string{}
		for _, scope := range scopes {
			scopeStrs = append(scopeStrs, scope.string())
		}
		to, expiresIn, issuedAt, err := t.tg(params["realm"], params["service"], scopeStrs)
		if err != nil {
			return err
		}
		token = to
		if !hasFrom {
			t.updateCachedToken(to, expiresIn, issuedAt)
			log.Debug("add token to cache")
		}
	} else {
		token = cachedToken
		log.Debug("get token from cache")
	}

	req.Header.Add(http.CanonicalHeaderKey("Authorization"), fmt.Sprintf("Bearer %s", token))
	log.Debugf("add token to request: %s %s", req.Method, req.URL.String())

	return nil
}
// RoundTrip ...
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
	for _, modifier := range t.modifiers {
		if err := modifier.Modify(req); err != nil {
			return nil, err
		}
	}
	resp, err := t.transport.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	log.Debugf("%d | %s %s", resp.StatusCode, req.Method, req.URL.String())
	return resp, err
}
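// Illustrative sketch (not part of the original code): a minimal modifier that satisfies the
// Modify call used by RoundTrip above. The type name is made up for demonstration; an instance
// of it would be placed into the Transport's modifiers when the Transport is constructed
// (for example alongside an authorization modifier such as the token handler above).
type userAgentModifier struct {
	userAgent string
}

// Modify sets the User-Agent header on every outgoing request before it is sent.
func (m *userAgentModifier) Modify(req *http.Request) error {
	req.Header.Set("User-Agent", m.userAgent)
	return nil
}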
// NewRegistry returns an instance of registry
func NewRegistry(endpoint string, client *http.Client) (*Registry, error) {
	endpoint = strings.TrimRight(endpoint, "/")
	u, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
	registry := &Registry{
		Endpoint: u,
		client:   client,
	}
	log.Debugf("initialized a registry client: %s", endpoint)
	return registry, nil
}
// RefreshCatalogCache calls the registry's API to get the repository list and writes it to the cache.
func RefreshCatalogCache() error {
	log.Debug("refreshing catalog cache...")

	if registryClient == nil {
		var err error
		registryClient, err = registry.NewRegistryWithUsername(endpoint, username)
		if err != nil {
			log.Errorf("error occurred while initializing registry client used by cache: %v", err)
			return err
		}
	}

	rs, err := registryClient.Catalog()
	if err != nil {
		return err
	}

	repos := []string{}
	for _, repo := range rs {
		rc, ok := repositoryClients[repo]
		if !ok {
			rc, err = registry.NewRepositoryWithUsername(repo, endpoint, username)
			if err != nil {
				log.Errorf("error occurred while initializing repository client used by cache: %s %v", repo, err)
				continue
			}
			repositoryClients[repo] = rc
		}
		tags, err := rc.ListTag()
		if err != nil {
			log.Errorf("error occurred while listing tags for %s: %v", repo, err)
			continue
		}
		if len(tags) != 0 {
			repos = append(repos, repo)
			log.Debugf("add %s to catalog cache", repo)
		}
	}

	Cache.Put(catalogKey, repos, 600*time.Second)
	return nil
}
func (w *Worker) handleRepJob(id int64) {
	err := w.SM.Reset(id)
	if err != nil {
		log.Errorf("Worker %d, failed to re-initialize statemachine for job: %d, error: %v", w.ID, id, err)
		err2 := dao.UpdateRepJobStatus(id, models.JobError)
		if err2 != nil {
			log.Errorf("Failed to update job status to ERROR, job: %d, error: %v", id, err2)
		}
		return
	}
	if w.SM.Parms.Enabled == 0 {
		log.Debugf("The policy of job: %d is disabled, will cancel the job", id)
		_ = dao.UpdateRepJobStatus(id, models.JobCanceled)
		w.SM.Logger.Info("The job has been canceled")
	} else {
		w.SM.Start(models.JobRunning)
	}
}
// getRepoList calls the API of the UI to get the repository list of a project.
func getRepoList(projectID int64) ([]string, error) {
	/*
		uiUser := os.Getenv("UI_USR")
		if len(uiUser) == 0 {
			uiUser = "******"
		}
		uiPwd := os.Getenv("UI_PWD")
		if len(uiPwd) == 0 {
			uiPwd = "Harbor12345"
		}
	*/
	uiURL := config.LocalUIURL()
	client := &http.Client{}
	req, err := http.NewRequest("GET", uiURL+"/api/repositories?project_id="+strconv.Itoa(int(projectID)), nil)
	if err != nil {
		log.Errorf("Error when creating request: %v", err)
		return nil, err
	}
	//req.SetBasicAuth(uiUser, uiPwd)
	req.AddCookie(&http.Cookie{Name: models.UISecretCookie, Value: config.UISecret()})
	//dump, err := httputil.DumpRequest(req, true)
	//log.Debugf("req: %q", dump)
	resp, err := client.Do(req)
	if err != nil {
		log.Errorf("Error when calling UI api to get repositories, error: %v", err)
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		log.Errorf("Unexpected status code: %d", resp.StatusCode)
		dump, _ := httputil.DumpResponse(resp, true)
		log.Debugf("response: %q", dump)
		return nil, fmt.Errorf("unexpected status code when getting repository list: %d", resp.StatusCode)
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Errorf("Failed to read the response body, error: %v", err)
		return nil, err
	}

	var repoList []string
	err = json.Unmarshal(body, &repoList)
	return repoList, err
}
// Forward sets up the layout and template for the content of a page.
func (b *BaseController) Forward(title, templateName string) {
	b.Layout = filepath.Join(prefixNg, "layout.htm")
	b.TplName = filepath.Join(prefixNg, templateName)
	b.Data["Title"] = b.Tr(title)
	b.LayoutSections = make(map[string]string)
	b.LayoutSections["HeaderInclude"] = filepath.Join(prefixNg, viewPath, "header-include.htm")
	if b.UseCompressedJS {
		b.LayoutSections["HeaderScriptInclude"] = filepath.Join(prefixNg, viewPath, "script-min-include.htm")
	} else {
		b.LayoutSections["HeaderScriptInclude"] = filepath.Join(prefixNg, viewPath, "script-include.htm")
	}
	log.Debugf("Loaded HeaderScriptInclude file: %s", b.LayoutSections["HeaderScriptInclude"])
	b.LayoutSections["FooterInclude"] = filepath.Join(prefixNg, viewPath, "footer-include.htm")
	b.LayoutSections["HeaderContent"] = filepath.Join(prefixNg, viewPath, "header-content.htm")
	b.LayoutSections["FooterContent"] = filepath.Join(prefixNg, viewPath, "footer-content.htm")
}
// NewRepositoryWithUsername returns a Repository instance which will authorize the request
// according to the privileges of the user
func NewRepositoryWithUsername(name, endpoint, username string) (*Repository, error) {
	name = strings.TrimSpace(name)
	endpoint = strings.TrimRight(endpoint, "/")
	u, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
	client, err := newClient(endpoint, username, nil, "repository", name, "pull", "push")
	if err != nil {
		return nil, err
	}
	repository := &Repository{
		Name:     name,
		Endpoint: u,
		client:   client,
	}
	log.Debugf("initialized a repository client with username: %s %s %s", endpoint, name, username)
	return repository, nil
}
// NewRegistryWithUsername returns a Registry instance which will authorize the request
// according to the privileges of the user
func NewRegistryWithUsername(endpoint, username string) (*Registry, error) {
	endpoint = strings.TrimRight(endpoint, "/")
	u, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
	client, err := newClient(endpoint, username, nil, "registry", "catalog", "*")
	if err != nil {
		return nil, err
	}
	registry := &Registry{
		Endpoint: u,
		client:   client,
	}
	log.Debugf("initialized a registry client with username: %s %s", endpoint, username)
	return registry, nil
}
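// Usage sketch (illustrative, not part of the original code): listing repositories through a
// client created with NewRegistryWithUsername. The endpoint and username are placeholders and
// the function name is made up; Catalog is the method used by RefreshCatalogCache above.
func exampleCatalog() {
	reg, err := NewRegistryWithUsername("http://registry:5000", "admin")
	if err != nil {
		log.Errorf("failed to create registry client: %v", err)
		return
	}
	repos, err := reg.Catalog()
	if err != nil {
		log.Errorf("failed to get catalog: %v", err)
		return
	}
	log.Debugf("repositories: %v", repos)
}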
// Reschedule is called by statemachine to retry a job
func Reschedule(jobID int64) {
	log.Debugf("Job %d will be rescheduled in 5 minutes", jobID)
	time.Sleep(5 * time.Minute)
	log.Debugf("Rescheduling job %d", jobID)
	Schedule(jobID)
}