Example 1
// FlushCacheProjects saves cached project data to the database.
func FlushCacheProjects(pinfos []*PkgInfo, procks []*PkgRock) {
	q := connDb()
	defer q.Close()

	// Update project data.
	for _, p := range pinfos {
		info := new(PkgInfo)
		err := q.WhereEqual("path", p.Path).Find(info)
		if err == nil {
			// The error should always be nil; guard just in case the record does not exist.
			p.Id = info.Id
			// Limit to 10 new views per period.
			if p.Views-info.Views > 10 {
				p.Views = info.Views + 10
			}
		}
		_, err = q.Save(p)
		if err != nil {
			beego.Error("models.FlushCacheProjects(", p.Path, ") ->", err)
		}
	}

	// Update this week's rock table.
	if time.Now().UTC().Weekday() == time.Monday && utils.Cfg.MustBool("task", "rock_reset") {
		utils.Cfg.SetValue("task", "rock_reset", "0")
		utils.SaveConfig()
		// Reset rock table.
		_, err := q.Where("id > ?", int64(0)).Delete(new(PkgRock))
		if err != nil {
			beego.Error("models.FlushCacheProjects -> Reset rock table:", err)
		}
	} else if time.Now().UTC().Weekday() != time.Monday && !utils.Cfg.MustBool("task", "rock_reset") {
		utils.Cfg.SetValue("task", "rock_reset", "1")
		utils.SaveConfig()
	}

	for _, pr := range procks {
		r := new(PkgRock)
		err := q.WhereEqual("pid", pr.Pid).Find(r)
		if err == nil {
			pr.Id = r.Id
			r.Delta += pr.Rank - r.Rank
			pr.Delta = r.Delta
		}
		if _, err := q.Save(pr); err != nil {
			beego.Error("models.FlushCacheProjects(", pr.Pid, ") ->", err)
		}
	}
}
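
Note the view-throttling step before the save: the cached count is clamped so that each flush can add at most 10 views on top of what the database already holds. Below is a minimal, self-contained sketch of that clamp; viewRecord, clampViews, and the sample numbers are hypothetical names for illustration and are not part of the models package.

package main

import "fmt"

// viewRecord is a hypothetical stand-in for the stored and cached PkgInfo rows.
type viewRecord struct {
	Views int64
}

// clampViews limits the cached view count to at most 10 above the stored one,
// mirroring the "Limit to 10 new views per period" check in FlushCacheProjects.
func clampViews(cached, stored viewRecord) int64 {
	if cached.Views-stored.Views > 10 {
		return stored.Views + 10
	}
	return cached.Views
}

func main() {
	stored := viewRecord{Views: 100}
	cached := viewRecord{Views: 137}        // 37 new views accumulated in the cache
	fmt.Println(clampViews(cached, stored)) // prints 110: capped at +10 per flush
}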
Example 2
// CheckDoc returns the 'Package' for the given import path and tag,
// fetching it from the VCS and rendering it as needed.
// It returns an error when one occurs in the underlying functions.
func CheckDoc(broPath, tag string, rt requestType) (*hv.Package, error) {
	// Package documentation and crawl flag.
	pdoc, needsCrawl := &hv.Package{}, false

	// Trim prefix of standard library path.
	broPath = strings.TrimPrefix(broPath, "code.google.com/p/go/source/browse/src/pkg/")

	// Check Block List.
	if strings.Contains(utils.Cfg.MustValue("info", "block_list"), "|"+broPath+"|") {
		return nil, errors.New("Unable to process the operation because " + broPath +
			" is in the Block List")
	}

	// Get the package info.
	pinfo, err := models.GetPkgInfo(broPath, tag)
	switch {
	case err != nil:
		// Error means it does not exist.
		beego.Trace("doc.CheckDoc -> ", err)

		// Check if it's "Error 1040: Too many connections"
		if strings.Contains(err.Error(), "Error 1040:") {
			break
		}
		fallthrough
	case err != nil || pinfo.PkgVer != hv.PACKAGE_VER:
		// If PACKAGE_VER does not match, refresh anyway.
		pinfo.PkgVer = 0
		pinfo.Ptag = "ptag"
		needsCrawl = true
	default:
		// Check request type.
		switch rt {
		case RT_Human:
		case RT_Refresh:
			if len(tag) > 0 {
				break // Tagged versions will not be changed.
			}

			// Check whether the refresh operation is too frequent (within 5 minutes).
			needsCrawl = time.Unix(pinfo.Created, 0).Add(_REFRESH_LIMIT).Before(time.Now())
			if !needsCrawl {
				// Return the time when a refresh will be allowed again as the error message.
				return nil, errors.New(time.Unix(pinfo.Created, 0).Add(_REFRESH_LIMIT).UTC().String())
			}
		}
	}

	if needsCrawl {
		// Fetch package from VCS.
		c := make(chan crawlResult, 1)
		go func() {
			// TODO
			pdoc, err = crawlDoc(broPath, tag, pinfo)
			c <- crawlResult{pdoc, err}
		}()

		select {
		case cr := <-c:
			if cr.err == nil {
				pdoc = cr.pdoc
			}
			err = cr.err
		case <-time.After(_FETCH_TIMEOUT):
			err = errUpdateTimeout
		}

		if pdoc == nil {
			if err != nil && strings.HasPrefix(err.Error(), "Cannot find Go files") &&
				len(tag) == 0 {
				beego.Info("Added to block list:", broPath)
				utils.Cfg.SetValue("info", "block_list",
					utils.Cfg.MustValue("info", "block_list")+broPath+"|")
				utils.SaveConfig()
			}
			return nil, err
		}

		if err == nil {
			pdoc.IsNeedRender = true
			beego.Info("doc.CheckDoc(", pdoc.ImportPath, tag, "), Goroutine #", runtime.NumGoroutine())
		} else {
			switch {
			case err == errNotModified:
				beego.Info("Serving(", broPath, ") without modification")
				pdoc = &hv.Package{}
				pinfo.Created = time.Now().UTC().Unix()
				pdoc.PkgInfo = pinfo
				return pdoc, nil
			case len(pdoc.ImportPath) > 0:
				return pdoc, err
			case err == errUpdateTimeout:
				// Handle timeout on packages never seen before as not found.
				beego.Error("Serving(", broPath, ") as not found after timeout")
				return nil, errors.New("doc.CheckDoc -> " + err.Error())
			}
		}
	} else {
		pdoc.PkgInfo = pinfo
	}

	return pdoc, err
}
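
The crawl itself uses the common goroutine-plus-timeout pattern: the worker writes its result to a buffered channel so it never blocks, and the caller selects between that channel and time.After. Below is a stripped-down, self-contained sketch of the same pattern; fetch, result, errTimeout, and the 300ms limit are invented for illustration, whereas in CheckDoc the worker is crawlDoc and the limit is _FETCH_TIMEOUT.

package main

import (
	"errors"
	"fmt"
	"time"
)

type result struct {
	val string
	err error
}

var errTimeout = errors.New("update timeout")

// fetch simulates a slow crawl; in CheckDoc this role is played by crawlDoc.
func fetch() (string, error) {
	time.Sleep(500 * time.Millisecond)
	return "package docs", nil
}

func main() {
	// Buffered channel so the worker can finish even if the caller stops waiting.
	c := make(chan result, 1)
	go func() {
		v, err := fetch()
		c <- result{v, err}
	}()

	select {
	case r := <-c:
		fmt.Println("got:", r.val, r.err)
	case <-time.After(300 * time.Millisecond):
		fmt.Println("gave up:", errTimeout)
	}
}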
Example 3
// FlushCacheProjects saves cached project data to the database.
func FlushCacheProjects(pinfos []hv.PkgInfo) {
	procks := make([]PkgRock, 0, len(pinfos))
	// Update project data.
	for _, p := range pinfos {
		info := &hv.PkgInfo{ImportPath: p.ImportPath}
		has, err := x.Get(info)
		if err != nil {
			beego.Error("models.FlushCacheProjects(", p.ImportPath, ") -> Get hv.PkgInfo:", err)
			continue
		}
		if has {
			// The record should normally exist; reuse its Id when it does.
			p.Id = info.Id
			// Limit to 10 new views per period.
			if p.Views-info.Views > 10 {
				p.Views = info.Views + 10
			}
		}

		// Update rank.
		p.Rank = calRefRanks(strings.Split(p.RefPids, "|")) + p.Views
		if p.Rank > 2*p.Views {
			p.Rank = 2 * p.Views
		}

		if has {
			_, err = x.Id(p.Id).Update(p)
		} else {
			_, err = x.Insert(p)
		}
		if err != nil {
			beego.Error("models.FlushCacheProjects(", p.ImportPath,
				") -> Save hv.PkgInfo:", err)
			continue
		}

		procks = append(procks, PkgRock{
			Pid:  p.Id,
			Path: p.ImportPath,
			Rank: p.Rank,
		})
	}

	// Update this week's rock table.
	if time.Now().UTC().Weekday() == time.Monday && utils.Cfg.MustBool("task", "rock_reset") {
		utils.Cfg.SetValue("task", "rock_reset", "0")
		utils.SaveConfig()
		// Reset rock table.
		_, err := x.Where("id > ?", int64(0)).Delete(new(PkgRock))
		if err != nil {
			beego.Error("models.FlushCacheProjects -> Reset rock table:", err)
		}
	} else if time.Now().UTC().Weekday() != time.Monday && !utils.Cfg.MustBool("task", "rock_reset") {
		utils.Cfg.SetValue("task", "rock_reset", "1")
		utils.SaveConfig()
	}

	for _, pr := range procks {
		r := &PkgRock{Path: pr.Path}
		has, err := x.Get(r)
		if err != nil {
			beego.Error("models.FlushCacheProjects(", pr.Path, ") -> Get PkgRock:", err)
			continue
		}
		if has {
			pr.Id = r.Id
			r.Delta += pr.Rank - r.Rank
			pr.Delta = r.Delta
			_, err = x.Id(pr.Id).Update(pr)
		} else {
			_, err = x.Insert(pr)
		}
		if err != nil {
			beego.Error("models.FlushCacheProjects(", pr.Path, ") -> Save PkgRock:", err)
		}
	}
}
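
This variant also recomputes the rank inline: the reference-based score plus the view count, capped at twice the view count so heavily referenced but rarely viewed packages cannot run away. Below is a self-contained sketch of that cap; calcRank and refScore are illustrative names, and calRefRanks itself is not shown in the snippet.

package main

import "fmt"

// calcRank mirrors the rank update in FlushCacheProjects: the reference score
// plus the view count, capped at twice the view count.
func calcRank(refScore, views int64) int64 {
	rank := refScore + views
	if rank > 2*views {
		rank = 2 * views
	}
	return rank
}

func main() {
	fmt.Println(calcRank(50, 200))  // 250: under the cap, kept as-is
	fmt.Println(calcRank(500, 200)) // 400: capped at 2 * views
}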
Example 4
// FlushCacheProjects saves cached project data to the database.
func FlushCacheProjects(pinfos []*hv.PkgInfo) {
	q := connDb()
	defer q.Close()

	procks := make([]*PkgRock, 0, len(pinfos))
	// Update project data.
	for _, p := range pinfos {
		info := new(hv.PkgInfo)
		err := q.WhereEqual("import_path", p.ImportPath).Find(info)
		if err == nil {
			// The error should always be nil; guard just in case the record does not exist.
			p.Id = info.Id
			// Limit to 10 new views per period.
			if p.Views-info.Views > 10 {
				p.Views = info.Views + 10
			}
		}

		// Update rank.
		p.Rank = calRefRanks(q, strings.Split(p.RefPids, "|")) + p.Views
		if p.Rank > 2*p.Views {
			p.Rank = 2 * p.Views
		}

		_, err = q.Save(p)
		if err != nil {
			beego.Error("models.FlushCacheProjects(", p.ImportPath, ") ->", err)
		}

		procks = append(procks, &PkgRock{
			Pid:  p.Id,
			Path: p.ImportPath,
			Rank: p.Rank,
		})
	}

	// Update this week's rock table.
	if time.Now().UTC().Weekday() == time.Monday && utils.Cfg.MustBool("task", "rock_reset") {
		utils.Cfg.SetValue("task", "rock_reset", "0")
		utils.SaveConfig()
		// Reset rock table.
		_, err := q.Where("id > ?", int64(0)).Delete(new(PkgRock))
		if err != nil {
			beego.Error("models.FlushCacheProjects -> Reset rock table:", err)
		}
	} else if time.Now().UTC().Weekday() != time.Monday && !utils.Cfg.MustBool("task", "rock_reset") {
		utils.Cfg.SetValue("task", "rock_reset", "1")
		utils.SaveConfig()
	}

	for _, pr := range procks {
		r := new(PkgRock)
		err := q.WhereEqual("path", pr.Path).Find(r)
		if err == nil {
			pr.Id = r.Id
			r.Delta += pr.Rank - r.Rank
			pr.Delta = r.Delta
		}
		if _, err := q.Save(pr); err != nil {
			beego.Error("models.FlushCacheProjects(", pr.Path, ") ->", err)
		}
	}
}
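
All the FlushCacheProjects variants share the same weekly bookkeeping: a rock_reset flag guards the Monday truncation of the rock table and is re-armed on any other weekday, so the reset fires at most once per week. Below is a self-contained sketch of that toggle, with a plain bool standing in for the "task"/"rock_reset" config value and a printed message standing in for the table wipe; maybeResetRocks is an invented name.

package main

import (
	"fmt"
	"time"
)

// rockReset stands in for the "task"/"rock_reset" config flag.
var rockReset = true

// maybeResetRocks mirrors the weekly-reset logic: on Monday (UTC), if the flag
// is still armed, disarm it and wipe the table; on any other day, re-arm it so
// the next Monday triggers exactly once.
func maybeResetRocks(now time.Time) {
	switch {
	case now.UTC().Weekday() == time.Monday && rockReset:
		rockReset = false
		fmt.Println("resetting rock table")
	case now.UTC().Weekday() != time.Monday && !rockReset:
		rockReset = true
	}
}

func main() {
	maybeResetRocks(time.Now())
}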