Example #1
// Parse the HTML DOM here and record the parse results that we want into the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse the HTML.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	query := p.GetHtmlParser()
	currentUrl := p.GetRequest().GetUrl()
	var urls []string
	query.Find("a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		urlHref, err := url.Parse(href)
		if err != nil {
			mlog.LogInst().LogError(err.Error())
			return
		}
		if !urlHref.IsAbs() {
			// Resolve the relative link against the current page URL; plain string
			// concatenation mishandles root-relative paths such as "/foo".
			if baseURL, err := url.Parse(currentUrl); err == nil {
				href = baseURL.ResolveReference(urlHref).String()
			}
		}
		// Temporary check done here in crawler.go; it will move into the pattern
		// package. checkMatchPattern, rep, and urlstr are assumed to be package-level
		// helpers defined elsewhere in this crawler.
		if checkMatchPattern(base, href) {
			visited, _ := rep.CheckIfVisited(href)
			if !visited {
				rep.VisitedNewNode(href)
				// urls = append(urls, href)
				urlstr.UploadURL(href)
			}
		}
	})

	// Store the page content in the database, then commit the URL as crawled.
	fmt.Printf("====store & commit : %s====\n\n\n", currentUrl)
	content, _ := query.Html()
	// content := ""
	storage.StoreInsert(collection, storage.StoreFormat{currentUrl, content})
	urlstr.CommitURL(currentUrl)
	// Free a crawl slot for the next worker (releaseSlot is assumed to be a
	// package-level channel managed by the crawler).
	releaseSlot <- 1

	// Pull one queued URL, if any (GetOneURL is a helper defined elsewhere);
	// a fresh name avoids shadowing the net/url package.
	if next := GetOneURL(); next != "" {
		urls = append(urls, next)
	}

	p.AddTargetRequests(urls, "html")

}
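
This Process method only runs once it is wired into a spider. Below is a minimal sketch of that wiring, assuming the go_spider framework (github.com/hu17889/go_spider) these examples target and the MyPageProcesser type from the example above; the task name, start URL, and thread count are placeholders:

package main

import (
	"github.com/hu17889/go_spider/core/spider"
)

// NewMyPageProcesser constructs the processer whose Process method is defined above.
func NewMyPageProcesser() *MyPageProcesser {
	return &MyPageProcesser{}
}

// Finish is the other half of the PageProcesser interface; it runs once crawling ends.
func (this *MyPageProcesser) Finish() {
	// Flush buffers, close storage handles, etc.
}

func main() {
	spider.NewSpider(NewMyPageProcesser(), "crawl_task"). // task name used for bookkeeping
		AddUrl("http://example.com/", "html").        // placeholder start URL; "html" is the response type
		SetThreadnum(3).                              // three concurrent download workers
		Run()
}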
Example #2
func (this *MyProcessor) Process(p *page.Page) {
	if !p.IsSucc() {
		mlog.LogInst().LogError(p.Errormsg())
		return
	}

	u, err := url.Parse(p.GetRequest().GetUrl())
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		return
	}
	if !strings.HasSuffix(u.Host, "jiexieyin.org") {
		return
	}

	var urls []string
	query := p.GetHtmlParser()

	query.Find("a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		reJavascript := regexp.MustCompile("^javascript\\:")
		reLocal := regexp.MustCompile("^\\#")
		reMailto := regexp.MustCompile("^mailto\\:")
		if reJavascript.MatchString(href) || reLocal.MatchString(href) || reMailto.MatchString(href) {
			return
		}

		// Handle relative paths by resolving them against the current page URL.
		urlHref, err := url.Parse(href)
		if err != nil {
			mlog.LogInst().LogError(err.Error())
			return
		}
		if !urlHref.IsAbs() {
			base, err := url.Parse(p.GetRequest().GetUrl())
			if err != nil {
				mlog.LogInst().LogError(err.Error())
				return
			}
			urls = append(urls, base.ResolveReference(urlHref).String())
		} else {
			urls = append(urls, href)
		}

	})

	p.AddTargetRequests(urls, "html")

}
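
The three regexps in this example are recompiled for every anchor even though they only test fixed prefixes. A sketch of an equivalent check using strings.HasPrefix (skipHref is a hypothetical helper, not part of the original code):

// skipHref reports whether an href should be ignored before queueing:
// javascript: pseudo-links, in-page fragments, and mailto: addresses.
func skipHref(href string) bool {
	return strings.HasPrefix(href, "javascript:") ||
		strings.HasPrefix(href, "#") ||
		strings.HasPrefix(href, "mailto:")
}

Inside the Each callback, if skipHref(href) { return } then replaces the three MatchString calls.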
Example #3
func (this MyPageProcesser) Process(p *page.Page) {
	query := p.GetHtmlParser()

	if p.GetUrlTag() == "index" {
		query.Find(`div[class="main area"] div[class="lc"] ul li a`).Each(func(i int, s *goquery.Selection) {
			href, exists := s.Attr("href")
			if exists {
				// Default pattern that intentionally matches nothing; it is
				// swapped out below when the link is a recognized URL type.
				reg := regexp.MustCompile(`^do not know what is this`)
				var fmtStr string
				if rxYule.MatchString(href) {
					reg = rxYule
					fmtStr = wkSohuYule
				}

				if rxPic.MatchString(href) {
					reg = rxPic
					fmtStr = wkSohuPic
				}

				matches := reg.FindStringSubmatch(href)
				if len(matches) == 2 {
					addRequest(p, "changyan", fmt.Sprintf(fmtStr, matches[1]), "", s.Text())
				}
			}
		})
	}

	if p.GetUrlTag() == "changyan" {
		jsonMap := ChangyanJson{}
		err := json.NewDecoder(strings.NewReader(p.GetBodyStr())).Decode(&jsonMap)
		if err == nil {
			content, ok := p.GetRequest().GetMeta().(string)
			if ok {
				fmt.Println("Title:", content, " CommentCount:", jsonMap.ListData.OuterCmtSum, " ParticipationCount:", jsonMap.ListData.ParticipationSum)
			}
		}
	}
}
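
The "changyan" branch decodes into a ChangyanJson value that the snippet never defines. Here is a minimal sketch of a struct shape that would satisfy the two fields read above; the JSON tag names are assumptions about the Changyan response, not confirmed by the source:

// ChangyanJson mirrors only the fields Process reads; the json tags are
// assumed names and should be adjusted to the actual API response.
type ChangyanJson struct {
	ListData struct {
		OuterCmtSum      int `json:"outer_cmt_sum"`
		ParticipationSum int `json:"participation_sum"`
	} `json:"listData"`
}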
Example #4
// Parse the HTML DOM here and record the parse results that we want into the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse the HTML.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	crawok := false
	//query := p.GetHtmlParser()
	//var urls []string

	//fmt.Println(p.GetBodyStr())
	// Grab every <a href="..."> occurrence from the raw body with a regexp
	// (the trailing (.*?) group matches the empty string and goes unused);
	// this example bypasses the HTML parser entirely.
	re := regexp.MustCompile(`<a href="(.*?)">(.*?)`)

	sectUrlsTemp := re.FindAllSubmatch([]byte(p.GetBodyStr()), -1)

	for _, url := range sectUrlsTemp {
		for _, url1 := range url {
			crawok = true

			http_index := strings.Index(string(url1), "http://shinichr.diandian.com")

			http_note := strings.Index(string(url1), "\"")
			http_quote := strings.Index(string(url1), "#")

			if http_index >= 0 && http_quote < 0 {
				if http_note > 0 && http_note < http_index {
					continue
				}

				var http_url string
				if http_note <= 0 {
					http_url = string(url1)[http_index:]
				} else {
					http_url = string(url1)[http_index:http_note]
				}

				if this.visit_url[http_url] == 0 {
					this.visit_url[http_url] = 1

					fmt.Println("####unvisited:", http_url)
					//fmt.Println("###AddTargetRequest:", http_url)
					p.AddTargetRequest(http_url, "html")
				}
			}
		}
	}
	if !crawok {
		fmt.Println("crawl false:*****************", p.GetRequest().GetUrl())
		http_page := strings.Index(p.GetRequest().GetUrl(), "http://shinichr.diandian.com/page")
		http_post := strings.Index(p.GetRequest().GetUrl(), "http://shinichr.diandian.com/post")
		fmt.Println("http_page:", http_page, "http_post:", http_post)
		if http_page >= 0 || http_post >= 0 {
			//this.visit_url[p.GetRequest().GetUrl()] = 0
			p.AddTargetRequest(p.GetRequest().GetUrl(), "html")
		}
	}
	http_index := strings.Index(p.GetRequest().GetUrl(), "http://shinichr.diandian.com/post/")

	//	rex, _ := regexp.Compile("\\/")
	//replaceurl := rex.ReplaceAllString(p.GetRequest().GetUrl(), ".")
	//fmt.Println("http_index=", http_index)
	//fmt.Println("replaceurl=", p.GetRequest().GetUrl()[http_index:], "....", http_index)
	if http_index >= 0 {

		// Strip the "http://shinichr.diandian.com/post/" prefix (34 bytes);
		// this assumes the URL starts with that prefix.
		cuturl := p.GetRequest().GetUrl()[34:]
		//fmt.Println("replaceurl=", cuturl)
		rex := regexp.MustCompile(`/`)
		replaceurl := rex.ReplaceAllString(cuturl, ".")

		filedir := fmt.Sprintf("/home/shinichr/diandian_post/%s", replaceurl)

		fout, err := os.Create(filedir)
		if err != nil {
			fmt.Println(filedir, err)
			return
		}
		defer fout.Close()

		src := p.GetBodyStr()

		// Lower-case every tag so the removal patterns below match uniformly.
		re := regexp.MustCompile(`<[\S\s]+?>`)
		src = re.ReplaceAllStringFunc(src, strings.ToLower)

		// Remove <style> blocks.
		re = regexp.MustCompile(`<style[\S\s]+?</style>`)
		src = re.ReplaceAllString(src, "")

		// Remove <script> blocks.
		re = regexp.MustCompile(`<script[\S\s]+?</script>`)
		src = re.ReplaceAllString(src, "")

		// Replace all remaining HTML tags with newlines.
		re = regexp.MustCompile(`<[\S\s]+?>`)
		src = re.ReplaceAllString(src, "\n")

		// Collapse consecutive whitespace into single newlines.
		re = regexp.MustCompile(`\s{2,}`)
		src = re.ReplaceAllString(src, "\n")

		//fmt.Println(strings.TrimSpace(src))

		fout.WriteString(html.UnescapeString(src))
		fmt.Println("save file ", filedir)
	}
	//query.Find(`div[class="rich-content] div[class="post"] div[class="post-top"] div[class="post-content post-text"] a`).Each(func(i int, s *goquery.Selection) {
	/*query.Find("div.content").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		http_index := strings.Index(href, "http")
		http_url := href[http_index:]

		//fmt.Println("###url:\n", http_url, "=", this.visit_url[http_url])
		//this.newurl <- http_url
		if this.visit_url[http_url] == 0 {
			this.visit_url[http_url] = 1
			fmt.Println("###AddTargetRequest:", http_url)
			p.AddTargetRequest(http_url, "html")
		}
		urls = append(urls, href)
	})*/
	// These urls will be saved and crawled by other coroutines.
	/*doc, _ := goquery.NewDocument("http://shinichr.diandian.com")

	doc.Find("a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		fmt.Println("####href=", href)
	})*/
	//p.AddField("readme", readme)
}
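
The regexp pipeline in this example re-implements plain-text extraction by hand. Since goquery is already available, here is a sketch of the same cleanup done with the parser (htmlToText is a hypothetical helper using goquery, regexp, and strings; whitespace handling differs slightly from the regexp version):

// htmlToText strips markup from an HTML body: script/style subtrees are
// dropped entirely, then runs of whitespace collapse to single newlines.
func htmlToText(body string) (string, error) {
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(body))
	if err != nil {
		return "", err
	}
	doc.Find("script,style").Remove()
	ws := regexp.MustCompile(`\s{2,}`)
	return ws.ReplaceAllString(doc.Text(), "\n"), nil
}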