Code example #1
File: main.go  Project: xujb/go_spider
// Parse the HTML DOM here and record the parse results we want on the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse the HTML.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		fmt.Println(p.Errormsg())
		return
	}

	query := p.GetHtmlParser()

	query.Find(`div[class="wx-rb bg-blue wx-rb_v1 _item"]`).Each(func(i int, s *goquery.Selection) {
		name := s.Find("div.txt-box > h3").Text()
		href, _ := s.Attr("href")

		fmt.Printf("WeName:%v link:http://http://weixin.sogou.com%v \r\n", name, href)
		// the entity we want to save by Pipeline
		p.AddField("name", name)
		p.AddField("href", href)
	})

	// Queue the next results page, if any, reusing the stored header file for the request.
	nextPageHref, _ := query.Find("#sogou_next").Attr("href")
	if nextPageHref == "" {
		p.SetSkip(true)
	} else {
		p.AddTargetRequestWithHeaderFile("http://weixin.sogou.com/weixin"+nextPageHref, "html", "weixin.sogou.com.json")
	}

}
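
For context, Process only does the per-page work; the project's main function still has to register the processor with a spider and seed the first URL. Below is a minimal bootstrap sketch, assuming the upstream hu17889/go_spider API (spider.NewSpider, AddUrl, SetThreadnum, Run) and a hypothetical NewMyPageProcesser constructor; the xujb fork's import paths may differ.

package main

import (
	"github.com/hu17889/go_spider/core/spider"
)

// MyPageProcesser is assumed to carry no state; the real definition
// lives alongside the Process method shown above.
type MyPageProcesser struct{}

// NewMyPageProcesser is a hypothetical constructor for the processor.
func NewMyPageProcesser() *MyPageProcesser {
	return &MyPageProcesser{}
}

func main() {
	spider.NewSpider(NewMyPageProcesser(), "WeixinSogou").                 // task name is arbitrary
		AddUrl("http://weixin.sogou.com/weixin?query=golang", "html"). // illustrative seed query
		SetThreadnum(1).                                               // one worker keeps the crawl polite
		Run()
}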
Code example #2
File: spider.go  Project: luzh0422/spider-docker
/*
 ** Parse the page, store each follower's info in DynamoDB, and push the next URLs to crawl into SQS.
 */
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		glog.Errorln(p.Errormsg())
		return
	}
	// Log the crawled page.
	glog.Infoln(p)
	query := p.GetHtmlParser()

	// Skip over a bare "weibo.cn" entry in the URL list.
	if Urls[i] == "weibo.cn" {
		i++
	}

	if UrlsLevel[i] == 0 {
		// Follower-list page: queue the next results page and each follower's profile URL.
		glog.Infoln("layer:", crawlUrl.Layer)
		this.w.GetNextPageUrl(query, p)
		this.w.GetFriendsUrl(query, p)
	} else if UrlsLevel[i] == 1 {
		// Profile page: extract the follower's details for DynamoDB.
		this.w.GetFriendsInfo(query)
	}

	// Pick one of the stored header files at random so requests rotate their headers.
	headerNum := rand.Intn(9)
	headerFile := headerJson[headerNum]
	// Advance to the next URL in the frontier and queue it with the chosen headers.
	i = i + 1
	p.AddTargetRequestWithHeaderFile(Urls[i], "html", headerFile)

}
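
Note that this Process method leans on package-level state (i, Urls, UrlsLevel, headerJson, crawlUrl) that the snippet never declares. A hypothetical sketch of those declarations, with types inferred only from how the method uses them:

// Hypothetical package-level state assumed by Process above; the real
// project declares and populates these elsewhere (e.g. from SQS).

// CrawlUrl is an assumed holder for the current request's metadata.
type CrawlUrl struct {
	Layer int // crawl depth of the URL being processed
}

var (
	i          int      // cursor into Urls, advanced after each processed page
	Urls       []string // crawl frontier: follower-list and profile URLs
	UrlsLevel  []int    // kind of each URL: 0 = follower-list page, 1 = profile page
	headerJson []string // header files to rotate through; rand.Intn(9) assumes at least 9 entries
	crawlUrl   CrawlUrl // metadata for the URL currently being processed
)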