Example #1
// getName extracts the lemma title and stores it in the page's result fields.
func (this *PlantProcesser) getName(query *goquery.Document, p *page.Page) {
	name := query.Find(".lemmaWgt-lemmaTitle-title").Find("h1").Text()
	name = strings.Trim(name, " \t\n")
	p.AddField("name", name)
}
Example #2
// connectByHttp downloads the page over plain HTTP, honouring the method
// (GET/POST), headers and cookies carried by the request.
func connectByHttp(p *page.Page, req *request.Request) (*http.Response, error) {
	client := &http.Client{
		CheckRedirect: req.GetRedirectFunc(),
	}

	httpreq, err := http.NewRequest(req.GetMethod(), req.GetUrl(), strings.NewReader(req.GetPostdata()))
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		p.SetStatus(true, err.Error())
		return nil, err
	}
	if header := req.GetHeader(); header != nil {
		httpreq.Header = header
	}

	if cookies := req.GetCookies(); cookies != nil {
		for i := range cookies {
			httpreq.AddCookie(cookies[i])
		}
	}

	var resp *http.Response
	if resp, err = client.Do(httpreq); err != nil {
		if e, ok := err.(*url.Error); ok && e.Err != nil && e.Err.Error() == "normal" {
			// a custom redirect func may abort the redirect chain with the
			// sentinel error "normal"; treat that as a successful download
		} else {
			mlog.LogInst().LogError(err.Error())
			p.SetStatus(true, err.Error())
			return nil, err
		}
	}

	return resp, nil
}
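For context, here is a minimal, self-contained sketch of the same pattern: an http.Client whose CheckRedirect callback aborts the redirect chain with a sentinel error, which the caller then treats as success. The sentinel name and URL are illustrative, not part of the crawler.

package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/url"
)

var errStopRedirect = errors.New("normal") // sentinel used to stop redirects

func main() {
	client := &http.Client{
		// Returning an error here aborts the redirect chain; client.Do
		// wraps it in *url.Error, so we unwrap and compare below.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return errStopRedirect
		},
	}

	resp, err := client.Get("http://example.com/")
	if err != nil {
		if e, ok := err.(*url.Error); ok && errors.Is(e.Err, errStopRedirect) {
			// redirect was cut short on purpose; resp holds the last response
		} else {
			fmt.Println("download failed:", err)
			return
		}
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}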
Example #3
File: main.go Project: tuyuwei/test
// Parse the HTML DOM here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	query := p.GetHtmlParser()
	var urls []string
	query.Find("#threadlisttableid tbody").Each(func(i int, s *goquery.Selection) {
		if s.HasClass("emptb") {
			return
		}
		href, _ := s.Find("tbody tr .icn a").Attr("href")
		urls = append(urls, href)
	})

	// these urls will be saved and crawled by other coroutines.
	p.AddTargetRequests(urls, "html")

	title := query.Find("#thread_subject").Text()
	title = strings.Trim(title, "\t\n\r")
	author := query.Find("#postlist div .authi").Eq(0).Text()
	author = strings.Trim(author, "\t\r\n")

	if title == "" || author == "" {
		p.SetSkip(true)
	}

	p.AddField("title", title)
	p.AddField("author", author)
}
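For orientation, here is how a PageProcesser like this is typically wired into a crawl. This is a minimal sketch assuming go_spider's documented builder API; the task name and seed URL are placeholders, and NewMyPageProcesser is assumed to be the example's own constructor.

package main

import (
	"github.com/hu17889/go_spider/core/pipeline"
	"github.com/hu17889/go_spider/core/spider"
)

func main() {
	// NewSpider takes the page processer and a task name; AddUrl seeds
	// the crawl and tags the expected response type ("html").
	spider.NewSpider(NewMyPageProcesser(), "forum_task").
		AddUrl("http://example.com/forum-57-1.html", "html").
		AddPipeline(pipeline.NewPipelineConsole()). // print results to stdout
		SetThreadnum(3).                            // concurrent download workers
		Run()
}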
Example #4
// downloadText fetches the page body as plain text and marks the page successful.
func (this *HttpDownloader) downloadText(p *page.Page, req *request.Request) *page.Page {
	p, destbody := this.downloadFile(p, req)
	if !p.IsSucc() {
		return p
	}

	p.SetBodyStr(destbody).SetStatus(false, "")
	return p
}
Example #5
// downloadHtml fetches the page, re-parses the body with goquery, and stores
// both the normalized HTML and the parsed document on the page.
func (this *HttpDownloader) downloadHtml(p *page.Page, req *request.Request) *page.Page {
	var err error
	p, destbody := this.downloadFile(p, req)
	if !p.IsSucc() {
		return p
	}
	bodyReader := bytes.NewReader([]byte(destbody))

	var doc *goquery.Document
	if doc, err = goquery.NewDocumentFromReader(bodyReader); err != nil {
		mlog.LogInst().LogError(err.Error())
		p.SetStatus(true, err.Error())
		return p
	}

	var body string
	if body, err = doc.Html(); err != nil {
		mlog.LogInst().LogError(err.Error())
		p.SetStatus(true, err.Error())
		return p
	}

	p.SetBodyStr(body).SetHtmlParser(doc).SetStatus(false, "")

	return p
}
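As a standalone illustration of the goquery calls used above (parsing from an in-memory reader, then re-serializing with Html()), a minimal sketch:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/PuerkitoBio/goquery"
)

func main() {
	raw := `<html><body><h1>Title</h1><p class="para">hello</p></body></html>`

	// goquery parses any io.Reader into a queryable document.
	doc, err := goquery.NewDocumentFromReader(bytes.NewReader([]byte(raw)))
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(doc.Find("h1").Text()) // "Title"

	// Html() re-serializes the (normalized) document, which is what
	// downloadHtml stores back on the page.
	body, err := doc.Html()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(body) > 0)
}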
Example #6
func (this *PlantProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	query := p.GetHtmlParser()

	if !this.isPlant(query, p) {
		// mark the page to be skipped: pipelines will not receive its items
		p.SetSkip(true)
	}

	this.getName(query, p)
	this.getSummary(query, p)
	this.getCatalog(query, p)
	p.AddTargetRequests(this.getUrls(query), "html")
}
Example #7
// Parse the HTML DOM here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	query := p.GetHtmlParser()
	currentUrl := p.GetRequest().GetUrl()
	var urls []string
	query.Find("a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		urlHref, err := url.Parse(href)
		if err != nil {
			mlog.LogInst().LogError(err.Error())
			return
		}
		if !urlHref.IsAbs() {
			// note: plain concatenation only resolves correctly for some
			// hrefs; see the net/url ResolveReference sketch after Example #26
			href = currentUrl + href
		}
		// Temporarily check in crawler.go, it will be implemented in pattern package.

		if checkMatchPattern(base, href) {
			visited, _ := rep.CheckIfVisited(href)
			if !visited {
				rep.VisitedNewNode(href)
				// urls = append(urls, href)
				urlstr.UploadURL(href)
			}
		}
	})

	// store content to db

	fmt.Printf("====store & commit : %s====\n\n\n", currentUrl)
	content, _ := query.Html()
	// content := ""
	storage.StoreInsert(collection, storage.StoreFormat{currentUrl, content})
	urlstr.CommitURL(currentUrl)
	releaseSlot <- 1

	url := GetOneURL()
	if url != "" {
		urls = append(urls, url)
	}

	p.AddTargetRequests(urls, "html")

}
Example #8
// Parse the HTML DOM here and record the parse results we want to keep.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}

	query := p.GetHtmlParser()

	name := query.Find(".lemmaTitleH1").Text()
	name = strings.Trim(name, " \t\n")

	summary := query.Find(".card-summary-content .para").Text()
	summary = strings.Trim(summary, " \t\n")

	// the entity we want to save by Pipeline
	p.AddField("name", name)
	p.AddField("summary", summary)
}
Example #9
func TestDownloadJson(t *testing.T) {
	req := request.NewRequest("http://live.sina.com.cn/zt/api/l/get/finance/globalnews1/index.htm?format=json&id=23521&pagesize=4&dire=f&dpc=1", "json")

	var dl downloader.Downloader = downloader.NewHttpDownloader()
	p := dl.Download(req)

	jsonMap := p.GetJsonMap()
	fmt.Printf("%v", jsonMap)
}
Example #10
func TestDownloadHtml(t *testing.T) {
	req := request.NewRequest("http://live.sina.com.cn/zt/l/v/finance/globalnews1/", "html", "", "GET", "", nil, nil, nil, nil)

	var dl downloader.Downloader = downloader.NewHttpDownloader()
	p := dl.Download(req)

	doc := p.GetHtmlParser()

	s := doc.Find("body")
	if s.Length() < 1 {
		t.Error("html parse failed!")
	}

	/*
	   doc, err := goquery.NewDocument("http://live.sina.com.cn/zt/l/v/finance/globalnews1/")
	   if err != nil {
	       fmt.Printf("%v",err)
	   }
	   s := doc.Find("meta");
	   fmt.Println(s.Length())

	   resp, err := http.Get("http://live.sina.com.cn/zt/l/v/finance/globalnews1/")
	   if err != nil {
	       fmt.Printf("%v",err)
	   }
	   defer resp.Body.Close()
	   doc, err = goquery.NewDocumentFromReader(resp.Body)
	   s = doc.Find("meta");
	   fmt.Println(s.Length())
	*/
}
Example #11
func TestCharSetChange(t *testing.T) {
	req := request.NewRequest("http://soft.chinabyte.com/416/13164916.shtml", "html", "", "GET", "", nil, nil, nil, nil)

	var dl downloader.Downloader = downloader.NewHttpDownloader()
	p := dl.Download(req)

	// the downloader should already have converted the page body to UTF-8
	body := p.GetBodyStr()
	fmt.Println(body)
}
Example #12
// downloadJson fetches the page, unwraps JSONP if needed, and stores the parsed JSON on the page.
func (this *HttpDownloader) downloadJson(p *page.Page, req *request.Request) *page.Page {
	var err error
	p, destbody := this.downloadFile(p, req)
	if !p.IsSucc() {
		return p
	}

	body := []byte(destbody)
	mtype := req.GetResponceType()
	if mtype == "jsonp" {
		tmpstr := util.JsonpToJson(destbody)
		body = []byte(tmpstr)
	}

	var r *simplejson.Json
	if r, err = simplejson.NewJson(body); err != nil {
		mlog.LogInst().LogError(string(body) + "\t" + err.Error())
		p.SetStatus(true, err.Error())
		return p
	}

	// json result
	p.SetBodyStr(string(body)).SetJson(r).SetStatus(false, "")

	return p
}
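util.JsonpToJson is the project's own helper for unwrapping JSONP; here is a minimal sketch of the idea, purely illustrative and not the library's actual implementation:

package main

import (
	"fmt"
	"regexp"
)

// jsonpToJson strips a JSONP wrapper like `cb({"a":1});` down to the
// inner JSON. Illustrative only; real inputs need more care
// (comments, padding, arrays, quoting).
func jsonpToJson(jsonp string) string {
	re := regexp.MustCompile(`(?s)^[^(]*\((.*)\)\s*;?\s*$`)
	if m := re.FindStringSubmatch(jsonp); m != nil {
		return m[1]
	}
	return jsonp
}

func main() {
	fmt.Println(jsonpToJson(`t13975294({"result":{"status":0}});`))
	// -> {"result":{"status":0}}
}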
Example #13
func (this MyPageProcesser) Process(p *page.Page) {
	query := p.GetHtmlParser()

	if p.GetUrlTag() == "index" {
		query.Find(`div[class="main area"] div[class="lc"] ul li a`).Each(func(i int, s *goquery.Selection) {
			url, isExist := s.Attr("href")
			if isExist {
				reg := regexp.MustCompile(`^do not know what is this`)
				var fmtStr string
				if rxYule.MatchString(url) {
					reg = rxYule
					fmtStr = wkSohuYule
				}

				if rxPic.MatchString(url) {
					reg = rxPic
					fmtStr = wkSohuPic
				}

				regexpArr := reg.FindStringSubmatch(url)
				if len(regexpArr) == 2 {
					addRequest(p, "changyan", fmt.Sprintf(fmtStr, regexpArr[1]), "", s.Text())
				}
			}
		})
	}

	if p.GetUrlTag() == "changyan" {
		jsonMap := ChangyanJson{}
		err := json.NewDecoder(strings.NewReader(p.GetBodyStr())).Decode(&jsonMap)
		if err == nil {
			content, ok := p.GetRequest().GetMeta().(string)
			if ok {
				fmt.Println("Title:", content, " CommentCount:", jsonMap.ListData.OuterCmtSum, " ParticipationCount:", jsonMap.ListData.ParticipationSum)
			}
		}
	}
}
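The length check above relies on FindStringSubmatch returning the full match plus one entry per capture group; a tiny standalone illustration (URL and pattern are made up):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// One capture group, so a successful match yields a 2-element slice:
	// index 0 is the whole match, index 1 the captured id.
	rx := regexp.MustCompile(`/news/(\d+)\.shtml$`)

	m := rx.FindStringSubmatch("http://example.com/news/12345.shtml")
	if len(m) == 2 {
		fmt.Println("captured id:", m[1]) // "12345"
	}
}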
Example #14
func (this SitePageProcesser) Process(p *page.Page) {
	fmt.Println("Site Page Processer")

	if p.GetUrlTag() == "index" {
		query := p.GetHtmlParser()
		query.Find("ul[class='audioList fontYaHei'] li a").Each(func(i int, s *goquery.Selection) {
			strTitle, _ := s.Attr("title")
			strUrl, _ := s.Attr("data-url")

			if !IsFileExist(strTitle) {
				strFileName := fmt.Sprintf("%s.mp3", strTitle)
				fmt.Println(strFileName)
				cmd := exec.Command("/usr/local/bin/wget", strUrl, "-O", strFileName)
				// Output cannot be called on a Cmd that has already been run,
				// so run the command once and capture stdout+stderr together.
				out, err := cmd.CombinedOutput()
				if err != nil {
					fmt.Println(err)
				}
				fmt.Println(string(out))
			}
		})
	}
}
Example #15
// Parse the HTML DOM here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	query := p.GetHtmlParser()
	var urls []string
	query.Find("h3[class='repo-list-name'] a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		urls = append(urls, "http://github.com/"+href)
	})
	// these urls will be saved and crawed by other coroutines.
	p.AddTargetRequests(urls, "html")

	name := query.Find(".entry-title .author").Text()
	name = strings.Trim(name, " \t\n")
	repository := query.Find(".entry-title .js-current-repository").Text()
	repository = strings.Trim(repository, " \t\n")
	//readme, _ := query.Find("#readme").Html()
	if name == "" {
		p.SetSkip(true)
	}
	// the entity we want to save by Pipeline
	p.AddField("author", name)
	p.AddField("project", repository)
	//p.AddField("readme", readme)
}
Example #16
// Parse the HTML DOM here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	var fetch_content string
	query := p.GetHtmlParser()
	content := p.GetBodyStr()
	reg := regexp.MustCompile(`class="([0-9a-zA-Z_-]*content[0-9a-zA-Z_-]*)"`)
	reg_res := reg.FindAllStringSubmatch(content, -1)
	class_content := make([]string, 0)
	for _, class := range reg_res {
		submatch := class[1]
		class_content = append(class_content, submatch)
	}
	removeDuplicate(&class_content)

	for _, class := range class_content {

		query.Find("." + class).Each(func(i int, s *goquery.Selection) {
			text := strings.Trim(s.Text(), " \t\n")
			text = strings.Replace(text, " ", "", -1)
			text = strings.Replace(text, "\n", "", -1)
			text = strings.Replace(text, "\t", "", -1)

			if text != "" {
				fetch_content = fetch_content + text
			}
		})
	}

	if fetch_content != "" {
		p.AddField("content", fetch_content)
	}

}
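removeDuplicate is a helper from the example's own project; the signature is assumed from the call site above. A minimal in-place string dedupe with the same shape:

package main

import "fmt"

// removeDuplicate keeps the first occurrence of each string, mutating
// the slice through the pointer, matching the call site above.
func removeDuplicate(list *[]string) {
	seen := make(map[string]bool)
	out := (*list)[:0]
	for _, s := range *list {
		if !seen[s] {
			seen[s] = true
			out = append(out, s)
		}
	}
	*list = out
}

func main() {
	classes := []string{"content", "main-content", "content"}
	removeDuplicate(&classes)
	fmt.Println(classes) // [content main-content]
}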
Example #17
// downloadFile downloads the page body and converts its charset to UTF-8.
func (this *HttpDownloader) downloadFile(p *page.Page, req *request.Request) (*page.Page, string) {
	var urlstr string
	if urlstr = req.GetUrl(); len(urlstr) == 0 {
		mlog.LogInst().LogError("url is empty")
		p.SetStatus(true, "url is empty")
		return p, ""
	}

	client := &http.Client{
		CheckRedirect: req.GetRedirectFunc(),
	}
	httpreq, err := http.NewRequest(req.GetMethod(), req.GetUrl(), strings.NewReader(req.GetPostdata()))
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		p.SetStatus(true, err.Error())
		return p, ""
	}
	if header := req.GetHeader(); header != nil {
		httpreq.Header = header
	}
	if cookies := req.GetCookies(); cookies != nil {
		for i := range cookies {
			httpreq.AddCookie(cookies[i])
		}
	}

	var resp *http.Response
	if resp, err = client.Do(httpreq); err != nil {
		if e, ok := err.(*url.Error); ok && e.Err != nil && e.Err.Error() == "normal" {
			// redirect chain aborted by the sentinel "normal" error; see connectByHttp above
		} else {
			mlog.LogInst().LogError(err.Error())
			p.SetStatus(true, err.Error())
			return p, ""
		}
	}
	defer resp.Body.Close()

	p.SetHeader(resp.Header)
	p.SetCookies(resp.Cookies())

	// convert the body to UTF-8 according to the Content-Type charset
	bodyStr := this.changeCharsetEncodingAuto(resp.Header.Get("Content-Type"), resp.Body)

	return p, bodyStr
}
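changeCharsetEncodingAuto is the downloader's own helper; a minimal sketch of the same idea using golang.org/x/net/html/charset (an assumption about the approach, not the library's actual code):

package main

import (
	"fmt"
	"io"
	"strings"

	"golang.org/x/net/html/charset"
)

// toUTF8 wraps a body reader so the bytes come out UTF-8 encoded, using
// the charset declared in the Content-Type header, with auto-detection
// as a fallback.
func toUTF8(contentType string, body io.Reader) (string, error) {
	r, err := charset.NewReader(body, contentType)
	if err != nil {
		return "", err
	}
	b, err := io.ReadAll(r)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	s, err := toUTF8("text/html; charset=utf-8", strings.NewReader("<p>héllo</p>"))
	fmt.Println(s, err)
}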
Example #18
/*
 ** Parse the page: store each follower's info in DynamoDB and push the
 ** URLs to crawl next into SQS.
 */
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		glog.Errorln(p.Errormsg())
		return
	}
	/*
	 ** log the page that was just crawled
	 */
	glog.Infoln(p)
	query := p.GetHtmlParser()

	if Urls[i] == "weibo.cn" {
		i = i + 1
	}

	if UrlsLevel[i] == 0 {
		glog.Infoln("layer:", crawlUrl.Layer)
		this.w.GetNextPageUrl(query, p)
		this.w.GetFriendsUrl(query, p)
	} else if UrlsLevel[i] == 1 {
		this.w.GetFriendsInfo(query)
	}
	// if crawlUrl.Layer == 0 {
	// } else if crawlUrl.Layer == 1 {
	// 	glog.Infoln("layer:", crawlUrl.Layer)
	// 	this.w.GetNextPageUrl(query, p)
	// 	this.w.GetFFUrl(query)
	// } else if crawlUrl.Layer == 2 {
	// 	glog.Infoln("layer:", crawlUrl.Layer)
	// 	this.w.GetFFInfo(query)
	// }
	//

	header_num := rand.Intn(9)
	header_json := headerJson[header_num]
	i = i + 1
	p.AddTargetRequestWithHeaderFile(Urls[i], "html", header_json)

}
Example #19
// downloadFile downloads the page body, via proxy if one is configured,
// and converts its charset to UTF-8.
func (this *HttpDownloader) downloadFile(p *page.Page, req *request.Request) (*page.Page, string) {
	var err error
	var urlstr string
	if urlstr = req.GetUrl(); len(urlstr) == 0 {
		mlog.LogInst().LogError("url is empty")
		p.SetStatus(true, "url is empty")
		return p, ""
	}

	var resp *http.Response

	if proxystr := req.GetProxyHost(); len(proxystr) != 0 {
		// download through the configured http proxy
		resp, err = connectByHttpProxy(p, req)
	} else {
		// plain http download
		resp, err = connectByHttp(p, req)
	}

	if err != nil {
		return p, ""
	}

	defer resp.Body.Close()

	p.SetHeader(resp.Header)
	p.SetCookies(resp.Cookies())

	// convert the body to UTF-8, transparently un-gzipping it if needed
	var bodyStr string
	if resp.Header.Get("Content-Encoding") == "gzip" {
		bodyStr = this.changeCharsetEncodingAutoGzipSupport(resp.Header.Get("Content-Type"), resp.Body)
	} else {
		bodyStr = this.changeCharsetEncodingAuto(resp.Header.Get("Content-Type"), resp.Body)
	}
	return p, bodyStr
}
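The gzip branch presumably wraps the body in a gzip reader before charset conversion; a minimal standalone sketch of that pattern (function name and URL are illustrative):

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
)

// bodyReader returns a reader yielding the decompressed body when the
// server sent Content-Encoding: gzip, and the raw body otherwise. The
// caller still owns closing resp.Body.
func bodyReader(resp *http.Response) (io.Reader, error) {
	if resp.Header.Get("Content-Encoding") == "gzip" {
		return gzip.NewReader(resp.Body)
	}
	return resp.Body, nil
}

func main() {
	resp, err := http.Get("http://example.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()

	r, err := bodyReader(resp)
	if err != nil {
		fmt.Println(err)
		return
	}
	b, _ := io.ReadAll(r)
	fmt.Println(len(b), "bytes")
}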
Example #20
// Parse the HTML DOM here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {

	if p.GetUrlTag() == "site_login" {
		//fmt.Printf("%v\n", p.GetCookies())
		this.cookies = p.GetCookies()
		// AddTargetRequestWithParams takes a request built from:
		//  1. Url.
		//  2. Response type: "html", "json", "jsonp" or "text".
		//  3. The urltag, a name for marking the url and distinguishing different urls in PageProcesser and Pipeline.
		//  4. The method, POST or GET.
		//  5. The postdata, the body string sent to the server.
		//  6. The header for the http request.
		//  7. Cookies.
		//  8. An http redirect function.
		if len(this.cookies) != 0 {
			p.AddField("info", "get cookies success")
			req := request.NewRequest("http://backadmin.hucong.net/site/index", "html", "site_index", "GET", "", nil, this.cookies, nil, nil)
			p.AddTargetRequestWithParams(req)
		} else {
			p.AddField("info", "get cookies failed")
		}
	} else {
		//fmt.Printf("%v\n", p.GetBodyStr())
		query := p.GetHtmlParser()
		pageTitle := query.Find(".page-content .page-title").Text()

		if len(pageTitle) != 0 {
			p.AddField("page_title", pageTitle)
			p.AddField("info", "login success")
		} else {
			p.AddField("info", "login failed")
		}

	}

	return
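	// NOTE: the early return above makes everything below unreachable; it
	// appears to be an older version of Process kept in place for reference.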
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}

	query := p.GetHtmlParser()
	var urls []string
	query.Find("h3[class='repo-list-name'] a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		urls = append(urls, "http://github.com/"+href)
	})
	// these urls will be saved and crawed by other coroutines.
	p.AddTargetRequests(urls, "html")

	name := query.Find(".entry-title .author").Text()
	name = strings.Trim(name, " \t\n")
	repository := query.Find(".entry-title .js-current-repository").Text()
	repository = strings.Trim(repository, " \t\n")
	//readme, _ := query.Find("#readme").Html()
	if name == "" {
		p.SetSkip(true)
	}
	// the entity we want to save by Pipeline
	p.AddField("author", name)
	p.AddField("project", repository)
	//p.AddField("readme", readme)
}
Example #21
// Parse the HTML DOM here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	crawok := false
	//query := p.GetHtmlParser()
	//var urls []string

	//fmt.Println(p.GetBodyStr())
	re := regexp.MustCompile(`<a href="(.*?)">(.*?)`)

	sectUrlsTemp := re.FindAllSubmatch([]byte(p.GetBodyStr()), -1)

	for _, url := range sectUrlsTemp {
		for _, url1 := range url {
			crawok = true

			http_index := strings.Index(string(url1), "http://shinichr.diandian.com")

			http_note := strings.Index(string(url1), "\"")
			http_quote := strings.Index(string(url1), "#")

			if http_index >= 0 && http_quote < 0 {
				if http_note > 0 && http_note < http_index {
					continue
				}

				var http_url string
				if http_note <= 0 {
					http_url = string(url1)[http_index:]
				} else {
					http_url = string(url1)[http_index:http_note]
				}

				if this.visit_url[http_url] == 0 {
					this.visit_url[http_url] = 1

					fmt.Println("####unvisited:", http_url)
					//fmt.Println("###AddTargetRequest:", http_url)
					p.AddTargetRequest(http_url, "html")
				}
			}
		}
	}
	if !crawok {
		fmt.Println("crawl false:*****************", p.GetRequest().GetUrl())
		http_page := strings.Index(p.GetRequest().GetUrl(), "http://shinichr.diandian.com/page")
		http_post := strings.Index(p.GetRequest().GetUrl(), "http://shinichr.diandian.com/post")
		fmt.Println("http_page:", http_page, "http_post:", http_post)
		if http_page >= 0 || http_post >= 0 {
			//this.visit_url[p.GetRequest().GetUrl()] = 0
			p.AddTargetRequest(p.GetRequest().GetUrl(), "html")
		}
	}
	http_index := strings.Index(p.GetRequest().GetUrl(), "http://shinichr.diandian.com/post/")

	//	rex, _ := regexp.Compile("\\/")
	//replaceurl := rex.ReplaceAllString(p.GetRequest().GetUrl(), ".")
	//fmt.Println("http_index=", http_index)
	//fmt.Println("replaceurl=", p.GetRequest().GetUrl()[http_index:], "....", http_index)
	if http_index >= 0 {

		// strip the fixed 34-character prefix "http://shinichr.diandian.com/post/"
		cuturl := p.GetRequest().GetUrl()[34:]
		rex, _ := regexp.Compile("\\/")
		replaceurl := rex.ReplaceAllString(cuturl, ".")

		filedir := fmt.Sprintf("/home/shinichr/diandian_post/%s", replaceurl)

		fout, err := os.Create(filedir)
		if err != nil {
			fmt.Println(filedir, err)
			return
		}
		defer fout.Close()

		src := p.GetBodyStr()
		re, _ := regexp.Compile("\\<[\\S\\s]+?\\>")
		src = re.ReplaceAllStringFunc(src, strings.ToLower)

		// strip <style> blocks
		re, _ = regexp.Compile("\\<style[\\S\\s]+?\\</style\\>")
		src = re.ReplaceAllString(src, "")

		// strip <script> blocks
		re, _ = regexp.Compile("\\<script[\\S\\s]+?\\</script\\>")
		src = re.ReplaceAllString(src, "")

		// strip all remaining HTML tags, replacing each with a newline
		re, _ = regexp.Compile("\\<[\\S\\s]+?\\>")
		src = re.ReplaceAllString(src, "\n")

		// collapse consecutive whitespace into a single newline
		re, _ = regexp.Compile("\\s{2,}")
		src = re.ReplaceAllString(src, "\n")

		//fmt.Println(strings.TrimSpace(src))

		fout.WriteString(html.UnescapeString(src))
		fmt.Println("save file ", filedir)
	}
	//query.Find(`div[class="rich-content] div[class="post"] div[class="post-top"] div[class="post-content post-text"] a`).Each(func(i int, s *goquery.Selection) {
	/*query.Find("div.content").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		http_index := strings.Index(href, "http")
		http_url := href[http_index:]

		//fmt.Println("###url:\n", http_url, "=", this.visit_url[http_url])
		//this.newurl <- http_url
		if this.visit_url[http_url] == 0 {
			this.visit_url[http_url] = 1
			fmt.Println("###AddTargetRequest:", http_url)
			p.AddTargetRequest(http_url, "html")
		}
		urls = append(urls, href)
	})*/
	// these urls will be saved and crawed by other coroutines.
	/*doc, _ := goquery.NewDocument("http://shinichr.diandian.com")

	doc.Find("a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		fmt.Println("####href=", href)
	})*/
	//p.AddField("readme", readme)
}
Example #22
// Parse the json data here and record the parse results we want to keep.
// Package simplejson (https://github.com/bitly/go-simplejson) is used to parse data of json.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}

	query := p.GetJson()
	status, err := query.GetPath("result", "status", "code").Int()
	if err != nil {
		log.Panicf("page is crawled error : errorinfo=%s : startNewsId=%d", err.Error(), this.startNewsId)
	}
	if status != 0 {
		// calling err.Error() here would panic on a nil error, so report the status alone
		log.Panicf("page is crawled error : status=%d : startNewsId=%d", status, this.startNewsId)
	}
	num, err := query.GetPath("result", "pageStr", "pageSize").Int()
	if num == 0 || err != nil {
		// Add url of next crawl
		startIdstr := strconv.Itoa(this.startNewsId)
		p.AddTargetRequest("http://live.sina.com.cn/zt/api/l/get/finance/globalnews1/index.htm?format=json&id="+startIdstr+"&pagesize=10&dire=f", "json")
		return
	}

	var idint, nextid int
	var nextidstr string
	query = query.Get("result").Get("data")
	for i := 0; i < num; i++ {
		id, err := query.GetIndex(i).Get("id").String()
		if id == "" || err != nil {
			continue
		}
		idint, err = strconv.Atoi(id)
		if err != nil {
			continue
		}
		if idint <= this.startNewsId {
			break
		}
		if i == 0 {
			nextid = idint
			nextidstr = id
		}
		content, err := query.GetIndex(i).Get("content").String()
		if content == "" || err != nil {
			continue
		}
		time, err := query.GetIndex(i).Get("created_at").String()
		if err != nil {
			continue
		}

		p.AddField(id+"_id", id)
		p.AddField(id+"_content", content)
		p.AddField(id+"_time", time)
	}
	// Add url of next crawl
	this.startNewsId = nextid
	p.AddTargetRequest("http://live.sina.com.cn/zt/api/l/get/finance/globalnews1/index.htm?format=json&id="+nextidstr+"&pagesize=10&dire=f", "json")
	//println(p.GetTargetRequests())

}
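For reference, a self-contained sketch of the go-simplejson calls used above (GetPath / Get / GetIndex with typed accessors); the JSON payload is a made-up stand-in for the live feed:

package main

import (
	"fmt"
	"log"

	"github.com/bitly/go-simplejson"
)

func main() {
	raw := []byte(`{"result":{"status":{"code":0},"data":[{"id":"42","content":"hi"}]}}`)

	js, err := simplejson.NewJson(raw)
	if err != nil {
		log.Fatal(err)
	}

	// GetPath walks nested objects; the typed accessor returns an error
	// if the node is missing or has the wrong type.
	code, _ := js.GetPath("result", "status", "code").Int()
	fmt.Println("status code:", code)

	// Get/GetIndex navigate objects and arrays one step at a time.
	id, _ := js.Get("result").Get("data").GetIndex(0).Get("id").String()
	fmt.Println("first id:", id)
}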
Example #23
// getCatalog extracts the lemma catalog text and stores it in the page's result fields.
func (this *PlantProcesser) getCatalog(query *goquery.Document, p *page.Page) {
	catalog := query.Find(".lemma-catalog").Find("span.text").Text()
	catalog = strings.Trim(catalog, " \t\n")
	p.AddField("catalog", catalog)
}
Example #24
// getSummary extracts the lemma summary and stores it in the page's result fields.
func (this *PlantProcesser) getSummary(query *goquery.Document, p *page.Page) {
	summary := query.Find(".lemma-summary .para").Text()
	summary = strings.Trim(summary, " \t\n")
	p.AddField("summary", summary)
}
Example #25
// pageProcess is the spider's core worker: it downloads the page with
// retries, runs the PageProcesser, queues new requests and feeds pipelines.
func (this *Spider) pageProcess(req *request.Request) {
	var p *page.Page

	defer func() {
		if err := recover(); err != nil { // do not affect other
			if strerr, ok := err.(string); ok {
				mlog.LogInst().LogError(strerr)
			} else {
				mlog.LogInst().LogError("pageProcess error")
			}
		}
	}()

	// download the page, retrying up to 3 times on failure
	for i := 0; i < 3; i++ {
		this.sleep()
		p = this.pDownloader.Download(req)
		if p.IsSucc() {
			break
		}
	}

	if !p.IsSucc() { // still failing after the retries: give up
		return
	}

	this.pPageProcesser.Process(p)
	for _, req := range p.GetTargetRequests() {
		this.AddRequest(req)
	}

	// output
	if !p.GetSkip() {
		for _, pip := range this.pPiplelines {
			pip.Process(p.GetPageItems(), this)
		}
	}
}
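The pipelines receiving p.GetPageItems() implement go_spider's Pipeline interface. Below is a hedged sketch of a custom pipeline; the import paths and interface shape are assumptions based on the library's pipeline package, not verified against a specific version:

package main

import (
	"fmt"

	"github.com/hu17889/go_spider/core/common/com_interfaces"
	"github.com/hu17889/go_spider/core/common/page_items"
)

// PipelinePrint dumps every field collected via p.AddField. Process is
// invoked once per successfully processed, non-skipped page.
type PipelinePrint struct{}

func (this *PipelinePrint) Process(items *page_items.PageItems, t com_interfaces.Task) {
	for k, v := range items.GetAll() {
		fmt.Printf("%s = %s\n", k, v)
	}
}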
Example #26
func (this *MyProcessor) Process(p *page.Page) {
	if !p.IsSucc() {
		mlog.LogInst().LogError(p.Errormsg())
		return
	}

	u, err := url.Parse(p.GetRequest().GetUrl())
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		return
	}
	if !strings.HasSuffix(u.Host, "jiexieyin.org") {
		return
	}

	var urls []string
	query := p.GetHtmlParser()

	query.Find("a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		reJavascript := regexp.MustCompile("^javascript\\:")
		reLocal := regexp.MustCompile("^\\#")
		reMailto := regexp.MustCompile("^mailto\\:")
		if reJavascript.MatchString(href) || reLocal.MatchString(href) || reMailto.MatchString(href) {
			return
		}

		// resolve relative paths
		var absHref string
		urlHref, err := url.Parse(href)
		if err != nil {
			mlog.LogInst().LogError(err.Error())
			return
		}
		if !urlHref.IsAbs() {
			// note: simple concatenation only works for some hrefs; a more
			// robust approach is url.URL.ResolveReference, sketched below
			urlPrefix := p.GetRequest().GetUrl()
			absHref = urlPrefix + href
			urls = append(urls, absHref)
		} else {
			urls = append(urls, href)
		}

	})

	p.AddTargetRequests(urls, "html")

}
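The concatenation above breaks for root-relative or sibling-relative hrefs. A minimal sketch of the standard-library alternative with net/url (shown as an alternative, not what the example itself does; URLs are placeholders):

package main

import (
	"fmt"
	"log"
	"net/url"
)

func main() {
	base, err := url.Parse("http://www.jiexieyin.org/news/index.html")
	if err != nil {
		log.Fatal(err)
	}

	for _, href := range []string{"page2.html", "/about", "../tags"} {
		ref, err := url.Parse(href)
		if err != nil {
			continue
		}
		// ResolveReference implements RFC 3986 resolution against the
		// page the link was found on.
		fmt.Println(base.ResolveReference(ref))
	}
	// Output:
	// http://www.jiexieyin.org/news/page2.html
	// http://www.jiexieyin.org/about
	// http://www.jiexieyin.org/tags
}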
Example #27
// addRequest queues a follow-up JSON request. The content string travels
// along as the request's meta payload and is read back via GetMeta in
// Process; the cookie argument is currently unused.
func addRequest(p *page.Page, tag, url, cookie, content string) {
	req := request.NewRequest(url, "json", tag, "GET", "", nil, nil, nil, content)
	p.AddTargetRequestWithParams(req)
}
Example #28
// Parse the HTML DOM here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}

	query := p.GetHtmlParser()

	query.Find(`div[class="wx-rb bg-blue wx-rb_v1 _item"]`).Each(func(i int, s *goquery.Selection) {
		name := s.Find("div.txt-box > h3").Text()
		href, _ := s.Attr("href")

		fmt.Printf("WeName:%v link:http://http://weixin.sogou.com%v \r\n", name, href)
		// the entity we want to save by Pipeline
		p.AddField("name", name)
		p.AddField("href", href)
	})

	next_page_href, _ := query.Find("#sogou_next").Attr("href")
	if next_page_href == "" {
		p.SetSkip(true)
	} else {
		p.AddTargetRequestWithHeaderFile("http://weixin.sogou.com/weixin"+next_page_href, "html", "weixin.sogou.com.json")
	}

}