// core processor
func (self *crawler) Process(req *context.Request) {
    defer func() {
        if err := recover(); err != nil {
            // do not let a panic on one request affect the others
            scheduler.Sdl.DelDeduplication(req.GetUrl() + req.GetMethod())
            // increment the failure count
            cache.PageFailCount()
            // log the error
            logs.Log.Error(" * Fail [process panic]: %v", err)
        }
    }()
    // download the page
    resp := self.Downloader.Download(req)

    // on download failure there is nothing to process
    if resp.GetError() != nil {
        // remove this request's deduplication sample
        scheduler.Sdl.DelDeduplication(req.GetUrl() + req.GetMethod())
        // increment the failure count
        cache.PageFailCount()
        // log the error
        logs.Log.Error(" * Fail [download]: %v", resp.GetError())
        return
    }

    // run the parsing rule to extract data
    self.Spider.Parse(resp, resp.GetRuleName())

    // increment the count of successfully crawled pages
    cache.PageSuccCount()
    // log the successful crawl
    logs.Log.Informational(" * Success: %v", req.GetUrl())
    // feed this request's text results into the pipeline
    for _, data := range resp.GetItems() {
        self.Pipeline.CollectData(
            resp.GetRuleName(), // DataCell.RuleName
            data,               // DataCell.Data
            resp.GetUrl(),      // DataCell.Url
            resp.GetReferer(),  // DataCell.ParentUrl
            time.Now().Format("2006-01-02 15:04:05"),
        )
    }
    // feed this request's file results into the pipeline
    for _, img := range resp.GetFiles() {
        self.Pipeline.CollectFile(
            resp.GetRuleName(),
            img["Name"].(string),
            img["Body"].(io.ReadCloser),
        )
    }
}
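The defer/recover guard at the top is what keeps one misbehaving parse rule from killing the whole worker: the panic becomes a failure statistic, and the request's dedup sample is released so it can be retried. A minimal self-contained sketch of that pattern (the URLs and handler are hypothetical):

package main

import "fmt"

// processOne mirrors Process's defer/recover guard: a panic while
// handling one request is caught and logged as a failure instead of
// crashing the worker goroutine.
func processOne(url string, handle func(string)) {
    defer func() {
        if err := recover(); err != nil {
            fmt.Printf(" * Fail [process panic]: %v\n", err)
        }
    }()
    handle(url)
    fmt.Printf(" * Success: %v\n", url)
}

func main() {
    handle := func(url string) {
        if url == "http://bad.example.com" { // hypothetical failing page
            panic("parse rule blew up")
        }
    }
    for _, u := range []string{
        "http://a.example.com",
        "http://bad.example.com",
        "http://b.example.com",
    } {
        processOne(u, handle) // the panic on the second URL does not stop the third
    }
}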
// Build a request and add it to the queue.
// Request.Url and Request.Rule must be set.
// Request.Spider need not be set manually (the system fills it in).
// Request.EnableCookie is configured once on the Spider; a value specified in a rule's request is ignored.
// The following fields have defaults and may be left unset:
// Request.Method defaults to GET;
// Request.DialTimeout defaults to the constant context.DefaultDialTimeout; a negative value removes the limit on waiting for a response;
// Request.ConnTimeout defaults to the constant context.DefaultConnTimeout; a negative value removes the download timeout;
// Request.TryTimes defaults to the constant context.DefaultTryTimes; a negative value allows unlimited retries after failure;
// Request.RedirectTimes defaults to unlimited redirects; a negative value disables redirection;
// Request.RetryPause defaults to the constant context.DefaultRetryPause;
// Request.DownloaderID selects the downloader: 0 is the default Surf downloader (high concurrency, full-featured); 1 is the PhantomJS downloader (strong against anti-crawling defenses, but slow and low-concurrency).
func (self *Spider) AddQueue(req *context.Request) {
    req.
        SetSpiderName(self.Name).
        SetSpiderId(self.GetId()).
        SetEnableCookie(self.EnableCookie).
        Prepare()
    scheduler.Sdl.Push(req)
}
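Going by the doc comment, a caller only has to supply Url and Rule; everything else falls back to the documented defaults. A hedged usage sketch (the seed URL and rule name are hypothetical; the field names follow the comment above):

// Inside a hypothetical spider rule: queue one seed page rendered by
// PhantomJS, retrying up to 3 times. Url and Rule are required;
// Method falls back to GET.
self.AddQueue(&context.Request{
    Url:          "http://example.com/list", // hypothetical seed URL
    Rule:         "parseList",               // hypothetical rule name
    DownloaderID: 1,                         // per the comment above: 1 = PhantomJS
    TryTimes:     3,
})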
// Download dispatches the request to the downloader selected by its
// DownloaderID and wraps the raw result in a framework Response.
func (self *Surfer) Download(cReq *context.Request) *context.Response {
    cResp := context.NewResponse(nil)

    var resp *http.Response
    var err error

    switch cReq.GetDownloaderID() {
    case SURF_ID:
        resp, err = self.surf.Download(cReq)
    case PHANTOM_ID:
        resp, err = self.phantom.Download(cReq)
    }

    cResp.SetRequest(cReq)
    cResp.SetResponse(resp)
    cResp.SetError(err)

    return cResp
}
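Download is a thin dispatcher. Worth noting: the switch has no default branch, so an unrecognized DownloaderID leaves both resp and err nil, and since Process only checks GetError(), such a response would flow on to parsing as an empty "success". A self-contained model of the dispatch (the types and return values are illustrative):

package main

import "fmt"

// downloader abstracts surf and phantom behind one method.
type downloader interface {
    fetch(url string) (string, error)
}

type surf struct{}

func (surf) fetch(url string) (string, error) { return "surf page for " + url, nil }

type phantom struct{}

func (phantom) fetch(url string) (string, error) { return "phantom page for " + url, nil }

const (
    SURF_ID    = 0
    PHANTOM_ID = 1
)

// download dispatches on the ID, with an explicit error for unknown IDs
// (the original switch silently yields nil/nil instead).
func download(id int, url string) (string, error) {
    switch id {
    case SURF_ID:
        return surf{}.fetch(url)
    case PHANTOM_ID:
        return phantom{}.fetch(url)
    }
    return "", fmt.Errorf("unknown downloader id %d", id)
}

func main() {
    page, err := download(PHANTOM_ID, "http://example.com")
    fmt.Println(page, err)
}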
func (self *SrcManage) Push(req *context.Request) {
    spiderId, ok := req.GetSpiderId()
    if !ok {
        return
    }

    // lazily initialize this spider's queue
    if _, ok := self.queue[spiderId]; !ok {
        self.mutex[spiderId] = new(sync.Mutex)
        self.queue[spiderId] = make(map[int][]*context.Request)
    }

    priority := req.GetPriority()

    // register this priority level under the spider
    if _, ok := self.queue[spiderId][priority]; !ok {
        self.uIndex(spiderId, priority)
    }

    // append the request to the queue
    self.queue[spiderId][priority] = append(self.queue[spiderId][priority], req)
}
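The storage is a two-level map, spider ID → priority → FIFO slice, created lazily on first push. A minimal self-contained model of that layout (the request type is illustrative, and the per-spider mutex and priority index are omitted for brevity):

package main

import "fmt"

type request struct {
    spiderId, priority int
    url                string
}

// srcManage models the two-level queue: one map per spider,
// keyed by priority, each bucket a FIFO slice.
type srcManage struct {
    queue map[int]map[int][]*request
}

func (s *srcManage) push(req *request) {
    // lazily create the spider's map on first push
    if _, ok := s.queue[req.spiderId]; !ok {
        s.queue[req.spiderId] = make(map[int][]*request)
    }
    s.queue[req.spiderId][req.priority] = append(s.queue[req.spiderId][req.priority], req)
}

func main() {
    s := &srcManage{queue: map[int]map[int][]*request{}}
    s.push(&request{spiderId: 1, priority: 0, url: "http://example.com/a"})
    s.push(&request{spiderId: 1, priority: 9, url: "http://example.com/b"})
    fmt.Println(len(s.queue[1][0]), len(s.queue[1][9])) // prints: 1 1
}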
// add a request to the queue
func (self *scheduler) Push(req *context.Request) {
    self.RWMutex.RLock()
    defer self.RWMutex.RUnlock()

    if self.status == status.STOP {
        return
    }

    // if the request may not repeat and is a duplicate, drop it
    if !req.GetDuplicatable() && self.Deduplicate(req.GetUrl()+req.GetMethod()) {
        return
    }

    self.SrcManage.Push(req)
}
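Deduplication keys on url+method, which is why both the panic handler and the download-failure path in Process call DelDeduplication with the same key: the sample must be released so a retry can pass this check. A minimal set-based model of the assumed Deduplicate/DelDeduplication semantics:

package main

import "fmt"

// deduper models the assumed semantics: deduplicate reports whether the
// key was already seen, recording it on first sight; del releases the key.
type deduper map[string]bool

func (d deduper) deduplicate(key string) bool {
    if d[key] {
        return true // duplicate: Push drops the request
    }
    d[key] = true // first sight: record the sample, allow the push
    return false
}

func (d deduper) del(key string) { delete(d, key) } // failure path: allow a retry

func main() {
    d := deduper{}
    key := "http://example.com/a" + "GET"
    fmt.Println(d.deduplicate(key)) // false: queued
    fmt.Println(d.deduplicate(key)) // true: dropped
    d.del(key)                      // download failed, release the sample
    fmt.Println(d.deduplicate(key)) // false: the retry is queued again
}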
// Generate requests from a batch of URLs and add them to the queue.
func (self *Spider) BulkAddQueue(urls []string, req *context.Request) {
    for _, url := range urls {
        req.SetUrl(url)
        self.AddQueue(req)
    }
}
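Every URL here reuses the single *context.Request as a template, so per-URL differences beyond Url are not possible; and unless Prepare or the scheduler copies the request internally, an entry already sitting in the queue shares its pointer with the one being mutated on the next iteration. If that assumption is a concern, a defensive caller can build one fresh Request per URL (URLs and rule name hypothetical):

// Defensive alternative to BulkAddQueue: one fresh Request per URL,
// so queued requests never share a mutable pointer.
for page := 1; page <= 3; page++ {
    self.AddQueue(&context.Request{
        Url:  fmt.Sprintf("http://example.com/list?page=%d", page), // hypothetical
        Rule: "parseList",                                          // hypothetical
    })
}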