Example #1
func Test_PT1Recorder_ShortenTime(t *testing.T) {
	assert := wcg.NewAssert(t)

	iepg := &pt.IEPG{}
	iepg.ProgramTitle = "test"
	iepg.StartAt = lib.Now()
	iepg.EndAt = lib.Now().Add(10 * time.Second)
	iepg2 := &pt.IEPG{}
	iepg2.ProgramTitle = "test"
	iepg2.StartAt = iepg.StartAt
	iepg2.EndAt = iepg.EndAt.Add(-8 * time.Second)
	notifier := &testNotifier{}
	recorder := NewPT1Recorder(iepg, newPT1RecorderConfig(), notifier)
	go recorder.Start()
	stats := recorder.GetStats()

	time.Sleep(500 * time.Millisecond)
	recorder.Update(iepg2)

	time.Sleep(500 * time.Millisecond)
	assert.EqInt(
		int(pt.IEPGProcessing), int(stats.Status),
		"recorder status should be pt.IEPGCanceled",
	)

	time.Sleep(2 * time.Second)
	assert.EqInt(
		int(pt.IEPGCompleted), int(stats.Status),
		"recorder status should be pt.IEPGCompleted",
	)
	assert.EqInt(2, int(stats.EndAt.Sub(stats.StartAt).Seconds()), "recorder should finish in about 2 seconds (actual=%f)", stats.EndAt.Sub(stats.StartAt).Seconds())
	assert.OK(notifier.StartNotified, "notifier should be notified on start.")
	assert.OK(notifier.EndNotified, "notifier should be notified on end.")
}
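These recorder tests share two fixtures that are not shown in this listing: testNotifier and newPT1RecorderConfig. Based only on how they are used here and in Example #22 (the recorder calls NotifyStart/NotifyEnd with its stats, and the tests read StartNotified/EndNotified), a minimal sketch of the notifier could look like the following; the field layout and the stats parameter type are assumptions, not the actual fixture.

// Hypothetical sketch of the testNotifier fixture assumed by these tests.
// Only the two flags and the two hooks are implied by the assertions and by
// PT1Recorder.Start in Example #22; the real fixture may differ.
type testNotifier struct {
	StartNotified bool
	EndNotified   bool
}

// NotifyStart records that the recorder reported a start (the stats type is assumed).
func (n *testNotifier) NotifyStart(stats interface{}) { n.StartNotified = true }

// NotifyEnd records that the recorder reported an end (the stats type is assumed).
func (n *testNotifier) NotifyEnd(stats interface{}) { n.EndNotified = true }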
Example #2
func Test_PT1Recorder_Retry(t *testing.T) {
	assert := wcg.NewAssert(t)

	iepg := &pt.IEPG{}
	iepg.ProgramTitle = "test"
	iepg.StartAt = lib.Now()
	iepg.EndAt = lib.Now().Add(3 * time.Second)
	cfg := newPT1RecorderConfig()
	cfg.Recpt1Path = "./fixtures/pt1commands/fail.sh"
	notifier := &testNotifier{}
	recorder := NewPT1Recorder(iepg, cfg, notifier)
	go recorder.Start()
	time.Sleep(500 * time.Millisecond)
	stats := recorder.GetStats()
	assert.EqInt(
		int(pt.IEPGProcessing), int(stats.Status),
		"recorder status should be pt.IEPGCanceled",
	)
	// change the recpt1path to the correct one to succeed.
	cfg.Recpt1Path = "./fixtures/pt1commands/simple.sh"
	time.Sleep(2800 * time.Millisecond)

	assert.EqInt(
		int(pt.IEPGCompleted), int(stats.Status),
		"recorder status should be pt.IEPGCompleted",
	)
	assert.EqInt(3, int(stats.EndAt.Sub(stats.StartAt).Seconds()), "recorder should finish in about 3 seconds (actual=%f)", stats.EndAt.Sub(stats.StartAt).Seconds())
	assert.GtInt(0, stats.Retries, "recorder should retry the command")
	assert.OK(notifier.StartNotified, "notifier should be notified on start.")
	assert.OK(notifier.EndNotified, "notifier should be notified on end.")
}
Example #3
func Test_Kind_CreateEntityFromForm(t *testing.T) {
	assert := gaetest.NewAssert(t)

	n := lib.Now()
	lib.TemporarySetNow(n, func() {
		var ent, err = testEntity.CreateEntityFromForm(url.Values{
			"content_bytes": []string{"abcdef"},
			"slice_type":    []string{"1", "2", "3"},
		})
		assert.Nil(err)
		assert.EqStr("This is defualt value", ent.(*TEntity).Desc)
		assert.EqInt(10, ent.(*TEntity).Digit)
		assert.EqStr("abcdef", string(ent.(*TEntity).ContentBytes))
		assert.EqInt(3, len(ent.(*TEntity).SliceType))
		assert.EqStr("1", ent.(*TEntity).SliceType[0])
		assert.EqStr("2", ent.(*TEntity).SliceType[1])
		assert.EqStr("3", ent.(*TEntity).SliceType[2])
		assert.EqTime(n, ent.(*TEntity).CreatedAt)
	})

	var ent, err = testEntity.CreateEntityFromForm(url.Values{
		"digit": []string{"2"},
		"desc":  []string{"specific desc"},
	})
	assert.Nil(err)
	assert.EqInt(2, ent.(*TEntity).Digit)
	assert.EqStr("specific desc", ent.(*TEntity).Desc)
}
Example #4
func NewMessengerOptIn(pageID, userID, messengerUserID string) *MessengerOptIn {
	return &MessengerOptIn{
		Key:             fmt.Sprintf("%s|%s", pageID, userID),
		PageID:          pageID,
		UserID:          userID,
		MessengerUserID: messengerUserID,
		UpdatedAt:       lib.Now(),
	}
}
Example #5
func setupAPITasks(app *server.App) {
	var API = app.API()

	API.GET("/tasks/", server.Handler(
		middleware.EntityQuery(
			entities.AsyncAPITask.Query().Filter(
				"UpdatedAt >=",
				request.Value(func(req *wcg.Request) interface{} {
					return wcg.ParseDateOr(req.Query("since"), lib.Now().Add(-_TaskExpirationDays))
				}),
			).Filter(
				"UpdatedAt <=",
				request.Value(func(req *wcg.Request) interface{} {
					return wcg.ParseDateOr(req.Query("until"), lib.Now())
				}),
			),
		),
	))
}
Example #6
func Test_PT1Recorder_Completed(t *testing.T) {
	assert := wcg.NewAssert(t)

	iepg := &pt.IEPG{}
	iepg.ProgramTitle = "test"
	iepg.StartAt = lib.Now()
	iepg.EndAt = lib.Now().Add(1 * time.Second)
	notifier := &testNotifier{}
	recorder := NewPT1Recorder(iepg, newPT1RecorderConfig(), notifier)
	recorder.Start()

	stats := recorder.GetStats()
	assert.EqInt(
		int(pt.IEPGCompleted), int(stats.Status),
		"recorder status should be pt.IEPGCompleted",
	)
	assert.EqInt(0, stats.Retries, "recorder should not retry")
	assert.EqInt(1, int(stats.EndAt.Sub(stats.StartAt).Seconds()), "recorder should finish in about 1 second (actual=%f)", stats.EndAt.Sub(stats.StartAt).Seconds())
	assert.OK(notifier.StartNotified, "notifier should be notified on start.")
	assert.OK(notifier.EndNotified, "notifier should be notified on end.")
}
Example #7
func (r *MockRecorder) Start() {
	r.stats.StartAt = lib.Now()
	r.stats.Status = pt.IEPGProcessing
	r.stopChannel = make(chan bool, 1)
	defer func() {
		close(r.stopChannel)
	}()
	interval := time.Tick(100 * time.Microsecond)
	for r.c < 3 {
		select {
		case <-r.stopChannel:
			r.stats.Status = pt.IEPGCanceled
			r.stats.EndAt = lib.Now()
			return
		case <-interval:
			break
		}
	}
	r.stats.EndAt = lib.Now()
	r.stats.Status = pt.IEPGCompleted
}
Example #8
func _crawlAmebloPost(req *wcg.Request, post *hplink.AmebloPost, members []hplink.Member) error {
	crawler := crawlers.NewAmebloPostCrawler(cacheutil.NewURLCacheAwareClient(req, entities.URLCache))
	req.Logger.Infof("[Task.Crawlers.AmebloPosts] crawling URL: %s", post.URL)
	crawled, err := crawler.RunOnPostURL(post.URL)
	if err != nil {
		return err
	}
	post.NumLikes = crawled.NumLikes
	post.NumComments = crawled.NumComments
	post.NumReblogs = crawled.NumReblogs
	post.PostAt = crawled.PostAt
	post.Title = crawled.Title
	post.Theme = crawled.Theme
	post.IsContentsCrawled = true
	post.Images = make([]models.Image, len(crawled.ImageURLs))
	if cacher, err := cacheutil.NewImageCacher(req, entities.ImageCache); err == nil {
		iterator.ParallelSlice(crawled.ImageURLs, func(i int, url string) error {
			req.Logger.Infof("[Task.Crawlers.AmebloPosts] caching image URL %s (on %s)", url, post.URL)
			const retries = 5
			var err error
			var cache *models.ImageCache
			for j := 0; j < retries; j++ {
				if j != 0 {
					req.Logger.Infof("[Task.Crawlers.AmebloPosts] Retry image URL caching: %s (on %s) (%d)", url, post.URL, j)
				}
				cache, err = cacher.Cache(url)
				if err == nil {
					post.Images[i] = *cache.ToImage()
					return nil
				}
				lib.WaitAndEnsureAfter(lib.Now(), 3*time.Second)
			}
			req.Logger.Warnf("[Task.Crawlers.AmebloPosts] Failed to cache image URL %s (on %s) - %v", url, post.URL, err)
			return err
		})
	} else {
		for j, url := range crawled.ImageURLs {
			req.Logger.Infof("[Task.Crawlers.AmebloPosts] skip caching image URL %s (on %s)", url, post.URL)
			post.Images[j] = models.Image{
				URL: url,
			}
		}
	}
	// Update MemberKey only if not set.
	if post.MemberKey == "" {
		if m := _guessMember(req, post, members); m != nil {
			post.MemberKey = m.Key
		}
	}
	req.Logger.Infof("[Task.Crawlers.AmebloPosts] finished crawling")
	return nil
}
Example #9
func Test_PT1Recorder_Canceled(t *testing.T) {
	assert := wcg.NewAssert(t)

	iepg := &pt.IEPG{}
	iepg.ProgramTitle = "test"
	iepg.StartAt = lib.Now()
	iepg.EndAt = lib.Now().Add(10 * time.Second)
	notifier := &testNotifier{}
	recorder := NewPT1Recorder(iepg, newPT1RecorderConfig(), notifier)
	go recorder.Start()
	time.Sleep(1 * time.Second)
	recorder.Stop()
	time.Sleep(100 * time.Millisecond)

	stats := recorder.GetStats()
	assert.EqInt(
		int(pt.IEPGCanceled), int(stats.Status),
		"recorder status should be pt.IEPGCanceled",
	)
	assert.EqInt(1, int(stats.EndAt.Sub(stats.StartAt).Seconds()), "recorder should be canceled in about 1 second (actual=%f)", stats.EndAt.Sub(stats.StartAt).Seconds())
	assert.OK(notifier.StartNotified, "notifier should be notified on start.")
	assert.OK(notifier.EndNotified, "notifier should be notified on end.")
}
Example #10
File: put.go Project: speedland/service
// Update executes the put operation and stores `ent` into the datastore.
func (put *Put) Update(req *wcg.Request, ent interface{}) (*datastore.Key, interface{}, error) {
	var err error
	var entValue = reflect.Indirect(reflect.ValueOf(ent))

	for _, kv := range put.fields {
		k, v := kv.Eval(req)
		entValue.FieldByName(k).Set(reflect.ValueOf(v))
	}

	if put.base.kind.BeforeSave != nil {
		if err = put.base.kind.BeforeSave(ent, req); err != nil {
			return nil, nil, err
		}
	}

	// assign id if not set (using stringKeys[0] and auto-generated UUID)
	var keystr string
	if len(put.base.stringKeys) > 0 {
		keystr = put.base.stringKeys[0]
	} else if len(put.base.datastoreKeys) > 0 {
		keystr = put.base.datastoreKeys[0].StringID()
	}
	keystr = put.base.kind.updateIDField(ent, keystr)
	// and update timestamp
	if !put.dontUpdateTimestamp {
		tsValue := reflect.ValueOf(lib.Now())
		tsFieldName := put.base.kind.TimestampFieldName
		entValue.FieldByName(tsFieldName).Set(tsValue)
	}

	// execute datastore operation
	driver, err := put.base.kind.NewDriver(req)
	if err != nil {
		return nil, nil, err
	}
	key := driver.NewKey(keystr, 0, put.base.parent)
	if err := putEnt(req, put.base.kind, driver, key, ent); err != nil {
		return nil, nil, err
	}
	put.base.invalidateKeys(req, keystr)

	if put.base.kind.AfterSave != nil {
		if err = put.base.kind.AfterSave(ent, req); err != nil {
			return key, ent, err
		}
	}

	return key, ent, nil
}
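For reference, the other examples in this listing reach this method through the kind's fluent helpers (Example #17 chains SetField, Example #21 chains Key and uses MustUpdate). A hedged sketch of that call pattern, assuming Key() returns the same *Put and that MustUpdate is the panic-on-error variant of Update:

// Hedged usage sketch mirroring Example #21: assign an explicit key, then write.
task := &models.AsyncAPITask{}
task.ID = wcg.NewUUID()
key, saved, err := entities.AsyncAPITask.Put().Key(string(task.ID)).Update(req, task)
if err != nil {
	// BeforeSave/AfterSave hooks or datastore failures surface here.
	req.Logger.Errorf("put failed: %v", err)
}
_, _ = key, saved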
Example #11
func Test_API_SMARTStats_Post(t *testing.T) {
	assert := gaetest.NewAssert(t)
	now := lib.Now()
	dummy := []*home.SMARTStats{
		&home.SMARTStats{
			Serial:     "dummy",
			Timestamp:  now,
			SpinUpTime: 1,
		},
	}
	data, _ := json.Marshal(dummy)
	req := ts.POST("/api/intern/home/stats/smart/", string(data))
	req.Request.User = apiTokenUser
	res := req.RouteTo(NewApp().Routes())
	assert.HTTPStatus(200, res)
}
Example #12
func Test_API_SystemStats_Post(t *testing.T) {
	assert := gaetest.NewAssert(t)
	now := lib.Now()
	dummy := []*home.SystemMetric{
		&home.SystemMetric{ // CPU
			ServerName: "dummy",
			Timestamp:  now,
			MetricName: "%%user",
			Value:      10.1,
		},
		&home.SystemMetric{ // Memory
			ServerName: "dummy",
			Timestamp:  now,
			MetricName: "kbmemfree",
			Value:      172830.0,
		},
		&home.SystemMetric{ // System
			ServerName: "dummy",
			Timestamp:  now,
			MetricName: "ldavg-1",
			Value:      1.0,
		},
		&home.SystemMetric{ // disk
			ServerName: "dummy",
			Timestamp:  now,
			Device:     "sda",
			MetricName: "tps",
			Value:      12.0,
		},
		&home.SystemMetric{ // network
			ServerName: "dummy",
			Timestamp:  now,
			Device:     "em0",
			MetricName: "rxpck/s",
			Value:      12.0,
		},
	}
	data, _ := json.Marshal(dummy)
	req := ts.POST("/api/intern/home/stats/metrics/", string(data))
	req.Request.User = apiTokenUser
	res := req.RouteTo(NewApp().Routes())
	assert.HTTPStatus(200, res)
}
Example #13
func Test_API_FileSystemStats_Post(t *testing.T) {
	assert := gaetest.NewAssert(t)
	now := lib.Now()
	dummy := []*home.FileSystemStats{
		&home.FileSystemStats{
			ServerName: "dummy",
			Timestamp:  now,
			Name:       "/dev/sda1",
			Type:       "ext4",
			Capacity:   100,
			Used:       0,
			Available:  100,
		},
	}
	data, _ := json.Marshal(dummy)
	req := ts.POST("/api/intern/home/stats/filesystem/", string(data))
	req.Request.User = apiTokenUser
	res := req.RouteTo(NewApp().Routes())
	assert.HTTPStatus(200, res)
}
Example #14
func execStatsQuery(req *wcg.Request, query *entities.Query) response.Response {
	now := lib.Now()
	until := wcg.ParseDateTimeOr(req.Query("until"), now)
	if until.After(now) {
		until = now
	}
	sinceDefault := until.Add(StatsQueryDefaultTimeWindow)
	sinceMin := until.Add(StatsQueryMaxTimeWindow)
	since := wcg.ParseDateTimeOr(req.Query("since"), until.Add(-24*time.Hour))
	if since.After(until) {
		since = sinceDefault
	} else if sinceMin.After(since) {
		since = sinceMin
	}
	query = query.Filter("Timestamp >=", since).Filter("Timestamp <", until)
	if req.Query("device") != "" {
		query = query.Filter("Device =", req.Query("device"))
	}
	return response.NewJSONResponse(query.MustExecute(req).Data)
}
Example #15
func (r *PT1Recorder) waitForScheduledTime() {
	for {
		if r.iepg.StartAt.Add(-r.cfg.ProcessPreparationTime).Sub(lib.Now()) <= 0 {
			return
		}
		r.logger.Tracef(
			"Waiting %s for the next schedule check until %s",
			r.cfg.ProcessPreparationTime,
			lib.FormatDateTimeString(r.iepg.StartAt),
		)
		select {
		case <-r.stopChannel:
			r.logger.Debugf("A stop flag received, cancel recording...")
			r.stats.Status = pt.IEPGCanceled
			r.stopChannel <- true
			return
		case <-time.Tick(r.cfg.ProcessPreparationTime):
			break
		}
	}
}
Example #16
func runTasksCrawlersAmebloPosts(req *wcg.Request, task *models.AsyncAPITask) (*models.AsyncAPITaskProgress, error) {
	// Tuning conditions to set the proper `limit` and `concurrency` values:
	//
	//   - The task should be done in 10 minutes.
	//   - We'll invoke the number of goroutines defined in `concurrency`.
	//   - Each goroutine can access one URL per `urlCrawlWait` seconds (fixed value - 5 seconds)
	//
	// So we finally get N (< 120) URLs updated (a rough budget sketch follows this function).
	//
	concurrency := configs.GetIntValue(req, "hplink.ameblo_crawler_post_concurrency", 3, 1, 10)
	perThread := configs.GetIntValue(req, "hplink.ameblo_crawler_num_posts_per_thread", 30, 1, 100)
	wait := configs.GetIntValue(req, "hplink.ameblo_crawler_url_wait", 2, 0, 10)
	selected, err := _selectAmebloPosts(req, perThread*concurrency)
	if err != nil {
		return nil, err
	}
	// prepare members for MemberKey detection
	p, err := entities.Member.Query().Execute(req)
	if err != nil {
		return nil, err
	}
	members := p.Data.([]hplink.Member)
	req.Logger.Infof("[Task.Crawlers.AmebloPosts] Crawling %d URLs (concurrency: %d)", len(selected), concurrency)
	iterator.ParallelSliceWithMaxConcurrency(selected, concurrency, func(i int, post *hplink.AmebloPost) error {
		startTime := lib.Now()
		if err := _crawlAmebloPost(req, post, members); err != nil {
			req.Logger.Errorf("Crawler Failure: %v", err)
			return err
		}
		selected[i] = post
		lib.WaitAndEnsureAfter(startTime, time.Duration(wait)*time.Second)
		return nil
	})
	req.Logger.Infof("[Task.Crawlers.AmebloPosts] Updating datastore.")
	_, err = entities.AmebloPost.PutMulti().Update(req, selected)
	if err != nil {
		return nil, err
	}
	return nil, nil
}
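A rough check of the tuning comment's budget, using the defaults that appear in the configs calls above (concurrency 3, 30 posts per goroutine, 2-second wait); the numbers are illustrative only, since the effective values are read from configs at runtime:

// Illustrative time-budget sketch for runTasksCrawlersAmebloPosts (not part of the task code).
concurrency := 3                // hplink.ameblo_crawler_post_concurrency default
perThread := 30                 // hplink.ameblo_crawler_num_posts_per_thread default
wait := 2 * time.Second         // hplink.ameblo_crawler_url_wait default
urls := concurrency * perThread // 90 URLs selected per run
perGoroutine := time.Duration(perThread) * wait // at least 1m0s per goroutine, well inside the 10-minute budget
_, _ = urls, perGoroutine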
Example #17
func Test_Put_SetField(t *testing.T) {
	assert := gaetest.NewAssert(t)
	gaetest.CleanupDatastore(ts.Context)

	var now = lib.Now()
	var ent, err = testEntity.CreateEntityFromForm(url.Values{
		"desc":          []string{"hogehoge"},
		"digit":         []string{"2"},
		"content_bytes": []string{"abcdefg"},
	})
	_, _ent, _ := testEntity.Put().SetField("Desc", "static description").Update(ts.POSTForm("/", nil).Request, ent)
	got := _ent.(*TEntity)
	assert.Nil(err)
	assert.OK(got.ID != "")
	assert.EqInt(2, got.Digit)
	assert.EqStr("static description", got.Desc)
	assert.EqStr("abcdefg", string(got.ContentBytes))
	assert.OK(got.CreatedAt.After(now), "TEntity.CreatedAt: %v", got.CreatedAt)
	assert.OK(got.UpdatedAt.After(now), "TEntity.UpdatedAt: %v", got.UpdatedAt)
	assert.OK(got.BeforeSaveProcessed, "BeforeSave triggered")
	assert.OK(got.AfterSaveProcessed, "AfterSave triggered")
}
Example #18
func (r *PT1Recorder) keepProcessUpAndRunning() error {
	r.stats.Status = pt.IEPGProcessing
	// process started.
	err := r.invokeProcess()
	if err != nil {
		r.stats.Status = pt.IEPGFailed
		return err
	}
	for {
		wait := r.iepg.EndAt.Sub(lib.Now())
		if wait <= 0 {
			wait = 0 * time.Second
		}
		checkWait := r.cfg.ProcessCheckInterval
		select {
		case <-r.stopChannel:
			r.stats.Status = pt.IEPGCanceled
			return nil
		case <-time.Tick(wait):
			r.stats.Status = pt.IEPGCompleted
			return nil
		case <-time.Tick(checkWait):
			break
		}
		if !r.isProcessRunning() {
			r.logger.Warnf("Something wrong in recpt1 process, relaunching...")
			err := r.invokeProcess()
			if err != nil {
				r.stats.Status = pt.IEPGFailed
				return err
			}
			r.stats.Retries++
		} else {
			r.logger.Tracef("Confirmed to keep up and running.", r.cmd.Process.Pid)
		}
	}
}
Example #19
func runTasksCrawlersAmebloEntryLists(req *wcg.Request, task *models.AsyncAPITask) (*models.AsyncAPITaskProgress, error) {
	const FollowLinkKey = "fl"
	const SettingsKey = "s"
	const URLKey = "u"

	var query = req.HTTPRequest().URL.Query()
	var settingsList []*hplink.CrawlerSettings
	var urlList []string
	if settingsKeys, ok := query[SettingsKey]; ok {
		_, _list := entities.CrawlerSettings.GetMulti().Keys(settingsKeys...).MustList(req)
		settingsList = _list.([]*hplink.CrawlerSettings)
	} else {
		query := entities.CrawlerSettings.Query().Filter("Type=", hplink.CrawlerSettingsTypeAmeblo)
		if pagination := query.MustExecute(req); pagination.Length() > 0 {
			list := pagination.Data.([]hplink.CrawlerSettings)
			settingsList = make([]*hplink.CrawlerSettings, len(list))
			for i := range list {
				settingsList[i] = &list[i]
			}
		}
	}

	var numList = len(settingsList)
	urlList = make([]string, numList)
	if urls, ok := query[URLKey]; ok {
		if numList != len(urls) {
			return nil, fmt.Errorf("List mismatch - found %d settings but %d urls are specified", numList, len(urls))
		}
		urlList = query[URLKey]
	} else {
		for i := range settingsList {
			urlList[i] = (*hplink.AmebloCrawlerSettings)(settingsList[i]).GetEntryListURL()
		}
	}

	startTime := lib.Now()
	nextParamSettingsKeys := make([]string, numList)
	nextParamURLs := make([]string, numList)
	err := iterator.ParallelSlice(settingsList, func(i int, v *hplink.CrawlerSettings) error {
		next, err := _crawlAmebloEntryList(req, v, urlList[i])
		if err != nil {
			settingsList[i].Error = []byte(fmt.Sprintf("%v", err))
			settingsList[i].Status = hplink.CrawlerStatusFailure
			settingsList[i].LastRun = lib.Now()
			return err
		}
		settingsList[i].Error = nil
		settingsList[i].Status = hplink.CrawlerStatusSuccess
		settingsList[i].LastRun = lib.Now()
		if next != "" {
			nextParamSettingsKeys[i] = v.URL
			nextParamURLs[i] = next
		}
		return nil
	})
	entities.CrawlerSettings.PutMulti().MustUpdate(req, settingsList)
	if err != nil {
		return nil, err
	}
	if req.Query(FollowLinkKey) != "true" {
		return nil, err
	}
	// fl=true makes a recursive call to follow next links
	// reduce empty URLs from nextParam* and return them for the recursive call
	var fixedNextParamSettingsKeys []string
	var fixedNextParamURLs []string
	var hasNext = false
	for i := range nextParamURLs {
		if nextParamURLs[i] != "" {
			hasNext = true
			fixedNextParamSettingsKeys = append(fixedNextParamSettingsKeys, nextParamSettingsKeys[i])
			fixedNextParamURLs = append(fixedNextParamURLs, nextParamURLs[i])
		}
	}
	var progress models.AsyncAPITaskProgress
	var lastProgress = task.LastProgress()
	if lastProgress == nil {
		progress.Current = len(urlList)
		progress.Total = 0
	} else {
		progress.Current = lastProgress.Current + len(urlList)
	}
	if hasNext {
		progress.Next = url.Values{
			FollowLinkKey: []string{"true"},
			SettingsKey:   fixedNextParamSettingsKeys,
			URLKey:        fixedNextParamURLs,
		}
		wait := configs.GetIntValue(req, "hplink.ameblo_crawler_url_wait", 2, 0, 10)
		lib.WaitAndEnsureAfter(startTime, time.Duration(wait)*time.Second)
	}
	req.Logger.Infof("No more URL needs to be crawled.")
	return &progress, nil
}
Example #20
		return reflect.ValueOf(float32(v)), err
	},
	reflect.TypeOf(float64(0)): func(s string) (reflect.Value, error) {
		v, err := strconv.ParseFloat(s, 64)
		return reflect.ValueOf(v), err
	},

	// time
	reflect.TypeOf(time.Duration(0)): func(s string) (reflect.Value, error) {
		v, err := time.ParseDuration(s)
		return reflect.ValueOf(v), err
	},
	reflect.TypeOf(time.Now()): func(s string) (reflect.Value, error) {
		switch s {
		case _ValueMacroTimeNow:
			return reflect.ValueOf(lib.Now()), nil
		case _ValueMacroTimeToday:
			return reflect.ValueOf(lib.Today()), nil
		case "":
			return reflect.ValueOf(time.Time{}), nil
		default:
			if t, err := wcg.ParseDateTime(s); err == nil {
				return reflect.ValueOf(t), err
			} else if t, err := wcg.ParseDate(s); err == nil {
				return reflect.ValueOf(t), err
			} else {
				// as JSON representation
				var t time.Time
				err := json.Unmarshal([]byte(fmt.Sprintf("\"%s\"", s)), &t)
				return reflect.ValueOf(t), err
			}
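This fragment belongs to a map from reflect.Type to string-converter functions; the map's name and its opening entries are cut off above. A hedged sketch of how such a table would be consulted when filling a time.Time field from a form value, assuming the map variable is called converters:

// Hypothetical lookup into the converter table shown above; `converters` is an
// assumed name for the truncated map variable.
if convert, ok := converters[reflect.TypeOf(time.Time{})]; ok {
	v, err := convert(_ValueMacroTimeNow) // resolves to lib.Now() per the switch above
	if err == nil {
		fmt.Println(v.Interface().(time.Time))
	}
}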
Example #21
// Define endpoints to process an async task.
func (helper *AsyncAPIHelper) Define(path string) *AsyncAPIConfig {
	if !strings.HasSuffix(path, "/") {
		panic(fmt.Errorf("Task endpoint must end with '/' (%s)", path))
	}
	api := helper.app.API()
	// /path/to/something/ => api-path-to-something
	p := api.Path(path)
	name := strings.Replace(p[1:len(p)-1], ":", "", -1) // remove parameters
	name = strings.Replace(name, "/", "-", -1)          // replace '/' with '-'
	config := &AsyncAPIConfig{
		Queue: helper.app.Queue().DefinePush(name),
		app:   helper.app,
		path:  path,
	}

	// Endpoint that triggers the task.
	api.POST(path, Handler(
		func(req *wcg.Request) response.Response {
			for _, h := range config.triggerHandlers {
				resp := h.Handle(req)
				if resp != nil {
					req.Logger.Debugf("Async Task is not triggered.")
					return resp
				}
			}
			task := &models.AsyncAPITask{}
			task.ID = wcg.NewUUID()
			task.Path = path
			task.Query = req.HTTPRequest().URL.RawQuery
			task.Status = models.AsyncAPIStatusReady
			entities.AsyncAPITask.Put().Key(string(task.ID)).MustUpdate(req, task)
			// Push a task
			taskPath := fmt.Sprintf("%s%s.json?%s", req.URL().Path, string(task.ID), req.HTTPRequest().URL.Query().Encode())
			if err := config.Queue.PushTask(req, taskPath, nil); err != nil {
				return response.InternalServerError(req, err)
			}
			req.Logger.Infof("Triggered an async task [id:%s path:%s]", task.ID, taskPath)
			return response.NewJSONResponseWithStatus(
				AsyncTaskTriggerResponse{task.ID},
				201,
			)
		},
	))

	// Endpoint that executes the task in the background.
	// This endpoint must be called by PushQueue
	api.POST(path+":taskid.json", Handler(
		func(req *wcg.Request) response.Response {
			if !request.IsTask(req) {
				req.Logger.Warnf(
					"Non system call to the async task endpoint: UserAgent=%s, IP=%s",
					req.HTTPRequest().UserAgent(),
					req.HTTPRequest().RemoteAddr,
				)
				return response.APIOK
			}

			_, one := entities.AsyncAPITask.Get().Key(req.Param("taskid")).MustOne(req)
			if one == nil {
				req.Logger.Warnf(
					"Task %q not found: UserAgent=%s, IP=%s",
					req.Param("taskid"),
					req.HTTPRequest().UserAgent(),
					req.HTTPRequest().RemoteAddr,
				)
				return response.APIOK
			}
			task := one.(*models.AsyncAPITask)
			logger := wcg.NewLoggerWithPrefix(req, fmt.Sprintf("[AsyncTask: %s (queue: %s)]", task.ID, name))
			if task.Status != models.AsyncAPIStatusReady && task.Status != models.AsyncAPIStatusRunning {
				logger.Warnf("Task is not ready: Status=%s", task.Status)
				return response.APIOK
			}

			if task.Status == models.AsyncAPIStatusReady {
				task.StartAt = lib.Now()
				task.Status = models.AsyncAPIStatusRunning
				entities.AsyncAPITask.Put().Key(req.Param("taskid")).MustUpdate(req, task)
			}

			var err error
			var progress *models.AsyncAPITaskProgress
			var resp response.Response
			func() {
				defer func() {
					if x := recover(); x != nil {
						logger.Errorf("Unhandle error recovered from the task: %v", x)
						err = fmt.Errorf("%v", x)
					}
				}()
				progress, err = config.processHandler(req, task)
			}()

			if progress != nil {
				task.Progress = append(task.Progress, *progress)
			}
			if progress != nil && progress.Next != nil {
				// update the entity (Progress field)
				entities.AsyncAPITask.Put().Key(req.Param("taskid")).MustUpdate(req, task)

				// Push a next request into the queue
				// This is not required in the test environment, as the test code manually calls the next request (see testhelper.AsyncTaskTestRunner#Run code).
				// The test environment uses the response JSON to call the next URL, and that response is used only for this purpose.
				// If the code cannot push a task for the next request, we should stop the task as a failure.
				logger.Infof("Task needs to call recursively: Next parameter is %v", progress.Next)
				if !req.IsTest() {
					err = config.Queue.PushTask(req, fmt.Sprintf("%s?%s", req.URL().Path, progress.Next.Encode()), nil)
				}
				if err == nil {
					return response.NewJSONResponse(progress.Next)
				}
				// if an error occurs in PushTask, just stop the task and record the failure in the datastore.
			}
			// finished the task
			task.FinishAt = lib.Now()
			tt := task.FinishAt.Sub(task.StartAt)
			if err == nil {
				task.Status = models.AsyncAPIStatusSuccess
				logger.Infof("Task finished successfully (time: %s)", tt.String())
				resp = response.APIOK
			} else {
				task.Error = err.Error()
				task.Status = models.AsyncAPIStatusFailure
				logger.Errorf("Task failed (time: %s): %s", tt.String(), task.Error)
				resp = response.APIInternalServerError
			}
			entities.AsyncAPITask.Put().Key(req.Param("taskid")).MustUpdate(req, task)
			return resp
		},
	))

	// Endpoint that returns the task status.
	api.GET(path+":taskid.json", Handler(
		func(req *wcg.Request) response.Response {
			for _, h := range config.triggerHandlers {
				resp := h.Handle(req)
				if resp != nil {
					return resp
				}
			}
			_, one := entities.AsyncAPITask.Get().Key(req.Param("taskid")).MustOne(req)
			if one == nil {
				return response.NotFound(req)
			}
			task := one.(*models.AsyncAPITask)
			return response.NewJSONResponse(
				AsyncTaskMonitorResponse{
					ID:       task.ID,
					Status:   task.Status,
					Progress: task.LastProgress(),
				},
			)
		},
	))

	return config
}
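To summarize the routing above: POST on the bare path triggers a task and answers 201 with the new task ID, the push queue drives POST on path+":taskid.json", and clients poll the same path with GET for status. A hedged sketch of the client-side flow for a task defined with Define("/tasks/example/"); the host and API mount point are placeholders, and only the route shapes come from the code above:

// Hypothetical client flow; adjust the base URL to wherever the app is served.
resp, err := http.Post("http://localhost:8080/api/tasks/example/", "application/json", nil)
if err != nil {
	log.Fatal(err)
}
body, _ := ioutil.ReadAll(resp.Body)
resp.Body.Close()
log.Printf("trigger response (201 expected): %s", body) // JSON-encoded AsyncTaskTriggerResponse carrying the task ID
// A client would extract the task ID from that JSON and then poll
//   GET http://localhost:8080/api/tasks/example/<taskid>.json
// which returns AsyncTaskMonitorResponse{ID, Status, Progress}.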
Example #22
// Start starts recording system.
func (r *PT1Recorder) Start() {
	var err error
	r.stopChannel = make(chan bool)
	r.logger.Infof("Started a recording thread.")
	defer func() {
		close(r.stopChannel)
		r.logger.Infof("Finished a recording thread: status = %d", r.stats.Status)
	}()

	r.waitForScheduledTime()
	if r.stats.Status == pt.IEPGCanceled {
		return
	}

	r.logger.Infof("Start recording: %s", r.iepg.ProgramTitle)
	r.stats.StartAt = lib.Now()
	defer func() { r.stats.EndAt = lib.Now() }()
	if r.notifier != nil {
		// we should not block the recording thread by start notification.
		go func() {
			r.notifier.NotifyStart(r.stats)
		}()
		// the thread should exit after end notification is sent.
		defer func() {
			r.notifier.NotifyEnd(r.stats)
		}()
	}
	// directory check
	if err = os.MkdirAll(r.DirectoryPath(), 0755); err != nil {
		r.logger.Fatalf("Failed to create a directory on %s: %v", r.DirectoryPath(), err)
		r.stats.Status = pt.IEPGFailed
		r.stats.FailureReason = "Failed to create a directory"
		return
	}
	err = r.keepProcessUpAndRunning()
	if err != nil {
		r.logger.Fatalf("Failed to keep the process running: %v", err)
		r.stats.Status = pt.IEPGFailed
		r.stats.FailureReason = fmt.Sprintf("%v", err)
		return
	}

	// kill the process
	r.cmd.Process.Signal(syscall.SIGTERM)
	procExitWait := make(chan bool)
	go func() {
		r.cmd.Wait()
		procExitWait <- true
		close(procExitWait)
	}()

	select {
	case <-procExitWait:
		break
	case <-time.Tick(r.cfg.ProcessCleanupTime):
		r.logger.Warnf("Could not stop the process, trying to kill %s by SIGKILL", r.cmd.Process.Pid)
		r.cmd.Process.Signal(syscall.SIGKILL)
		break
	}
	if r.stats.Status == pt.IEPGCanceled {
		r.stopChannel <- true
		return
	}
}
Example #23
// Update executes the put operation and stores `entities` ([]Kind or []*Kind) into the datastore.
func (multi *PutMulti) Update(req *wcg.Request, entities interface{}) ([]*datastore.Key, error) {
	driver, err := multi.base.kind.NewDriver(req)
	if err != nil {
		return nil, err
	}

	var entValue = reflect.Indirect(reflect.ValueOf(entities))
	var entLen = entValue.Len()
	var entPointers = make([]interface{}, entLen)
	var entValueItems = make([]reflect.Value, entLen)
	var stringKeys = make([]string, entLen)
	var keys = make([]*datastore.Key, entLen)

	// collect pointers
	for i := 0; i < entLen; i++ {
		val := entValue.Index(i)
		if val.Kind() == reflect.Ptr { // []*Kind
			entPointers[i] = val.Interface()
			entValueItems[i] = reflect.Indirect(val)
		} else if val.Kind() == reflect.Interface { // []interface{}{*Kind, *Kind, ...}
			entPointers[i] = reflect.ValueOf(val.Interface()).Interface()
			entValueItems[i] = reflect.Indirect(reflect.ValueOf(val.Interface()))
		} else { // []Kind
			entPointers[i] = val.Addr().Interface()
			entValueItems[i] = val
		}
	}

	if multi.base.kind.BeforeSave != nil {
		for i := 0; i < entLen; i++ {
			if err := multi.base.kind.BeforeSave(entPointers[i], req); err != nil {
				return nil, err
			}
		}
	}

	// preparing keys and update timestamp
	tsValue := reflect.ValueOf(lib.Now())
	tsFieldName := multi.base.kind.TimestampFieldName

	if len(multi.base.stringKeys) == entLen {
		for i := 0; i < entLen; i++ {
			stringKeys[i] = multi.base.kind.updateIDField(entPointers[i], multi.base.stringKeys[i])
			keys[i] = driver.NewKey(stringKeys[i], 0, multi.base.parent)
		}
	} else if len(multi.base.datastoreKeys) == entLen {
		for i := 0; i < entLen; i++ {
			stringKeys[i] = multi.base.kind.updateIDField(entPointers[i], multi.base.datastoreKeys[i].StringID())
			keys[i] = multi.base.datastoreKeys[i]
		}
	} else {
		for i := 0; i < entLen; i++ {
			stringKeys[i] = multi.base.kind.updateIDField(entPointers[i], "")
			keys[i] = driver.NewKey(stringKeys[i], 0, multi.base.parent)
		}
	}

	for i := 0; i < entLen; i++ {
		if !multi.dontUpdateTimestamp {
			entValueItems[i].FieldByName(tsFieldName).Set(tsValue)
		}
	}

	if err := putMultiEnts(req, multi.base.kind, driver, keys, entities); err != nil {
		return keys, err
	}

	multi.base.invalidateKeys(req, stringKeys...)

	if multi.base.kind.AfterSave != nil {
		for i := 0; i < entLen; i++ {
			if err := multi.base.kind.AfterSave(entPointers[i], req); err != nil {
				return keys, err
			}
		}
	}

	return keys, nil
}
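As with the single-entity Put above, the callers in this listing reach Update through the kind helpers: Example #16 calls entities.AmebloPost.PutMulti().Update(req, selected) and Example #19 uses MustUpdate on CrawlerSettings. A short hedged recap, assuming selected is a []*hplink.AmebloPost as in Example #16:

// Hedged usage sketch mirroring Example #16: write the whole slice in one call;
// keys are derived or generated per element and timestamps are refreshed.
keys, err := entities.AmebloPost.PutMulti().Update(req, selected)
if err != nil {
	req.Logger.Errorf("PutMulti failed: %v", err)
} else {
	req.Logger.Infof("stored %d posts", len(keys))
}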