Example #1
func main() {
	var wg sync.WaitGroup
	sc := make(chan os.Signal, 1)
	signal.Notify(sc,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)

	go func() {
		sig := <-sc
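		// running is presumably a package-level flag polled by MockGetId to stop its loop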
		running = false
		fmt.Printf("main:Got signal:%v", sig)
	}()
	fmt.Printf("main:Mock get id process start!\n")
	db, err := GetDatabase()
	if err != nil {
		fmt.Printf("main:GetDatabase error:%s\n", err.Error())
		return
	}
	idGenerator, err := GetIdGenerator(db, idKey)
	if err != nil {
		fmt.Printf("main:GetIdGenerator error:%s\n", err.Error())
		return
	}
	wg.Add(1)
	go MockGetId(idGenerator, db, &wg)
	wg.Wait()
}
Example #2
func TestBalancer(t *testing.T) { // {{{
	b := queue.NewBalancer("TestCacheAPISearch", PoolSize, QueueSize)
	t.Logf("[Balancer] Created new: '%s'", b.Name())

	b.Run()
	defer b.Close()
	t.Logf("[Balancer] Started: '%s'", b.Info())

	var j *TestBalancerJob
	var wg sync.WaitGroup
	startedAt := time.Now()

	for i := 0; i < JobsQuantity; i++ {
		j = &TestBalancerJob{Message: "Get worker info from Balancer"}
		j.Initialize()
		t.Logf("[TestBalancerJob:%d] Created: '%s'", i, j.Message)

		wg.Add(1)
		b.Dispatch(j)
		go finalizeTestBalancerJob(j, i, &wg, t)
	}

	wg.Wait()
	finishedAt := time.Now()
	t.Logf("[Balancer] Executed %d of tasks -> [%.6fs]", JobsQuantity, finishedAt.Sub(startedAt).Seconds())
} // }}}
Example #3
// Work turns on the worker
func (w *Worker) Work(wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		// safely stop the worker
		case <-w.stop:
			return
		case task := <-w.reader:
			tasks, err := w.processFn(task)
			if err != nil {
				if task.Retries < MaxRetries-1 {
					task.Retries++
					w.writer <- task
					continue
				}
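				// retries exhausted: the error is dropped and any tasks the failed call returned are still submitted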
			}

			// submit any new tasks returned by the old one
			if tasks != nil {
				for _, t := range tasks {
					w.writer <- t
				}
			}
		}
	}
}
Example #4
func (hp *httpProxy) Serve(wg *sync.WaitGroup) {
	defer func() {
		wg.Done()
	}()
	ln, err := net.Listen("tcp", hp.addr)
	if err != nil {
		fmt.Println("listen http failed:", err)
		return
	}
	host, _, _ := net.SplitHostPort(hp.addr)
	var pacURL string
	if host == "" || host == "0.0.0.0" {
		pacURL = fmt.Sprintf("http://<hostip>:%s/pac", hp.port)
	} else if hp.addrInPAC == "" {
		pacURL = fmt.Sprintf("http://%s/pac", hp.addr)
	} else {
		pacURL = fmt.Sprintf("http://%s/pac", hp.addrInPAC)
	}
	info.Printf("listen http %s, PAC url %s\n", hp.addr, pacURL)

	for {
		conn, err := ln.Accept()
		if err != nil {
			errl.Printf("http proxy(%s) accept %v\n", ln.Addr(), err)
			if isErrTooManyOpenFd(err) {
				connPool.CloseAll()
			}
			time.Sleep(time.Millisecond)
			continue
		}
		c := newClientConn(conn, hp)
		go c.serve()
	}
}
Example #5
func main() {
	introText := "SIMPLE TWITTER REFORMATTER \n (╯°□°)╯︵ ┻━┻) \n"
	fmt.Print(introText)

	key := flag.String("key", "nokey", "Twitter consumer key")
	secret := flag.String("sec", "nosecret", "Twitter consumer secret")
	debug := flag.Bool("debug", false, "Debug logging level")
	numTweets := flag.Int("num", 3, "Number of tweets to retrieve")

	flag.Parse()

	access_token, err := getBearerToken(*key, *secret, *debug)
	if err != nil || access_token == "" {
		// log.Fatal already exits the process with status 1
		log.Fatal("Could not retrieve token to make twitter API request")
	}

	// Create a very basic channel with tweets getting passed into the expander.
	// Wait for it to finish executing before quitting.
	tweetChannel := make(chan string)
	var wg sync.WaitGroup
	wg.Add(1)
	go tweetRetriever(access_token, *numTweets, tweetChannel, &wg, *debug)
	go textExpander(tweetChannel)
	wg.Wait()
}
Example #6
func createRunningPod(wg *sync.WaitGroup, c *client.Client, name, ns, image string, labels map[string]string) {
	defer GinkgoRecover()
	defer wg.Done()
	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind: "Pod",
		},
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  name,
					Image: image,
				},
			},
			DNSPolicy: api.DNSDefault,
		},
	}
	_, err := c.Pods(ns).Create(pod)
	expectNoError(err)
	expectNoError(waitForPodRunningInNamespace(c, name, ns))
}
Example #7
func NewHttpService(context interface {
	Acquire()
	Release()
}, server *http.Server) (s *HttpService, err error) {
	s = &HttpService{}
	addr := server.Addr
	if addr == "" {
		addr = ":http"
	}

	s.listener, err = net.Listen("tcp", addr)
	if err != nil {
		return
	}

	var w sync.WaitGroup
	w.Add(1)
	context.Acquire()
	go func() {
		defer context.Release()
		l := s.listener
		w.Done()

		server.Serve(l)
	}()

	// wait until the goroutine has taken over the listener before returning;
	// without this the WaitGroup serves no purpose
	w.Wait()

	return
}
Example #8
// trapSignal waits on the listed signals for pre-defined behaviors
func (a *app) trapSignal(wg *sync.WaitGroup) {
	ch := make(chan os.Signal, 10)
	signal.Notify(ch, syscall.SIGTERM, syscall.SIGHUP)
	for {
		sig := <-ch
		switch sig {
		case syscall.SIGTERM:
			// this ensures a subsequent TERM will trigger standard go behaviour of terminating
			signal.Stop(ch)
			// roll through all initialized http servers and stop them
			for _, s := range a.sds {
				go func(s httpdown.Server) {
					defer wg.Done()
					if err := s.Stop(); err != nil {
						a.errors <- probe.NewError(err)
					}
				}(s)
			}
			return
		case syscall.SIGHUP:
			// we only return here if there's an error, otherwise the new process
			// will send us a TERM when it's ready to trigger the actual shutdown.
			if _, err := a.net.StartProcess(); err != nil {
				a.errors <- err.Trace()
			}
		}
	}
}
Example #9
// StartGoogleNews start collecting google news
func StartGoogleNews(googleLoopCounterDelay int) {
	fmt.Println("startgoogle news launched!")
	fmt.Println(googleLoopCounterDelay)

	for range time.Tick(time.Duration(googleLoopCounterDelay) * time.Second) {
		fmt.Println("loop will start")

		var wsg sync.WaitGroup
		n := make(chan GoogleNewsResponseData)
		// cs := make(chan int)
		for _, v := range TopicsList() {
			wsg.Add(1)
			go func(v TopicIdentity) {
				go GoogleNewsRequester(googleURLConstructor(v.Initial), v, n, &wsg)

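				// note: n is shared by all topic goroutines, so this receive may pick up another topic's result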
				result := <-n
				GoogleNewsRW(result, &wsg)
			}(v)
		}
		wsg.Wait()
		close(n)

		// cache index news keys
		newsCache.NewsIndexCache()
	}
}
Example #10
func NewFakeNeverRateLimiter() RateLimiter {
	wg := sync.WaitGroup{}
	wg.Add(1)
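	// note: the struct literal below copies the WaitGroup after Add, which go vet's
	// copylocks check flags; storing a *sync.WaitGroup would avoid the copy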
	return &fakeNeverRateLimiter{
		wg: wg,
	}
}
Example #11
func dialWebsocket(db *sql.DB, wg *sync.WaitGroup, i int) {
	defer wg.Done()

	origin := "http://localhost/"
	url := "ws://localhost:8080/scoreboard"

	ws, err := websocket.Dial(url, "", origin)
	if err != nil {
		log.Fatal(err)
	}

	res, err := scoreboard.CollectLastResult(db)
	if err != nil {
		log.Fatal(err)
	}

	html_res := res.ToHTML(false)

	var msg = make([]byte, len(html_res))
	if _, err = ws.Read(msg); err != nil {
		log.Fatal(err)
	}

	if string(msg) != html_res {
		log.Fatalln("Received result invalid",
			html_res, msg)
	}
}
Example #12
func TestWrite(t *testing.T) {
	var wg sync.WaitGroup
	wg.Add(1)
	go TCPServer(t, &wg)
	// Give the fake TCP server some time to start:
	time.Sleep(time.Millisecond * 100)

	i := Instrumental{
		Host:     "127.0.0.1",
		ApiToken: "abc123token",
		Prefix:   "my.prefix",
	}
	i.Connect()

	// Default to gauge
	m1, _ := telegraf.NewMetric(
		"mymeasurement",
		map[string]string{"host": "192.168.0.1"},
		map[string]interface{}{"myfield": float64(3.14)},
		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
	)
	m2, _ := telegraf.NewMetric(
		"mymeasurement",
		map[string]string{"host": "192.168.0.1", "metric_type": "set"},
		map[string]interface{}{"value": float64(3.14)},
		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
	)

	// Simulate a connection close and reconnect.
	metrics := []telegraf.Metric{m1, m2}
	i.Write(metrics)
	i.Close()

	// Counter and Histogram are increments
	m3, _ := telegraf.NewMetric(
		"my_histogram",
		map[string]string{"host": "192.168.0.1", "metric_type": "histogram"},
		map[string]interface{}{"value": float64(3.14)},
		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
	)
	// We will drop metrics that simply won't be accepted by Instrumental
	m4, _ := telegraf.NewMetric(
		"bad_values",
		map[string]string{"host": "192.168.0.1", "metric_type": "counter"},
		map[string]interface{}{"value": "\" 3:30\""},
		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
	)
	m5, _ := telegraf.NewMetric(
		"my_counter",
		map[string]string{"host": "192.168.0.1", "metric_type": "counter"},
		map[string]interface{}{"value": float64(3.14)},
		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
	)

	metrics = []telegraf.Metric{m3, m4, m5}
	i.Write(metrics)

	wg.Wait()
	i.Close()
}
Example #13
File: saxer.go Project: tcw/saxer
func emitterMetaPrinter(emitter chan contentBuffer.EmitterData, wg *sync.WaitGroup) {
	// ranging over the channel exits cleanly once it is closed, instead of
	// spinning on zero values and driving the WaitGroup counter negative
	for ed := range emitter {
		fmt.Printf("%d-%d    %s\n", ed.LineStart, ed.LineEnd, ed.NodePath)
		wg.Done()
	}
}
Example #14
// RawJsonInternalOutputProtocol writes each KeyValue as a raw byte key, a tab, and a JSON-encoded value
func RawJsonInternalOutputProtocol(writer io.Writer) (*sync.WaitGroup, chan<- KeyValue) {
	w := bufio.NewWriter(writer)
	in := make(chan KeyValue, 100)
	tab := []byte("\t")
	newline := []byte("\n")
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		for kv := range in {
			kBytes, ok := kv.Key.([]byte)
			if !ok {
				Counter("RawJsonInternalOutputProtocol", "key is not []byte", 1)
				log.Printf("failed type casting %v", kv.Key)
				continue
			}
			vBytes, err := json.Marshal(kv.Value)
			if err != nil {
				Counter("RawJsonInternalOutputProtocol", "unable to json encode value", 1)
				log.Printf("%s - failed encoding %v", err, kv.Value)
				continue
			}
			w.Write(kBytes)
			w.Write(tab)
			w.Write(vBytes)
			w.Write(newline)
		}
		w.Flush()
		wg.Done()
	}()
	return &wg, in
}
Example #15
func client(configuration *Configuration, result *Result, done *sync.WaitGroup) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("caught recover: ", r)
			os.Exit(1)
		}
	}()

	myclient := MyClient(result, time.Duration(connectTimeout)*time.Millisecond,
		time.Duration(readTimeout)*time.Millisecond,
		time.Duration(writeTimeout)*time.Millisecond)

	for result.requests < configuration.requests {
		for _, tmpUrl := range configuration.urls {
			req, _ := http.NewRequest(configuration.method, tmpUrl, bytes.NewReader(configuration.postData))

			if configuration.keepAlive {
				req.Header.Add("Connection", "keep-alive")
			} else {
				req.Header.Add("Connection", "close")
			}

			if len(configuration.authHeader) > 0 {
				req.Header.Add("Authorization", configuration.authHeader)
			}

			if len(configuration.contentType) > 0 {
				req.Header.Add("Content-Type", configuration.contentType)
			} else if len(configuration.postData) > 0 {
				req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
			}

			resp, err := myclient.Do(req)
			result.requests++

			if err != nil {
				result.networkFailed++
				continue
			}

			// read and close the body before checking for errors,
			// so the connection is never leaked on the error path
			_, errRead := ioutil.ReadAll(resp.Body)
			resp.Body.Close()

			if errRead != nil {
				result.networkFailed++
				continue
			}

			if resp.StatusCode == http.StatusOK {
				result.success++
			} else {
				result.badFailed++
			}
		}
	}

	done.Done()
}
Example #16
func (s *Scheduler) TriggerImmediately(logger lager.Logger, job atc.JobConfig, resources atc.ResourceConfigs, resourceTypes atc.ResourceTypes) (db.Build, Waiter, error) {
	logger = logger.Session("trigger-immediately")

	build, err := s.PipelineDB.CreateJobBuild(job.Name)
	if err != nil {
		logger.Error("failed-to-create-build", err)
		return db.Build{}, nil, err
	}

	jobService, err := NewJobService(job, s.PipelineDB, s.Scanner)
	if err != nil {
		return db.Build{}, nil, err
	}

	wg := new(sync.WaitGroup)
	wg.Add(1)

	// do not block request on scanning input versions
	go func() {
		defer wg.Done()
		s.ScheduleAndResumePendingBuild(logger, nil, build, job, resources, resourceTypes, jobService)
	}()

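	// the *sync.WaitGroup itself is returned as the Waiter, so callers can wait for the scan to finish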
	return build, wg, nil
}
Example #17
func main() {
	var wg sync.WaitGroup
	wg.Add(1) //one thread keeps the server alive

	//create a server ID
	serverID = "leader" + RandStringRunes(5)

	//define which port it will listen on
	requestsPort = "8091"

	//define the port followers connect to
	//followersPort = "8092"

	//create the channel used to deliver incoming requests
	requestsChan := make(chan string)

	//for now, there are no followers
	nFollowers = 0

	//start the goroutine that notifies followers
	go notifyFollowers(requestsChan)

	//start the goroutine that listens for requests
	go listenRequests(requestsChan, &wg)

	wg.Wait()
}
Example #18
func GenMessages(c *C, prefix, topic string, keys map[string]int) map[string][]*sarama.ProducerMessage {
	config := NewConfig()
	config.ClientID = "producer"
	config.Kafka.SeedPeers = testKafkaPeers
	producer, err := SpawnGracefulProducer(config)
	c.Assert(err, IsNil)

	messages := make(map[string][]*sarama.ProducerMessage)
	var wg sync.WaitGroup
	var lock sync.Mutex
	for key, count := range keys {
		for i := 0; i < count; i++ {
			key := key
			message := fmt.Sprintf("%s:%s:%d", prefix, key, i)
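			// spawn presumably wraps wg.Add(1) and runs the closure in a goroutine that calls wg.Done() when it returns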
			spawn(&wg, func() {
				keyEncoder := sarama.StringEncoder(key)
				msgEncoder := sarama.StringEncoder(message)
				prodMsg, err := producer.Produce(topic, keyEncoder, msgEncoder)
				c.Assert(err, IsNil)
				log.Infof("*** produced: topic=%s, partition=%d, offset=%d, message=%s",
					topic, prodMsg.Partition, prodMsg.Offset, message)
				lock.Lock()
				messages[key] = append(messages[key], prodMsg)
				lock.Unlock()
			})
		}
	}
	wg.Wait()
	// Sort the produced messages in ascending order of their offsets.
	for _, keyMessages := range messages {
		sort.Sort(MessageSlice(keyMessages))
	}
	return messages
}
Example #19
func client(wg *sync.WaitGroup, host string, port, length int, duration time.Duration, chStat chan int) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("error: %s", r)
		}
		log.Printf("disconnected")
		wg.Done()
	}()
	log.Printf("connecting to %s:%d, len %d, duration %s", host, port, len, duration.String())
	conn, err := utp.DialTimeout(fmt.Sprintf("%s:%d", host, port), time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	log.Printf("connected")
	buf := bytes.Repeat([]byte("H"), length)
	ts := time.Now()
	for time.Since(ts) < duration {
		n, err := conn.Write(buf)
		if err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		chStat <- n
	}
}
Example #20
func (f *SystemFacts) getSysInfo(wg *sync.WaitGroup) {
	defer wg.Done()

	var info unix.Sysinfo_t
	if err := unix.Sysinfo(&info); err != nil {
		if c.Debug {
			log.Println(err.Error())
		}
		return
	}

	f.mu.Lock()
	defer f.mu.Unlock()

	f.Memory.Total = info.Totalram
	f.Memory.Free = info.Freeram
	f.Memory.Shared = info.Sharedram
	f.Memory.Buffered = info.Bufferram

	f.Swap.Total = info.Totalswap
	f.Swap.Free = info.Freeswap

	f.Uptime = info.Uptime

	f.LoadAverage.One = fmt.Sprintf("%.2f", float64(info.Loads[0])/LINUX_SYSINFO_LOADS_SCALE)
	f.LoadAverage.Five = fmt.Sprintf("%.2f", float64(info.Loads[1])/LINUX_SYSINFO_LOADS_SCALE)
	f.LoadAverage.Ten = fmt.Sprintf("%.2f", float64(info.Loads[2])/LINUX_SYSINFO_LOADS_SCALE)
}
Example #21
File: site.go Project: jaden/hugo
func pageRenderer(s *Site, pages <-chan *Page, results chan<- error, wg *sync.WaitGroup) {
	defer wg.Done()
	for p := range pages {
		var layouts []string

		if !p.IsRenderable() {
			self := "__" + p.TargetPath()
			_, err := s.Tmpl.New(self).Parse(string(p.Content))
			if err != nil {
				results <- err
				continue
			}
			layouts = append(layouts, self)
		} else {
			layouts = append(layouts, p.Layout()...)
			layouts = append(layouts, "_default/single.html")
		}

		b, err := s.renderPage("page "+p.FullFilePath(), p, s.appendThemeTemplates(layouts)...)
		if err != nil {
			results <- err
		} else {
			results <- s.WriteDestPage(p.TargetPath(), b)
		}
	}
}
Example #22
func TestRace(t *testing.T) {

	o := observable.New()
	n := 0

	asyncTask := func(wg *sync.WaitGroup) {
		o.Trigger("foo")
		wg.Done()
	}
	var wg sync.WaitGroup

	wg.Add(5)

	o.On("foo", func() {
		n++
	})

	go asyncTask(&wg)
	go asyncTask(&wg)
	go asyncTask(&wg)
	go asyncTask(&wg)
	go asyncTask(&wg)

	wg.Wait()

	if n != 5 {
		t.Errorf("The counter is %d instead of being %d", n, 5)
	}

}
Example #23
func BenchmarkContention(b *testing.B) {
	b.StopTimer()

	var procs = runtime.NumCPU()
	var origProcs = runtime.GOMAXPROCS(procs)

	var db = NewLogeDB(NewMemStore())
	db.CreateType(NewTypeDef("counters", 1, &TestCounter{}))

	db.Transact(func(t *Transaction) {
		t.Set("counters", "contended", &TestCounter{Value: 0})
	}, 0)

	b.StartTimer()

	var group sync.WaitGroup
	for i := 0; i < procs; i++ {
		group.Add(1)
		go LoopIncrement(db, "contended", &group, b.N)
	}
	group.Wait()

	b.StopTimer()

	db.Transact(func(t *Transaction) {
		var target = b.N * procs
		var counter = t.Read("counters", "contended").(*TestCounter)
		if counter.Value != uint32(target) {
			b.Errorf("Wrong count for counter: %d / %d",
				counter.Value, target)
		}
	}, 0)

	runtime.GOMAXPROCS(origProcs)
}
Example #24
func LoopIncrement(db *LogeDB, key LogeKey, group *sync.WaitGroup, count int) {
	var actor = func(t *Transaction) { Increment(t, key) }
	for i := 0; i < count; i++ {
		db.Transact(actor, 0)
	}
	group.Done()
}
Example #25
func (s *managedStorageSuite) checkPutResponse(c *gc.C, index int, wg *sync.WaitGroup,
	requestId int64, sha384Hash string, blob []byte) {

	// After a random time, respond to a previously queued put request and check the result.
	go func() {
		delay := rand.Intn(3)
		time.Sleep(time.Duration(delay) * time.Millisecond)
		expectError := index == 2
		if expectError {
			sha384Hash = "bad"
		}
		response := blobstore.NewPutResponse(requestId, sha384Hash)
		err := s.managedStorage.ProofOfAccessResponse(response)
		if expectError {
			c.Check(err, gc.NotNil)
		} else {
			c.Check(err, gc.IsNil)
			if err == nil {
				r, length, err := s.managedStorage.GetForEnvironment("env", fmt.Sprintf("path/to/blob%d", index))
				c.Check(err, gc.IsNil)
				if err == nil {
					data, err := ioutil.ReadAll(r)
					c.Check(err, gc.IsNil)
					c.Check(data, gc.DeepEquals, blob)
					c.Check(int(length), gc.DeepEquals, len(blob))
				}
			}
		}
		wg.Done()
	}()
}
Example #26
func main() {
	o := []output{
		{"sample-enc-mpeg1.mpg", AV_CODEC_ID_MPEG1VIDEO, make(chan *Frame)},
		{"sample-enc-mpeg2.mpg", AV_CODEC_ID_MPEG2VIDEO, make(chan *Frame)},
		{"sample-enc-mpeg4.mp4", AV_CODEC_ID_MPEG4, make(chan *Frame)},
	}

	wg := new(sync.WaitGroup)
	wCount := 0

	for _, item := range o {
		wg.Add(1)
		go encodeWorker(item, wg)
		wCount++
	}

	var srcFrame *Frame
	j := 0

	for srcFrame = range GenSyntVideoNewFrame(320, 200, AV_PIX_FMT_YUV420P) {
		srcFrame.SetPts(j)
		for i := 0; i < wCount; i++ {
			Retain(srcFrame)
			o[i].data <- srcFrame
		}
		j++
		Release(srcFrame)
	}

	for _, item := range o {
		close(item.data)
	}

	wg.Wait()
}
Example #27
func (cp *meowProxy) Serve(wg *sync.WaitGroup) {
	defer func() {
		wg.Done()
	}()
	ln, err := net.Listen("tcp", cp.addr)
	if err != nil {
		fmt.Println("listen meow failed:", err)
		return
	}
	info.Printf("meow proxy address %s\n", cp.addr)

	for {
		conn, err := ln.Accept()
		if err != nil {
			errl.Printf("meow proxy(%s) accept %v\n", ln.Addr(), err)
			if isErrTooManyOpenFd(err) {
				connPool.CloseAll()
			}
			time.Sleep(time.Millisecond)
			continue
		}
		ssConn := ss.NewConn(conn, cp.cipher.Copy())
		c := newClientConn(ssConn, cp)
		go c.serve()
	}
}
Example #28
func (s *Scheduler) TryNextPendingBuild(logger lager.Logger, versions *algorithm.VersionsDB, job atc.JobConfig, resources atc.ResourceConfigs, resourceTypes atc.ResourceTypes) Waiter {
	logger = logger.Session("try-next-pending")

	wg := new(sync.WaitGroup)

	wg.Add(1)
	go func() {
		defer wg.Done()

		build, found, err := s.PipelineDB.GetNextPendingBuild(job.Name)
		if err != nil {
			logger.Error("failed-to-get-next-pending-build", err)
			return
		}

		if !found {
			return
		}

		jobService, err := NewJobService(job, s.PipelineDB, s.Scanner)
		if err != nil {
			logger.Error("failed-to-get-job-service", err)
			return
		}

		s.ScheduleAndResumePendingBuild(logger, versions, build, job, resources, resourceTypes, jobService)
	}()

	return wg
}
Example #29
func (n *Node) threadProcessor(wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case thread := <-n.CThread:
			//log.Printf("processing /%s/thread/%d", thread.Board, thread.No)
			n.Storage.PersistThread(thread)
			if t, err := DownloadThread(thread.Board, thread.No); err == nil {
				n.Stats.Incr(METRIC_THREADS, 1)
				var postNos []int
				for _, post := range t.Posts {
					// TODO iff post.Time >= thread.LM
					postNos = append(postNos, post.No)
					n.CPost <- post
					n.Stats.Incr(METRIC_POSTS, 1)
				}
				n.Storage.PersistThreadPosts(t, postNos)
			} else {
				log.Print("Error downloading thread: ", err)
			}
		case <-n.stopThread:
			n.stopThread <- true
			//log.Print("Thread routine stopped")
			return
		}
	}
}
Example #30
func finalizeTestBalancerJob(j *TestBalancerJob, i int, wg *sync.WaitGroup, t *testing.T) { // {{{
	defer wg.Done()
	startedAt := time.Now()
	j.Wait()
	finishedAt := time.Now()
	t.Logf("[TestBalancerJob:%d] Executed: '%s' -> [%.6fs]", i, j.WorkerInfo, finishedAt.Sub(startedAt).Seconds())
} // }}}