Example #1
func createRunningPod(wg *sync.WaitGroup, c *client.Client, name, ns, image string, labels map[string]string) {
	defer GinkgoRecover()
	defer wg.Done()
	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind: "Pod",
		},
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  name,
					Image: image,
				},
			},
			DNSPolicy: api.DNSDefault,
		},
	}
	_, err := c.Pods(ns).Create(pod)
	expectNoError(err)
	expectNoError(waitForPodRunningInNamespace(c, name, ns))
}
Example #2
// Run runs the query concurrently, and returns the results.
func (q *Query) Run() []interface{} {
	rand.Seed(time.Now().UnixNano())
	var w sync.WaitGroup
	var l sync.Mutex
	places := make([]interface{}, len(q.Journey))
	for i, r := range q.Journey {
		w.Add(1)
		go func(types string, i int) {
			defer w.Done()
			response, err := q.find(types)
			if err != nil {
				log.Println("Failed to find places:", err)
				return
			}
			if len(response.Results) == 0 {
				log.Println("No places found for", types)
				return
			}
			for _, result := range response.Results {
				for _, photo := range result.Photos {
					photo.URL = "https://maps.googleapis.com/maps/api/place/photo?" +
						"maxwidth=1000&photoreference=" + photo.PhotoRef + "&key=" + APIKey
				}
			}
			randI := rand.Intn(len(response.Results))
			l.Lock()
			places[i] = response.Results[randI]
			l.Unlock()
		}(r, i)
	}
	w.Wait() // wait for everything to finish
	return places
}
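Each goroutine in the example above writes only to its own slot places[i], so the writes cannot overlap and wg.Wait() alone publishes them to the caller; the mutex is harmless but not strictly needed. A minimal, self-contained sketch of that fan-out-into-slots shape, with a hypothetical fetch standing in for q.find:

package main

import (
	"fmt"
	"sync"
)

// fetch is a placeholder for whatever per-item lookup the real code does.
func fetch(query string) string {
	return "result for " + query
}

func runAll(queries []string) []string {
	results := make([]string, len(queries))
	var wg sync.WaitGroup
	for i, q := range queries {
		wg.Add(1)
		go func(i int, q string) {
			defer wg.Done()
			results[i] = fetch(q) // slot i is written by this goroutine only
		}(i, q)
	}
	wg.Wait() // all slot writes are visible once Wait returns
	return results
}

func main() {
	fmt.Println(runAll([]string{"bar", "cafe", "museum"}))
}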
Example #3
// Work turns on the worker
func (w *Worker) Work(wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		// safely stop the worker
		case <-w.stop:
			return
		case task := <-w.reader:
			tasks, err := w.processFn(task)
			if err != nil {
				if task.Retries < MaxRetries-1 {
					task.Retries++
					w.writer <- task
					continue
				}
			}

			// submit any new tasks returned by the old one
			if tasks != nil {
				for _, t := range tasks {
					w.writer <- t
				}
			}
		}
	}
}
Example #4
func (hp *httpProxy) Serve(wg *sync.WaitGroup) {
	defer func() {
		wg.Done()
	}()
	ln, err := net.Listen("tcp", hp.addr)
	if err != nil {
		fmt.Println("listen http failed:", err)
		return
	}
	host, _, _ := net.SplitHostPort(hp.addr)
	var pacURL string
	if host == "" || host == "0.0.0.0" {
		pacURL = fmt.Sprintf("http://<hostip>:%s/pac", hp.port)
	} else if hp.addrInPAC == "" {
		pacURL = fmt.Sprintf("http://%s/pac", hp.addr)
	} else {
		pacURL = fmt.Sprintf("http://%s/pac", hp.addrInPAC)
	}
	info.Printf("listen http %s, PAC url %s\n", hp.addr, pacURL)

	for {
		conn, err := ln.Accept()
		if err != nil {
			errl.Printf("http proxy(%s) accept %v\n", ln.Addr(), err)
			if isErrTooManyOpenFd(err) {
				connPool.CloseAll()
			}
			time.Sleep(time.Millisecond)
			continue
		}
		c := newClientConn(conn, hp)
		go c.serve()
	}
}
Example #5
func (a *apiServer) runPipeline(pipelineInfo *pps.PipelineInfo) error {
	ctx, cancel := context.WithCancel(context.Background())
	a.lock.Lock()
	a.cancelFuncs[*pipelineInfo.Pipeline] = cancel
	a.lock.Unlock()
	var loopErr error
	//TODO this gets really weird with branching... we need to figure out what that looks like.
	mostRecentCommit := make(map[pfs.Repo]*pfs.Commit)
	var lock sync.Mutex
	var wg sync.WaitGroup
	for _, inputRepo := range pipelineInfo.InputRepo {
		inputRepo := inputRepo
		wg.Add(1)
		go func() {
			defer wg.Done()
			var lastCommit *pfs.Commit
			listCommitRequest := &pfs.ListCommitRequest{
				Repo:       inputRepo,
				CommitType: pfs.CommitType_COMMIT_TYPE_READ,
				From:       lastCommit,
				Block:      true,
			}
			commitInfos, err := a.pfsAPIClient.ListCommit(ctx, listCommitRequest)
			if err != nil && loopErr == nil {
				loopErr = err
				return
			}
			for _, commitInfo := range commitInfos.CommitInfo {
				lock.Lock()
				mostRecentCommit[*inputRepo] = commitInfo.Commit
				var commits []*pfs.Commit
				for _, commit := range mostRecentCommit {
					commits = append(commits, commit)
				}
				lock.Unlock()
				if len(commits) < len(pipelineInfo.InputRepo) {
					// we don't yet have a commit for every input repo so there's no way to run the job
					continue
				}
				outParentCommit, err := a.bestParent(pipelineInfo, commitInfo)
				if err != nil && loopErr == nil {
					loopErr = err
					return
				}
				_, err = a.jobAPIClient.CreateJob(
					ctx,
					&pps.CreateJobRequest{
						Spec: &pps.CreateJobRequest_Pipeline{
							Pipeline: pipelineInfo.Pipeline,
						},
						InputCommit:  []*pfs.Commit{commitInfo.Commit},
						OutputParent: outParentCommit,
					},
				)
			}
		}()
	}
	wg.Wait()
	return loopErr
}
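Note that loopErr above is assigned by several goroutines and read back by the caller with no synchronization, so the race detector would flag it. A minimal sketch, assuming a hypothetical doWork in place of the per-repo body, of capturing only the first error under a mutex (golang.org/x/sync/errgroup packages the same idea):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// doWork stands in for the real per-input work.
func doWork(id int) error {
	if id == 2 {
		return errors.New("input 2 failed")
	}
	return nil
}

func runGroup(n int) error {
	var (
		wg       sync.WaitGroup
		mu       sync.Mutex
		firstErr error
	)
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if err := doWork(i); err != nil {
				mu.Lock()
				if firstErr == nil { // remember only the first failure
					firstErr = err
				}
				mu.Unlock()
			}
		}(i)
	}
	wg.Wait()
	return firstErr
}

func main() {
	fmt.Println(runGroup(4))
}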
Example #6
func TestFatalRxError(t *testing.T) {
	t.Parallel()

	conn := mustConnect(t, *defaultConnConfig)
	defer closeConn(t, conn)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		var n int32
		var s string
		err := conn.QueryRow("select 1::int4, pg_sleep(10)::varchar").Scan(&n, &s)
		if err, ok := err.(pgx.PgError); !ok || err.Severity != "FATAL" {
			t.Fatalf("Expected QueryRow Scan to return fatal PgError, but instead received %v", err)
		}
	}()

	otherConn, err := pgx.Connect(*defaultConnConfig)
	if err != nil {
		t.Fatalf("Unable to establish connection: %v", err)
	}
	defer otherConn.Close()

	if _, err := otherConn.Exec("select pg_terminate_backend($1)", conn.Pid); err != nil {
		t.Fatalf("Unable to kill backend PostgreSQL process: %v", err)
	}

	wg.Wait()

	if conn.IsAlive() {
		t.Fatal("Connection should not be live but was")
	}
}
Example #7
func (s *managedStorageSuite) checkPutResponse(c *gc.C, index int, wg *sync.WaitGroup,
	requestId int64, sha384Hash string, blob []byte) {

	// After a random time, respond to a previously queued put request and check the result.
	go func() {
		delay := rand.Intn(3)
		time.Sleep(time.Duration(delay) * time.Millisecond)
		expectError := index == 2
		if expectError {
			sha384Hash = "bad"
		}
		response := blobstore.NewPutResponse(requestId, sha384Hash)
		err := s.managedStorage.ProofOfAccessResponse(response)
		if expectError {
			c.Check(err, gc.NotNil)
		} else {
			c.Check(err, gc.IsNil)
			if err == nil {
				r, length, err := s.managedStorage.GetForEnvironment("env", fmt.Sprintf("path/to/blob%d", index))
				c.Check(err, gc.IsNil)
				if err == nil {
					data, err := ioutil.ReadAll(r)
					c.Check(err, gc.IsNil)
					c.Check(data, gc.DeepEquals, blob)
					c.Check(int(length), gc.DeepEquals, len(blob))
				}
			}
		}
		wg.Done()
	}()
}
Example #8
func (cd *CheckDocker) GetData() error {
	errChan := make(chan error)
	var err error
	var wg sync.WaitGroup

	wg.Add(2)

	go func(cd *CheckDocker, errChan chan error) {
		defer wg.Done()

		cd.dockerInfoData, err = cd.dockerclient.Info()
		if err != nil {
			errChan <- err
		}
	}(cd, errChan)

	go func(cd *CheckDocker, errChan chan error) {
		defer wg.Done()

		cd.dockerContainersData, err = cd.dockerclient.ListContainers(dockerlib.ListContainersOptions{})
		if err != nil {
			errChan <- err
		}
	}(cd, errChan)

	go func() {
		wg.Wait()
		close(errChan)
	}()

	err = <-errChan

	return err
}
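Both goroutines above assign to the same outer err variable, which is a data race; the channel alone is enough to carry the failures out. A sketch of the same gather-and-close shape with per-goroutine errors and a buffered channel, assuming hypothetical fetchInfo and fetchContainers calls:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// Placeholders for the two Docker API calls.
func fetchInfo() (string, error)       { return "info", nil }
func fetchContainers() (string, error) { return "", errors.New("docker unreachable") }

func gather() error {
	errChan := make(chan error, 2) // buffered: senders never block
	var wg sync.WaitGroup

	wg.Add(2)
	go func() {
		defer wg.Done()
		if _, err := fetchInfo(); err != nil {
			errChan <- err
		}
	}()
	go func() {
		defer wg.Done()
		if _, err := fetchContainers(); err != nil {
			errChan <- err
		}
	}()

	wg.Wait()
	close(errChan)
	return <-errChan // first error, or nil from the closed, empty channel
}

func main() {
	fmt.Println(gather())
}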
Example #9
func (n *network) Run(ctx context.Context) {
	wg := sync.WaitGroup{}

	log.Info("Watching for new subnet leases")
	evts := make(chan []subnet.Event)
	wg.Add(1)
	go func() {
		subnet.WatchLeases(ctx, n.sm, n.name, n.lease, evts)
		wg.Done()
	}()

	n.rl = make([]netlink.Route, 0, 10)
	wg.Add(1)
	go func() {
		n.routeCheck(ctx)
		wg.Done()
	}()

	defer wg.Wait()

	for {
		select {
		case evtBatch := <-evts:
			n.handleSubnetEvents(evtBatch)

		case <-ctx.Done():
			return
		}
	}
}
Example #10
func (rm *ReplicationManager) syncReplicationController(controllerSpec api.ReplicationController) error {
	s := labels.Set(controllerSpec.DesiredState.ReplicaSelector).AsSelector()
	podList, err := rm.kubeClient.ListPods(s)
	if err != nil {
		return err
	}
	filteredList := rm.filterActivePods(podList.Items)
	diff := len(filteredList) - controllerSpec.DesiredState.Replicas
	if diff < 0 {
		diff *= -1
		wait := sync.WaitGroup{}
		wait.Add(diff)
		glog.Infof("Too few replicas, creating %d\n", diff)
		for i := 0; i < diff; i++ {
			go func() {
				defer wait.Done()
				rm.podControl.createReplica(controllerSpec)
			}()
		}
		wait.Wait()
	} else if diff > 0 {
		glog.Infof("Too many replicas, deleting %d\n", diff)
		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func(ix int) {
				defer wait.Done()
				rm.podControl.deletePod(filteredList[ix].ID)
			}(i)
		}
		wait.Wait()
	}
	return nil
}
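The delete branch passes the loop index into the goroutine as ix rather than capturing i directly: before Go 1.22 every iteration shares a single i, so closures that merely capture it may all observe its final value. A small sketch of the two idioms that avoid that pitfall:

package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c"}
	var wg sync.WaitGroup

	// Idiom 1: pass the loop variables in as parameters.
	for i, v := range items {
		wg.Add(1)
		go func(i int, v string) {
			defer wg.Done()
			fmt.Println(i, v)
		}(i, v)
	}

	// Idiom 2: shadow the loop variables so each iteration gets its own copies
	// (unnecessary from Go 1.22 on, where range variables are per-iteration).
	for i, v := range items {
		i, v := i, v
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(i, v)
		}()
	}

	wg.Wait()
}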
Example #11
// ReadWrite does read and write in parallel.
// qRead is num goroutines for reading.
// qWrite is num goroutines for writing.
// Assume n divisible by (qRead + qWrite).
func ReadWrite(n, qRead, qWrite int, newFunc func() HashMap, b *testing.B) {
	q := qRead + qWrite
	check(n, q)
	work := intPairArray(n)
	b.StartTimer()
	for i := 0; i < b.N; i++ { // N reps.
		h := newFunc()
		var wg sync.WaitGroup
		for j := 0; j < qRead; j++ { // Read goroutines.
			wg.Add(1)
			go func(j int) {
				defer wg.Done()
				start, end := workRange(n, q, j)
				for k := start; k < end; k++ {
					h.Get(work[k].Key)
				}
			}(j)
		}

		for j := qRead; j < q; j++ { // Write goroutines.
			wg.Add(1)
			go func(j int) {
				defer wg.Done()
				start, end := workRange(n, q, j)
				for k := start; k < end; k++ {
					h.Put(work[k].Key, work[k].Val)
				}
			}(j)
		}
		wg.Wait()
	}
}
Example #12
// Gathers data for all servers.
func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup

	errorChannel := make(chan error, len(h.Servers))

	for _, server := range h.Servers {
		wg.Add(1)
		go func(server string) {
			defer wg.Done()
			if err := h.gatherServer(acc, server); err != nil {
				errorChannel <- err
			}
		}(server)
	}

	wg.Wait()
	close(errorChannel)

	// Get all errors and return them as one giant error
	errorStrings := []string{}
	for err := range errorChannel {
		errorStrings = append(errorStrings, err.Error())
	}

	if len(errorStrings) == 0 {
		return nil
	}
	return errors.New(strings.Join(errorStrings, "\n"))
}
Example #13
func (ts *tribServer) getTribValuesFromHashIds(user string, hashIds []string) ([]string, error) {
	var err error
	tribValues := make([]string, len(hashIds))
	returnValues := make([]string, len(hashIds))

	var wg sync.WaitGroup

	wg.Add(len(hashIds))

	for i := range hashIds {
		go func(i int) {
			defer wg.Done()
			tribValues[i], err = ts.Libstore.Get(makeTribId(user, hashIds[i]))
			if err != nil {
				if err != libstore.ErrorKeyNotFound { // ignore cross inconsistency
					panic(err)

				}
			}
		}(i)
	}
	wg.Wait()

	j := 0
	for i := range tribValues {
		if tribValues[i] != "" {
			returnValues[j] = tribValues[i]
			j++
		}
	}

	return returnValues[0:j], nil
}
Example #14
// cWriteShards writes shards concurrently
func cWriteShards(out []io.Writer, in [][]byte) error {
	if len(out) != len(in) {
		panic("internal error: in and out size does not match")
	}
	var errs = make(chan error, len(out))
	var wg sync.WaitGroup
	wg.Add(len(out))
	for i := range in {
		go func(i int) {
			defer wg.Done()
			if out[i] == nil {
				errs <- nil
				return
			}
			n, err := out[i].Write(in[i])
			if err != nil {
				errs <- StreamWriteError{Err: err, Stream: i}
				return
			}
			if n != len(in[i]) {
				errs <- StreamWriteError{Err: io.ErrShortWrite, Stream: i}
			}
		}(i)
	}
	wg.Wait()
	close(errs)
	for err := range errs {
		if err != nil {
			return err
		}
	}

	return nil
}
Example #15
// Serve serves SFTP connections until the streams stop or the SFTP subsystem
// is stopped.
func (svr *Server) Serve() error {
	var wg sync.WaitGroup
	wg.Add(sftpServerWorkerCount)
	for i := 0; i < sftpServerWorkerCount; i++ {
		go func() {
			defer wg.Done()
			if err := svr.sftpServerWorker(); err != nil {
				svr.rwc.Close() // shuts down recvPacket
			}
		}()
	}

	var err error
	for {
		var pktType uint8
		var pktBytes []byte
		pktType, pktBytes, err = recvPacket(svr.rwc)
		if err != nil {
			break
		}
		svr.pktChan <- rxPacket{fxp(pktType), pktBytes}
	}

	close(svr.pktChan) // shuts down sftpServerWorkers
	wg.Wait()          // wait for all workers to exit

	// close any still-open files
	for handle, file := range svr.openFiles {
		fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name())
		file.Close()
	}
	return err // error from recvPacket
}
Example #16
// TestStoreRangeUpReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeUpReplicate(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	// Initialize the gossip network.
	var wg sync.WaitGroup
	wg.Add(len(mtc.stores))
	key := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
	mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ roachpb.Value) { wg.Done() })
	for _, s := range mtc.stores {
		s.GossipStore()
	}
	wg.Wait()

	// Once we know our peers, trigger a scan.
	mtc.stores[0].ForceReplicationScanAndProcess()

	// The range should become available on every node.
	if err := util.IsTrueWithin(func() bool {
		for _, s := range mtc.stores {
			r := s.LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
			if r == nil {
				return false
			}
		}
		return true
	}, replicationTimeout); err != nil {
		t.Fatal(err)
	}
}
Example #17
File: site.go Project: jaden/hugo
func pageRenderer(s *Site, pages <-chan *Page, results chan<- error, wg *sync.WaitGroup) {
	defer wg.Done()
	for p := range pages {
		var layouts []string

		if !p.IsRenderable() {
			self := "__" + p.TargetPath()
			_, err := s.Tmpl.New(self).Parse(string(p.Content))
			if err != nil {
				results <- err
				continue
			}
			layouts = append(layouts, self)
		} else {
			layouts = append(layouts, p.Layout()...)
			layouts = append(layouts, "_default/single.html")
		}

		b, err := s.renderPage("page "+p.FullFilePath(), p, s.appendThemeTemplates(layouts)...)
		if err != nil {
			results <- err
		} else {
			results <- s.WriteDestPage(p.TargetPath(), b)
		}
	}
}
Example #18
func (t *testRunner) Run() bool {

	reschan := make(chan *testResult)
	wg := sync.WaitGroup{}
	for _, route := range t.api.Routes {
		wg.Add(1)

		go func(route Route) {

			reschan <- t.invokeTest(route.Path, route.Test)
			wg.Done()
		}(route)

	}

	go func() {
		wg.Wait()
		close(reschan)
	}()

	success := true
	for res := range reschan {
		if res == nil {
			continue
		}

		if res.isFailure() {
			success = false
		}
	}

	return success

}
Example #19
func (cp *meowProxy) Serve(wg *sync.WaitGroup) {
	defer func() {
		wg.Done()
	}()
	ln, err := net.Listen("tcp", cp.addr)
	if err != nil {
		fmt.Println("listen meow failed:", err)
		return
	}
	info.Printf("meow proxy address %s\n", cp.addr)

	for {
		conn, err := ln.Accept()
		if err != nil {
			errl.Printf("meow proxy(%s) accept %v\n", ln.Addr(), err)
			if isErrTooManyOpenFd(err) {
				connPool.CloseAll()
			}
			time.Sleep(time.Millisecond)
			continue
		}
		ssConn := ss.NewConn(conn, cp.cipher.Copy())
		c := newClientConn(ssConn, cp)
		go c.serve()
	}
}
Example #20
// Kill all containers in a pod.  Returns the number of containers deleted and an error if one occurs.
func (kl *Kubelet) killContainersInPod(pod *api.BoundPod, dockerContainers dockertools.DockerContainers) (int, error) {
	podFullName := GetPodFullName(pod)

	count := 0
	errs := make(chan error, len(pod.Spec.Containers))
	wg := sync.WaitGroup{}
	for _, container := range pod.Spec.Containers {
		// TODO: Consider being more aggressive: kill all containers with this pod UID, period.
		if dockerContainer, found, _ := dockerContainers.FindPodContainer(podFullName, pod.UID, container.Name); found {
			count++
			wg.Add(1)
			go func() {
				err := kl.killContainer(dockerContainer)
				if err != nil {
					glog.Errorf("Failed to delete container: %v; Skipping pod %q", err, podFullName)
					errs <- err
				}
				wg.Done()
			}()
		}
	}
	wg.Wait()
	close(errs)
	if len(errs) > 0 {
		errList := []error{}
		for err := range errs {
			errList = append(errList, err)
		}
		return -1, fmt.Errorf("failed to delete containers (%v)", errList)
	}
	return count, nil
}
Example #21
// Test that simultaneous RemoveAll do not report an error.
// As long as it gets removed, we should be happy.
func TestRemoveAllRace(t *testing.T) {
	if runtime.GOOS == "windows" {
		// Windows has very strict rules about things like
		// removing directories while someone else has
		// them open. The racing doesn't work out nicely
		// like it does on Unix.
		t.Skip("skipping on windows")
	}

	n := runtime.GOMAXPROCS(16)
	defer runtime.GOMAXPROCS(n)
	root, err := ioutil.TempDir("", "issue")
	if err != nil {
		t.Fatal(err)
	}
	mkdirTree(t, root, 1, 6)
	hold := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			<-hold
			err := RemoveAll(root)
			if err != nil {
				t.Errorf("unexpected error: %T, %q", err, err)
			}
		}()
	}
	close(hold) // let workers race to remove root
	wg.Wait()
}
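The hold channel above works as a start gate: every worker blocks on <-hold until close(hold) releases them all at once, which maximizes the overlap the race test is trying to provoke. The same gate in isolation:

package main

import (
	"fmt"
	"sync"
)

func main() {
	start := make(chan struct{})
	var wg sync.WaitGroup

	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			<-start // park here until the gate opens
			fmt.Println("worker", i, "running")
		}(i)
	}

	close(start) // release every worker at the same moment
	wg.Wait()
}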
Example #22
func connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm) {

	var wg sync.WaitGroup
	connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
		// TODO: make a DialAddr func.
		s.peers.AddAddr(dst, addr, peer.PermanentAddrTTL)
		if _, err := s.Dial(ctx, dst); err != nil {
			t.Fatal("error swarm dialing to peer", err)
		}
		wg.Done()
	}

	log.Info("Connecting swarms simultaneously.")
	for _, s1 := range swarms {
		for _, s2 := range swarms {
			if s2.local != s1.local { // don't connect to self.
				wg.Add(1)
				connect(s1, s2.LocalPeer(), s2.ListenAddresses()[0]) // try the first.
			}
		}
	}
	wg.Wait()

	for _, s := range swarms {
		log.Infof("%s swarm routing table: %s", s.local, s.Peers())
	}
}
Example #23
// RunParallel spawns a goroutine per task in the given queue
func RunParallel(task Task, numTasks, numWorkers int) {
	start := time.Now()
	if numWorkers <= 0 {
		numWorkers = numTasks
	}
	defer func() {
		glog.Infof("RunParallel took %v for %d tasks and %d workers", time.Since(start), numTasks, numWorkers)
	}()
	var wg sync.WaitGroup
	semCh := make(chan struct{}, numWorkers)
	wg.Add(numTasks)
	for id := 0; id < numTasks; id++ {
		go func(id int) {
			semCh <- struct{}{}
			err := task(id)
			if err != nil {
				glog.Fatalf("Worker failed with %v", err)
			}
			<-semCh
			wg.Done()
		}(id)
	}
	wg.Wait()
	close(semCh)
}
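RunParallel starts a goroutine per task up front and uses the buffered semCh as a counting semaphore, so at most numWorkers task bodies run at once while the WaitGroup counts completions. A self-contained sketch of that throttle, with a hypothetical task function:

package main

import (
	"fmt"
	"sync"
	"time"
)

// task stands in for the real unit of work.
func task(id int) {
	time.Sleep(10 * time.Millisecond)
	fmt.Println("task", id, "done")
}

func runBounded(numTasks, numWorkers int) {
	var wg sync.WaitGroup
	sem := make(chan struct{}, numWorkers) // numWorkers tokens available

	wg.Add(numTasks)
	for id := 0; id < numTasks; id++ {
		go func(id int) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot
			defer func() { <-sem }() // release it when done
			task(id)
		}(id)
	}
	wg.Wait()
}

func main() {
	runBounded(8, 2)
}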
Example #24
func TestOutputHTTPSSL(t *testing.T) {
	wg := new(sync.WaitGroup)
	quit := make(chan int)

	// Origin and Replay server initialization
	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		wg.Done()
	}))

	input := NewTestInput()
	output := NewHTTPOutput(server.URL, &HTTPOutputConfig{})

	Plugins.Inputs = []io.Reader{input}
	Plugins.Outputs = []io.Writer{output}

	go Start(quit)

	wg.Add(2)

	input.EmitPOST()
	input.EmitGET()

	wg.Wait()
	close(quit)
}
Example #25
func copyLoop(a net.Conn, b net.Conn) error {
	// Note: b is always the pt connection.  a is the SOCKS/ORPort connection.
	errChan := make(chan error, 2)

	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		defer wg.Done()
		defer b.Close()
		defer a.Close()
		_, err := io.Copy(b, a)
		errChan <- err
	}()
	go func() {
		defer wg.Done()
		defer a.Close()
		defer b.Close()
		_, err := io.Copy(a, b)
		errChan <- err
	}()

	// Wait for both upstream and downstream to close.  Since one side
	// terminating closes the other, the second error in the channel will be
	// something like EINVAL (though io.Copy() will swallow EOF), so only the
	// first error is returned.
	wg.Wait()
	if len(errChan) > 0 {
		return <-errChan
	}

	return nil
}
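A runnable sketch of the same cross-close relay, wired between two in-memory net.Pipe connections so it stands alone; whichever copy finishes first closes both conns, which unblocks the opposite io.Copy (names like relay are illustrative):

package main

import (
	"fmt"
	"io"
	"net"
	"sync"
)

// relay copies in both directions; when one direction ends it closes both
// connections, unblocking the other io.Copy.
func relay(a, b net.Conn) {
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		defer a.Close()
		defer b.Close()
		io.Copy(b, a)
	}()
	go func() {
		defer wg.Done()
		defer a.Close()
		defer b.Close()
		io.Copy(a, b)
	}()
	wg.Wait()
}

func main() {
	// client <-> relay <-> server, all in memory.
	clientSide, relayA := net.Pipe()
	relayB, serverSide := net.Pipe()

	go relay(relayA, relayB)

	go func() { // trivial "server": read one message, then hang up
		buf := make([]byte, 64)
		n, _ := serverSide.Read(buf)
		fmt.Printf("server got %q\n", buf[:n])
		serverSide.Close()
	}()

	clientSide.Write([]byte("hello"))
	_, err := clientSide.Read(make([]byte, 1)) // io.EOF once the relay tears down
	fmt.Println("client read:", err)
}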
Example #26
func BenchmarkHTTPOutput(b *testing.B) {
	wg := new(sync.WaitGroup)
	quit := make(chan int)

	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(50 * time.Millisecond)
		wg.Done()
	}))
	defer server.Close()

	input := NewTestInput()
	output := NewHTTPOutput(server.URL, &HTTPOutputConfig{})

	Plugins.Inputs = []io.Reader{input}
	Plugins.Outputs = []io.Writer{output}

	go Start(quit)

	for i := 0; i < b.N; i++ {
		wg.Add(1)
		input.EmitPOST()
	}

	wg.Wait()

	close(quit)
}
Example #27
// DeprecateImages marks the given images as deprecated
func (g *GceImages) DeprecateImages(opts *DeprecateOptions) error {
	var (
		wg          sync.WaitGroup
		mu          sync.Mutex // protects multiErrors
		multiErrors error
	)

	for _, n := range opts.Names {
		wg.Add(1)
		go func(name string) {
			st := &compute.DeprecationStatus{
				State: opts.State,
			}

			_, err := g.svc.Deprecate(g.config.ProjectID, name, st).Do()
			if err != nil {
				mu.Lock()
				multiErrors = multierror.Append(multiErrors, err)
				mu.Unlock()
			}

			wg.Done()
		}(n)
	}

	wg.Wait()
	return multiErrors
}
Example #28
func TestHTTPOutputKeepOriginalHost(t *testing.T) {
	wg := new(sync.WaitGroup)
	quit := make(chan int)

	input := NewTestInput()

	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		if req.Host != "custom-host.com" {
			t.Error("Wrong header", req.Host)
		}

		wg.Done()
	}))
	defer server.Close()

	headers := HTTPHeaders{HTTPHeader{"Host", "custom-host.com"}}
	Settings.modifierConfig = HTTPModifierConfig{headers: headers}

	output := NewHTTPOutput(server.URL, &HTTPOutputConfig{Debug: false, OriginalHost: true})

	Plugins.Inputs = []io.Reader{input}
	Plugins.Outputs = []io.Writer{output}

	go Start(quit)

	wg.Add(1)
	input.EmitGET()

	wg.Wait()

	close(quit)

	Settings.modifierConfig = HTTPModifierConfig{}
}
Example #29
func (n *Node) threadProcessor(wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case thread := <-n.CThread:
			//log.Printf("processing /%s/thread/%d", thread.Board, thread.No)
			n.Storage.PersistThread(thread)
			if t, err := DownloadThread(thread.Board, thread.No); err == nil {
				n.Stats.Incr(METRIC_THREADS, 1)
				var postNos []int
				for _, post := range t.Posts {
					// TODO iff post.Time >= thread.LM
					postNos = append(postNos, post.No)
					n.CPost <- post
					n.Stats.Incr(METRIC_POSTS, 1)
				}
				n.Storage.PersistThreadPosts(t, postNos)
			} else {
				log.Print("Error downloading thread: ", err)
			}
		case <-n.stopThread:
			n.stopThread <- true
			//log.Print("Thread routine stopped")
			return
		}
	}
}
Example #30
func NewHttpService(context interface {
	Acquire()
	Release()
}, server *http.Server) (s *HttpService, err error) {
	s = &HttpService{}
	addr := server.Addr
	if addr == "" {
		addr = ":http"
	}

	s.listener, err = net.Listen("tcp", addr)
	if err != nil {
		return
	}

	var w sync.WaitGroup
	w.Add(1)
	context.Acquire()
	go func() {
		defer context.Release()
		l := s.listener
		w.Done()

		server.Serve(l)
	}()

	return
}
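The WaitGroup here is armed with Add(1) and released with Done, but the excerpt never calls w.Wait(), so as shown it has no effect; it reads like it was meant as a readiness latch. A minimal sketch, under that assumption, of the latch pattern where the constructor returns only after the serving goroutine has started (startServer and its details are illustrative, not the original API):

package main

import (
	"fmt"
	"net"
	"net/http"
	"sync"
)

// startServer returns only after the goroutine has picked up the listener,
// using the WaitGroup as a one-shot readiness latch.
func startServer(server *http.Server) (net.Listener, error) {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, err
	}

	var ready sync.WaitGroup
	ready.Add(1)
	go func() {
		l := ln      // setup that must happen before the caller proceeds
		ready.Done() // signal: the goroutine is up and about to serve
		server.Serve(l)
	}()
	ready.Wait() // block until the goroutine has started
	return ln, nil
}

func main() {
	ln, err := startServer(&http.Server{})
	if err != nil {
		panic(err)
	}
	fmt.Println("serving on", ln.Addr())
	ln.Close()
}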