Example #1
File: serve.go Project: logan/heim
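// run listens on cmd.addr and serves HTTP until the scope context is cancelled; the mutex-guarded closeListener ensures the listener is closed exactly once.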
func (cmd *serveEmbedCmd) run(ctx scope.Context, args []string) error {
	listener, err := net.Listen("tcp", cmd.addr)
	if err != nil {
		return err
	}

	closed := false
	m := sync.Mutex{}
	closeListener := func() {
		m.Lock()
		if !closed {
			listener.Close()
			closed = true
		}
		m.Unlock()
	}

	// Spin off goroutine to watch ctx and close listener if shutdown requested.
	go func() {
		<-ctx.Done()
		closeListener()
	}()

	if err := http.Serve(listener, cmd); err != nil {
		fmt.Printf("http[%s]: %s\n", cmd.addr, err)
		return err
	}

	closeListener()
	ctx.WaitGroup().Done()
	return ctx.Err()
}
Example #2
// Run runs the query concurrently, and returns the results.
func (q *Query) Run() []interface{} {
	rand.Seed(time.Now().UnixNano())
	var w sync.WaitGroup
	var l sync.Mutex
	places := make([]interface{}, len(q.Journey))
	for i, r := range q.Journey {
		w.Add(1)
		go func(types string, i int) {
			defer w.Done()
			response, err := q.find(types)
			if err != nil {
				log.Println("Failed to find places:", err)
				return
			}
			if len(response.Results) == 0 {
				log.Println("No places found for", types)
				return
			}
			for _, result := range response.Results {
				for _, photo := range result.Photos {
					photo.URL = "https://maps.googleapis.com/maps/api/place/photo?" +
						"maxwidth=1000&photoreference=" + photo.PhotoRef + "&key=" + APIKey
				}
			}
			randI := rand.Intn(len(response.Results))
			l.Lock()
			places[i] = response.Results[randI]
			l.Unlock()
		}(r, i)
	}
	w.Wait() // wait for everything to finish
	return places
}
Example #3
// GetLookupdTopicChannels returns a []string containing a union of the channels
// from all the given lookupd for the given topic
func GetLookupdTopicChannels(topic string, lookupdHTTPAddrs []string) ([]string, error) {
	success := false
	allChannels := make([]string, 0)
	var lock sync.Mutex
	var wg sync.WaitGroup
	for _, addr := range lookupdHTTPAddrs {
		wg.Add(1)
		endpoint := fmt.Sprintf("http://%s/channels?topic=%s", addr, url.QueryEscape(topic))
		log.Printf("LOOKUPD: querying %s", endpoint)
		go func(endpoint string) {
			data, err := util.ApiRequest(endpoint)
			lock.Lock()
			defer lock.Unlock()
			defer wg.Done()
			if err != nil {
				log.Printf("ERROR: lookupd %s - %s", endpoint, err.Error())
				return
			}
			success = true
			// {"data":{"channels":["test"]}}
			channels, _ := data.Get("channels").StringArray()
			allChannels = util.StringUnion(allChannels, channels)
		}(endpoint)
	}
	wg.Wait()
	sort.Strings(allChannels)
	if !success {
		return nil, errors.New("unable to query any lookupd")
	}
	return allChannels, nil
}
Example #4
// Put implements the Putter interface.
func (mp *MultiPutter) Put(username string, creds map[string]interface{}) error {
	var (
		err error
		mu  sync.Mutex
		wg  sync.WaitGroup
	)

	for _, p := range mp.Putters {
		wg.Add(1)

		go func(p Putter) {
			defer wg.Done()

			if e := p.Put(username, creds); e != nil {
				mu.Lock()
				err = multierror.Append(err, e)
				mu.Unlock()
			}
		}(p)
	}

	wg.Wait()

	return err
}
Example #5
func (a *apiServer) runPipeline(pipelineInfo *pps.PipelineInfo) error {
	ctx, cancel := context.WithCancel(context.Background())
	a.lock.Lock()
	a.cancelFuncs[*pipelineInfo.Pipeline] = cancel
	a.lock.Unlock()
	var loopErr error
	//TODO this gets really weird with branching... we need to figure out what that looks like.
	mostRecentCommit := make(map[pfs.Repo]*pfs.Commit)
	var lock sync.Mutex
	var wg sync.WaitGroup
	for _, inputRepo := range pipelineInfo.InputRepo {
		inputRepo := inputRepo
		wg.Add(1)
		go func() {
			defer wg.Done()
			var lastCommit *pfs.Commit
			listCommitRequest := &pfs.ListCommitRequest{
				Repo:       inputRepo,
				CommitType: pfs.CommitType_COMMIT_TYPE_READ,
				From:       lastCommit,
				Block:      true,
			}
			commitInfos, err := a.pfsAPIClient.ListCommit(ctx, listCommitRequest)
			if err != nil && loopErr == nil {
				loopErr = err
				return
			}
			for _, commitInfo := range commitInfos.CommitInfo {
				lock.Lock()
				mostRecentCommit[*inputRepo] = commitInfo.Commit
				var commits []*pfs.Commit
				for _, commit := range mostRecentCommit {
					commits = append(commits, commit)
				}
				lock.Unlock()
				if len(commits) < len(pipelineInfo.InputRepo) {
					// we don't yet have a commit for every input repo so there's no way to run the job
					continue
				}
				outParentCommit, err := a.bestParent(pipelineInfo, commitInfo)
				if err != nil && loopErr == nil {
					loopErr = err
					return
				}
				_, err = a.jobAPIClient.CreateJob(
					ctx,
					&pps.CreateJobRequest{
						Spec: &pps.CreateJobRequest_Pipeline{
							Pipeline: pipelineInfo.Pipeline,
						},
						InputCommit:  []*pfs.Commit{commitInfo.Commit},
						OutputParent: outParentCommit,
					},
				)
			}
		}()
	}
	wg.Wait()
	return loopErr
}
Example #6
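// handleResponse decrypts the VMess response from conn, validates the 4-byte response header, and forwards the payload to output; the pre-locked finish mutex is released when the response stream ends.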
func handleResponse(conn net.Conn, request *protocol.VMessRequest, output chan<- *alloc.Buffer, finish *sync.Mutex, isUDP bool) {
	defer finish.Unlock()
	defer close(output)
	responseKey := md5.Sum(request.RequestKey[:])
	responseIV := md5.Sum(request.RequestIV[:])

	decryptResponseReader, err := v2io.NewAesDecryptReader(responseKey[:], responseIV[:], conn)
	if err != nil {
		log.Error("VMessOut: Failed to create decrypt reader: %v", err)
		return
	}

	buffer, err := v2net.ReadFrom(decryptResponseReader, nil)
	if err != nil {
		log.Error("VMessOut: Failed to read VMess response (%d bytes): %v", buffer.Len(), err)
		return
	}
	if buffer.Len() < 4 || !bytes.Equal(buffer.Value[:4], request.ResponseHeader[:]) {
		log.Warning("VMessOut: unexepcted response header. The connection is probably hijacked.")
		return
	}
	log.Info("VMessOut received %d bytes from %s", buffer.Len()-4, conn.RemoteAddr().String())

	buffer.SliceFrom(4)
	output <- buffer

	if !isUDP {
		v2net.ReaderToChan(output, decryptResponseReader)
	}

	return
}
Example #7
// golang.org/issue/13924
// This used to fail after many iterations, especially with -race:
// go test -v -run=TestTransportDoubleCloseOnWriteError -count=500 -race
func TestTransportDoubleCloseOnWriteError(t *testing.T) {
	var (
		mu   sync.Mutex
		conn net.Conn // to close if set
	)

	st := newServerTester(t,
		func(w http.ResponseWriter, r *http.Request) {
			mu.Lock()
			defer mu.Unlock()
			if conn != nil {
				conn.Close()
			}
		},
		optOnlyServer,
	)
	defer st.Close()

	tr := &Transport{
		TLSClientConfig: tlsConfigInsecure,
		DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
			tc, err := tls.Dial(network, addr, cfg)
			if err != nil {
				return nil, err
			}
			mu.Lock()
			defer mu.Unlock()
			conn = tc
			return tc, nil
		},
	}
	defer tr.CloseIdleConnections()
	c := &http.Client{Transport: tr}
	c.Get(st.ts.URL)
}
Example #8
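// memoizeBytes returns a function that formats a byte count with a B/KB/MB/GB/TB/PB suffix, caching formatted results under a mutex.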
func memoizeBytes() byteDescription {
	cache := map[int64]string{}
	suffixes := []string{"B", "KB", "MB", "GB", "TB", "PB"}
	maxLen := len(suffixes) - 1

	var cacheMu sync.Mutex

	return func(b int64) string {
		cacheMu.Lock()
		defer cacheMu.Unlock()

		description, ok := cache[b]
		if ok {
			return description
		}

		bf := float64(b)
		i := 0
		description = ""
		for {
			if bf/BytesPerKB < 1 || i >= maxLen {
				description = fmt.Sprintf("%.2f%s", bf, suffixes[i])
				break
			}
			bf /= BytesPerKB
			i += 1
		}
		cache[b] = description
		return description
	}
}
Example #9
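// TestPseudoRandomSend checks that select chooses pseudorandomly between two ready send cases; the pre-locked mutex doubles as a done signal for the receiving goroutine.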
func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}
Example #10
// Returns the host containers, non-Kubernetes containers, and an error (if any).
func (self *kubeNodeMetrics) getNodesInfo(nodeList *nodes.NodeList, start, end time.Time) ([]api.Container, []api.Container, error) {
	var (
		lock sync.Mutex
		wg   sync.WaitGroup
	)
	hostContainers := make([]api.Container, 0, len(nodeList.Items))
	rawContainers := make([]api.Container, 0, len(nodeList.Items))
	for host, info := range nodeList.Items {
		wg.Add(1)
		go func(host nodes.Host, info nodes.Info) {
			defer wg.Done()
			if hostContainer, containers, err := self.updateStats(host, info, start, end); err == nil {
				lock.Lock()
				defer lock.Unlock()
				if hostContainer != nil {
					hostContainers = append(hostContainers, *hostContainer)
				}
				rawContainers = append(rawContainers, containers...)
			}
		}(host, info)
	}
	wg.Wait()

	return hostContainers, rawContainers, nil
}
Example #11
func (this *VMessOutboundHandler) handleRequest(session *encoding.ClientSession, conn internet.Connection, request *protocol.RequestHeader, payload *alloc.Buffer, input v2io.Reader, finish *sync.Mutex) {
	defer finish.Unlock()

	writer := v2io.NewBufferedWriter(conn)
	defer writer.Release()
	session.EncodeRequestHeader(request, writer)

	bodyWriter := session.EncodeRequestBody(writer)
	var streamWriter v2io.Writer = v2io.NewAdaptiveWriter(bodyWriter)
	if request.Option.Has(protocol.RequestOptionChunkStream) {
		streamWriter = vmessio.NewAuthChunkWriter(streamWriter)
	}
	if err := streamWriter.Write(payload); err != nil {
		conn.SetReusable(false)
	}
	writer.SetCached(false)

	err := v2io.Pipe(input, streamWriter)
	if err != io.EOF {
		conn.SetReusable(false)
	}

	if request.Option.Has(protocol.RequestOptionChunkStream) {
		err := streamWriter.Write(alloc.NewSmallBuffer().Clear())
		if err != nil {
			conn.SetReusable(false)
		}
	}
	streamWriter.Release()
	return
}
Example #12
func diameter(digests []string, diffStore diff.DiffStore) int {
	// TODO Parallelize.
	lock := sync.Mutex{}
	max := 0
	wg := sync.WaitGroup{}
	for {
		if len(digests) <= 2 {
			break
		}
		wg.Add(1)
		go func(d1 string, d2 []string) {
			defer wg.Done()
			dms, err := diffStore.Get(d1, d2)
			if err != nil {
				glog.Errorf("Unable to get diff: %s", err)
				return
			}
			localMax := 0
			for _, dm := range dms {
				if dm.NumDiffPixels > localMax {
					localMax = dm.NumDiffPixels
				}
			}
			lock.Lock()
			defer lock.Unlock()
			if localMax > max {
				max = localMax
			}
		}(digests[0], digests[1:2])
		digests = digests[1:]
	}
	wg.Wait()
	return max
}
Example #13
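// getMastersPosition fetches each shard master's replication position in parallel and returns the results keyed by shard.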
func (wr *Wrangler) getMastersPosition(shards []*topo.ShardInfo) (map[*topo.ShardInfo]myproto.ReplicationPosition, error) {
	mu := sync.Mutex{}
	result := make(map[*topo.ShardInfo]myproto.ReplicationPosition)

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			log.Infof("Gathering master position for %v", si.MasterAlias)
			ti, err := wr.ts.GetTablet(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}

			pos, err := wr.ai.MasterPosition(ti, wr.ActionTimeout())
			if err != nil {
				rec.RecordError(err)
				return
			}

			log.Infof("Got master position for %v", si.MasterAlias)
			mu.Lock()
			result[si] = pos
			mu.Unlock()
		}(si)
	}
	wg.Wait()
	return result, rec.Error()
}
Example #14
func main() {
	flag.Parse()

	if *zookeeper == "" {
		printUsageErrorAndExit("You have to provide a zookeeper connection string using -zookeeper, or the ZOOKEEPER_PEERS environment variable")
	}

	conf := kazoo.NewConfig()
	conf.Timeout = time.Duration(*zookeeperTimeout) * time.Millisecond

	kz, err := kazoo.NewKazooFromConnectionString(*zookeeper, conf)
	if err != nil {
		printErrorAndExit(69, "Failed to connect to Zookeeper: %v", err)
	}
	defer func() { _ = kz.Close() }()

	topics, err := kz.Topics()
	if err != nil {
		printErrorAndExit(69, "Failed to get Kafka topics from Zookeeper: %v", err)
	}
	sort.Sort(topics)

	var (
		wg     sync.WaitGroup
		l      sync.Mutex
		stdout = make([]string, len(topics))
	)

	for i, topic := range topics {
		wg.Add(1)
		go func(i int, topic *kazoo.Topic) {
			defer wg.Done()

			buffer := bytes.NewBuffer(make([]byte, 0))

			partitions, err := topic.Partitions()
			if err != nil {
				printErrorAndExit(69, "Failed to get Kafka topic partitions from Zookeeper: %v", err)
			}

			fmt.Fprintf(buffer, "Topic: %s\tPartitions: %d\n", topic.Name, len(partitions))

			for _, partition := range partitions {
				leader, _ := partition.Leader()
				isr, _ := partition.ISR()

				fmt.Fprintf(buffer, "\tPartition: %d\tReplicas: %v\tLeader: %d\tISR: %v\n", partition.ID, partition.Replicas, leader, isr)
			}

			l.Lock()
			stdout[i] = buffer.String()
			l.Unlock()
		}(i, topic)
	}

	wg.Wait()
	for _, msg := range stdout {
		fmt.Print(msg)
	}
}
Example #15
// generic loop (executed in a goroutine) that periodically wakes up to walk
// the priority queue and call the callback
func (c *Channel) pqWorker(pq *pqueue.PriorityQueue, mutex *sync.Mutex, callback func(item *pqueue.Item)) {
	ticker := time.NewTicker(defaultWorkerWait)
	for {
		select {
		case <-ticker.C:
		case <-c.exitChan:
			goto exit
		}
		now := time.Now().UnixNano()
		for {
			mutex.Lock()
			item, _ := pq.PeekAndShift(now)
			mutex.Unlock()

			if item == nil {
				break
			}

			callback(item)
		}
	}

exit:
	log.Printf("CHANNEL(%s): closing ... pqueue worker", c.name)
	ticker.Stop()
}
Example #16
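// cacher returns a lookup function that resolves a file extension to the MIME type of a matching regexp in regMap, memoizing results (including misses) under a mutex.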
func cacher(regMap map[*regexp.Regexp]string) func(string) string {
	var cache = make(map[string]string)
	var cacheMu sync.Mutex

	return func(ext string) string {
		cacheMu.Lock()
		defer cacheMu.Unlock()

		memoized, ok := cache[ext]
		if ok {
			return memoized
		}

		bExt := []byte(ext)
		for regEx, mimeType := range regMap {
			if regEx != nil && regEx.Match(bExt) {
				memoized = mimeType
				break
			}
		}

		cache[ext] = memoized
		return memoized
	}
}
Example #17
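// Save queues a callback that writes f.Obj as indented JSON to a temporary file and atomically renames it over f.path; the pre-locked mutex makes Save block until that callback has run.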
func (f *File) Save() (err error) {
	var done sync.Mutex
	done.Lock()
	f.cbs <- func() {
		defer done.Unlock()
		tmpPath := f.path + "." + strconv.FormatInt(rand.Int63(), 10)
		var tmpF *os.File
		tmpF, err = os.Create(tmpPath)
		if err != nil {
			return
		}
		defer tmpF.Close()
		buf := new(bytes.Buffer)
		err = json.NewEncoder(buf).Encode(f.Obj)
		if err != nil {
			return
		}
		// indent
		indentBuf := new(bytes.Buffer)
		err = json.Indent(indentBuf, buf.Bytes(), "", "    ")
		if err != nil {
			return
		}
		_, err = tmpF.Write(indentBuf.Bytes())
		if err != nil {
			return
		}
		err = os.Rename(tmpPath, f.path)
		if err != nil {
			return
		}
	}
	done.Lock()
	return
}
Example #18
func TestDNSAdd(t *testing.T) {
	mtx := sync.Mutex{}
	published := map[string]entry{}
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mtx.Lock()
		defer mtx.Unlock()
		parts := strings.SplitN(r.URL.Path, "/", 4)
		containerID, ip := parts[2], net.ParseIP(parts[3])
		fqdn := r.FormValue("fqdn")
		published[fqdn] = entry{containerID, ip}
		w.WriteHeader(http.StatusNoContent)
	}))
	defer s.Close()

	client := weave.NewClient(s.URL)
	err := client.AddDNSEntry(mockHostname, mockContainerID, mockIP)
	if err != nil {
		t.Fatal(err)
	}

	want := map[string]entry{
		mockHostname: {mockContainerID, mockIP},
	}
	if !reflect.DeepEqual(published, want) {
		t.Fatal(test.Diff(published, want))
	}
}
Example #19
// GetNSQDTopics returns a []string containing all the topics
// produced by the given nsqd
func GetNSQDTopics(nsqdHTTPAddrs []string) ([]string, error) {
	topics := make([]string, 0)
	var lock sync.Mutex
	var wg sync.WaitGroup
	success := false
	for _, addr := range nsqdHTTPAddrs {
		wg.Add(1)
		endpoint := fmt.Sprintf("http://%s/stats?format=json", addr)
		log.Printf("NSQD: querying %s", endpoint)

		go func(endpoint string) {
			data, err := util.ApiRequest(endpoint)
			lock.Lock()
			defer lock.Unlock()
			defer wg.Done()
			if err != nil {
				log.Printf("ERROR: lookupd %s - %s", endpoint, err.Error())
				return
			}
			success = true
			topicList, _ := data.Get("topics").Array()
			for i := range topicList {
				topicInfo := data.Get("topics").GetIndex(i)
				topics = util.StringAdd(topics, topicInfo.Get("topic_name").MustString())
			}
		}(endpoint)
	}
	wg.Wait()
	sort.Strings(topics)
	if !success {
		return nil, errors.New("unable to query any nsqd")
	}
	return topics, nil
}
Example #20
func findImportGoPath(pkgName string, symbols map[string]bool) (string, bool, error) {
	// Fast path for the standard library.
	// In the common case we hopefully never have to scan the GOPATH, which can
	// be slow with moving disks.
	if pkg, rename, ok := findImportStdlib(pkgName, symbols); ok {
		return pkg, rename, nil
	}

	// TODO(sameer): look at the import lines for other Go files in the
	// local directory, since the user is likely to import the same packages
	// in the current Go file.  Return rename=true when the other Go files
	// use a renamed package that's also used in the current file.

	pkgIndexOnce.Do(loadPkgIndex)

	// Collect exports for packages with matching names.
	var wg sync.WaitGroup
	var pkgsMu sync.Mutex // guards pkgs
	// full importpath => exported symbol => True
	// e.g. "net/http" => "Client" => True
	pkgs := make(map[string]map[string]bool)
	pkgIndex.Lock()
	for _, pkg := range pkgIndex.m[pkgName] {
		wg.Add(1)
		go func(importpath, dir string) {
			defer wg.Done()
			exports := loadExports(dir)
			if exports != nil {
				pkgsMu.Lock()
				pkgs[importpath] = exports
				pkgsMu.Unlock()
			}
		}(pkg.importpath, pkg.dir)
	}
	pkgIndex.Unlock()
	wg.Wait()

	// Filter out packages missing required exported symbols.
	for symbol := range symbols {
		for importpath, exports := range pkgs {
			if !exports[symbol] {
				delete(pkgs, importpath)
			}
		}
	}
	if len(pkgs) == 0 {
		return "", false, nil
	}

	// If there are multiple candidate packages, the shortest one wins.
	// This is a heuristic to prefer the standard library (e.g. "bytes")
	// over e.g. "github.com/foo/bar/bytes".
	shortest := ""
	for importPath := range pkgs {
		if shortest == "" || len(importPath) < len(shortest) {
			shortest = importPath
		}
	}
	return shortest, false, nil
}
Example #21
func testChanSendBarrier(useSelect bool) {
	var wg sync.WaitGroup
	var globalMu sync.Mutex
	outer := 100
	inner := 100000
	if testing.Short() {
		outer = 10
		inner = 1000
	}
	for i := 0; i < outer; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var garbage []byte
			for j := 0; j < inner; j++ {
				_, err := doRequest(useSelect)
				_, ok := err.(myError)
				if !ok {
					panic(1)
				}
				garbage = make([]byte, 1<<10)
			}
			globalMu.Lock()
			global = garbage
			globalMu.Unlock()
		}()
	}
	wg.Wait()
}
Example #22
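// loginRun issues the login query and blocks until the reply callback has executed; the pre-locked mutex serves as a one-shot completion signal.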
func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error {
	var mutex sync.Mutex
	var replyErr error
	mutex.Lock()

	op := queryOp{}
	op.query = query
	op.collection = db + ".$cmd"
	op.limit = -1
	op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
		defer mutex.Unlock()

		if err != nil {
			replyErr = err
			return
		}

		err = bson.Unmarshal(docData, result)
		if err != nil {
			replyErr = err
		} else {
			// Must handle this within the read loop for the socket, so
			// that concurrent login requests are properly ordered.
			replyErr = f()
		}
	}

	err := socket.Query(&op)
	if err != nil {
		return err
	}
	mutex.Lock() // Wait.
	return replyErr
}
Example #23
// EnumerateAll runs fn for each blob in src.
// If fn returns an error, iteration stops and fn isn't called again.
// EnumerateAll will not return concurrently with fn.
func EnumerateAll(src BlobEnumerator, fn func(blob.SizedRef) error) error {
	const batchSize = 1000
	var mu sync.Mutex // protects returning with an error while fn is still running
	after := ""
	errc := make(chan error, 1)
	for {
		ch := make(chan blob.SizedRef, 16)
		n := 0
		go func() {
			var err error
			for sb := range ch {
				if err != nil {
					continue
				}
				mu.Lock()
				err = fn(sb)
				mu.Unlock()
				after = sb.Ref.String()
				n++
			}
			errc <- err
		}()
		err := src.EnumerateBlobs(ch, after, batchSize)
		if err != nil {
			mu.Lock() // make sure fn callback finished; no need to unlock
			return err
		}
		if err := <-errc; err != nil {
			return err
		}
		if n == 0 {
			return nil
		}
	}
}
Example #24
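// Each goroutine waits on its own condition variable and signals the next one, passing a wakeup around a ring of N goroutines; main uses cnd to wait for the first goroutine to start and the last to finish.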
func main() {
	var mtx sync.Mutex
	var cnd *sync.Cond
	var cnds [N]*sync.Cond
	var mtxs [N]sync.Mutex
	cnd = sync.NewCond(&mtx)
	for i := 0; i < N; i++ {
		cnds[i] = sync.NewCond(&mtxs[i])
	}
	for i := 0; i < N; i++ {
		go func(me int, m *sync.Mutex, c1 *sync.Cond, c2 *sync.Cond) {
			fmt.Printf("Hello, world. %d\n", me)
			if me == 0 {
				cnd.Signal()
			}
			for j := 0; j < 10000000; j++ {
				m.Lock()
				c1.Wait()
				m.Unlock()
				c2.Signal()
			}
			if me == N-1 {
				cnd.Signal()
			}
		}(i, &mtxs[i], cnds[i], cnds[(i+1)%N])
	}
	mtx.Lock()
	cnd.Wait()
	mtx.Unlock()
	cnds[0].Signal()
	mtx.Lock()
	cnd.Wait()
	mtx.Unlock()
}
Example #25
func (this *VMessOutboundHandler) handleResponse(session *raw.ClientSession, conn net.Conn, request *proto.RequestHeader, dest v2net.Destination, output chan<- *alloc.Buffer, finish *sync.Mutex) {
	defer finish.Unlock()
	defer close(output)

	reader := v2io.NewBufferedReader(conn)

	header, err := session.DecodeResponseHeader(reader)
	if err != nil {
		log.Warning("VMessOut: Failed to read response: ", err)
		return
	}
	go this.handleCommand(dest, header.Command)

	reader.SetCached(false)
	decryptReader := session.DecodeResponseBody(conn)

	var bodyReader v2io.Reader
	if request.Option.IsChunkStream() {
		bodyReader = vmessio.NewAuthChunkReader(decryptReader)
	} else {
		bodyReader = v2io.NewAdaptiveReader(decryptReader)
	}

	v2io.ReaderToChan(output, bodyReader)

	return
}
Example #26
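// GenMessages concurrently produces the requested number of messages per key to the given topic and returns the produced messages grouped by key, sorted by offset.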
func GenMessages(c *C, prefix, topic string, keys map[string]int) map[string][]*sarama.ProducerMessage {
	config := NewConfig()
	config.ClientID = "producer"
	config.Kafka.SeedPeers = testKafkaPeers
	producer, err := SpawnGracefulProducer(config)
	c.Assert(err, IsNil)

	messages := make(map[string][]*sarama.ProducerMessage)
	var wg sync.WaitGroup
	var lock sync.Mutex
	for key, count := range keys {
		for i := 0; i < count; i++ {
			key := key
			message := fmt.Sprintf("%s:%s:%d", prefix, key, i)
			spawn(&wg, func() {
				keyEncoder := sarama.StringEncoder(key)
				msgEncoder := sarama.StringEncoder(message)
				prodMsg, err := producer.Produce(topic, keyEncoder, msgEncoder)
				c.Assert(err, IsNil)
				log.Infof("*** produced: topic=%s, partition=%d, offset=%d, message=%s",
					topic, prodMsg.Partition, prodMsg.Offset, message)
				lock.Lock()
				messages[key] = append(messages[key], prodMsg)
				lock.Unlock()
			})
		}
	}
	wg.Wait()
	// Sort the produced messages in ascending order of their offsets.
	for _, keyMessages := range messages {
		sort.Sort(MessageSlice(keyMessages))
	}
	return messages
}
Example #27
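// scanPrefix merges every row under the given key prefix into the corpus, acquiring mu before the first merge and holding it until the scan completes.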
func (c *Corpus) scanPrefix(mu *sync.Mutex, s sorted.KeyValue, prefix string) (err error) {
	typeKey := typeOfKey(prefix)
	fn, ok := corpusMergeFunc[typeKey]
	if !ok {
		panic("No registered merge func for prefix " + prefix)
	}

	n, t0 := 0, time.Now()
	it := queryPrefixString(s, prefix)
	defer closeIterator(it, &err)
	for it.Next() {
		n++
		if n == 1 {
			mu.Lock()
			defer mu.Unlock()
		}
		if err := fn(c, it.KeyBytes(), it.ValueBytes()); err != nil {
			return err
		}
	}
	if logCorpusStats {
		d := time.Since(t0)
		log.Printf("Scanned prefix %q: %d rows, %v", prefix[:len(prefix)-1], n, d)
	}
	return nil
}
Example #28
// NewAddresses fetches EC2 IP address list from each region.
//
// If log is nil, defaultLogger is used instead.
func NewAddresses(clients *amazon.Clients, log logging.Logger) *Addresses {
	if log == nil {
		log = defaultLogger
	}
	a := newAddresses()
	var wg sync.WaitGroup
	var mu sync.Mutex // protects a.m
	for region, client := range clients.Regions() {
		wg.Add(1)
		go func(region string, client *amazon.Client) {
			defer wg.Done()
			addresses, err := client.Addresses()
			if err != nil {
				log.Error("[%s] fetching IP addresses error: %s", region, err)
				return
			}
			log.Info("[%s] fetched %d addresses", region, len(addresses))
			var ok bool
			mu.Lock()
			if _, ok = a.m[client]; !ok {
				a.m[client] = addresses
			}
			mu.Unlock()
			if ok {
				panic(fmt.Errorf("[%s] duplicated client=%p: %+v", region, client, addresses))
			}
		}(region, client)
	}
	wg.Wait()
	return a
}
Example #29
// DeprecateImages sets the given deprecation state on the named images.
func (g *GceImages) DeprecateImages(opts *DeprecateOptions) error {
	var (
		wg          sync.WaitGroup
		mu          sync.Mutex // protects multiErrors
		multiErrors error
	)

	for _, n := range opts.Names {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			st := &compute.DeprecationStatus{
				State: opts.State,
			}

			_, err := g.svc.Deprecate(g.config.ProjectID, name, st).Do()
			if err != nil {
				mu.Lock()
				multiErrors = multierror.Append(multiErrors, err)
				mu.Unlock()
			}
		}(n)
	}

	wg.Wait()
	return multiErrors
}
Example #30
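// benchmarkMutexLock measures the cost of an uncontended Lock/Unlock pair.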
func benchmarkMutexLock(b *testing.B) {
	var lock sync.Mutex
	for i := 0; i < b.N; i++ {
		lock.Lock()
		lock.Unlock()
	}
}