Example #1
File: server.go Project: logan/heim
func Serve(ctx scope.Context, addr string) {
	http.Handle("/metrics", prometheus.Handler())

	listener, err := net.Listen("tcp", addr)
	if err != nil {
		ctx.Terminate(err)
	}

	closed := false
	m := sync.Mutex{}
	closeListener := func() {
		m.Lock()
		if !closed {
			listener.Close()
			closed = true
		}
		m.Unlock()
	}

	// Spin off goroutine to watch ctx and close listener if shutdown requested.
	go func() {
		<-ctx.Done()
		closeListener()
	}()

	if err := http.Serve(listener, nil); err != nil {
		fmt.Printf("http[%s]: %s\n", addr, err)
		ctx.Terminate(err)
	}

	closeListener()
	ctx.WaitGroup().Done()
}
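The closed flag guarded by a mutex makes listener.Close() idempotent, so the shutdown goroutine and the normal exit path can both call closeListener without double-closing. A minimal stand-alone sketch of the same idea using sync.Once instead of a flag (my own illustration, not how heim structures it):

package main

import (
	"net"
	"sync"
)

// onceCloser closes the wrapped listener at most once, no matter how many
// goroutines call Close; sync.Once replaces the closed flag and mutex above.
type onceCloser struct {
	l    net.Listener
	once sync.Once
}

func (c *onceCloser) Close() {
	c.once.Do(func() { c.l.Close() })
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	c := &onceCloser{l: l}
	go c.Close() // e.g. triggered by a shutdown signal
	c.Close()    // and again on the normal exit path
}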
Example #2
// Run runs the query concurrently, and returns the results.
func (q *Query) Run() []interface{} {
	rand.Seed(time.Now().UnixNano())
	var w sync.WaitGroup
	var l sync.Mutex
	places := make([]interface{}, len(q.Journey))
	for i, r := range q.Journey {
		w.Add(1)
		go func(types string, i int) {
			defer w.Done()
			response, err := q.find(types)
			if err != nil {
				log.Println("Failed to find places:", err)
				return
			}
			if len(response.Results) == 0 {
				log.Println("No places found for", types)
				return
			}
			for _, result := range response.Results {
				for _, photo := range result.Photos {
					photo.URL = "https://maps.googleapis.com/maps/api/place/photo?" +
						"maxwidth=1000&photoreference=" + photo.PhotoRef + "&key=" + APIKey
				}
			}
			randI := rand.Intn(len(response.Results))
			l.Lock()
			places[i] = response.Results[randI]
			l.Unlock()
		}(r, i)
	}
	w.Wait() // wait for everything to finish
	return places
}
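Each goroutine above writes only its own places[i], so the writes never touch the same element and w.Wait() orders them before the final read; the mutex is a conservative guard rather than a strict requirement. A stripped-down sketch of that per-index fan-out (my own, not from the project):

package main

import (
	"fmt"
	"sync"
)

func main() {
	queries := []string{"museum", "cafe", "park"}
	results := make([]string, len(queries))

	var wg sync.WaitGroup
	for i, q := range queries {
		wg.Add(1)
		go func(i int, q string) {
			defer wg.Done()
			// Each goroutine owns exactly one slot, so no lock is needed;
			// wg.Wait() below happens-before the read of results.
			results[i] = "found: " + q
		}(i, q)
	}
	wg.Wait()
	fmt.Println(results)
}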
Example #3
func (a *apiServer) runPipeline(pipelineInfo *pps.PipelineInfo) error {
	ctx, cancel := context.WithCancel(context.Background())
	a.lock.Lock()
	a.cancelFuncs[*pipelineInfo.Pipeline] = cancel
	a.lock.Unlock()
	var loopErr error
	//TODO this gets really weird with branching... we need to figure out what that looks like.
	mostRecentCommit := make(map[pfs.Repo]*pfs.Commit)
	var lock sync.Mutex
	var wg sync.WaitGroup
	for _, inputRepo := range pipelineInfo.InputRepo {
		inputRepo := inputRepo
		wg.Add(1)
		go func() {
			defer wg.Done()
			var lastCommit *pfs.Commit
			listCommitRequest := &pfs.ListCommitRequest{
				Repo:       inputRepo,
				CommitType: pfs.CommitType_COMMIT_TYPE_READ,
				From:       lastCommit,
				Block:      true,
			}
			commitInfos, err := a.pfsAPIClient.ListCommit(ctx, listCommitRequest)
			if err != nil && loopErr == nil {
				loopErr = err
				return
			}
			for _, commitInfo := range commitInfos.CommitInfo {
				lock.Lock()
				mostRecentCommit[*inputRepo] = commitInfo.Commit
				var commits []*pfs.Commit
				for _, commit := range mostRecentCommit {
					commits = append(commits, commit)
				}
				lock.Unlock()
				if len(commits) < len(pipelineInfo.InputRepo) {
					// we don't yet have a commit for every input repo so there's no way to run the job
					continue
				}
				outParentCommit, err := a.bestParent(pipelineInfo, commitInfo)
				if err != nil && loopErr == nil {
					loopErr = err
					return
				}
				_, err = a.jobAPIClient.CreateJob(
					ctx,
					&pps.CreateJobRequest{
						Spec: &pps.CreateJobRequest_Pipeline{
							Pipeline: pipelineInfo.Pipeline,
						},
						InputCommit:  []*pfs.Commit{commitInfo.Commit},
						OutputParent: outParentCommit,
					},
				)
			}
		}()
	}
	wg.Wait()
	return loopErr
}
Example #4
// GetLookupdTopicChannels returns a []string containing a union of the channels
// from all the given lookupd for the given topic
func GetLookupdTopicChannels(topic string, lookupdHTTPAddrs []string) ([]string, error) {
	success := false
	allChannels := make([]string, 0)
	var lock sync.Mutex
	var wg sync.WaitGroup
	for _, addr := range lookupdHTTPAddrs {
		wg.Add(1)
		endpoint := fmt.Sprintf("http://%s/channels?topic=%s", addr, url.QueryEscape(topic))
		log.Printf("LOOKUPD: querying %s", endpoint)
		go func(endpoint string) {
			data, err := util.ApiRequest(endpoint)
			lock.Lock()
			defer lock.Unlock()
			defer wg.Done()
			if err != nil {
				log.Printf("ERROR: lookupd %s - %s", endpoint, err.Error())
				return
			}
			success = true
			// {"data":{"channels":["test"]}}
			channels, _ := data.Get("channels").StringArray()
			allChannels = util.StringUnion(allChannels, channels)
		}(endpoint)
	}
	wg.Wait()
	sort.Strings(allChannels)
	if success == false {
		return nil, errors.New("unable to query any lookupd")
	}
	return allChannels, nil
}
Example #5
File: serve.go Project: logan/heim
func (cmd *serveEmbedCmd) run(ctx scope.Context, args []string) error {
	listener, err := net.Listen("tcp", cmd.addr)
	if err != nil {
		return err
	}

	closed := false
	m := sync.Mutex{}
	closeListener := func() {
		m.Lock()
		if !closed {
			listener.Close()
			closed = true
		}
		m.Unlock()
	}

	// Spin off goroutine to watch ctx and close listener if shutdown requested.
	go func() {
		<-ctx.Done()
		closeListener()
	}()

	if err := http.Serve(listener, cmd); err != nil {
		fmt.Printf("http[%s]: %s\n", cmd.addr, err)
		return err
	}

	closeListener()
	ctx.WaitGroup().Done()
	return ctx.Err()
}
Example #6
// Put implements the Putter interface.
func (mp *MultiPutter) Put(username string, creds map[string]interface{}) error {
	var (
		err error
		mu  sync.Mutex
		wg  sync.WaitGroup
	)

	for _, p := range mp.Putters {
		wg.Add(1)

		go func(p Putter) {
			defer wg.Done()

			if e := p.Put(username, creds); e != nil {
				mu.Lock()
				err = multierror.Append(err, e)
				mu.Unlock()
			}
		}(p)
	}

	wg.Wait()

	return err
}
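Here the mutex serializes appends to a single shared error value built with github.com/hashicorp/go-multierror. A sketch of the same collect-errors-from-workers pattern using only the standard library's errors.Join (available since Go 1.20); this is an alternative of mine, not the project's code:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// putAll runs every function concurrently and joins whatever errors occur.
func putAll(fns []func() error) error {
	var (
		mu   sync.Mutex
		errs []error
		wg   sync.WaitGroup
	)
	for _, fn := range fns {
		wg.Add(1)
		go func(fn func() error) {
			defer wg.Done()
			if err := fn(); err != nil {
				mu.Lock()
				errs = append(errs, err) // the mutex guards the shared slice
				mu.Unlock()
			}
		}(fn)
	}
	wg.Wait()
	return errors.Join(errs...) // nil when errs is empty
}

func main() {
	err := putAll([]func() error{
		func() error { return nil },
		func() error { return errors.New("backend unavailable") },
	})
	fmt.Println(err)
}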
Example #7
func startCommunicate(request *protocol.VMessRequest, dest v2net.Destination, ray core.OutboundRay, firstPacket v2net.Packet) error {
	conn, err := net.Dial(dest.Network(), dest.Address().String())
	if err != nil {
		log.Error("Failed to open %s: %v", dest.String(), err)
		if ray != nil {
			close(ray.OutboundOutput())
		}
		return err
	}
	log.Info("VMessOut: Tunneling request to %s via %s", request.Address.String(), dest.String())

	defer conn.Close()

	input := ray.OutboundInput()
	output := ray.OutboundOutput()
	var requestFinish, responseFinish sync.Mutex
	requestFinish.Lock()
	responseFinish.Lock()

	go handleRequest(conn, request, firstPacket, input, &requestFinish)
	go handleResponse(conn, request, output, &responseFinish, dest.IsUDP())

	requestFinish.Lock()
	if tcpConn, ok := conn.(*net.TCPConn); ok {
		tcpConn.CloseWrite()
	}
	responseFinish.Lock()
	return nil
}
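Examples 7, 19, 24, and 28 use a pre-locked sync.Mutex as a one-shot completion signal: the caller locks it, the worker unlocks it when finished, and the caller's second Lock blocks until that happens (a locked Mutex is not tied to the goroutine that locked it, so unlocking from another goroutine is allowed). A minimal sketch of just that signaling pattern; a channel or sync.WaitGroup would be the more common choice today:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var done sync.Mutex
	done.Lock() // held by main until the worker finishes

	go func() {
		defer done.Unlock() // signals completion to the blocked Lock below
		time.Sleep(100 * time.Millisecond)
		fmt.Println("worker finished")
	}()

	done.Lock() // blocks until the worker's Unlock
	fmt.Println("main resumes")
}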
Example #8
func cacher(regMap map[*regexp.Regexp]string) func(string) string {
	var cache = make(map[string]string)
	var cacheMu sync.Mutex

	return func(ext string) string {
		cacheMu.Lock()
		defer cacheMu.Unlock()

		memoized, ok := cache[ext]
		if ok {
			return memoized
		}

		bExt := []byte(ext)
		for regEx, mimeType := range regMap {
			if regEx != nil && regEx.Match(bExt) {
				memoized = mimeType
				break
			}
		}

		cache[ext] = memoized
		return memoized
	}
}
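A short usage sketch for the memoizing closure above, assuming cacher from this example is in the same package; concurrent callers are safe because every lookup runs under cacheMu:

package main

import (
	"fmt"
	"regexp"
	"sync"
)

// cacher is the function defined in the example above (assumed in scope here).

func main() {
	extToMime := cacher(map[*regexp.Regexp]string{
		regexp.MustCompile(`\.png$`):   "image/png",
		regexp.MustCompile(`\.html?$`): "text/html",
	})

	var wg sync.WaitGroup
	for _, name := range []string{"a.png", "b.html", "a.png"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			fmt.Println(name, "->", extToMime(name)) // the closure locks cacheMu internally
		}(name)
	}
	wg.Wait()
}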
Example #9
// generic loop (executed in a goroutine) that periodically wakes up to walk
// the priority queue and call the callback
func (c *Channel) pqWorker(pq *pqueue.PriorityQueue, mutex *sync.Mutex, callback func(item *pqueue.Item)) {
	ticker := time.NewTicker(defaultWorkerWait)
	for {
		select {
		case <-ticker.C:
		case <-c.exitChan:
			goto exit
		}
		now := time.Now().UnixNano()
		for {
			mutex.Lock()
			item, _ := pq.PeekAndShift(now)
			mutex.Unlock()

			if item == nil {
				break
			}

			callback(item)
		}
	}

exit:
	log.Printf("CHANNEL(%s): closing ... pqueue worker", c.name)
	ticker.Stop()
}
Example #10
func benchmarkMutexLock(b *testing.B) {
	var lock sync.Mutex
	for i := 0; i < b.N; i++ {
		lock.Lock()
		lock.Unlock()
	}
}
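Because the helper's name is lower-case, the test runner will not discover it on its own; a tiny wrapper in the same _test.go file (a hypothetical addition of mine, not part of the original package) makes it runnable with go test -bench=Mutex:

// BenchmarkMutex exposes the helper above to `go test -bench=Mutex`.
func BenchmarkMutex(b *testing.B) {
	benchmarkMutexLock(b)
}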
Example #11
func memoizeBytes() byteDescription {
	cache := map[int64]string{}
	suffixes := []string{"B", "KB", "MB", "GB", "TB", "PB"}
	maxLen := len(suffixes) - 1

	var cacheMu sync.Mutex

	return func(b int64) string {
		cacheMu.Lock()
		defer cacheMu.Unlock()

		description, ok := cache[b]
		if ok {
			return description
		}

		bf := float64(b)
		i := 0
		description = ""
		for {
			if bf/BytesPerKB < 1 || i >= maxLen {
				description = fmt.Sprintf("%.2f%s", bf, suffixes[i])
				break
			}
			bf /= BytesPerKB
			i += 1
		}
		cache[b] = description
		return description
	}
}
Example #12
func createPodWorkers() (*podWorkers, map[types.UID][]syncPodRecord) {
	lock := sync.Mutex{}
	processed := make(map[types.UID][]syncPodRecord)
	fakeRecorder := &record.FakeRecorder{}
	fakeRuntime := &containertest.FakeRuntime{}
	fakeCache := containertest.NewFakeCache(fakeRuntime)
	podWorkers := newPodWorkers(
		func(options syncPodOptions) error {
			func() {
				lock.Lock()
				defer lock.Unlock()
				pod := options.pod
				processed[pod.UID] = append(processed[pod.UID], syncPodRecord{
					name:       pod.Name,
					updateType: options.updateType,
				})
			}()
			return nil
		},
		fakeRecorder,
		queue.NewBasicWorkQueue(&util.RealClock{}),
		time.Second,
		time.Second,
		fakeCache,
	)
	return podWorkers, processed
}
Example #13
func artificialSeed(input []string, power int) [][]string {
	var result [][]string

	if isChainEmpty(input) {
		input = randomChain()[:1]
	}

	var wg sync.WaitGroup
	var mtx sync.Mutex
	for _, word := range input {
		if word == stop {
			break
		}
		for i := 0; i < power; i++ {
			wg.Add(1)
			go func(word string, i int) {
				defer wg.Done()
				for _, mutation := range createSeeds(mutateChain(word, randomChain())) {
					mtx.Lock()
					result = append(result, mutation)
					mtx.Unlock()
					runtime.Gosched()
				}
			}(word, i)
		}
	}
	wg.Wait()

	/*if config.Debug {
		log.Println("artificialSeed(", dump(input)+", "+fmt.Sprint(power)+")="+fmt.Sprint(result))
	}*/

	return result
}
Example #14
func goroutineWork(timestamps *[]int64, mutex *sync.Mutex, i int64, arrayCursor *int, url, token string) error {
	getParam := "?page="
	if strings.Contains(url, "?") {
		getParam = "&page="
	}

	pageUrl := url + getParam + strconv.Itoa(int(i))
	stargazers, _, err := getStargazers(pageUrl, token)
	if err != nil {
		return err
	}

	for _, star := range stargazers {
		var t time.Time
		t, err = time.Parse(time.RFC3339, star.Timestamp)
		if err != nil {
			return fmt.Errorf("An error occurred while parsing the timestamp: %v", err)
		}
		timestamp := t.Unix()
		mutex.Lock()
		(*timestamps)[*arrayCursor] = timestamp
		(*arrayCursor) = (*arrayCursor) + 1
		mutex.Unlock()
	}

	return nil
}
Example #15
func GenMessages(c *C, prefix, topic string, keys map[string]int) map[string][]*sarama.ProducerMessage {
	config := NewConfig()
	config.ClientID = "producer"
	config.Kafka.SeedPeers = testKafkaPeers
	producer, err := SpawnGracefulProducer(config)
	c.Assert(err, IsNil)

	messages := make(map[string][]*sarama.ProducerMessage)
	var wg sync.WaitGroup
	var lock sync.Mutex
	for key, count := range keys {
		for i := 0; i < count; i++ {
			key := key
			message := fmt.Sprintf("%s:%s:%d", prefix, key, i)
			spawn(&wg, func() {
				keyEncoder := sarama.StringEncoder(key)
				msgEncoder := sarama.StringEncoder(message)
				prodMsg, err := producer.Produce(topic, keyEncoder, msgEncoder)
				c.Assert(err, IsNil)
				log.Infof("*** produced: topic=%s, partition=%d, offset=%d, message=%s",
					topic, prodMsg.Partition, prodMsg.Offset, message)
				lock.Lock()
				messages[key] = append(messages[key], prodMsg)
				lock.Unlock()
			})
		}
	}
	wg.Wait()
	// Sort the produced messages in ascending order of their offsets.
	for _, keyMessages := range messages {
		sort.Sort(MessageSlice(keyMessages))
	}
	return messages
}
Example #16
func main() {
	flag.Parse()

	if *zookeeper == "" {
		printUsageErrorAndExit("You have to provide a zookeeper connection string using -zookeeper, or the ZOOKEEPER_PEERS environment variable")
	}

	conf := kazoo.NewConfig()
	conf.Timeout = time.Duration(*zookeeperTimeout) * time.Millisecond

	kz, err := kazoo.NewKazooFromConnectionString(*zookeeper, conf)
	if err != nil {
		printErrorAndExit(69, "Failed to connect to Zookeeper: %v", err)
	}
	defer func() { _ = kz.Close() }()

	topics, err := kz.Topics()
	if err != nil {
		printErrorAndExit(69, "Failed to get Kafka topics from Zookeeper: %v", err)
	}
	sort.Sort(topics)

	var (
		wg     sync.WaitGroup
		l      sync.Mutex
		stdout = make([]string, len(topics))
	)

	for i, topic := range topics {
		wg.Add(1)
		go func(i int, topic *kazoo.Topic) {
			defer wg.Done()

			buffer := bytes.NewBuffer(make([]byte, 0))

			partitions, err := topic.Partitions()
			if err != nil {
				printErrorAndExit(69, "Failed to get Kafka topic partitions from Zookeeper: %v", err)
			}

			fmt.Fprintf(buffer, "Topic: %s\tPartitions: %d\n", topic.Name, len(partitions))

			for _, partition := range partitions {
				leader, _ := partition.Leader()
				isr, _ := partition.ISR()

				fmt.Fprintf(buffer, "\tPartition: %d\tReplicas: %v\tLeader: %d\tISR: %v\n", partition.ID, partition.Replicas, leader, isr)
			}

			l.Lock()
			stdout[i] = buffer.String()
			l.Unlock()
		}(i, topic)
	}

	wg.Wait()
	for _, msg := range stdout {
		fmt.Print(msg)
	}
}
Example #17
// NewAddresses fetches EC2 IP address list from each region.
//
// If log is nil, defaultLogger is used instead.
func NewAddresses(clients *amazon.Clients, log logging.Logger) *Addresses {
	if log == nil {
		log = defaultLogger
	}
	a := newAddresses()
	var wg sync.WaitGroup
	var mu sync.Mutex // protects a.m
	for region, client := range clients.Regions() {
		wg.Add(1)
		go func(region string, client *amazon.Client) {
			defer wg.Done()
			addresses, err := client.Addresses()
			if err != nil {
				log.Error("[%s] fetching IP addresses error: %s", region, err)
				return
			}
			log.Info("[%s] fetched %d addresses", region, len(addresses))
			var ok bool
			mu.Lock()
			if _, ok = a.m[client]; !ok {
				a.m[client] = addresses
			}
			mu.Unlock()
			if ok {
				panic(fmt.Errorf("[%s] duplicated client=%p: %+v", region, client, addresses))
			}
		}(region, client)
	}
	wg.Wait()
	return a
}
Example #18
func diameter(digests []string, diffStore diff.DiffStore) int {
	// TODO Parallelize.
	lock := sync.Mutex{}
	max := 0
	wg := sync.WaitGroup{}
	for {
		if len(digests) <= 2 {
			break
		}
		wg.Add(1)
		go func(d1 string, d2 []string) {
			defer wg.Done()
			dms, err := diffStore.Get(d1, d2)
			if err != nil {
				glog.Errorf("Unable to get diff: %s", err)
				return
			}
			localMax := 0
			for _, dm := range dms {
				if dm.NumDiffPixels > localMax {
					localMax = dm.NumDiffPixels
				}
			}
			lock.Lock()
			defer lock.Unlock()
			if localMax > max {
				max = localMax
			}
		}(digests[0], digests[1:2])
		digests = digests[1:]
	}
	wg.Wait()
	return max
}
Example #19
func handleResponse(conn net.Conn, request *protocol.VMessRequest, output chan<- *alloc.Buffer, finish *sync.Mutex, isUDP bool) {
	defer finish.Unlock()
	defer close(output)
	responseKey := md5.Sum(request.RequestKey[:])
	responseIV := md5.Sum(request.RequestIV[:])

	decryptResponseReader, err := v2io.NewAesDecryptReader(responseKey[:], responseIV[:], conn)
	if err != nil {
		log.Error("VMessOut: Failed to create decrypt reader: %v", err)
		return
	}

	buffer, err := v2net.ReadFrom(decryptResponseReader, nil)
	if err != nil {
		log.Error("VMessOut: Failed to read VMess response (%d bytes): %v", buffer.Len(), err)
		return
	}
	if buffer.Len() < 4 || !bytes.Equal(buffer.Value[:4], request.ResponseHeader[:]) {
		log.Warning("VMessOut: unexepcted response header. The connection is probably hijacked.")
		return
	}
	log.Info("VMessOut received %d bytes from %s", buffer.Len()-4, conn.RemoteAddr().String())

	buffer.SliceFrom(4)
	output <- buffer

	if !isUDP {
		v2net.ReaderToChan(output, decryptResponseReader)
	}

	return
}
Example #20
func (wr *Wrangler) getMastersPosition(shards []*topo.ShardInfo) (map[*topo.ShardInfo]myproto.ReplicationPosition, error) {
	mu := sync.Mutex{}
	result := make(map[*topo.ShardInfo]myproto.ReplicationPosition)

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			log.Infof("Gathering master position for %v", si.MasterAlias)
			ti, err := wr.ts.GetTablet(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}

			pos, err := wr.ai.MasterPosition(ti, wr.ActionTimeout())
			if err != nil {
				rec.RecordError(err)
				return
			}

			log.Infof("Got master position for %v", si.MasterAlias)
			mu.Lock()
			result[si] = pos
			mu.Unlock()
		}(si)
	}
	wg.Wait()
	return result, rec.Error()
}
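Examples 20, 25, and 26 all guard writes to a shared map. Unlike the per-index slice writes in Example 2, concurrent map writes are never safe without synchronization, so the mutex here is mandatory. A stripped-down sketch of the same gather-into-a-map pattern (illustrative names only):

package main

import (
	"fmt"
	"sync"
)

func main() {
	shards := []string{"shard-0", "shard-40", "shard-80"}

	var (
		mu        sync.Mutex
		wg        sync.WaitGroup
		positions = make(map[string]int)
	)
	for i, s := range shards {
		wg.Add(1)
		go func(s string, pos int) {
			defer wg.Done()
			mu.Lock()
			positions[s] = pos // map writes require the lock
			mu.Unlock()
		}(s, i)
	}
	wg.Wait()
	fmt.Println(positions)
}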
Example #21
// GetNSQDTopics returns a []string containing all the topics
// produced by the given nsqd
func GetNSQDTopics(nsqdHTTPAddrs []string) ([]string, error) {
	topics := make([]string, 0)
	var lock sync.Mutex
	var wg sync.WaitGroup
	success := false
	for _, addr := range nsqdHTTPAddrs {
		wg.Add(1)
		endpoint := fmt.Sprintf("http://%s/stats?format=json", addr)
		log.Printf("NSQD: querying %s", endpoint)

		go func(endpoint string) {
			data, err := util.ApiRequest(endpoint)
			lock.Lock()
			defer lock.Unlock()
			defer wg.Done()
			if err != nil {
				log.Printf("ERROR: lookupd %s - %s", endpoint, err.Error())
				return
			}
			success = true
			topicList, _ := data.Get("topics").Array()
			for i := range topicList {
				topicInfo := data.Get("topics").GetIndex(i)
				topics = util.StringAdd(topics, topicInfo.Get("topic_name").MustString())
			}
		}(endpoint)
	}
	wg.Wait()
	sort.Strings(topics)
	if success == false {
		return nil, errors.New("unable to query any nsqd")
	}
	return topics, nil
}
Example #22
// Returns the host containers, non-Kubernetes containers, and an error (if any).
func (self *kubeNodeMetrics) getNodesInfo(nodeList *nodes.NodeList, start, end time.Time) ([]api.Container, []api.Container, error) {
	var (
		lock sync.Mutex
		wg   sync.WaitGroup
	)
	hostContainers := make([]api.Container, 0, len(nodeList.Items))
	rawContainers := make([]api.Container, 0, len(nodeList.Items))
	for host, info := range nodeList.Items {
		wg.Add(1)
		go func(host nodes.Host, info nodes.Info) {
			defer wg.Done()
			if hostContainer, containers, err := self.updateStats(host, info, start, end); err == nil {
				lock.Lock()
				defer lock.Unlock()
				if hostContainers != nil {
					hostContainers = append(hostContainers, *hostContainer)
				}
				rawContainers = append(rawContainers, containers...)
			}
		}(host, info)
	}
	wg.Wait()

	return hostContainers, rawContainers, nil
}
Example #23
func testChanSendBarrier(useSelect bool) {
	var wg sync.WaitGroup
	var globalMu sync.Mutex
	outer := 100
	inner := 100000
	if testing.Short() {
		outer = 10
		inner = 1000
	}
	for i := 0; i < outer; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var garbage []byte
			for j := 0; j < inner; j++ {
				_, err := doRequest(useSelect)
				_, ok := err.(myError)
				if !ok {
					panic(1)
				}
				garbage = make([]byte, 1<<10)
			}
			globalMu.Lock()
			global = garbage
			globalMu.Unlock()
		}()
	}
	wg.Wait()
}
Example #24
func (this *VMessOutboundHandler) handleRequest(session *encoding.ClientSession, conn internet.Connection, request *protocol.RequestHeader, payload *alloc.Buffer, input v2io.Reader, finish *sync.Mutex) {
	defer finish.Unlock()

	writer := v2io.NewBufferedWriter(conn)
	defer writer.Release()
	session.EncodeRequestHeader(request, writer)

	bodyWriter := session.EncodeRequestBody(writer)
	var streamWriter v2io.Writer = v2io.NewAdaptiveWriter(bodyWriter)
	if request.Option.Has(protocol.RequestOptionChunkStream) {
		streamWriter = vmessio.NewAuthChunkWriter(streamWriter)
	}
	if err := streamWriter.Write(payload); err != nil {
		conn.SetReusable(false)
	}
	writer.SetCached(false)

	err := v2io.Pipe(input, streamWriter)
	if err != io.EOF {
		conn.SetReusable(false)
	}

	if request.Option.Has(protocol.RequestOptionChunkStream) {
		err := streamWriter.Write(alloc.NewSmallBuffer().Clear())
		if err != nil {
			conn.SetReusable(false)
		}
	}
	streamWriter.Release()
	return
}
Example #25
func findImportGoPath(pkgName string, symbols map[string]bool) (string, bool, error) {
	// Fast path for the standard library.
	// In the common case we hopefully never have to scan the GOPATH, which can
	// be slow with moving disks.
	if pkg, rename, ok := findImportStdlib(pkgName, symbols); ok {
		return pkg, rename, nil
	}

	// TODO(sameer): look at the import lines for other Go files in the
	// local directory, since the user is likely to import the same packages
	// in the current Go file.  Return rename=true when the other Go files
	// use a renamed package that's also used in the current file.

	pkgIndexOnce.Do(loadPkgIndex)

	// Collect exports for packages with matching names.
	var wg sync.WaitGroup
	var pkgsMu sync.Mutex // guards pkgs
	// full importpath => exported symbol => True
	// e.g. "net/http" => "Client" => True
	pkgs := make(map[string]map[string]bool)
	pkgIndex.Lock()
	for _, pkg := range pkgIndex.m[pkgName] {
		wg.Add(1)
		go func(importpath, dir string) {
			defer wg.Done()
			exports := loadExports(dir)
			if exports != nil {
				pkgsMu.Lock()
				pkgs[importpath] = exports
				pkgsMu.Unlock()
			}
		}(pkg.importpath, pkg.dir)
	}
	pkgIndex.Unlock()
	wg.Wait()

	// Filter out packages missing required exported symbols.
	for symbol := range symbols {
		for importpath, exports := range pkgs {
			if !exports[symbol] {
				delete(pkgs, importpath)
			}
		}
	}
	if len(pkgs) == 0 {
		return "", false, nil
	}

	// If there are multiple candidate packages, the shortest one wins.
	// This is a heuristic to prefer the standard library (e.g. "bytes")
	// over e.g. "github.com/foo/bar/bytes".
	shortest := ""
	for importPath := range pkgs {
		if shortest == "" || len(importPath) < len(shortest) {
			shortest = importPath
		}
	}
	return shortest, false, nil
}
Example #26
func TestDNSAdd(t *testing.T) {
	mtx := sync.Mutex{}
	published := map[string]entry{}
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mtx.Lock()
		defer mtx.Unlock()
		parts := strings.SplitN(r.URL.Path, "/", 4)
		containerID, ip := parts[2], net.ParseIP(parts[3])
		fqdn := r.FormValue("fqdn")
		published[fqdn] = entry{containerID, ip}
		w.WriteHeader(http.StatusNoContent)
	}))
	defer s.Close()

	client := weave.NewClient(s.URL)
	err := client.AddDNSEntry(mockHostname, mockContainerID, mockIP)
	if err != nil {
		t.Fatal(err)
	}

	want := map[string]entry{
		mockHostname: {mockContainerID, mockIP},
	}
	if !reflect.DeepEqual(published, want) {
		t.Fatal(test.Diff(published, want))
	}
}
Example #27
func (c *Corpus) scanPrefix(mu *sync.Mutex, s sorted.KeyValue, prefix string) (err error) {
	typeKey := typeOfKey(prefix)
	fn, ok := corpusMergeFunc[typeKey]
	if !ok {
		panic("No registered merge func for prefix " + prefix)
	}

	n, t0 := 0, time.Now()
	it := queryPrefixString(s, prefix)
	defer closeIterator(it, &err)
	for it.Next() {
		n++
		if n == 1 {
			mu.Lock()
			defer mu.Unlock()
		}
		if err := fn(c, it.KeyBytes(), it.ValueBytes()); err != nil {
			return err
		}
	}
	if logCorpusStats {
		d := time.Since(t0)
		log.Printf("Scanned prefix %q: %d rows, %v", prefix[:len(prefix)-1], n, d)
	}
	return nil
}
Example #28
func (this *VMessOutboundHandler) handleResponse(session *raw.ClientSession, conn net.Conn, request *proto.RequestHeader, dest v2net.Destination, output chan<- *alloc.Buffer, finish *sync.Mutex) {
	defer finish.Unlock()
	defer close(output)

	reader := v2io.NewBufferedReader(conn)

	header, err := session.DecodeResponseHeader(reader)
	if err != nil {
		log.Warning("VMessOut: Failed to read response: ", err)
		return
	}
	go this.handleCommand(dest, header.Command)

	reader.SetCached(false)
	decryptReader := session.DecodeResponseBody(conn)

	var bodyReader v2io.Reader
	if request.Option.IsChunkStream() {
		bodyReader = vmessio.NewAuthChunkReader(decryptReader)
	} else {
		bodyReader = v2io.NewAdaptiveReader(decryptReader)
	}

	v2io.ReaderToChan(output, bodyReader)

	return
}
Example #29
// DeprecateImages sets the deprecation status of the given images
func (g *GceImages) DeprecateImages(opts *DeprecateOptions) error {
	var (
		wg          sync.WaitGroup
		mu          sync.Mutex // protects multiErrors
		multiErrors error
	)

	for _, n := range opts.Names {
		wg.Add(1)
		go func(name string) {
			st := &compute.DeprecationStatus{
				State: opts.State,
			}

			_, err := g.svc.Deprecate(g.config.ProjectID, name, st).Do()
			if err != nil {
				mu.Lock()
				multiErrors = multierror.Append(multiErrors, err)
				mu.Unlock()
			}

			wg.Done()
		}(n)
	}

	wg.Wait()
	return multiErrors
}
Example #30
func (n *dcSelector) Select(service string, opts ...selector.SelectOption) (selector.Next, error) {
	services, err := n.opts.Registry.GetService(service)
	if err != nil {
		return nil, err
	}

	if len(services) == 0 {
		return nil, selector.ErrNotFound
	}

	var nodes []*registry.Node

	// Filter the nodes for datacenter
	for _, service := range services {
		for _, node := range service.Nodes {
			if node.Metadata["datacenter"] == datacenter {
				nodes = append(nodes, node)
			}
		}
	}

	if len(nodes) == 0 {
		return nil, selector.ErrNotFound
	}

	var i int
	var mtx sync.Mutex

	return func() (*registry.Node, error) {
		mtx.Lock()
		defer mtx.Unlock()
		i++
		return nodes[i%len(nodes)], nil
	}, nil
}
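The returned Next function is a closure over i and mtx, so every call advances a shared round-robin cursor safely even when many goroutines pick nodes at once. A self-contained sketch of that closure pattern (my own illustration):

package main

import (
	"fmt"
	"sync"
)

// roundRobin returns a function that hands out items in rotation; the mutex
// keeps the shared cursor consistent across concurrent callers.
func roundRobin(items []string) func() string {
	var (
		mu sync.Mutex
		i  int
	)
	return func() string {
		mu.Lock()
		defer mu.Unlock()
		item := items[i%len(items)]
		i++
		return item
	}
}

func main() {
	next := roundRobin([]string{"node-a", "node-b", "node-c"})
	for n := 0; n < 5; n++ {
		fmt.Println(next())
	}
}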