Example #1
func main() {
	var wg sync.WaitGroup
	sc := make(chan os.Signal, 1)
	signal.Notify(sc,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)

	go func() {
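		// Block until a shutdown signal arrives, then clear the shared running flag.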
		sig := <-sc
		running = false
		fmt.Printf("main:Got signal:%v", sig)
	}()
	fmt.Printf("main:Mock get id process start!\n")
	db, err := GetDatabase()
	if err != nil {
		fmt.Printf("main:GetDatabase error:%s\n", err.Error())
		return
	}
	idGenerator, err := GetIdGenerator(db, idKey)
	if err != nil {
		fmt.Printf("main:GetIdGenerator error:%s\n", err.Error())
		return
	}
	wg.Add(1)
	go MockGetId(idGenerator, db, &wg)
	wg.Wait()
}
Example #2
func TestFatalRxError(t *testing.T) {
	t.Parallel()

	conn := mustConnect(t, *defaultConnConfig)
	defer closeConn(t, conn)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
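		// This query blocks in pg_sleep until the backend is terminated; Scan should then return a PgError with FATAL severity.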
		defer wg.Done()
		var n int32
		var s string
		err := conn.QueryRow("select 1::int4, pg_sleep(10)::varchar").Scan(&n, &s)
		if err, ok := err.(pgx.PgError); !ok || err.Severity != "FATAL" {
			t.Fatalf("Expected QueryRow Scan to return fatal PgError, but instead received %v", err)
		}
	}()

	otherConn, err := pgx.Connect(*defaultConnConfig)
	if err != nil {
		t.Fatalf("Unable to establish connection: %v", err)
	}
	defer otherConn.Close()

	if _, err := otherConn.Exec("select pg_terminate_backend($1)", conn.Pid); err != nil {
		t.Fatalf("Unable to kill backend PostgreSQL process: %v", err)
	}

	wg.Wait()

	if conn.IsAlive() {
		t.Fatal("Connection should not be live but was")
	}
}
Example #3
// Run runs the query concurrently, and returns the results.
func (q *Query) Run() []interface{} {
	rand.Seed(time.Now().UnixNano())
	var w sync.WaitGroup
	var l sync.Mutex
	places := make([]interface{}, len(q.Journey))
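	// Fan out one goroutine per journey entry; each stores one randomly chosen result in its own slot of places.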
	for i, r := range q.Journey {
		w.Add(1)
		go func(types string, i int) {
			defer w.Done()
			response, err := q.find(types)
			if err != nil {
				log.Println("Failed to find places:", err)
				return
			}
			if len(response.Results) == 0 {
				log.Println("No places found for", types)
				return
			}
			for _, result := range response.Results {
				for _, photo := range result.Photos {
					photo.URL = "https://maps.googleapis.com/maps/api/place/photo?" +
						"maxwidth=1000&photoreference=" + photo.PhotoRef + "&key=" + APIKey
				}
			}
			randI := rand.Intn(len(response.Results))
			l.Lock()
			places[i] = response.Results[randI]
			l.Unlock()
		}(r, i)
	}
	w.Wait() // wait for everything to finish
	return places
}
Example #4
func (a *apiServer) runPipeline(pipelineInfo *pps.PipelineInfo) error {
	ctx, cancel := context.WithCancel(context.Background())
	a.lock.Lock()
	a.cancelFuncs[*pipelineInfo.Pipeline] = cancel
	a.lock.Unlock()
	var loopErr error
	//TODO this gets really weird with branching... we need to figure out what that looks like.
	mostRecentCommit := make(map[pfs.Repo]*pfs.Commit)
	var lock sync.Mutex
	var wg sync.WaitGroup
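	// Watch each input repo in its own goroutine and create a job for every new commit once all repos have reported one.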
	for _, inputRepo := range pipelineInfo.InputRepo {
		inputRepo := inputRepo
		wg.Add(1)
		go func() {
			defer wg.Done()
			var lastCommit *pfs.Commit
			listCommitRequest := &pfs.ListCommitRequest{
				Repo:       inputRepo,
				CommitType: pfs.CommitType_COMMIT_TYPE_READ,
				From:       lastCommit,
				Block:      true,
			}
			commitInfos, err := a.pfsAPIClient.ListCommit(ctx, listCommitRequest)
			if err != nil && loopErr == nil {
				loopErr = err
				return
			}
			for _, commitInfo := range commitInfos.CommitInfo {
				lock.Lock()
				mostRecentCommit[*inputRepo] = commitInfo.Commit
				var commits []*pfs.Commit
				for _, commit := range mostRecentCommit {
					commits = append(commits, commit)
				}
				lock.Unlock()
				if len(commits) < len(pipelineInfo.InputRepo) {
					// we don't yet have a commit for every input repo so there's no way to run the job
					continue
				}
				outParentCommit, err := a.bestParent(pipelineInfo, commitInfo)
				if err != nil && loopErr == nil {
					loopErr = err
					return
				}
				_, err = a.jobAPIClient.CreateJob(
					ctx,
					&pps.CreateJobRequest{
						Spec: &pps.CreateJobRequest_Pipeline{
							Pipeline: pipelineInfo.Pipeline,
						},
						InputCommit:  []*pfs.Commit{commitInfo.Commit},
						OutputParent: outParentCommit,
					},
				)
				if err != nil && loopErr == nil {
					loopErr = err
					return
				}
			}
		}()
	}
	wg.Wait()
	return loopErr
}
Example #5
func main() {
	introText := "SIMPLE TWITTER REFORMATTER \n (╯°□°)╯︵ ┻━┻) \n"
	fmt.Printf(introText)

	key := flag.String("key", "nokey", "Twitter consumer key")
	secret := flag.String("sec", "nosecret", "Twitter consumer secret")
	debug := flag.Bool("debug", false, "Debug logging level")
	numTweets := flag.Int("num", 3, "Number of tweets to retrieve")

	flag.Parse()

	access_token, err := getBearerToken(*key, *secret, *debug)
	if err != nil || access_token == "" {
		log.Fatal("Could not retrieve token to make twitter API request")
	}

	// Create a very basic channel with tweets getting passed into the expander
	// Wait for it to finish executing before quitting.
	var tweetChannel chan string = make(chan string)
	var wg sync.WaitGroup
	wg.Add(1)
	go tweetRetriever(access_token, *numTweets, tweetChannel, &wg, *debug)
	go textExpander(tweetChannel)
	wg.Wait()
}
Example #6
func connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm) {

	var wg sync.WaitGroup
	connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
		// TODO: make a DialAddr func.
		s.peers.AddAddr(dst, addr, peer.PermanentAddrTTL)
		if _, err := s.Dial(ctx, dst); err != nil {
			t.Fatal("error swarm dialing to peer", err)
		}
		wg.Done()
	}

	log.Info("Connecting swarms simultaneously.")
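	// connect runs synchronously inside the nested loops, so every Done has already fired by the time wg.Wait() is reached.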
	for _, s1 := range swarms {
		for _, s2 := range swarms {
			if s2.local != s1.local { // don't connect to self.
				wg.Add(1)
				connect(s1, s2.LocalPeer(), s2.ListenAddresses()[0]) // try the first.
			}
		}
	}
	wg.Wait()

	for _, s := range swarms {
		log.Infof("%s swarm routing table: %s", s.local, s.Peers())
	}
}
Example #7
func cliCloseListener(c *cli.Context) {
	args := c.Args()
	if len(args) == 0 {
		Exit("Must specify listenAddr to stop")
	}
	listenAddr := args[0]
	command := btypes.CommandCloseListener{
		Addr: listenAddr,
	}
	wg := sync.WaitGroup{}
	var failedMtx sync.Mutex // guards failed, which is incremented from multiple goroutines
	failed := 0
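	// Contact every remote concurrently; the failure count determines the exit code after Wait.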
	for _, remote := range Config.Remotes {
		wg.Add(1)
		go func(remote string) {
			defer wg.Done()
			response, err := CloseListener(Config.PrivKey, remote, command)
			if err != nil {
				failedMtx.Lock()
				failed++
				failedMtx.Unlock()
				fmt.Printf("%v failure. %v\n", remote, err)
			} else {
				fmt.Printf("%v success. %v\n", remote, response)
			}
		}(remote)
	}
	wg.Wait()
	if 0 < failed {
		os.Exit(1)
	}
}
Example #8
func cliOpenListener(c *cli.Context) {
	args := c.Args()
	if len(args) < 1 {
		Exit("Must specify <listenAddr e.g. [::]:46661>")
	}
	listenAddr := args[0]
	command := btypes.CommandOpenListener{
		Addr: listenAddr,
	}
	wg := sync.WaitGroup{}
	var failedMtx sync.Mutex // guards failed, which is incremented from multiple goroutines
	failed := 0
	for _, remote := range Config.Remotes {
		wg.Add(1)
		go func(remote string) {
			defer wg.Done()
			response, err := OpenListener(Config.PrivKey, remote, command)
			if err != nil {
				failedMtx.Lock()
				failed++
				failedMtx.Unlock()
				fmt.Printf("%v failure. %v\n", remote, err)
			} else {
				fmt.Printf("%v opened %v.\n", remote, response.Addr)
			}
		}(remote)
	}
	wg.Wait()
	if 0 < failed {
		os.Exit(1)
	}
}
Example #9
// DeprecateImages sets the given deprecation state on each of the named images
func (g *GceImages) DeprecateImages(opts *DeprecateOptions) error {
	var (
		wg          sync.WaitGroup
		mu          sync.Mutex // protects multiErrors
		multiErrors error
	)

	for _, n := range opts.Names {
		wg.Add(1)
		go func(name string) {
			st := &compute.DeprecationStatus{
				State: opts.State,
			}

			_, err := g.svc.Deprecate(g.config.ProjectID, name, st).Do()
			if err != nil {
				mu.Lock()
				multiErrors = multierror.Append(multiErrors, err)
				mu.Unlock()
			}

			wg.Done()
		}(n)
	}

	wg.Wait()
	return multiErrors
}
Example #10
func BulkStat(parallelism int, files []string) {
	todo := make(chan string, len(files))
	var wg sync.WaitGroup
	wg.Add(parallelism)
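	// Start a fixed pool of worker goroutines; each exits when it reads the zero value from the closed channel.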
	for i := 0; i < parallelism; i++ {
		go func() {
			for {
				fn := <-todo
				if fn == "" {
					break
				}
				_, err := os.Lstat(fn)
				if err != nil {
					log.Fatal("All stats should succeed:", err)
				}
			}
			wg.Done()
		}()
	}

	for _, v := range files {
		todo <- v
	}
	close(todo)
	wg.Wait()
}
Example #11
// Gathers data for all servers.
func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup

	errorChannel := make(chan error, len(h.Servers))
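	// Gather from every server in parallel, funnelling any failures into the buffered error channel.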

	for _, server := range h.Servers {
		wg.Add(1)
		go func(server string) {
			defer wg.Done()
			if err := h.gatherServer(acc, server); err != nil {
				errorChannel <- err
			}
		}(server)
	}

	wg.Wait()
	close(errorChannel)

	// Get all errors and return them as one giant error
	errorStrings := []string{}
	for err := range errorChannel {
		errorStrings = append(errorStrings, err.Error())
	}

	if len(errorStrings) == 0 {
		return nil
	}
	return errors.New(strings.Join(errorStrings, "\n"))
}
Example #12
func TestHTTPOutputKeepOriginalHost(t *testing.T) {
	wg := new(sync.WaitGroup)
	quit := make(chan int)

	input := NewTestInput()

	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
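		// Every replayed request must carry the overridden Host header; Done releases the waiting test.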
		if req.Host != "custom-host.com" {
			t.Error("Wrong header", req.Host)
		}

		wg.Done()
	}))
	defer server.Close()

	headers := HTTPHeaders{HTTPHeader{"Host", "custom-host.com"}}
	Settings.modifierConfig = HTTPModifierConfig{headers: headers}

	output := NewHTTPOutput(server.URL, &HTTPOutputConfig{Debug: false, OriginalHost: true})

	Plugins.Inputs = []io.Reader{input}
	Plugins.Outputs = []io.Writer{output}

	go Start(quit)

	wg.Add(1)
	input.EmitGET()

	wg.Wait()

	close(quit)

	Settings.modifierConfig = HTTPModifierConfig{}
}
Example #13
// Serve serves SFTP connections until the streams stop or the SFTP subsystem
// is stopped.
func (svr *Server) Serve() error {
	var wg sync.WaitGroup
	wg.Add(sftpServerWorkerCount)
	for i := 0; i < sftpServerWorkerCount; i++ {
		go func() {
			defer wg.Done()
			if err := svr.sftpServerWorker(); err != nil {
				svr.rwc.Close() // shuts down recvPacket
			}
		}()
	}

	var err error
	for {
		var pktType uint8
		var pktBytes []byte
		pktType, pktBytes, err = recvPacket(svr.rwc)
		if err != nil {
			break
		}
		svr.pktChan <- rxPacket{fxp(pktType), pktBytes}
	}

	close(svr.pktChan) // shuts down sftpServerWorkers
	wg.Wait()          // wait for all workers to exit

	// close any still-open files
	for handle, file := range svr.openFiles {
		fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name())
		file.Close()
	}
	return err // error from recvPacket
}
Example #14
// TestStoreRangeUpReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeUpReplicate(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	// Initialize the gossip network.
	var wg sync.WaitGroup
	wg.Add(len(mtc.stores))
	key := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
	mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ roachpb.Value) { wg.Done() })
	for _, s := range mtc.stores {
		s.GossipStore()
	}
	wg.Wait()

	// Once we know our peers, trigger a scan.
	mtc.stores[0].ForceReplicationScanAndProcess()

	// The range should become available on every node.
	if err := util.IsTrueWithin(func() bool {
		for _, s := range mtc.stores {
			r := s.LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
			if r == nil {
				return false
			}
		}
		return true
	}, replicationTimeout); err != nil {
		t.Fatal(err)
	}
}
Example #15
func TestMultipleGetEmpty(t *testing.T) {
	q := New(10)
	var wg sync.WaitGroup
	wg.Add(2)
	results := make([][]interface{}, 2)
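	// The WaitGroup is reused: the first Wait blocks until both goroutines have started, the second until both have received a value.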

	go func() {
		wg.Done()
		local, err := q.Get(1)
		assert.Nil(t, err)
		results[0] = local
		wg.Done()
	}()

	go func() {
		wg.Done()
		local, err := q.Get(1)
		assert.Nil(t, err)
		results[1] = local
		wg.Done()
	}()

	wg.Wait()
	wg.Add(2)

	q.Put(`a`, `b`, `c`)
	wg.Wait()

	if assert.Len(t, results[0], 1) && assert.Len(t, results[1], 1) {
		assert.True(t, (results[0][0] == `a` && results[1][0] == `b`) ||
			(results[0][0] == `b` && results[1][0] == `a`),
			`The array should be a, b or b, a`)
	}
}
Example #16
func (t *testRunner) Run() bool {

	reschan := make(chan *testResult)
	wg := sync.WaitGroup{}
	for _, route := range t.api.Routes {
		wg.Add(1)

		go func(route Route) {

			reschan <- t.invokeTest(route.Path, route.Test)
			wg.Done()
		}(route)

	}

	go func() {
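		// Close the results channel once every test goroutine has finished so the range below terminates.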
		wg.Wait()
		close(reschan)
	}()

	success := true
	for res := range reschan {
		if res == nil {
			continue
		}

		if res.isFailure() {
			success = false
		}
	}

	return success

}
Example #17
func (n *network) Run(ctx context.Context) {
	wg := sync.WaitGroup{}

	log.Info("Watching for new subnet leases")
	evts := make(chan []subnet.Event)
	wg.Add(1)
	go func() {
		subnet.WatchLeases(ctx, n.sm, n.name, n.lease, evts)
		wg.Done()
	}()

	n.rl = make([]netlink.Route, 0, 10)
	wg.Add(1)
	go func() {
		n.routeCheck(ctx)
		wg.Done()
	}()

	// Do not return until both the lease watcher and the route checker have exited.
	defer wg.Wait()

	for {
		select {
		case evtBatch := <-evts:
			n.handleSubnetEvents(evtBatch)

		case <-ctx.Done():
			return
		}
	}
}
Example #18
func TestOutputHTTPSSL(t *testing.T) {
	wg := new(sync.WaitGroup)
	quit := make(chan int)

	// Origin and replay server initialization
	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		wg.Done()
	}))

	input := NewTestInput()
	output := NewHTTPOutput(server.URL, &HTTPOutputConfig{})

	Plugins.Inputs = []io.Reader{input}
	Plugins.Outputs = []io.Writer{output}

	go Start(quit)

	wg.Add(2)

	input.EmitPOST()
	input.EmitGET()

	wg.Wait()
	close(quit)
}
Example #19
func BenchmarkHTTPOutput(b *testing.B) {
	wg := new(sync.WaitGroup)
	quit := make(chan int)

	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
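		// Simulate a slow upstream; each handled request releases one WaitGroup slot.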
		time.Sleep(50 * time.Millisecond)
		wg.Done()
	}))
	defer server.Close()

	input := NewTestInput()
	output := NewHTTPOutput(server.URL, &HTTPOutputConfig{})

	Plugins.Inputs = []io.Reader{input}
	Plugins.Outputs = []io.Writer{output}

	go Start(quit)

	for i := 0; i < b.N; i++ {
		wg.Add(1)
		input.EmitPOST()
	}

	wg.Wait()

	close(quit)
}
Example #20
// cWriteShards writes shards concurrently
func cWriteShards(out []io.Writer, in [][]byte) error {
	if len(out) != len(in) {
		panic("internal error: in and out size does not match")
	}
	var errs = make(chan error, len(out))
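	// The error channel is buffered to len(out) so no writer goroutine blocks on send.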
	var wg sync.WaitGroup
	wg.Add(len(out))
	for i := range in {
		go func(i int) {
			defer wg.Done()
			if out[i] == nil {
				errs <- nil
				return
			}
			n, err := out[i].Write(in[i])
			if err != nil {
				errs <- StreamWriteError{Err: err, Stream: i}
				return
			}
			if n != len(in[i]) {
				errs <- StreamWriteError{Err: io.ErrShortWrite, Stream: i}
			}
		}(i)
	}
	wg.Wait()
	close(errs)
	for err := range errs {
		if err != nil {
			return err
		}
	}

	return nil
}
Example #21
func (cd *CheckDocker) GetData() error {
	errChan := make(chan error)
	var err error
	var wg sync.WaitGroup

	wg.Add(2)
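	// Fetch the daemon info and the container list concurrently; the first error sent (or nil once the channel is closed) becomes the return value.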

	go func(cd *CheckDocker, errChan chan error) {
		defer wg.Done()

		// Use a goroutine-local err so the two goroutines do not race on a shared variable.
		var err error
		cd.dockerInfoData, err = cd.dockerclient.Info()
		if err != nil {
			errChan <- err
		}
	}(cd, errChan)

	go func(cd *CheckDocker, errChan chan error) {
		defer wg.Done()

		var err error
		cd.dockerContainersData, err = cd.dockerclient.ListContainers(dockerlib.ListContainersOptions{})
		if err != nil {
			errChan <- err
		}
	}(cd, errChan)

	go func() {
		wg.Wait()
		close(errChan)
	}()

	err = <-errChan

	return err
}
Example #22
// ReadWrite does read and write in parallel.
// qRead is num goroutines for reading.
// qWrite is num goroutines for writing.
// Assume n divisible by (qRead + qWrite).
func ReadWrite(n, qRead, qWrite int, newFunc func() HashMap, b *testing.B) {
	q := qRead + qWrite
	check(n, q)
	work := intPairArray(n)
	b.StartTimer()
	for i := 0; i < b.N; i++ { // N reps.
		h := newFunc()
		var wg sync.WaitGroup
		for j := 0; j < qRead; j++ { // Read goroutines.
			wg.Add(1)
			go func(j int) {
				defer wg.Done()
				start, end := workRange(n, q, j)
				for k := start; k < end; k++ {
					h.Get(work[k].Key)
				}
			}(j)
		}

		for j := qRead; j < q; j++ { // Write goroutines.
			wg.Add(1)
			go func(j int) {
				defer wg.Done()
				start, end := workRange(n, q, j)
				for k := start; k < end; k++ {
					h.Put(work[k].Key, work[k].Val)
				}
			}(j)
		}
		wg.Wait()
	}
}
Example #23
// Test that simultaneous RemoveAll do not report an error.
// As long as it gets removed, we should be happy.
func TestRemoveAllRace(t *testing.T) {
	if runtime.GOOS == "windows" {
		// Windows has very strict rules about things like
		// removing directories while someone else has
		// them open. The racing doesn't work out nicely
		// like it does on Unix.
		t.Skip("skipping on windows")
	}

	n := runtime.GOMAXPROCS(16)
	defer runtime.GOMAXPROCS(n)
	root, err := ioutil.TempDir("", "issue")
	if err != nil {
		t.Fatal(err)
	}
	mkdirTree(t, root, 1, 6)
	hold := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			<-hold
			err := RemoveAll(root)
			if err != nil {
				t.Errorf("unexpected error: %T, %q", err, err)
			}
		}()
	}
	close(hold) // let workers race to remove root
	wg.Wait()
}
Example #24
func cliDownloadFile(c *cli.Context) {
	args := c.Args()
	if len(args) != 2 {
		Exit("Must specify <remote-path> <local-path-prefix>")
	}
	remotePath := args[0]
	localPathPrefix := args[1]
	command := btypes.CommandServeFile{
		Path: remotePath,
	}

	wg := sync.WaitGroup{}
	var failedMtx sync.Mutex // guards failed, which is incremented from multiple goroutines
	failed := 0
	for _, remote := range Config.Remotes {
		wg.Add(1)
		go func(remote string, localPath string) {
			defer wg.Done()
			n, err := DownloadFile(Config.PrivKey, remote, command, localPath)
			if err != nil {
				failedMtx.Lock()
				failed++
				failedMtx.Unlock()
				fmt.Printf("%v failure. %v\n", remote, err)
			} else {
				fmt.Printf("%v success. Wrote %v bytes to %v\n", remote, n, localPath)
			}
		}(remote, Fmt("%v_%v", localPathPrefix, remoteNick(remote)))
	}
	wg.Wait()
	if 0 < failed {
		os.Exit(1)
	}
}
Example #25
// RunParallel spawns a goroutine per task and uses a buffered channel as a semaphore so that at most numWorkers tasks run at once
func RunParallel(task Task, numTasks, numWorkers int) {
	start := time.Now()
	if numWorkers <= 0 {
		numWorkers = numTasks
	}
	defer func() {
		glog.Infof("RunParallel took %v for %d tasks and %d workers", time.Since(start), numTasks, numWorkers)
	}()
	var wg sync.WaitGroup
	semCh := make(chan struct{}, numWorkers)
	wg.Add(numTasks)
	for id := 0; id < numTasks; id++ {
		go func(id int) {
			semCh <- struct{}{}
			err := task(id)
			if err != nil {
				glog.Fatalf("Worker failed with %v", err)
			}
			<-semCh
			wg.Done()
		}(id)
	}
	wg.Wait()
	close(semCh)
}
Example #26
func (ts *tribServer) getTribValuesFromHashIds(user string, hashIds []string) ([]string, error) {
	tribValues := make([]string, len(hashIds))
	returnValues := make([]string, len(hashIds))

	var wg sync.WaitGroup

	wg.Add(len(hashIds))

	for i := range hashIds {
		go func(i int) {
			defer wg.Done()
			// Use a goroutine-local err so concurrent Gets do not race on a shared variable.
			var err error
			tribValues[i], err = ts.Libstore.Get(makeTribId(user, hashIds[i]))
			if err != nil && err != libstore.ErrorKeyNotFound { // ignore cross inconsistency
				panic(err)
			}
		}(i)
	}
	wg.Wait()

	j := 0
	for i := range tribValues {
		if tribValues[i] != "" {
			returnValues[j] = tribValues[i]
			j++
		}
	}

	return returnValues[0:j], nil
}
Example #27
func copyLoop(a net.Conn, b net.Conn) error {
	// Note: b is always the pt connection.  a is the SOCKS/ORPort connection.
	errChan := make(chan error, 2)

	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		defer wg.Done()
		defer b.Close()
		defer a.Close()
		_, err := io.Copy(b, a)
		errChan <- err
	}()
	go func() {
		defer wg.Done()
		defer a.Close()
		defer b.Close()
		_, err := io.Copy(a, b)
		errChan <- err
	}()

	// Wait for both upstream and downstream to close.  Since one side
	// terminating closes the other, the second error in the channel will be
	// something like EINVAL (though io.Copy() will swallow EOF), so only the
	// first error is returned.
	wg.Wait()
	if len(errChan) > 0 {
		return <-errChan
	}

	return nil
}
Example #28
// Kill all containers in a pod.  Returns the number of containers deleted and an error if one occurs.
func (kl *Kubelet) killContainersInPod(pod *api.BoundPod, dockerContainers dockertools.DockerContainers) (int, error) {
	podFullName := GetPodFullName(pod)

	count := 0
	errs := make(chan error, len(pod.Spec.Containers))
	wg := sync.WaitGroup{}
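	// Kill each matching container in its own goroutine, collecting failures on the buffered errs channel.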
	for _, container := range pod.Spec.Containers {
		// TODO: Consider being more aggressive: kill all containers with this pod UID, period.
		if dockerContainer, found, _ := dockerContainers.FindPodContainer(podFullName, pod.UID, container.Name); found {
			count++
			wg.Add(1)
			go func() {
				err := kl.killContainer(dockerContainer)
				if err != nil {
					glog.Errorf("Failed to delete container: %v; Skipping pod %q", err, podFullName)
					errs <- err
				}
				wg.Done()
			}()
		}
	}
	wg.Wait()
	close(errs)
	if len(errs) > 0 {
		errList := []error{}
		for err := range errs {
			errList = append(errList, err)
		}
		return -1, fmt.Errorf("failed to delete containers (%v)", errList)
	}
	return count, nil
}
Example #29
func (rm *ReplicationManager) syncReplicationController(controllerSpec api.ReplicationController) error {
	s := labels.Set(controllerSpec.DesiredState.ReplicaSelector).AsSelector()
	podList, err := rm.kubeClient.ListPods(s)
	if err != nil {
		return err
	}
	filteredList := rm.filterActivePods(podList.Items)
	diff := len(filteredList) - controllerSpec.DesiredState.Replicas
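	// Create or delete |diff| pods concurrently and wait for the whole batch before returning.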
	if diff < 0 {
		diff *= -1
		wait := sync.WaitGroup{}
		wait.Add(diff)
		glog.Infof("Too few replicas, creating %d\n", diff)
		for i := 0; i < diff; i++ {
			go func() {
				defer wait.Done()
				rm.podControl.createReplica(controllerSpec)
			}()
		}
		wait.Wait()
	} else if diff > 0 {
		glog.Infof("Too many replicas, deleting %d\n", diff)
		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func(ix int) {
				defer wait.Done()
				rm.podControl.deletePod(filteredList[ix].ID)
			}(i)
		}
		wait.Wait()
	}
	return nil
}
Example #30
func cliStopProcess(c *cli.Context) {
	args := c.Args()
	if len(args) == 0 {
		Exit("Must specify label to stop")
	}
	label := args[0]
	command := btypes.CommandStopProcess{
		Label: label,
		Kill:  true,
	}
	wg := sync.WaitGroup{}
	var failedMtx sync.Mutex // guards failed, which is incremented from multiple goroutines
	failed := 0
	for _, remote := range Config.Remotes {
		wg.Add(1)
		go func(remote string) {
			defer wg.Done()
			response, err := StopProcess(Config.PrivKey, remote, command)
			if err != nil {
				failedMtx.Lock()
				failed++
				failedMtx.Unlock()
				fmt.Printf("%v failure. %v\n", remote, err)
			} else {
				fmt.Printf("%v success. %v\n", remote, response)
			}
		}(remote)
	}
	wg.Wait()
	if 0 < failed {
		os.Exit(1)
	}
}