Example #1
// TestConcurrentSqliteConns tests concurrent writes to a single in-memory database.
func TestConcurrentSqliteConns(t *testing.T) {
	dbMap := NewMemDB()
	repo := NewConnectorConfigRepo(dbMap)

	var (
		once sync.Once
		wg   sync.WaitGroup
	)

	n := 1000
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			configs := []connector.ConnectorConfig{
				&connector.LocalConnectorConfig{ID: "local"},
			}
			// Setting connector configs both deletes and writes to a single table
			// within a transaction.
			if err := repo.Set(configs); err != nil {
				// Don't print 1000 errors, only the first.
				once.Do(func() {
					t.Errorf("concurrent connections to sqlite3: %v", err)
				})
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
Example #2
// Open connects to the existing window with the given id.
// If ctl is non-nil, Open uses it as the window's control file
// and takes ownership of it.
func Open(id int, ctl *client.Fid) (*Win, error) {
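	// Note: a freshly allocated sync.Once always runs its function, so
	// mountAcme is invoked on every call to Open before fsysErr is checked.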
	config := new(sync.Once)
	config.Do(mountAcme)
	if fsysErr != nil {
		return nil, fsysErr
	}
	if ctl == nil {
		var err error
		ctl, err = fsys.Open(fmt.Sprintf("%d/ctl", id), plan9.ORDWR)
		if err != nil {
			return nil, err
		}
	}

	w := new(Win)
	w.id = id
	w.ctl = ctl
	w.next = nil
	w.prev = last
	if last != nil {
		last.next = w
	} else {
		windows = w
	}
	last = w
	return w, nil
}
Example #3
func main() {
	Case := Init(2)
	fmt.Printf("DEADLOCK\n")

	var mu sync.Mutex
	var once sync.Once
	done := make(chan bool)
	go func() {
		if Case == 0 {
			time.Sleep(time.Second)
		}
		mu.Lock()
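		// If this goroutine wins the once, the callback below re-locks mu
		// while it is already held here; sync.Mutex is not reentrant, so the
		// program deadlocks.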
		once.Do(func() {
			mu.Lock()
			mu.Unlock()
		})
		mu.Unlock()
		done <- true
	}()
	if Case == 1 {
		time.Sleep(time.Second)
	}
	once.Do(func() {
		mu.Lock()
		mu.Unlock()
	})
	<-done
}
Example #4
// Paralyze parallelizes a function and returns a slice containing results and
// a slice containing errors. The result and error at each index are not
// mutually exclusive: even if results[i] is not nil, errors[i] is not
// guaranteed to be nil.
func Paralyze(funcs ...Paralyzable) (results []interface{}, errors []error) {
	var wg sync.WaitGroup
	results = make([]interface{}, len(funcs))
	errors = make([]error, len(funcs))
	wg.Add(len(funcs))

	var panik interface{}
	var panikOnce sync.Once
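	// Capture only the first panic from any goroutine; it is re-raised after
	// all goroutines have finished.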

	for i, fn := range funcs {
		go func(i int, fn Paralyzable) {
			defer func() {
				if r := recover(); r != nil {
					panikOnce.Do(func() { panik = r })
				}
			}()
			defer wg.Done()
			results[i], errors[i] = fn()
		}(i, fn)
	}
	wg.Wait()

	if panik != nil {
		panic(panik)
	}

	return results, errors
}
Example #5
func make_icom_ctx(underlying io.ReadWriteCloser, is_server bool,
	do_junk bool, PAUSELIM int) *icom_ctx {
	ctx := new(icom_ctx)
	ctx.is_dead = false
	ctx.underlying = underlying
	ctx.our_srv = VSListen()
	ctx.write_ch = make(chan icom_msg)

	// Killswitch is closed when the entire ctx should be abandoned.
	killswitch := make(chan bool)
	ctx.killswitch = killswitch
	var _ks_exec sync.Once
	KILL := func() {
		_ks_exec.Do(func() {
			kilog.Debug("Killswitching!")
			ctx.underlying.Close()
			ctx.is_dead = true
			close(killswitch)
			close(ctx.our_srv.vs_ch)
		})
	}

	// Run the main thing
	go run_icom_ctx(ctx, KILL, is_server, do_junk, PAUSELIM)

	return ctx
}
Example #6
// waitTillInboundEmpty will run f, which should end up causing the peer with hostPort in ch
// to have 0 inbound connections. It fails the test if that does not happen within a second.
func waitTillInboundEmpty(t *testing.T, ch *Channel, hostPort string, f func()) {
	peer, ok := ch.RootPeers().Get(hostPort)
	if !ok {
		return
	}

	inboundEmpty := make(chan struct{})
	var onUpdateOnce sync.Once
	onUpdate := func(p *Peer) {
		if inbound, _ := p.NumConnections(); inbound == 0 {
			onUpdateOnce.Do(func() {
				close(inboundEmpty)
			})
		}
	}
	peer.SetOnUpdate(onUpdate)

	f()

	select {
	case <-inboundEmpty:
		return
	case <-time.After(time.Second):
		t.Errorf("Timed out waiting for peer %v to have no inbound connections", hostPort)
	}
}
Example #7
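// checkErr reports whether err is one of the expected connection errors
// (EOF, ECONNREFUSED, or a closed-connection message). Any other error is
// reported to the test at most once or printed, and false is returned.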
func checkErr(t *testing.T, err error, once *sync.Once) bool {
	if err.(*url.Error).Err == io.EOF {
		return true
	}
	var errno syscall.Errno
	switch oe := err.(*url.Error).Err.(type) {
	case *net.OpError:
		switch e := oe.Err.(type) {
		case syscall.Errno:
			errno = e
		case *os.SyscallError:
			errno = e.Err.(syscall.Errno)
		}
		if errno == syscall.ECONNREFUSED {
			return true
		} else if err != nil {
			once.Do(func() {
				t.Error("Error on Get:", err)
			})
		}
	default:
		if strings.Contains(err.Error(), "transport closed before response was received") {
			return true
		}
		if strings.Contains(err.Error(), "server closed connection") {
			return true
		}
		fmt.Printf("unknown err: %s, %#v\n", err, err)
	}
	return false
}
Example #8
func Pipe(src io.ReadWriteCloser, dst io.ReadWriteCloser) (int64, int64) {

	var sent, received int64
	var c = make(chan bool)
	var o sync.Once

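	// Whichever copy direction finishes first closes both ends; the Once
	// keeps the second goroutine from closing them (and the channel) again.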
	closeBoth := func() {
		src.Close()
		dst.Close()
		close(c)
	}

	go func() {
		received, _ = io.Copy(src, dst)
		o.Do(closeBoth)
	}()

	go func() {
		sent, _ = io.Copy(dst, src)
		o.Do(closeBoth)
	}()

	<-c
	return received, sent
}
Example #9
func (qe *QueryEngine) executeStreamSql(logStats *sqlQueryStats, conn PoolConnection, sql string, callback func(interface{}) error) {
	waitStart := time.Now()
	if !qe.streamTokens.Acquire() {
		panic(NewTabletError(FAIL, "timed out waiting for streaming quota"))
	}
	// Guarantee a release in case of unexpected errors.
	var once sync.Once
	defer once.Do(qe.streamTokens.Release)

	waitTime := time.Now().Sub(waitStart)
	waitStats.Add("StreamToken", waitTime)
	// No need to create a new log variable for this. Just add to the total
	// connection wait time.
	logStats.WaitingForConnection += waitTime

	logStats.QuerySources |= QUERY_SOURCE_MYSQL
	logStats.NumberOfQueries += 1
	logStats.AddRewrittenSql(sql)
	fetchStart := time.Now()
	err := conn.ExecuteStreamFetch(
		sql,
		func(qr interface{}) error {
			// Release semaphore at first callback.
			once.Do(qe.streamTokens.Release)
			return callback(qr)
		},
		int(qe.streamBufferSize.Get()),
	)
	logStats.MysqlResponseTime += time.Now().Sub(fetchStart)
	if err != nil {
		panic(NewTabletErrorSql(FAIL, err))
	}
}
Example #10
func TestHeartbeatCB(t *testing.T) {
	defer leaktest.AfterTest(t)()

	stopper := stop.NewStopper()
	defer stopper.Stop()

	serverClock := hlc.NewClock(time.Unix(0, 20).UnixNano)
	serverCtx := newNodeTestContext(serverClock, stopper)
	s, ln := newTestServer(t, serverCtx, true)
	remoteAddr := ln.Addr().String()

	RegisterHeartbeatServer(s, &HeartbeatService{
		clock:              serverClock,
		remoteClockMonitor: serverCtx.RemoteClocks,
	})

	// Clocks don't matter in this test.
	clientCtx := newNodeTestContext(serverClock, stopper)

	var once sync.Once
	ch := make(chan struct{})

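	// The heartbeat callback may fire repeatedly; the Once ensures ch is
	// closed only on the first heartbeat.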
	clientCtx.HeartbeatCB = func() {
		once.Do(func() {
			close(ch)
		})
	}

	_, err := clientCtx.GRPCDial(remoteAddr)
	if err != nil {
		t.Fatal(err)
	}

	<-ch
}
Example #11
func (s *Storage) Simulate(numPath, perSec int) func() {
	var terminate = make(chan bool)
	for i := 0; i < numPath; i++ {
		_path := "/" + randomString()
		go func() {
			for {
				select {
				case <-time.After(time.Second / time.Duration(perSec)):
					_addr := randomString()
					s.Push(_path, _addr)
				case <-terminate:
					return
				}
			}
		}()
	}

	var once sync.Once
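	// The returned stop function may be called more than once; only the first
	// call sends the termination signals.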
	return func() {
		once.Do(func() {
			for i := 0; i < numPath; i++ {
				terminate <- true
			}
		})
	}
}
Example #12
func (r *Runner) connectIRC() {
	conn := irc.IRC(ircNick, ircNick)
	conn.UseTLS = true
	conn.AddCallback("001", func(*irc.Event) {
		conn.Join(ircRoom)
	})
	var once sync.Once
	ready := make(chan struct{})
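	// The JOIN callback may fire more than once; the Once ensures ready is
	// closed only on the first join.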
	conn.AddCallback("JOIN", func(*irc.Event) {
		once.Do(func() { close(ready) })
	})
	for {
		log.Printf("connecting to IRC server: %s", ircServer)
		err := conn.Connect(ircServer)
		if err == nil {
			break
		}
		log.Printf("error connecting to IRC server: %s", err)
		time.Sleep(time.Second)
	}
	go conn.Loop()
	<-ready
	for msg := range r.ircMsgs {
		conn.Notice(ircRoom, msg)
	}
}
Example #13
// waitTillNConnections will run f, which should end up causing the peer with hostPort in ch
// to have the specified number of inbound and outbound connections.
// If the number of connections does not match within a second, the test fails.
func waitTillNConnections(t *testing.T, ch *Channel, hostPort string, inbound, outbound int, f func()) {
	peer, ok := ch.RootPeers().Get(hostPort)
	if !ok {
		return
	}

	var (
		i = -1
		o = -1
	)

	inboundEmpty := make(chan struct{})
	var onUpdateOnce sync.Once
	onUpdate := func(p *Peer) {
		if i, o = p.NumConnections(); (i == inbound || inbound == -1) &&
			(o == outbound || outbound == -1) {
			onUpdateOnce.Do(func() {
				close(inboundEmpty)
			})
		}
	}
	peer.SetOnUpdate(onUpdate)

	f()

	select {
	case <-inboundEmpty:
		return
	case <-time.After(time.Second):
		t.Errorf("Timed out waiting for peer %v to have (in: %v, out: %v) connections, got (in: %v, out: %v)",
			hostPort, inbound, outbound, i, o)
	}
}
Example #14
func (s *Service) Serve() {
	announce := stdsync.Once{}
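	// Announce the number of detected NAT devices only the first time any are found.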

	s.timer.Reset(0)

	s.mut.Lock()
	s.stop = make(chan struct{})
	s.mut.Unlock()

	for {
		select {
		case <-s.timer.C:
			if found := s.process(); found != -1 {
				announce.Do(func() {
					suffix := "s"
					if found == 1 {
						suffix = ""
					}
					l.Infoln("Detected", found, "NAT device"+suffix)
				})
			}
		case <-s.stop:
			s.timer.Stop()
			s.mut.RLock()
			for _, mapping := range s.mappings {
				mapping.clearAddresses()
			}
			s.mut.RUnlock()
			return
		}
	}
}
Example #15
func (dpf DotProviderFactory) GetInstance(dbSource string) DotProvider {
	if dpf.dotProviderMap == nil {
		dpf.dotProviderMap = make(map[string]chan DotProvider)
	}

	_, hasProvider := dpf.dotProviderMap[dbSource]
	if !hasProvider {
		var once sync.Once
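		// Note: this Once is local to the call, so it does not serialize
		// concurrent callers initializing the same pool.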

		once.Do(func() {
			glog.Errorf("Initializing pool for %s", dbSource)
			dpf.dotProviderMap[dbSource] = make(chan DotProvider, 20)

			for i := 0; i < 20; i++ {
				var dotProvider interface{}
				dotProvider = &DotProviderDB{}
				dotProvider.(DotProvider).Init(dbSource)
				dpf.dotProviderMap[dbSource] <- dotProvider.(DotProvider)
			}
			glog.Errorf("Done initializing pool for %s", dbSource)
		})
	}

	dotProviderFound := <-dpf.dotProviderMap[dbSource]

	return dotProviderFound
}
Example #16
func TestSnapshotStoreClearUsedSnap(t *testing.T) {
	s := &fakeSnapshot{}
	var once sync.Once
	once.Do(func() {})
	ss := &snapshotStore{
		snap:       newSnapshot(raftpb.Snapshot{}, s),
		inUse:      true,
		createOnce: once,
	}

	ss.clearUsedSnap()
	// wait for underlying KV snapshot closed
	testutil.WaitSchedule()
	s.mu.Lock()
	if !s.closed {
		t.Errorf("snapshot closed = %v, want true", s.closed)
	}
	s.mu.Unlock()
	if ss.snap != nil {
		t.Errorf("snapshot = %v, want nil", ss.snap)
	}
	if ss.inUse {
		t.Errorf("inUse = %v, want false", ss.inUse)
	}
	// test createOnce is reset
	if ss.createOnce == once {
		t.Errorf("createOnce fails to reset")
	}
}
Example #17
func NewClientLogger(client client.Client, id int64, rc io.ReadCloser, wc io.WriteCloser, limit int64) LoggerFunc {
	var once sync.Once
	var size int64
	return func(line *build.Line) {
		// annoying hack to only start streaming once the first line is written
		once.Do(func() {
			go func() {
				err := client.Stream(id, rc)
				if err != nil && err != io.ErrClosedPipe {
					logrus.Errorf("Error streaming build logs. %s", err)
				}
			}()
		})

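		// Drop further output once the accumulated size exceeds the limit.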
		if size > limit {
			return
		}

		linejson, _ := json.Marshal(line)
		wc.Write(linejson)
		wc.Write([]byte{'\n'})

		size += int64(len(line.Out))
	}
}
Example #18
func main() {
	Init(1)
	fmt.Printf("NODEADLOCK\n")

	var mu1 sync.Mutex
	var mu2 sync.Mutex
	var once sync.Once
	done := make(chan bool)
	for p := 0; p < 2; p++ {
		go func() {
			mu1.Lock()
			mu2.Lock()
			mu2.Unlock()
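			// mu2 is not held at this point, so the callback below can lock it
			// without self-deadlock; mu1 serializes the two goroutines.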
			once.Do(func() {
				mu2.Lock()
				mu2.Unlock()
			})
			mu2.Lock()
			mu2.Unlock()
			mu1.Unlock()
			done <- true
		}()
	}
	for p := 0; p < 2; p++ {
		<-done
	}
}
Example #19
func TestClient_Watchdog_Timeout(t *testing.T) {
	sm := New(serverSettings)
	var once sync.Once
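	// Answer only the first watchdog request; later DWRs go unanswered so the
	// client eventually gives up and disconnects.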
	sm.mux.HandleFunc("DWR", func(c diam.Conn, m *diam.Message) {
		once.Do(func() { m.Answer(diam.UnableToComply).WriteTo(c) })
	})
	srv := diamtest.NewServer(sm, dict.Default)
	defer srv.Close()
	cli := &Client{
		MaxRetransmits:     3,
		RetransmitInterval: 50 * time.Millisecond,
		EnableWatchdog:     true,
		WatchdogInterval:   50 * time.Millisecond,
		Handler:            New(clientSettings),
		AcctApplicationID: []*diam.AVP{
			diam.NewAVP(avp.AcctApplicationID, avp.Mbit, 0, datatype.Unsigned32(0)),
		},
	}
	c, err := cli.Dial(srv.Address)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	select {
	case <-c.(diam.CloseNotifier).CloseNotify():
	case <-time.After(500 * time.Millisecond):
		t.Fatal("Timeout waiting for watchdog to disconnect client")
	}
}
Example #20
//
// Run Watcher Protocol
//
func runWatcher(pipe *common.PeerPipe,
	requestMgr RequestMgr,
	handler ActionHandler,
	factory MsgFactory,
	killch <-chan bool,
	readych chan<- bool,
	alivech chan<- bool,
	pingch <-chan bool,
	once *sync.Once) (isKilled bool) {

	// Create a watcher.  The watcher will start a go-routine, listening to messages coming from the peer.
	log.Current.Debugf("WatcherServer.runWatcher(): Start Watcher Protocol")
	watcher := NewFollower(WATCHER, pipe, handler, factory)
	donech := watcher.Start()
	defer watcher.Terminate()

	// notify that the watcher is starting to run.  Only do this once.
	once.Do(func() { readych <- true })

	log.Current.Debugf("WatcherServer.runWatcher(): Watcher is ready to process request")

	var incomings <-chan *RequestHandle
	if requestMgr != nil {
		incomings = requestMgr.GetRequestChannel()
	} else {
		incomings = make(chan *RequestHandle)
	}

	for {
		select {
		case handle, ok := <-incomings:
			if ok {
				// move request to pending queue (waiting for proposal)
				requestMgr.AddPendingRequest(handle)

				// forward the request to the leader
				if !watcher.ForwardRequest(handle.Request) {
					log.Current.Errorf("WatcherServer.processRequest(): fail to send client request to leader. Terminate.")
					return
				}
			} else {
				log.Current.Debugf("WatcherServer.processRequest(): channel for receiving client request is closed. Terminate.")
				return
			}
		case <-killch:
			// server is being explicitly terminated.  Terminate the watcher go-routine as well.
			log.Current.Debugf("WatcherServer.runTillEnd(): receive kill signal. Terminate.")
			return true
		case <-donech:
			// watcher is done.  Just return.
			log.Current.Debugf("WatcherServer.runTillEnd(): Watcher go-routine terminates. Terminate.")
			return false
		case <-pingch:
			if len(alivech) == 0 {
				alivech <- true
			}
		}
	}
}
Example #21
// batchApiRoutine processes the queue of transfers using the batch endpoint,
// making only one POST call for all objects. The results are then handed
// off to the transfer workers.
func (q *TransferQueue) batchApiRoutine() {
	var startProgress sync.Once

	for {
		batch := q.batcher.Next()
		if batch == nil {
			break
		}

		tracerx.Printf("tq: sending batch of size %d", len(batch))

		transfers := make([]*objectResource, 0, len(batch))
		for _, t := range batch {
			transfers = append(transfers, &objectResource{Oid: t.Oid(), Size: t.Size()})
		}

		objects, err := Batch(transfers, q.transferKind)
		if err != nil {
			if IsNotImplementedError(err) {
				configFile := filepath.Join(LocalGitDir, "config")
				git.Config.SetLocal(configFile, "lfs.batch", "false")

				go q.legacyFallback(batch)
				return
			}

			q.errorc <- err
			q.wait.Add(-len(transfers))
			continue
		}

		startProgress.Do(q.meter.Start)

		for _, o := range objects {
			if _, ok := o.Rel(q.transferKind); ok {
				// This object has an error
				if o.Error != nil {
					q.errorc <- Error(o.Error)
					q.meter.Skip(o.Size)
					q.wait.Done()
					continue
				}

				// This object needs to be transferred
				if transfer, ok := q.transferables[o.Oid]; ok {
					transfer.SetObject(o)
					q.meter.Add(transfer.Name())
					q.transferc <- transfer
				} else {
					q.meter.Skip(transfer.Size())
					q.wait.Done()
				}
			} else {
				q.meter.Skip(o.Size)
				q.wait.Done()
			}
		}
	}
}
Example #22
func main() {
	var once sync.Once
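	// doit runs only on the first call; the remaining Do calls are no-ops.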
	once.Do(doit)
	once.Do(doit)
	once.Do(doit)
	once.Do(doit)

}
Example #23
func BenchmarkSyncOnce(b *testing.B) {
	var once sync.Once
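	// The Once completes on the first call, so the parallel workers mostly
	// measure Do's fast path.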
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			once.Do(nop)
		}
	})
}
Example #24
func throttleChan(in chan *points.Points, ratePerSec int, exit chan bool) chan *points.Points {
	out := make(chan *points.Points, cap(in))

	delimeter := ratePerSec
	chunk := 1

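	// For rates above 1000/s, tick less often and emit several points per
	// tick: pick a ticks-per-second value in [100, 1000) that divides
	// ratePerSec with the smallest remainder.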
	if ratePerSec > 1000 {
		minRemainder := ratePerSec

		for i := 100; i < 1000; i++ {
			if ratePerSec%i < minRemainder {
				delimeter = i
				minRemainder = ratePerSec % delimeter
			}
		}

		chunk = ratePerSec / delimeter
	}

	step := time.Duration(1e9/delimeter) * time.Nanosecond

	var onceClose sync.Once

	throttleWorker := func() {
		var p *points.Points
		var ok bool

		defer onceClose.Do(func() { close(out) })

		// start flight
		throttleTicker := time.NewTicker(step)
		defer throttleTicker.Stop()

	LOOP:
		for {
			select {
			case <-throttleTicker.C:
				for i := 0; i < chunk; i++ {
					select {
					case p, ok = <-in:
						if !ok {
							break LOOP
						}
					case <-exit:
						break LOOP
					}
					out <- p
				}
			case <-exit:
				break LOOP
			}
		}
	}

	go throttleWorker()

	return out
}
Example #25
func taskRunning(t *Task) taskStateFn {
	waiter := t.cmd.Wait
	var sendOnce sync.Once
	trySend := func(wr *Completion) {
		// guarded with once because we're only allowed to send a single "result" for each
		// process termination. for example, if Kill() results in an error because Wait()
		// already completed we only want to return a single result for the process.
		sendOnce.Do(func() {
			t.tryComplete(wr)
		})
	}
	// listen for normal process completion in a goroutine; don't block because we need to listen for shouldQuit
	waitCh := make(chan *Completion, 1)
	go func() {
		wr := &Completion{}
		defer func() {
			waitCh <- wr
			close(waitCh)
		}()

		if err := waiter(); err != nil {
			if exitError, ok := err.(exitError); ok {
				if waitStatus, ok := exitError.Sys().(syscall.WaitStatus); ok {
					wr.name = t.name
					wr.code = waitStatus.ExitStatus()
					return
				}
			}
			wr.err = fmt.Errorf("task wait ended strangely for %q: %v", t.bin, err)
		} else {
			wr.name = t.name
		}
	}()

	// wait for the process to complete, or else for a "quit" signal on the task (at which point we'll attempt to kill manually)
	select {
	case <-t.shouldQuit:
		// check for tie
		select {
		case wr := <-waitCh:
			// we got a signal to quit, but we're already finished; attempt best-effort delivery
			trySend(wr)
		default:
			// Wait() has not exited yet, kill the process
			log.Infof("killing %s : %s", t.name, t.bin)
			pid, err := t.cmd.Kill()
			if err != nil {
				trySend(&Completion{err: fmt.Errorf("failed to kill process: %q pid %d: %v", t.bin, pid, err)})
			}
			// else, Wait() should complete and send a completion event
		}
	case wr := <-waitCh:
		// task has completed before we were told to quit, pass along completion and error information
		trySend(wr)
	}
	return taskShouldRestart
}
Example #26
// setRequestCancel sets the Cancel field of req, if deadline is
// non-zero. The RoundTripper's type is used to determine whether the legacy
// CancelRequest behavior should be used.
func setRequestCancel(req *Request, rt RoundTripper, deadline time.Time) (stopTimer func(), wasCanceled func() bool) {
	if deadline.IsZero() {
		return nop, alwaysFalse
	}

	initialReqCancel := req.Cancel // the user's original Request.Cancel, if any

	cancel := make(chan struct{})
	req.Cancel = cancel

	wasCanceled = func() bool {
		select {
		case <-cancel:
			return true
		default:
			return false
		}
	}

	doCancel := func() {
		// The new way:
		close(cancel)

		// The legacy compatibility way, used only
		// for RoundTripper implementations written
		// before Go 1.5 or Go 1.6.
		type canceler interface {
			CancelRequest(*Request)
		}
		switch v := rt.(type) {
		case *Transport, *http2Transport:
			// Do nothing. The net/http package's transports
			// support the new Request.Cancel channel
		case canceler:
			v.CancelRequest(req)
		}
	}

	stopTimerCh := make(chan struct{})
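	// stopTimer may be called multiple times; the Once makes closing
	// stopTimerCh idempotent.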
	var once sync.Once
	stopTimer = func() { once.Do(func() { close(stopTimerCh) }) }

	timer := time.NewTimer(time.Until(deadline))
	go func() {
		select {
		case <-initialReqCancel:
			doCancel()
			timer.Stop()
		case <-timer.C:
			doCancel()
		case <-stopTimerCh:
			timer.Stop()
		}
	}()

	return stopTimer, wasCanceled
}
Example #27
File: scp.go Project: zqzca/back
func (s *Server) handleChannel(ch ssh.NewChannel) {
	id := rand.Int()
	s.Debug("Handling Channel", "id", id, "chan", ch.ChannelType())

	if ch.ChannelType() != "session" {
		s.Info("Received unknown channel type", "chan", ch.ChannelType())
		ch.Reject(ssh.UnknownChannelType, "unknown channel type")
		return
	}

	channel, requests, err := ch.Accept()
	if err != nil {
		s.Error("Failed to accept channel", "err", err)
		return
	}

	var closer sync.Once
	closeChannel := func() {
		s.Debug("Closed Channel", "id", id)
		channel.Close()
	}

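	// closeChannel runs both after a request is handled (below) and via this
	// defer as a fallback; the Once ensures the channel is closed only once.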
	defer closer.Do(closeChannel)

	for req := range requests {
		spew.Dump(req.Type)
		switch req.Type {
		case "exec":
			// Let it through
		case "env":
			if req.WantReply {
				if err = req.Reply(true, nil); err != nil {
					s.Error("Failed to ignore env command", "err", err)
				}
			}
			continue
		default:
			s.Info("Received unhandled request type", "type", req.Type)
			continue
		}

		r := &scpRequest{db: s.DB}
		processors := []processor{
			r.ParseSCPRequest, r.DownloadFile, r.EndConnectionGracefully,
		}

		for _, proc := range processors {
			if err := proc(channel, req); err != nil {
				fmt.Fprintln(channel, "failed to process request:", err.Error())
				// log.Printf("%+v", err)
				break
			}
		}

		closer.Do(closeChannel)
	}
}
Example #28
func NewView() *View {
	engine := qml.NewEngine()
	model := newTeg()
	renderer := newTegRenderer(nil)
	engine.Context().SetVar("tegRenderer", renderer)
	qml.RegisterTypes("TegCtrl", 1, 0, []qml.TypeSpec{
		{
			Init: func(ctrl *Ctrl, obj qml.Object) {
				ctrl.model = model
				ctrl.events = make(chan interface{}, 100)
				ctrl.actions = make(chan interface{}, 100)
				ctrl.errors = make(chan error, 100)
				ctrl.clip = clipboard.New(engine)

				renderer.ctrl = ctrl

				ctrl.Title = DefaultTitle
			},
		},
	})

	component, err := engine.LoadFile("project/qml/tegview.qml")
	if err != nil {
		panic(err)
	}

	win := component.CreateWindow(nil)
	control := win.Root().Property("ctrl").(*Ctrl)

	view := &View{
		win:      win,
		model:    model,
		renderer: renderer,
		childs:   make(chan workspace.Window, 10),
		closed:   make(chan struct{}),
		stop:     make(chan struct{}),
		control:  control,
		id:       model.id,
	}

	var once sync.Once
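	// Both the window's "closing" signal and the engine's "quit" signal
	// trigger shutdown; the Once ensures the teardown runs only once.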
	quitcode := func() {
		view.control.stopHandling()
		view.model.deselectAll()
		view.model.updateParentGroups()
		close(view.stop)
		close(view.childs)
		close(view.closed)
		win.Destroy()
	}
	quit := func() {
		once.Do(quitcode)
	}
	win.On("closing", quit)
	engine.On("quit", quit)
	return view
}
Example #29
func TestInfRetryNoDeadlock(t *testing.T) {
	N := 100       // Number of events to be sent
	Fails := 1000  // Number of fails per event
	NumWorker := 2 // Number of concurrent workers pushing events into retry queue

	ctx := makeContext(1, -1, 0)

	// atomic success counter incremented whenever one event didn't fail
	// Test finishes if i==N
	i := int32(0)

	var closer sync.Once
	worker := func(wg *sync.WaitGroup) {
		defer wg.Done()

		// close queue once done, so other workers waiting for new messages will be
		// released
		defer closer.Do(func() {
			ctx.Close()
		})

		for int(atomic.LoadInt32(&i)) < N {
			msg, open := ctx.receive()
			if !open {
				break
			}

			fails := msg.datum.Event["fails"].(int)
			if fails < Fails {
				msg.datum.Event["fails"] = fails + 1
				ctx.pushFailed(msg)
				continue
			}

			atomic.AddInt32(&i, 1)
		}
	}

	var wg sync.WaitGroup
	wg.Add(NumWorker)
	for w := 0; w < NumWorker; w++ {
		go worker(&wg)
	}

	// push up to N events to workers. If workers deadlock, pushEvents will block.
	for i := 0; i < N; i++ {
		msg := eventsMessage{
			worker: -1,
			datum:  outputs.Data{Event: common.MapStr{"fails": int(0)}},
		}
		ok := ctx.pushEvents(msg, true)
		assert.True(t, ok)
	}

	// wait for all workers to terminate before test timeout
	wg.Wait()
}
Example #30
// Do calls the HandlerFunc associated with the current Breaker
// with the passed-in arguments. It returns ErrTimeout if the call times out,
// the function's error if it fails, or nil on success.
func (b *Breaker) Do(f HandlerFunc, timeout time.Duration) error {
	timerChan := make(chan bool, 1)
	errChan := make(chan error, 1)

	defer close(timerChan)
	defer close(errChan)

	var once sync.Once
	var done sync.WaitGroup
	done.Add(1)

	// Setup a timer goroutine to
	// ensure the function runs within a timeout
	go func() {
		time.Sleep(timeout)

		once.Do(func() {
			timerChan <- true
			done.Done()
		})
	}()

	// Setup a goroutine that runs the desired function
	// and sends any error on the error channel
	go func() {
		err := f()

		once.Do(func() {
			if err != nil {
				errChan <- err
			}

			done.Done()
		})
	}()

	// Wait for either the timeout
	// or the function goroutine to complete
	done.Wait()

	var ret error

	select {
	case <-timerChan:
		b.Trip()
		ret = ErrTimeout

	case e := <-errChan:
		b.Trip()
		ret = e

	default:
		ret = nil
	}

	return ret
}