Example #1
func TestVirtualBoxBuildAbort(t *testing.T) {
	if helpers.SkipIntegrationTests(t, vboxManage, "--version") {
		return
	}

	build := &common.Build{
		GetBuildResponse: common.LongRunningBuild,
		Runner: &common.RunnerConfig{
			RunnerSettings: common.RunnerSettings{
				Executor: "virtualbox",
				VirtualBox: &common.VirtualBoxConfig{
					BaseName:         vboxImage,
					DisableSnapshots: true,
				},
				SSH: vboxSshConfig,
			},
		},
		SystemInterrupt: make(chan os.Signal, 1),
	}

	abortTimer := time.AfterFunc(time.Second, func() {
		t.Log("Interrupt")
		build.SystemInterrupt <- os.Interrupt
	})
	defer abortTimer.Stop()

	timeoutTimer := time.AfterFunc(time.Minute, func() {
		t.Log("Timedout")
		t.FailNow()
	})
	defer timeoutTimer.Stop()

	err := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})
	assert.EqualError(t, err, "aborted: interrupt")
}
Example #2
func TestDockerCommandBuildAbort(t *testing.T) {
	if helpers.SkipIntegrationTests(t, "docker", "info") {
		return
	}

	build := &common.Build{
		GetBuildResponse: common.LongRunningBuild,
		Runner: &common.RunnerConfig{
			RunnerSettings: common.RunnerSettings{
				Executor: "docker",
				Docker: &common.DockerConfig{
					Image: "alpine",
				},
			},
		},
		SystemInterrupt: make(chan os.Signal, 1),
	}

	abortTimer := time.AfterFunc(time.Second, func() {
		t.Log("Interrupt")
		build.SystemInterrupt <- os.Interrupt
	})
	defer abortTimer.Stop()

	timeoutTimer := time.AfterFunc(time.Minute, func() {
		t.Log("Timedout")
		t.FailNow()
	})
	defer timeoutTimer.Stop()

	err := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})
	assert.EqualError(t, err, "aborted: interrupt")
}
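Note on the two abort tests above: the testing package requires t.FailNow (and t.Fatal) to be called from the goroutine running the test, while a time.AfterFunc callback runs on its own goroutine, so these timeout timers are technically unsupported. A minimal sketch of a deadline that fails from the test goroutine instead; runWithDeadline is an illustrative helper, not part of either project:

// Sketch only: this would live in a _test.go file.
package example

import (
	"testing"
	"time"
)

// runWithDeadline runs fn in a goroutine and fails the test if fn has
// not finished within d, keeping t.Fatal on the test goroutine.
func runWithDeadline(t *testing.T, d time.Duration, fn func() error) error {
	t.Helper()
	done := make(chan error, 1)
	go func() { done <- fn() }()
	select {
	case err := <-done:
		return err
	case <-time.After(d):
		t.Fatal("timed out") // safe: this is the test goroutine
		return nil
	}
}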
Example #3
func (s *StreamSuite) TestParseXML(c *C) {
	log.Println("testing ParseXML")
	b, ch := newBlock("testingParseXML", "parsexml")
	go blocks.BlockRoutine(b)
	outChan := make(chan *blocks.Msg)
	ch.AddChan <- &blocks.AddChanMsg{
		Route:   "out",
		Channel: outChan,
	}

	// where to find the xml in input
	ruleMsg := map[string]interface{}{"Path": ".data"}
	toRule := &blocks.Msg{Msg: ruleMsg, Route: "rule"}
	ch.InChan <- toRule

	queryOutChan := make(chan interface{})
	time.AfterFunc(time.Duration(1)*time.Second, func() {
		ch.QueryChan <- &blocks.QueryMsg{RespChan: queryOutChan, Route: "rule"}
	})

	var xmldata = string(`
  <?xml version="1.0" encoding="utf-8"?>
  <OdfBody DocumentType="DT_GM" Date="20130131" Time="140807885" LogicalDate="20130131" Venue="ACV" Language="ENG" FeedFlag="P" DocumentCode="AS0ACV000" Version="3" Serial="1">
    <Competition Code="OG2014">
      <Config SDelay="60" />
    </Competition>
  </OdfBody>
  `)

	time.AfterFunc(time.Duration(2)*time.Second, func() {
		xmlMsg := map[string]interface{}{"data": xmldata}
		postData := &blocks.Msg{Msg: xmlMsg, Route: "in"}
		ch.InChan <- postData
	})

	time.AfterFunc(time.Duration(5)*time.Second, func() {
		ch.QuitChan <- true
	})
	for {
		select {
		case err := <-ch.ErrChan:
			if err != nil {
				c.Errorf("%v", err)
			} else {
				return
			}
		case messageI := <-queryOutChan:
			if !reflect.DeepEqual(messageI, ruleMsg) {
				log.Println("Rule mismatch:", messageI, ruleMsg)
				c.Fail()
			}
		case messageI := <-outChan:
			message := messageI.Msg.(map[string]interface{})
			odfbody := message["OdfBody"].(map[string]interface{})
			competition := odfbody["Competition"].(map[string]interface{})
			c.Assert(odfbody["DocumentType"], Equals, "DT_GM")
			c.Assert(competition["Code"], Equals, "OG2014")
		}
	}
}
Example #4
func (ctx *requestCtx) RoundTrip() {
	defer func() {
		ctx.hasPrint = true
		ctx.SetLog("logType", "defer")
		ctx.PrintLog()
	}()

	time.AfterFunc(10*time.Second, func() {
		if !ctx.hasPrint {
			ctx.SetLog("logType", "timeout10")
			ctx.PrintLog()
		}
	})

	removeHeader(ctx.Req)
	rewrite_code := ctx.ser.reqRewrite(ctx)

	ctx.HasBroadcast = ctx.ser.Broadcast_Req(ctx)

	ctx.SetLog("js_rewrite_code", rewrite_code)

	time.AfterFunc(1*time.Second, ctx.saveRequestData)

	if rewrite_code != 200 && rewrite_code != 304 {
		ctx.badGateway(fmt.Errorf("rewrite failed"))
		return
	}
	ctx.ser.proxy.RoundTrip(ctx)
}
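In RoundTrip above, hasPrint is written by the deferred function and read by the timer goroutine with no synchronization, so the 10-second warning races with completion. A sketch of the same idea that stops the timer when the work finishes instead of checking a flag; withSlowWarning and its parameters are illustrative, not this project's API:

package main

import (
	"fmt"
	"time"
)

// withSlowWarning runs work and invokes logf if it is still running
// after d; stopping the timer replaces the racy hasPrint flag.
func withSlowWarning(d time.Duration, logf func(), work func()) {
	warn := time.AfterFunc(d, logf)
	defer warn.Stop() // no-op if the warning already fired
	work()
}

func main() {
	withSlowWarning(100*time.Millisecond, func() {
		fmt.Println("request is taking longer than 100ms")
	}, func() {
		time.Sleep(200 * time.Millisecond) // simulated slow round trip
	})
}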
Example #5
// handleFailedConn handles a connection failed due to a disconnect or any
// other failure. If permanent, it retries the connection after the configured
// retry duration. Otherwise, if required, it makes a new connection request.
// After maxFailedConnectionAttempts new connections will be retried after the
// configured retry duration.
func (cm *ConnManager) handleFailedConn(c *ConnReq) {
	if atomic.LoadInt32(&cm.stop) != 0 {
		return
	}
	if c.Permanent {
		c.retryCount++
		d := time.Duration(c.retryCount) * cm.cfg.RetryDuration
		if d > maxRetryDuration {
			d = maxRetryDuration
		}
		log.Debugf("Retrying connection to %v in %v", c, d)
		time.AfterFunc(d, func() {
			cm.Connect(c)
		})
	} else if cm.cfg.GetNewAddress != nil {
		cm.failedAttempts++
		if cm.failedAttempts >= maxFailedAttempts {
			log.Debugf("Max failed connection attempts reached: [%d] "+
				"-- retrying connection in: %v", maxFailedAttempts,
				cm.cfg.RetryDuration)
			time.AfterFunc(cm.cfg.RetryDuration, func() {
				cm.NewConnReq()
			})
		} else {
			go cm.NewConnReq()
		}
	}
}
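The permanent-connection branch above computes a linear backoff capped at maxRetryDuration. A small worked sketch of that computation, with illustrative constants rather than the project's actual values:

package main

import (
	"fmt"
	"time"
)

const (
	retryDuration    = 5 * time.Second
	maxRetryDuration = 5 * time.Minute
)

// retryDelay mirrors the capped linear backoff in handleFailedConn.
func retryDelay(retryCount int) time.Duration {
	d := time.Duration(retryCount) * retryDuration
	if d > maxRetryDuration {
		d = maxRetryDuration
	}
	return d
}

func main() {
	for _, n := range []int{1, 10, 100} {
		fmt.Printf("attempt %d: retry in %v\n", n, retryDelay(n)) // 5s, 50s, 5m0s (capped)
	}
}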
Example #6
// Create Client transaction.
func (mng *Manager) Send(r *base.Request, dest string) *ClientTransaction {
	log.Debug("Sending to %v: %v", dest, r.String())

	tx := &ClientTransaction{}
	tx.origin = r
	tx.dest = dest
	tx.transport = mng.transport
	tx.tm = mng

	tx.initFSM()

	tx.tu = make(chan *base.Response, 3)
	tx.tu_err = make(chan error, 1)

	tx.timer_a_time = T1
	tx.timer_a = time.AfterFunc(tx.timer_a_time, func() {
		tx.fsm.Spin(client_input_timer_a)
	})
	tx.timer_b = time.AfterFunc(64*T1, func() {
		tx.fsm.Spin(client_input_timer_b)
	})

	// Timer D is 32 seconds for unreliable transports and 0 seconds
	// otherwise (RFC 3261); this implementation always uses 32 seconds.
	tx.timer_d_time = 32 * time.Second

	err := mng.transport.Send(dest, r)
	if err != nil {
		log.Warn("Failed to send message: %s", err.Error())
		tx.fsm.Spin(client_input_transport_err)
	}

	mng.putTx(tx)

	return tx
}
Example #7
func (r *Consumer) backoff() {

	if atomic.LoadInt32(&r.stopFlag) == 1 {
		atomic.StoreInt64(&r.backoffDuration, 0)
		return
	}

	// pick a random connection to test the waters
	conns := r.conns()
	if len(conns) == 0 {
		// backoff again
		backoffDuration := 1 * time.Second
		atomic.StoreInt64(&r.backoffDuration, backoffDuration.Nanoseconds())
		time.AfterFunc(backoffDuration, r.backoff)
		return
	}
	idx := r.rng.Intn(len(conns))
	choice := conns[idx]

	r.log(LogLevelWarning,
		"(%s) backoff timeout expired, sending RDY 1",
		choice.String())
	// while in backoff only ever let 1 message at a time through
	err := r.updateRDY(choice, 1)
	if err != nil {
		r.log(LogLevelWarning, "(%s) error updating RDY - %s", choice.String(), err)
		backoffDuration := 1 * time.Second
		atomic.StoreInt64(&r.backoffDuration, backoffDuration.Nanoseconds())
		time.AfterFunc(backoffDuration, r.backoff)
		return
	}

	atomic.StoreInt64(&r.backoffDuration, 0)
}
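backoff above re-arms itself by handing the method value r.backoff directly to time.AfterFunc. A minimal self-rescheduling sketch of that shape; the consumer type and its fields are illustrative:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type consumer struct {
	stopFlag int32
	attempts int
}

// backoff retries itself once per second until the consumer is stopped.
func (c *consumer) backoff() {
	if atomic.LoadInt32(&c.stopFlag) == 1 {
		return
	}
	c.attempts++
	fmt.Println("backoff attempt", c.attempts)
	time.AfterFunc(time.Second, c.backoff) // method value as the callback
}

func main() {
	c := &consumer{}
	c.backoff()
	time.Sleep(2500 * time.Millisecond)
	atomic.StoreInt32(&c.stopFlag, 1)
}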
Example #8
func TestDockerCommandBuildCancel(t *testing.T) {
	if helpers.SkipIntegrationTests(t, "docker", "info") {
		return
	}

	build := &common.Build{
		GetBuildResponse: common.LongRunningBuild,
		Runner: &common.RunnerConfig{
			RunnerSettings: common.RunnerSettings{
				Executor: "docker",
				Docker: &common.DockerConfig{
					Image: "alpine",
				},
			},
		},
	}

	trace := &common.Trace{Writer: os.Stdout, Abort: make(chan interface{}, 1)}

	abortTimer := time.AfterFunc(time.Second, func() {
		t.Log("Interrupt")
		trace.Abort <- true
	})
	defer abortTimer.Stop()

	timeoutTimer := time.AfterFunc(time.Minute, func() {
		t.Log("Timedout")
		t.FailNow()
	})
	defer timeoutTimer.Stop()

	err := build.Run(&common.Config{}, trace)
	assert.IsType(t, &common.BuildError{}, err)
	assert.EqualError(t, err, "canceled")
}
Example #9
func TestCanStartAndStopBeat(t *testing.T) {
	ab, b := newBeat("throttle_test.yml", nil)

	stopped := make(chan bool)
	killed := make(chan bool)
	time.AfterFunc(5*time.Second, func() {
		killed <- true
		close(killed)
	})

	time.AfterFunc(500*time.Millisecond, func() {
		ab.Stop()
	})

	go func() {
		ab.Run(b)
		stopped <- true
		close(stopped)
	}()

	select {
	case <-stopped:
	case <-killed:
		t.Error("Failed to stop beat in test. Ctrl+C may be necessary..")
	}
}
Example #10
func (t *TokenRenewer) renewLoop() {
	t.renewLoopWG.Add(1)
	defer t.renewLoopWG.Done()

	// renews token before it expires (sends the first signal to the goroutine below)
	time.AfterFunc(t.renewDuration(), t.sendRenewTokenSignal)

	// renew token on signal until remote kite disconnects.
	for {
		select {
		case <-t.signalRenewToken:
			switch err := t.renewToken(); {
			case err == nil:
				time.AfterFunc(t.renewDuration(), t.sendRenewTokenSignal)
			case err == ErrNoKitesAvailable || strings.Contains(err.Error(), "no kites found"):
				// If kite went down we're not going to renew the token,
				// as we need to dial either way.
				//
				// This case handles a situation, when kite missed
				// disconnect signal (observed to happen with XHR transport).
			default:
				t.localKite.Log.Error("token renewer: %s Cannot renew token for Kite: %s I will retry in %d seconds...",
					err, t.client.ID, retryInterval/time.Second)
				// Need to sleep here a little because a signal is sent
				// when an expired token is detected on an incoming request.
				// This sleep prevents the signal from coming too fast.
				time.Sleep(1 * time.Second)
				time.AfterFunc(retryInterval, t.sendRenewTokenSignal)
			}
		case <-t.disconnect:
			return
		}
	}
}
Example #11
File: 04.go Project: oywc410/MYPG
func ExampleContextRelationShip() {
	ctx1, cancel1 := context.WithCancel(context.Background())
	ctx2, cancel2 := context.WithCancel(ctx1)
	var wg sync.WaitGroup

	wg.Add(2)
	go func() {
		defer wg.Done()
		log.Printf("waiting for ctx1...")
		<-ctx1.Done()
		log.Printf("ctx1.Done() returns")
	}()

	go func() {
		defer wg.Done()
		log.Printf("waiting for ctx2....")
		<-ctx2.Done()
		log.Printf("ctx2.Done() returns")
	}()

	time.AfterFunc(time.Second*5, cancel2)
	time.AfterFunc(time.Second*1, cancel1) // cancelling ctx1 also ends ctx2, so both finish together

	wg.Wait()
}
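Because ctx2 is derived from ctx1, cancel1 firing at one second ends both contexts, and cancel2 at five seconds is a no-op by then. The timer-driven cancellation is also what context.WithTimeout packages up; a minimal sketch:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// WithTimeout arranges the same cancel-after-delay as
	// time.AfterFunc(time.Second, cancel).
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	<-ctx.Done()
	fmt.Println(ctx.Err()) // context deadline exceeded
}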
Example #12
// Command executes the named program with the given arguments. If it does not
// exit within timeout, it is sent SIGINT (if supported by Go). After
// another timeout, it is killed.
func Command(timeout time.Duration, stdin io.Reader, name string, arg ...string) (io.Reader, error) {
	if _, err := exec.LookPath(name); err != nil {
		return nil, ErrPath
	}
	if Debug {
		slog.Infof("executing command: %v %v", name, arg)
	}
	c := exec.Command(name, arg...)
	b := &bytes.Buffer{}
	c.Stdout = b
	c.Stdin = stdin
	if err := c.Start(); err != nil {
		return nil, err
	}
	timedOut := false
	intTimer := time.AfterFunc(timeout, func() {
		slog.Errorf("Process taking too long. Interrupting: %s %s", name, strings.Join(arg, " "))
		c.Process.Signal(os.Interrupt)
		timedOut = true
	})
	killTimer := time.AfterFunc(timeout*2, func() {
		slog.Errorf("Process taking too long. Killing: %s %s", name, strings.Join(arg, " "))
		c.Process.Signal(os.Kill)
		timedOut = true
	})
	err := c.Wait()
	intTimer.Stop()
	killTimer.Stop()
	if timedOut {
		return nil, ErrTimeout
	}
	return b, err
}
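Two caveats in Command above: timedOut is written from the timer goroutines and read after Wait with no synchronization, and os.Interrupt cannot be delivered on Windows. A sketch of the same interrupt-then-kill escalation that lets exec.CommandContext (Go 1.7+) perform the hard kill; run is an illustrative wrapper, not this package's API:

package main

import (
	"context"
	"os"
	"os/exec"
	"time"
)

// run interrupts the process after timeout and relies on CommandContext
// to kill it outright after 2*timeout.
func run(timeout time.Duration, name string, arg ...string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*timeout)
	defer cancel()
	c := exec.CommandContext(ctx, name, arg...)
	if err := c.Start(); err != nil {
		return err
	}
	intTimer := time.AfterFunc(timeout, func() {
		c.Process.Signal(os.Interrupt) // polite interrupt first; no-op on Windows
	})
	defer intTimer.Stop()
	return c.Wait()
}

func main() { _ = run(time.Second, "sleep", "10") }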
Example #13
// FinalSave should be called immediately before exiting, and only before
// exiting, in order to flush tasks to disk. It waits a short timeout for state
// to settle if necessary. If unable to reach a steady-state and save within
// this short timeout, it returns an error.
func FinalSave(saver statemanager.Saver, taskEngine engine.TaskEngine) error {
	engineDisabled := make(chan error)

	disableTimer := time.AfterFunc(engineDisableTimeout, func() {
		engineDisabled <- errors.New("Timed out waiting for TaskEngine to settle")
	})

	go func() {
		log.Debug("Shutting down task engine")
		taskEngine.Disable()
		disableTimer.Stop()
		engineDisabled <- nil
	}()

	disableErr := <-engineDisabled

	stateSaved := make(chan error)
	saveTimer := time.AfterFunc(finalSaveTimeout, func() {
		stateSaved <- errors.New("Timed out trying to save to disk")
	})
	go func() {
		log.Debug("Saving state before shutting down")
		stateSaved <- saver.ForceSave()
		saveTimer.Stop()
	}()

	saveErr := <-stateSaved

	if disableErr != nil || saveErr != nil {
		return utils.NewMultiError(disableErr, saveErr)
	}
	return nil
}
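A subtlety in FinalSave above: engineDisabled and stateSaved are unbuffered, so whichever of the timer callback and the worker goroutine loses the race stays blocked on its send forever. A sketch of the same settle-with-timeout shape with room for both senders; settleWithTimeout and settle are illustrative names:

package main

import (
	"errors"
	"fmt"
	"time"
)

// settleWithTimeout waits up to d for settle to finish; the buffer of 2
// lets both the timer and the worker send without leaking a goroutine.
func settleWithTimeout(d time.Duration, settle func()) error {
	done := make(chan error, 2)
	timer := time.AfterFunc(d, func() { done <- errors.New("timed out waiting to settle") })
	go func() {
		settle()
		timer.Stop()
		done <- nil
	}()
	return <-done
}

func main() {
	fmt.Println(settleWithTimeout(time.Second, func() { time.Sleep(10 * time.Millisecond) }))
}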
Example #14
func (rn *RaftNode) doActions(actions []Action) {

	for _, action := range actions {
		switch action.(type) {

		case Send:
			//resetting the timer
			rn.timer.Stop()
			rn.timer = time.AfterFunc(time.Duration(1000+rand.Intn(400))*time.Millisecond, func() { rn.timeoutCh <- Timeout{} })

			actionname := action.(Send)

			rn.serverOfCluster.Outbox() <- &cluster.Envelope{Pid: actionname.To, Msg: actionname.Event}
		case Alarm:
			//resetting the timer
			rn.timer.Stop()
			rn.timer = time.AfterFunc(time.Duration(1000+rand.Intn(400))*time.Millisecond, func() { rn.timeoutCh <- Timeout{} })

		case Commit:

			//output commit obtained from statemachine into the Commit Channel
			newaction := action.(Commit)

			rn.CommitChannel() <- &newaction

		case LogStore:

			//creating persistent log files

			lg, _ := log.Open(rn.LogDir + string(rn.server.MyID))

			logstore := action.(LogStore)

			lg.Append(logstore.Data)

		case StateStore:

			//creating files for persistent storage of State

			statestore := action.(StateStore)

			//writing statestore into the persistent storage
			err := writeFile("statestore"+strconv.Itoa(rn.server.MyID), statestore)

			if err != nil {

				//reading from the persistent storage
				_, err := readFile("statestore" + strconv.Itoa(rn.server.MyID))

				if err != nil {

				}
			}

		}

	}

}
Example #15
func testJobWithGracelessSuspendResume() {
	job1, _ := grid.CreateJob(&grid.JobDefinition{ID: "job1", Cmd: "", Data: []interface{}{1, 3, 5, 7, 9, 11, 13, 15, 17},
		Description: "", Ctx: &grid.Context{"foo": "bar"},
		Ctrl: &grid.JobControl{MaxConcurrency: 0}})
	time.AfterFunc(300*time.Millisecond, func() { fmt.Println("suspending job", job1); grid.SuspendJob(job1, false) })
	time.AfterFunc(3*time.Second, func() { fmt.Println("resuming job", job1); grid.RetryJob(job1) })
	simulate(7, []grid.JobID{job1})
}
Example #16
func setDeadlineImpl(fd *netFD, t time.Time, mode int) error {
	d := t.Sub(time.Now())
	if mode == 'r' || mode == 'r'+'w' {
		fd.rtimedout.setFalse()
	}
	if mode == 'w' || mode == 'r'+'w' {
		fd.wtimedout.setFalse()
	}
	if t.IsZero() || d < 0 {
		// Stop timer
		if mode == 'r' || mode == 'r'+'w' {
			if fd.rtimer != nil {
				fd.rtimer.Stop()
			}
			fd.rtimer = nil
		}
		if mode == 'w' || mode == 'r'+'w' {
			if fd.wtimer != nil {
				fd.wtimer.Stop()
			}
			fd.wtimer = nil
		}
	} else {
		// Interrupt I/O operation once timer has expired
		if mode == 'r' || mode == 'r'+'w' {
			fd.rtimer = time.AfterFunc(d, func() {
				fd.rtimedout.setTrue()
				if fd.raio != nil {
					fd.raio.Cancel()
				}
			})
		}
		if mode == 'w' || mode == 'r'+'w' {
			fd.wtimer = time.AfterFunc(d, func() {
				fd.wtimedout.setTrue()
				if fd.waio != nil {
					fd.waio.Cancel()
				}
			})
		}
	}
	if !t.IsZero() && d < 0 {
		// Interrupt current I/O operation
		if mode == 'r' || mode == 'r'+'w' {
			fd.rtimedout.setTrue()
			if fd.raio != nil {
				fd.raio.Cancel()
			}
		}
		if mode == 'w' || mode == 'r'+'w' {
			fd.wtimedout.setTrue()
			if fd.waio != nil {
				fd.waio.Cancel()
			}
		}
	}
	return nil
}
Example #17
func (s *DeDupeSuite) TestDeDupe(c *C) {
	loghub.Start()
	log.Println("testing dedupe")
	b, ch := test_utils.NewBlock("testing dedupe", "dedupe")

	emittedValues := make(map[string]bool)
	go blocks.BlockRoutine(b)
	outChan := make(chan *blocks.Msg)
	ch.AddChan <- &blocks.AddChanMsg{
		Route:   "out",
		Channel: outChan,
	}
	ruleMsg := map[string]interface{}{"Path": ".a"}
	rule := &blocks.Msg{Msg: ruleMsg, Route: "rule"}
	ch.InChan <- rule

	var sampleInput = map[string]interface{}{
		"a": "foobar",
	}

	time.AfterFunc(time.Duration(2)*time.Second, func() {
		postData := &blocks.Msg{Msg: sampleInput, Route: "in"}
		ch.InChan <- postData
	})

	time.AfterFunc(time.Duration(1)*time.Second, func() {
		postData := &blocks.Msg{Msg: sampleInput, Route: "in"}
		ch.InChan <- postData
	})

	time.AfterFunc(time.Duration(1)*time.Second, func() {
		postData := &blocks.Msg{Msg: map[string]interface{}{"a": "baz"}, Route: "in"}
		ch.InChan <- postData
	})

	time.AfterFunc(time.Duration(5)*time.Second, func() {
		ch.QuitChan <- true
	})
	for {
		select {
		case err := <-ch.ErrChan:
			if err != nil {
				c.Errorf("%v", err)
			} else {
				return
			}
		case messageI := <-outChan:
			message := messageI.Msg.(map[string]interface{})
			value := message["a"].(string)
			_, ok := emittedValues[value]
			if ok {
				c.Errorf("block emitted a dupe message", value)
			} else {
				emittedValues[value] = true
			}
		}
	}
}
Example #18
func main() {
	time.AfterFunc(2*time.Second, func() {
		fmt.Println("Hello")
	})
	time.AfterFunc(2*time.Second, func() {
		fmt.Println("World")
	})
	time.Sleep(3 * time.Second)
}
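AfterFunc returns a *time.Timer, and Stop reports whether it cancelled the callback before it ran; a minimal sketch for contrast with the example above:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.AfterFunc(2*time.Second, func() {
		fmt.Println("never printed")
	})
	if t.Stop() {
		fmt.Println("callback canceled before it could run")
	}
	time.Sleep(3 * time.Second)
}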
Example #19
func main() {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(error); !ok {
				fmt.Errorf("pkg: %v", r)
			}
		}
	}()

	fmt.Println("Version:", VERSION)

	if len(os.Args) > 1 {
		mode = os.Args[1]
	}

	if mode != "listen" && mode != "replay" {
		fmt.Println("Usage: \n\tgor listen -h\n\tgor replay -h")
		return
	}

	// Remove mode attr
	os.Args = append(os.Args[:1], os.Args[2:]...)

	flag.Parse()

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)

		time.AfterFunc(60*time.Second, func() {
			pprof.StopCPUProfile()
			f.Close()
			log.Println("Stop profiling after 60 seconds")
		})
	}

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			log.Fatal(err)
		}
		time.AfterFunc(60*time.Second, func() {
			pprof.WriteHeapProfile(f)
			f.Close()
		})
	}

	switch mode {
	case "listen":
		listener.Run()
	case "replay":
		replay.Run()
	}
}
Example #20
func (s *GetHTTPSuite) TestGetHTTPXML(c *C) {
	log.Println("testing GetHTTP with XML")
	b, ch := test_utils.NewBlock("testingGetHTTPXML", "gethttp")
	go blocks.BlockRoutine(b)
	outChan := make(chan *blocks.Msg)
	ch.AddChan <- &blocks.AddChanMsg{
		Route:   "out",
		Channel: outChan,
	}

	ruleMsg := map[string]interface{}{"Path": ".url"}
	toRule := &blocks.Msg{Msg: ruleMsg, Route: "rule"}
	ch.InChan <- toRule

	queryOutChan := make(blocks.MsgChan)
	time.AfterFunc(time.Duration(1)*time.Second, func() {
		ch.QueryChan <- &blocks.QueryMsg{MsgChan: queryOutChan, Route: "rule"}
	})

	time.AfterFunc(time.Duration(2)*time.Second, func() {
		xmlMsg := map[string]interface{}{"url": "https://raw.github.com/nytlabs/streamtools/master/examples/odf.xml"}
		postData := &blocks.Msg{Msg: xmlMsg, Route: "in"}
		ch.InChan <- postData
	})

	time.AfterFunc(time.Duration(5)*time.Second, func() {
		ch.QuitChan <- true
	})
	for {
		select {
		case err := <-ch.ErrChan:
			if err != nil {
				c.Errorf("%v", err)
			} else {
				return
			}
		case messageI := <-queryOutChan:
			if !reflect.DeepEqual(messageI, ruleMsg) {
				log.Println("Rule mismatch:", messageI, ruleMsg)
				c.Fail()
			}
		case messageI := <-outChan:
			message := messageI.Msg.(map[string]interface{})
			messageData := message["data"].(string)
			var xmldata = string(`<?xml version="1.0" encoding="utf-8"?>
<OdfBody DocumentType="DT_GM" Date="20130131" Time="140807885" LogicalDate="20130131" Venue="ACV" Language="ENG" FeedFlag="P" DocumentCode="AS0ACV000" Version="3" Serial="1">
	<Competition Code="OG2014">
		<Config SDelay="60" />
	</Competition>
</OdfBody>
`)
			c.Assert(messageData, Equals, xmldata)
		}
	}
}
Example #21
func TestJobEndpoint_Allocations_Blocking(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()
	alloc2.JobID = "job1"
	state := s1.fsm.State()

	// First upsert an unrelated alloc
	time.AfterFunc(100*time.Millisecond, func() {
		state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID))
		err := state.UpsertAllocs(100, []*structs.Allocation{alloc1})
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	// Upsert an alloc for the job we are interested in later
	time.AfterFunc(200*time.Millisecond, func() {
		state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID))
		err := state.UpsertAllocs(200, []*structs.Allocation{alloc2})
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	// Lookup the jobs
	get := &structs.JobSpecificRequest{
		JobID: "job1",
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			MinQueryIndex: 50,
		},
	}
	var resp structs.JobAllocationsResponse
	start := time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 200 {
		t.Fatalf("Bad index: %d %d", resp.Index, 200)
	}
	if len(resp.Allocations) != 1 || resp.Allocations[0].JobID != "job1" {
		t.Fatalf("bad: %#v", resp.Allocations)
	}
}
Example #22
func TestAllocEndpoint_GetAlloc_Blocking(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the allocs
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()

	// First create an unrelated alloc
	time.AfterFunc(100*time.Millisecond, func() {
		state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID))
		err := state.UpsertAllocs(100, []*structs.Allocation{alloc1})
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	// Create the alloc we are watching later
	time.AfterFunc(200*time.Millisecond, func() {
		state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))
		err := state.UpsertAllocs(200, []*structs.Allocation{alloc2})
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	// Lookup the allocs
	get := &structs.AllocSpecificRequest{
		AllocID: alloc2.ID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			MinQueryIndex: 50,
		},
	}
	var resp structs.SingleAllocResponse
	start := time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Alloc.GetAlloc", get, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 200 {
		t.Fatalf("Bad index: %d %d", resp.Index, 200)
	}
	if resp.Alloc == nil || resp.Alloc.ID != alloc2.ID {
		t.Fatalf("bad: %#v", resp.Alloc)
	}
}
Example #23
//Process incoming events from StateMachine.
func processEvents(server cluster.Server, SM *sm.State_Machine, myConf *Config) {
	var incm incomming
	for {
		select {
		case incm := <-SM.CommMedium.ActionCh:
			switch incm.(type) {
			case sm.Send:
				msg := incm.(sm.Send)
				processOutbox(server, SM, msg)

			case sm.Alarm:
				//Reset the timer of timeout.
				myConf.TimeLock.Lock()
				myConf.DoTO.Stop()
				al := incm.(sm.Alarm)
				if al.T == LTO {
					myConf.DoTO = time.AfterFunc(time.Duration(LTIME)*time.Millisecond, func() {
						myConf.DoTO.Stop()
						SM.CommMedium.TimeoutCh <- nil
					})
				}
				if al.T == CTO {
					myConf.ElectionTimeout = (CTIME + rand.Intn(RANGE))
					myConf.DoTO = time.AfterFunc(time.Duration(myConf.ElectionTimeout)*time.Millisecond, func() {
						myConf.DoTO.Stop()
						SM.CommMedium.TimeoutCh <- nil
					})
				}
				if al.T == FTO {
					myConf.ElectionTimeout = (FTIME + rand.Intn(RANGE))
					if SM.Id == 1 {
						myConf.ElectionTimeout = 1000
					}
					myConf.DoTO = time.AfterFunc(time.Duration(myConf.ElectionTimeout)*time.Millisecond, func() {
						myConf.DoTO.Stop()
						SM.CommMedium.TimeoutCh <- nil
					})
				}
				myConf.TimeLock.Unlock()

			case sm.Commit:

			case sm.LoggStore:
				//for adding log into db.
				msg := incm.(sm.LoggStore)
				storeData(msg.Data, myConf, msg.Index)

			case sm.StateStore:
			}
		}
	}
	fmt.Println("Bye", incm)
}
Example #24
// NewLEDPane creates an LEDPane with the data and timers initialised
// the app is passed in so that the pane can access the data and methods in it
func NewLEDPane(a *TwitterApp) *LEDPane {
	p := &LEDPane{
		lastTap:         time.Now(),
		lastDoubleTap:   time.Now(),
		app:             a,
		hasStoredTweets: false,
		numberOfTweets:  1, // to avoid divide by zero error the first time it's run
	}

	p.updateTimer = time.AfterFunc(0, p.UpdateStatus)
	p.tapTimer = time.AfterFunc(0, p.TapAction)
	return p
}
Example #25
// RoundTrip satisfies http.RoundTripper
func (f *FramesRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	if f.err != nil {
		return nil, f.err
	}

	start := time.Now()
	sendT := time.AfterFunc(f.Timeout, func() {
		log.Printf("framesweb: %v request for %v is taking longer than %v",
			req.Method, req.URL, f.Timeout)
	})

	c, err := f.Dialer.Dial()
	if err != nil {
		f.err = err
		return nil, err
	}

	err = req.Write(c)
	if err != nil {
		f.err = err
		c.Close()
		return nil, err
	}

	if !sendT.Stop() {
		log.Printf("framesweb: completed %v request for %v in %v",
			req.Method, req.URL, time.Since(start))
	}

	start = time.Now()
	endT := time.AfterFunc(f.Timeout, func() {
		log.Printf("framesweb: response for %v %v is taking longer than %v",
			req.Method, req.URL, f.Timeout)
	})

	b := bufio.NewReader(c)
	res, err := http.ReadResponse(b, req)
	if err == nil {
		res.Body = &channelBodyCloser{
			res.Body,
			c,
			f,
			req,
			start,
			endT}
	} else {
		f.err = err
		c.Close()
	}
	return res, err
}
Example #26
func TestEvalEndpoint_Allocations_Blocking(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the allocs
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()

	// Upsert an unrelated alloc first
	time.AfterFunc(100*time.Millisecond, func() {
		err := state.UpsertAllocs(100, []*structs.Allocation{alloc1})
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	// Upsert an alloc which will trigger the watch later
	time.AfterFunc(200*time.Millisecond, func() {
		err := state.UpsertAllocs(200, []*structs.Allocation{alloc2})
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	// Lookup the eval
	get := &structs.EvalSpecificRequest{
		EvalID: alloc2.EvalID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			MinQueryIndex: 50,
		},
	}
	var resp structs.EvalAllocationsResponse
	start := time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Eval.Allocations", get, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 200 {
		t.Fatalf("Bad index: %d %d", resp.Index, 200)
	}
	if len(resp.Allocations) != 1 || resp.Allocations[0].ID != alloc2.ID {
		t.Fatalf("bad: %#v", resp.Allocations)
	}
}
Example #27
func (c *conn) checkKeepAlive() {
	var f func()
	f = func() {
		if time.Now().Unix()-c.lastUpdate > int64(1.5*float32(c.app.cfg.KeepAlive)) {
			log.Info("keepalive timeout")
			c.c.Close()
			return
		} else {
			time.AfterFunc(time.Duration(c.app.cfg.KeepAlive)*time.Second, f)
		}
	}

	time.AfterFunc(time.Duration(c.app.cfg.KeepAlive)*time.Second, f)
}
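checkKeepAlive above allocates a fresh timer on every check. A sketch of the same loop re-arming a single timer with Reset; the interval, the 1.5x deadline, and all names are illustrative:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// watchKeepAlive re-arms one timer instead of creating a new one per
// check; it fires onTimeout once lastUpdate is more than 1.5x the
// interval old.
func watchKeepAlive(interval time.Duration, lastUpdate *int64, onTimeout func()) *time.Timer {
	var t *time.Timer
	t = time.AfterFunc(interval, func() {
		if time.Now().Unix()-atomic.LoadInt64(lastUpdate) > int64(1.5*interval.Seconds()) {
			onTimeout()
			return
		}
		t.Reset(interval) // still alive: re-arm for the next check
	})
	return t
}

func main() {
	last := time.Now().Unix()
	watchKeepAlive(time.Second, &last, func() { fmt.Println("keepalive timeout") })
	time.Sleep(4 * time.Second) // with no updates, the timeout fires on the second check
}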
Example #28
func TestPortAssignment(t *testing.T) {
	buf := makeFs(map[string]interface{}{
		"main.go": `package main

		import (
			"fmt"
			"os"
		)

		func main() {
			fmt.Printf("127.0.0.1:%s", os.Getenv("PORT"))
		}`,
	})
	fs := tar.NewReader(bytes.NewReader(buf))

	ctr1, err := lcc.CreateContainer(fs, nil)
	if err != nil {
		t.Fatalf("Could not start container: %s", err)
	}
	go ctr1.Cleanup()

	timer := time.AfterFunc(1*time.Second, func() {
		t.Fatalf("Timeout occured")
	})
	ctr1.Wait()
	timer.Stop()

	fs = tar.NewReader(bytes.NewReader(buf))
	ctr2, err := lcc.CreateContainer(fs, nil)
	if err != nil {
		t.Fatalf("Could not start container: %s", err)
	}
	go ctr2.Cleanup()

	timer = time.AfterFunc(1*time.Second, func() {
		t.Fatalf("Timeout occured")
	})
	ctr2.Wait()
	timer.Stop()

	if ctr1.Logs() != ctr1.Address().String() {
		t.Fatalf("Specified and injected ports differ. Injected %s, got %s", ctr1.Address(), ctr1.Logs())
	}
	if ctr2.Logs() != ctr2.Address().String() {
		t.Fatalf("Specified and injected ports differ. Injected %s, got %s", ctr2.Address(), ctr2.Logs())
	}
	if ctr1.Logs() == ctr2.Logs() {
		t.Fatalf("Same port was assigned")
	}
}
Example #29
func TestConnPoolSoonAvailable(t *testing.T) {
	defer func(d time.Duration) { ConnPoolAvailWaitTime = d }(ConnPoolAvailWaitTime)
	defer func() { ConnPoolCallback = nil }()

	m := map[string]int{}
	timings := []time.Duration{}
	ConnPoolCallback = func(host string, source string, start time.Time, err error) {
		m[source] = m[source] + 1
		timings = append(timings, time.Since(start))
	}

	cp := newConnectionPool("h", &basicAuth{}, 3, 4)
	cp.mkConn = testMkConn

	seenClients := map[*memcached.Client]bool{}

	// build some connections

	var aClient *memcached.Client
	for {
		sc, err := cp.GetWithTimeout(time.Millisecond)
		if err == ErrTimeout {
			break
		}
		if err != nil {
			t.Fatalf("Error getting connection from pool: %v", err)
		}
		aClient = sc
		seenClients[sc] = true
	}

	time.AfterFunc(time.Millisecond, func() { cp.Return(aClient) })

	ConnPoolAvailWaitTime = time.Second

	sc, err := cp.Get()
	if err != nil || sc != aClient {
		t.Errorf("Expected a successful connection, got %v/%v", sc, err)
	}

	// Try again, but let's close it while we're stuck in secondary wait
	time.AfterFunc(time.Millisecond, func() { cp.Close() })

	sc, err = cp.Get()
	if err != errClosedPool {
		t.Errorf("Expected a closed pool, got %v/%v", sc, err)
	}

	t.Logf("Callback report: %v, timings: %v", m, timings)
}
Example #30
func (t *TrustStore) reload() error {
	t.Lock()
	defer t.Unlock()

	matches, err := filepath.Glob(filepath.Join(t.path, "*.json"))
	if err != nil {
		return err
	}
	statements := make([]*trustgraph.Statement, len(matches))
	for i, match := range matches {
		f, err := os.Open(match)
		if err != nil {
			return err
		}
		statements[i], err = trustgraph.LoadStatement(f, nil)
		if err != nil {
			f.Close()
			return err
		}
		f.Close()
	}
	if len(statements) == 0 {
		if t.autofetch {
			logrus.Debugf("No grants, fetching")
			t.fetcher = time.AfterFunc(t.fetchTime, t.fetch)
		}
		return nil
	}

	grants, expiration, err := trustgraph.CollapseStatements(statements, true)
	if err != nil {
		return err
	}

	t.expiration = expiration
	t.graph = trustgraph.NewMemoryGraph(grants)
	logrus.Debugf("Reloaded graph with %d grants expiring at %s", len(grants), expiration)

	if t.autofetch {
		nextFetch := expiration.Sub(time.Now())
		if nextFetch < 0 {
			nextFetch = defaultFetchtime
		} else {
			nextFetch = time.Duration(0.8 * (float64)(nextFetch))
		}
		t.fetcher = time.AfterFunc(nextFetch, t.fetch)
	}

	return nil
}
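reload above schedules the next fetch at 80% of the time remaining before expiration, falling back to the default interval once expired. A worked sketch of that scheduling rule; the fallback constant is illustrative:

package main

import (
	"fmt"
	"time"
)

const defaultFetchTime = 45 * time.Second // illustrative fallback

// nextFetch returns when to refresh: 80% of the remaining lifetime, or
// the fallback if the expiration has already passed.
func nextFetch(expiration time.Time) time.Duration {
	d := time.Until(expiration)
	if d < 0 {
		return defaultFetchTime
	}
	return time.Duration(0.8 * float64(d))
}

func main() {
	fmt.Println(nextFetch(time.Now().Add(10 * time.Minute))) // ~8m0s
}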