// SetEmitDiagnosticNotices toggles whether diagnostic notices
// are emitted. Diagnostic notices contain potentially sensitive
// circumvention network information; only enable this in environments
// where notices are handled securely (for example, don't include these
// notices in log files which users could post to public forums).
func SetEmitDiagnosticNotices(enable bool) {
	if enable {
		atomic.StoreInt32(&noticeLogDiagnostics, 1)
	} else {
		atomic.StoreInt32(&noticeLogDiagnostics, 0)
	}
}
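A minimal sketch of the consumer side of this flag, assuming noticeLogDiagnostics is a package-level int32 and that sync/atomic and log are imported; the emitDiagnosticNotice helper below is hypothetical and not part of the original snippet:

var noticeLogDiagnostics int32

// emitDiagnosticNotice forwards a notice only while diagnostics are enabled.
func emitDiagnosticNotice(msg string) {
	if atomic.LoadInt32(&noticeLogDiagnostics) == 1 {
		log.Println("diagnostic notice:", msg)
	}
}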
Example #2
// ResetElectionTimeoutMs sets the minimum and maximum election timeouts to the
// passed values, and returns the old values.
func ResetElectionTimeoutMs(newMin, newMax int) (int, int) {
	oldMin := atomic.LoadInt32(&minimumElectionTimeoutMs)
	oldMax := atomic.LoadInt32(&maximumElectionTimeoutMs)
	atomic.StoreInt32(&minimumElectionTimeoutMs, int32(newMin))
	atomic.StoreInt32(&maximumElectionTimeoutMs, int32(newMax))
	return int(oldMin), int(oldMax)
}
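A hedged sketch of how such bounds are typically consumed: drawing a randomized election timeout in [min, max), assuming max > min (rand.Int31n panics for non-positive arguments) and that math/rand and time are imported. The electionTimeout helper is an assumption, not part of the original snippet.

func electionTimeout() time.Duration {
	min := atomic.LoadInt32(&minimumElectionTimeoutMs)
	max := atomic.LoadInt32(&maximumElectionTimeoutMs)
	// pick a value uniformly in [min, max) to desynchronize candidates
	ms := min + rand.Int31n(max-min)
	return time.Duration(ms) * time.Millisecond
}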
Example #3
// SetDisabled turns replica scanning off or on as directed. Note that while
// disabled, removals are still processed.
func (rs *replicaScanner) SetDisabled(disabled bool) {
	if disabled {
		atomic.StoreInt32(&rs.disabled, 1)
	} else {
		atomic.StoreInt32(&rs.disabled, 0)
	}
}
// please do not change these two functions.
func (px *Paxos) setunreliable(what bool) {
	if what {
		atomic.StoreInt32(&px.unreliable, 1)
	} else {
		atomic.StoreInt32(&px.unreliable, 0)
	}
}
func (c *checkerT) setHasChecked(b bool) {
	if b {
		atomic.StoreInt32(&c.check, 1)
	} else {
		atomic.StoreInt32(&c.check, 0)
	}
}
Example #6
//
// AwaitTimeout waits for the count to reach <= 0 before the duration expires.
// Returns true on timeout or cancel, false once the count reaches zero.
//
func (c *CountdownLatch) AwaitTimeout(duration time.Duration) bool {

	c.awaitMutex.Lock()
	defer func() {
		atomic.StoreInt32(&c.completed, 1)
		c.awaitMutex.Unlock()
	}()

	if c.Completed() {
		panic(LatchError{LatchCompleted: true})
	}

	to := time.After(duration)

	for {
		select {

		case <-to:
			atomic.StoreInt32(&c.timedOut, 1)
			return true

		case <-c.cancel:
			atomic.StoreInt32(&c.canceled, 1)
			return true

		case <-c.zeroReached:
			atomic.StoreInt32(&c.completed, 1)
			return false
		}
	}
}
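A possible usage sketch for AwaitTimeout, assuming a NewCountdownLatch(n int) constructor and a CountDown() method exist on the latch (neither appears in this snippet):

latch := NewCountdownLatch(3) // hypothetical constructor
for i := 0; i < 3; i++ {
	go func() {
		doWork()          // hypothetical work function
		latch.CountDown() // hypothetical decrement method
	}()
}
if latch.AwaitTimeout(2 * time.Second) {
	// timed out or was canceled before the count reached zero
}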
Example #7
// NOTE: pass in goodTxs because mem.txs can mutate concurrently.
func (mem *Mempool) recheckTxs(goodTxs []types.Tx) {
	if len(goodTxs) == 0 {
		return
	}
	atomic.StoreInt32(&mem.rechecking, 1)
	mem.recheckCursor = mem.txs.Front()
	mem.recheckEnd = mem.txs.Back()

	for _, tx := range goodTxs {
		err := sm.ExecTx(mem.cache, tx, false, nil)
		if err != nil {
			// Tx became invalidated due to newly committed block.
			mem.txs.Remove(mem.recheckCursor)
			mem.recheckCursor.DetachPrev()
		}
		if mem.recheckCursor == mem.recheckEnd {
			mem.recheckCursor = nil
		} else {
			mem.recheckCursor = mem.recheckCursor.Next()
		}
		if mem.recheckCursor == nil {
			// Done!
			atomic.StoreInt32(&mem.rechecking, 0)
		}
	}
}
Example #8
func (b *AtomicBool) Write(value bool) {
	if value {
		atomic.StoreInt32(&(b.val), 1)
	} else {
		atomic.StoreInt32(&(b.val), 0)
	}
}
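For context, a minimal read counterpart is sketched below; it assumes AtomicBool wraps a single int32 field named val, as Write suggests, and that no Read method already exists elsewhere:

type AtomicBool struct {
	val int32
}

// Read reports the stored value; any non-zero int32 is treated as true.
func (b *AtomicBool) Read() bool {
	return atomic.LoadInt32(&b.val) != 0
}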
Example #9
func test9400(t *testing.T) {
	// We synchronize through a shared variable, so we need two procs
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))

	// Start signaller
	atomic.StoreInt32(&issue9400.Baton, 0)
	go func() {
		// Wait for RewindAndSetgid
		for atomic.LoadInt32(&issue9400.Baton) == 0 {
			runtime.Gosched()
		}
		// Broadcast SIGSETXID
		runtime.LockOSThread()
		C.setgid(0)
		// Indicate that signalling is done
		atomic.StoreInt32(&issue9400.Baton, 0)
	}()

	// Grow the stack and put down a test pattern
	const pattern = 0x123456789abcdef
	var big [1024]uint64 // len must match assembly
	for i := range big {
		big[i] = pattern
	}

	// Temporarily rewind the stack and trigger SIGSETXID
	issue9400.RewindAndSetgid()

	// Check test pattern
	for i := range big {
		if big[i] != pattern {
			t.Fatalf("entry %d of test pattern is wrong; %#x != %#x", i, big[i], uint64(pattern))
		}
	}
}
Example #10
// SetDebug turns debugging mode on or off; debugging mode causes Fatalf to panic
func SetDebug(enabled bool) {
	if enabled {
		atomic.StoreInt32(&debug, 1)
	} else {
		atomic.StoreInt32(&debug, 0)
	}
}
Example #11
// please do not change these two functions.
func (sm *ShardMaster) setunreliable(what bool) {
	if what {
		atomic.StoreInt32(&sm.unreliable, 1)
	} else {
		atomic.StoreInt32(&sm.unreliable, 0)
	}
}
Example #12
// RecursiveRewatch implements the RecursiveWatcher interface. It fails:
//
//   * with errNotWatched when the given path is not being watched
//   * with errInvalidEventSet when oldevent does not match the current event set
//   * with errAlreadyWatched when watch-point given by the oldpath was meant to
//     be relocated to newpath, but the newpath is already watched
//   * a non-nil error when setting the watch-point with FSEvents fails
//
// TODO(rjeczalik): Improve handling of watch-point relocation? See two TODOs
// that follow.
func (fse *fsevents) RecursiveRewatch(oldpath, newpath string, oldevent, newevent Event) error {
	switch [2]bool{oldpath == newpath, oldevent == newevent} {
	case [2]bool{true, true}:
		w, ok := fse.watches[oldpath]
		if !ok {
			return errNotWatched
		}
		atomic.StoreInt32(&w.isrec, 1)
		return nil
	case [2]bool{true, false}:
		w, ok := fse.watches[oldpath]
		if !ok {
			return errNotWatched
		}
		if !atomic.CompareAndSwapUint32(&w.events, uint32(oldevent), uint32(newevent)) {
			return errors.New("invalid event state diff")
		}
		atomic.StoreInt32(&w.isrec, 1)
		return nil
	default:
		// TODO(rjeczalik): rewatch newpath only if exists?
		// TODO(rjeczalik): migrate w.prev to new watch?
		if _, ok := fse.watches[newpath]; ok {
			return errAlreadyWatched
		}
		if err := fse.Unwatch(oldpath); err != nil {
			return err
		}
		// TODO(rjeczalik): revert unwatch if watch fails?
		return fse.watch(newpath, newevent, 1)
	}
}
Example #13
func TestPauseMetadata(t *testing.T) {
	opts := NewOptions()
	opts.Logger = newTestLogger(t)
	_, _, nsqd := mustStartNSQD(opts)
	defer os.RemoveAll(opts.DataPath)
	defer nsqd.Exit()

	// avoid concurrency issue of async PersistMetadata() calls
	atomic.StoreInt32(&nsqd.isLoading, 1)
	topicName := "pause_metadata" + strconv.Itoa(int(time.Now().Unix()))
	topic := nsqd.GetTopic(topicName)
	channel := topic.GetChannel("ch")
	atomic.StoreInt32(&nsqd.isLoading, 0)
	nsqd.PersistMetadata()

	b, _ := metadataForChannel(nsqd, 0, 0).Get("paused").Bool()
	equal(t, b, false)

	channel.Pause()
	b, _ = metadataForChannel(nsqd, 0, 0).Get("paused").Bool()
	equal(t, b, false)

	nsqd.PersistMetadata()
	b, _ = metadataForChannel(nsqd, 0, 0).Get("paused").Bool()
	equal(t, b, true)

	channel.UnPause()
	b, _ = metadataForChannel(nsqd, 0, 0).Get("paused").Bool()
	equal(t, b, true)

	nsqd.PersistMetadata()
	b, _ = metadataForChannel(nsqd, 0, 0).Get("paused").Bool()
	equal(t, b, false)
}
Example #14
func (c *monitorServCli) setValid(valid bool) {
	if valid {
		atomic.StoreInt32(&c.valid, validMark)
	} else {
		atomic.StoreInt32(&c.valid, invalidMark)
	}
}
Example #15
func NewOutboundPeer(addr string, ethereum *Ethereum, caps Caps) *Peer {

	p := &Peer{
		outputQueue: make(chan *ethwire.Msg, outputBufferSize),
		quit:        make(chan bool),
		ethereum:    ethereum,
		inbound:     false,
		connected:   0,
		disconnect:  0,
		caps:        caps,
		Version:     fmt.Sprintf("/Ethereum(G) v%s/%s", ethutil.Config.Ver, runtime.GOOS),
	}

	// Set up the connection in another goroutine so we don't block the main thread
	go func() {
		conn, err := net.DialTimeout("tcp", addr, 30*time.Second)

		if err != nil {
			ethutil.Config.Log.Debugln("Connection to peer failed", err)
			p.Stop()
			return
		}
		p.conn = conn

		// Atomically set the connection state
		atomic.StoreInt32(&p.connected, 1)
		atomic.StoreInt32(&p.disconnect, 0)

		p.Start()
	}()

	return p
}
Example #16
func (mailbox *endpointWriterMailbox) processMessages() {
	//we are about to start processing messages, we can safely reset the message flag of the mailbox
	atomic.StoreInt32(&mailbox.hasMoreMessages, mailboxHasNoMessages)
	batchSize := mailbox.batchSize
	done := false

	for !done {
		if sysMsg, ok := mailbox.systemMailbox.Pop(); ok {

			first := sysMsg.(actor.SystemMessage)
			mailbox.systemInvoke(first)
		} else if userMsg, ok := mailbox.userMailbox.PopMany(batchSize); ok {

			mailbox.userInvoke(userMsg)
		} else {
			done = true
			break
		}
		runtime.Gosched()
	}

	//set mailbox to idle
	atomic.StoreInt32(&mailbox.schedulerStatus, mailboxIdle)
	//check if there are still messages to process (sent after the message loop ended)
	if atomic.SwapInt32(&mailbox.hasMoreMessages, mailboxHasNoMessages) == mailboxHasMoreMessages {
		mailbox.schedule()
	}

}
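This handshake only works if the posting side sets hasMoreMessages before trying to wake the mailbox. A rough sketch of that counterpart follows, using hypothetical names (postUserMessage, schedule, mailboxRunning) that are assumptions rather than the project's actual API:

func (mailbox *endpointWriterMailbox) postUserMessage(message interface{}) {
	mailbox.userMailbox.Push(message)
	// record that new work arrived, then try to wake the mailbox
	atomic.StoreInt32(&mailbox.hasMoreMessages, mailboxHasMoreMessages)
	mailbox.schedule()
}

func (mailbox *endpointWriterMailbox) schedule() {
	// only one goroutine may flip the status from idle to running
	if atomic.CompareAndSwapInt32(&mailbox.schedulerStatus, mailboxIdle, mailboxRunning) {
		go mailbox.processMessages()
	}
}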
Example #17
//
// Await waits for the count to reach <= 0; returns true if canceled, false once the count reaches zero.
//
func (c *CountdownLatch) Await() bool {

	c.awaitMutex.Lock()
	defer func() {
		atomic.StoreInt32(&c.completed, 1)
		c.awaitMutex.Unlock()
	}()

	if c.Completed() {
		panic(LatchError{LatchCompleted: true})
	}

	for {
		select {

		case <-c.cancel:
			//			atomic.StoreInt32(&c.canceled, 1)
			return true

		case <-c.zeroReached:
			atomic.StoreInt32(&c.completed, 1)
			return false
		}
	}
}
Example #18
// Returns the length of the list named key
func (this *RedisListHelper) GetLength(key string) int {
	var (
		n   int
		err error
	)

	if atomic.CompareAndSwapInt32(&this.readMark1, 0, 1) {
		n, err = redis.Int(this.readCon1.Do("LLEN", key))
		atomic.StoreInt32(&this.readMark1, 0)

	} else if atomic.CompareAndSwapInt32(&this.readMark2, 0, 1) {
		n, err = redis.Int(this.readCon2.Do("LLEN", key))
		atomic.StoreInt32(&this.readMark2, 0)

	} else if atomic.CompareAndSwapInt32(&this.readMark3, 0, 1) {
		n, err = redis.Int(this.readCon3.Do("LLEN", key))
		atomic.StoreInt32(&this.readMark3, 0)

	} else if atomic.CompareAndSwapInt32(&this.readMark4, 0, 1) {
		n, err = redis.Int(this.readCon4.Do("LLEN", key))
		atomic.StoreInt32(&this.readMark4, 0)
	}

	if err != nil {
		return 0
	}
	return n
}
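Note that if all four connections are busy, the function above falls through with n == 0 and a nil error, so the caller cannot distinguish an empty list from an exhausted pool. Below is a sketch of the same acquire/release idea using slices instead of numbered fields; the readMarks/readCons layout and the withReadConn helper are assumptions, not the project's API, and the errors and redigo redis packages are assumed imported:

func (this *RedisListHelper) withReadConn(f func(redis.Conn) (int, error)) (int, error) {
	for i := range this.readMarks {
		// mark the connection busy; release it when we are done
		if atomic.CompareAndSwapInt32(&this.readMarks[i], 0, 1) {
			defer atomic.StoreInt32(&this.readMarks[i], 0)
			return f(this.readCons[i])
		}
	}
	return 0, errors.New("no idle read connection")
}

GetLength would then collapse to something like:

n, err := this.withReadConn(func(c redis.Conn) (int, error) {
	return redis.Int(c.Do("LLEN", key))
})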
Example #19
// LogInit parses option flags and sets up logging.
func (s *Server) LogInit() {
	// Reset
	LogSetup()

	if s.opts.Logtime {
		log.SetFlags(log.LstdFlags)
	}
	if s.opts.NoLog {
		atomic.StoreInt32(&nolog, 1)
	}
	if s.opts.LogFile != "" {
		flags := os.O_WRONLY | os.O_APPEND | os.O_CREATE
		file, err := os.OpenFile(s.opts.LogFile, flags, 0660)
		if err != nil {
			PrintAndDie(fmt.Sprintf("Error opening logfile: %q", s.opts.LogFile))
		}
		log.SetOutput(file)
	}
	if s.opts.Debug {
		Log(s.opts)
		atomic.StoreInt32(&debug, 1)
		Log("DEBUG is on")
	}
	if s.opts.Trace {
		atomic.StoreInt32(&trace, 1)
		Log("TRACE is on")
	}
}
Example #20
// If this is set to true, then a connection will be reused if it has a temporary error.
// Otherwise, the connection will be dropped on a temporary error.
func (self *Pool) ShouldAcceptTempError(yes bool) {
	if yes {
		atomic.StoreInt32(&self.acceptTempError, 1)
	} else {
		atomic.StoreInt32(&self.acceptTempError, 0)
	}
}
Example #21
// Set sets the value of the boolean to true or false
func (b *Bool) Set(value bool) {
	if value {
		atomic.StoreInt32(&b.value, 1)
	} else {
		atomic.StoreInt32(&b.value, 0)
	}
}
Example #22
// set sets the int32 to the given boolean.
func (a *atomicBool) set(value bool) {
	if value {
		atomic.StoreInt32(&a.v, 1)
		return
	}
	atomic.StoreInt32(&a.v, 0)
}
Example #23
func (c *checkerT) setIsAvailable(b bool) {
	if b {
		atomic.StoreInt32(&c.avail, 1)
	} else {
		atomic.StoreInt32(&c.avail, 0)
	}
}
Example #24
// please do not change these two functions.
func (kv *ShardKV) Setunreliable(what bool) {
	if what {
		atomic.StoreInt32(&kv.unreliable, 1)
	} else {
		atomic.StoreInt32(&kv.unreliable, 0)
	}
}
// please do not change these two functions.
func (pb *PBServer) setunreliable(what bool) {
	if what {
		atomic.StoreInt32(&pb.unreliable, 1)
	} else {
		atomic.StoreInt32(&pb.unreliable, 0)
	}
}
// please do not change these two functions.
func (kv *KVPaxos) setunreliable(what bool) {
	if what {
		atomic.StoreInt32(&kv.unreliable, 1)
	} else {
		atomic.StoreInt32(&kv.unreliable, 0)
	}
}
Example #27
func (sq *SubmitQueue) setEmergencyMergeStop(stopMerges bool) {
	if stopMerges {
		atomic.StoreInt32(&sq.emergencyMergeStopFlag, 1)
	} else {
		atomic.StoreInt32(&sq.emergencyMergeStopFlag, 0)
	}
}
Example #28
// update keeps track of downloader events. Be aware that this is a one-shot update loop:
// it is entered once, and as soon as `Done` or `Failed` has been broadcast the events are
// unregistered and the loop is exited. This prevents a major security vulnerability where
// external parties could DoS you with blocks and halt your mining operation for as long
// as the DoS continues.
func (self *Miner) update() {
	events := self.mux.Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{})
out:
	for ev := range events.Chan() {
		switch ev.Data.(type) {
		case downloader.StartEvent:
			atomic.StoreInt32(&self.canStart, 0)
			if self.Mining() {
				self.Stop()
				atomic.StoreInt32(&self.shouldStart, 1)
				glog.V(logger.Info).Infoln("Mining operation aborted due to sync operation")
			}
		case downloader.DoneEvent, downloader.FailedEvent:
			shouldStart := atomic.LoadInt32(&self.shouldStart) == 1

			atomic.StoreInt32(&self.canStart, 1)
			atomic.StoreInt32(&self.shouldStart, 0)
			if shouldStart {
				self.Start(self.coinbase, self.threads)
			}
			// unsubscribe. we're only interested in this event once
			events.Unsubscribe()
			// stop immediately and ignore all further pending events
			break out
		}
	}
}
Example #29
// SetDisabled turns queue processing off or on as directed.
func (bq *baseQueue) SetDisabled(disabled bool) {
	if disabled {
		atomic.StoreInt32(&bq.disabled, 1)
	} else {
		atomic.StoreInt32(&bq.disabled, 0)
	}
}
Example #30
File: cache.go Project: lzap/xxo
func (creader *CacheReader) setTerminate(value bool) {
	if value {
		atomic.StoreInt32(creader.terminate, 1)
	} else {
		atomic.StoreInt32(creader.terminate, 0)
	}
}