Example #1
// GetAndResetMessageCount returns the current message counters and resets them
// to 0. This function is threadsafe.
func GetAndResetMessageCount() (messages, dropped, discarded, filtered, noroute uint32) {
	return atomic.SwapUint32(&messageCount, 0),
		atomic.SwapUint32(&droppedCount, 0),
		atomic.SwapUint32(&discardedCount, 0),
		atomic.SwapUint32(&filteredCount, 0),
		atomic.SwapUint32(&noRouteCount, 0)
}
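The swap-and-reset idiom above can be shown in a self-contained form. The sketch below is illustrative only; the counter name and the reporting interval are assumptions, not part of the project the snippet comes from.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var requestCount uint32 // hypothetical counter, incremented by many goroutines

func main() {
	// Several writers bump the counter concurrently.
	for i := 0; i < 4; i++ {
		go func() {
			for {
				atomic.AddUint32(&requestCount, 1)
				time.Sleep(10 * time.Millisecond)
			}
		}()
	}

	// The reader swaps the counter with 0, so every increment is reported
	// in exactly one interval and the counter starts fresh afterwards.
	for i := 0; i < 3; i++ {
		time.Sleep(time.Second)
		fmt.Println("requests in the last second:", atomic.SwapUint32(&requestCount, 0))
	}
}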
Example #2
File: tunny.go  Project: vonwenm/tunny
func (pool *WorkPool) setRunning(running bool) {
	if running {
		atomic.SwapUint32(&pool.running, 1)
	} else {
		atomic.SwapUint32(&pool.running, 0)
	}
}
Example #3
File: speed.go  Project: zengkid/goproxy
func (sc *SpeedCounter) loop_count() {
	for !sc.s.closed {
		sc.readbps = atomic.SwapUint32(&sc.readcnt, 0) / SHRINK_TIME
		sc.writebps = atomic.SwapUint32(&sc.writecnt, 0) / SHRINK_TIME
		time.Sleep(SHRINK_TIME * time.Second)
	}
}
Example #4
func redeployServiceRunOnceAtATime(serviceNameToRedeploy string, haproxyServiceName string) (alreadyRunning bool, err error) {

	if RedeployServiceIsNotRunningThenSet() {
		defer atomic.SwapUint32(&redeployServiceIsRunning, 0)
		err := redeployService(serviceNameToRedeploy, haproxyServiceName)
		atomic.SwapUint32(&redeployServiceIsRunning, 0)

		return false, err
	} else {
		// is already running
		return true, nil
	}
}
Example #5
func (prod *Websocket) pushMessage(msg core.Message) {
	messageText, _ := prod.ProducerBase.Format(msg)

	if prod.clientIdx&0x7FFFFFFF > 0 {
		// There are new clients available
		currentIdx := prod.clientIdx >> 31
		activeIdx := (currentIdx + 1) & 1

		// Store away the current client list and reset it
		activeConns := &prod.clients[activeIdx]
		oldConns := activeConns.conns
		activeConns.conns = activeConns.conns[:]
		activeConns.doneCount = 0

		// Switch new and current client list
		if currentIdx == 0 {
			currentIdx = atomic.SwapUint32(&prod.clientIdx, 1<<31)
		} else {
			currentIdx = atomic.SwapUint32(&prod.clientIdx, 0)
		}

		// Wait for new list writer to finish
		count := currentIdx & 0x7FFFFFFF
		currentIdx = currentIdx >> 31
		spin := shared.NewSpinner(shared.SpinPriorityHigh)

		for prod.clients[currentIdx].doneCount != count {
			spin.Yield()
		}

		// Add new connections to old connections
		newConns := &prod.clients[currentIdx]
		newConns.conns = append(oldConns, newConns.conns...)
	}

	// Process the active connections
	activeIdx := ((prod.clientIdx >> 31) + 1) & 1
	activeConns := &prod.clients[activeIdx]

	for i := 0; i < len(activeConns.conns); i++ {
		client := activeConns.conns[i]
		if _, err := client.Write(messageText); err != nil {
			activeConns.conns = append(activeConns.conns[:i], activeConns.conns[i+1:]...)
			if closeErr := client.Close(); closeErr == nil {
				Log.Error.Print("Websocket: ", err)
			}
			i--
		}
	}
}
Example #6
func atomics() {
	_ = atomic.LoadUint32(&x)             // ERROR "intrinsic substitution for LoadUint32"
	atomic.StoreUint32(&x, 1)             // ERROR "intrinsic substitution for StoreUint32"
	atomic.AddUint32(&x, 1)               // ERROR "intrinsic substitution for AddUint32"
	atomic.SwapUint32(&x, 1)              // ERROR "intrinsic substitution for SwapUint32"
	atomic.CompareAndSwapUint32(&x, 1, 2) // ERROR "intrinsic substitution for CompareAndSwapUint32"
}
Example #7
// Flush hands the content of the buffer to the given assemble function and
// resets the internal state, i.e. the buffer is empty after a call to Flush.
// Assembly runs in a separate goroutine so the call does not block.
// Only one flush is processed at a time; the internal counters are cleared
// once assemble has returned.
func (batch *MessageBatch) Flush(assemble AssemblyFunc) {
	if batch.IsEmpty() {
		return // ### return, nothing to do ###
	}

	// Only one flush at a time
	batch.flushing.IncWhenDone()

	// Switch the buffers so writers can go on writing
	flushSet := atomic.SwapUint32(&batch.activeSet, (batch.activeSet&messageBatchIndexMask)^messageBatchIndexMask)

	flushIdx := flushSet >> messageBatchIndexShift
	writerCount := flushSet & messageBatchCountMask
	flushQueue := &batch.queue[flushIdx]
	spin := shared.NewSpinner(shared.SpinPriorityHigh)

	// Wait for remaining writers to finish
	for writerCount != atomic.LoadUint32(&flushQueue.doneCount) {
		spin.Yield()
	}

	// Write data and reset buffer asynchronously
	go shared.DontPanic(func() {
		defer batch.flushing.Done()

		messageCount := shared.MinI(int(writerCount), len(flushQueue.messages))
		assemble(flushQueue.messages[:messageCount])
		atomic.StoreUint32(&flushQueue.doneCount, 0)
		batch.Touch()
	})
}
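Examples #5, #7 and the later Examples #9 and #13 all pack a buffer index into the top bit of a uint32 and a writer count into the lower 31 bits, so a single SwapUint32 both switches buffers and captures how many writers still have to finish on the old buffer. A stripped-down sketch of just that bit arithmetic (all names are hypothetical):

package main

import (
	"fmt"
	"sync/atomic"
)

const indexMask = uint32(1) << 31 // top bit selects the active buffer

// state holds the active buffer index in the top bit and the number of
// writers that used that buffer in the lower 31 bits.
var state uint32

func main() {
	// Three writers register against buffer 0 by incrementing the counter.
	for i := 0; i < 3; i++ {
		atomic.AddUint32(&state, 1)
	}

	// Flip the index bit and clear the counter in one atomic swap; the old
	// value tells the flusher which buffer to drain and how many writers
	// it has to wait for before doing so.
	old := atomic.SwapUint32(&state, (atomic.LoadUint32(&state)&indexMask)^indexMask)
	fmt.Println("flush buffer:", old>>31, "writers to wait for:", old&^indexMask)
}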
Example #8
File: conn.go  Project: erkl/wire
func (c *conn) maybeClose(reuse bool) {
	const halfReused = 1
	const halfClosed = 2

	// As far as the caller is concerned, can the connection be kept alive
	// and reused for another round-trip?
	var next uint32

	if reuse {
		next = halfReused
	} else {
		next = halfClosed
	}

	// Use an atomic swap to make sure we only close the connection once.
	prev := atomic.SwapUint32(&c.state, next)

	// Don't do anything unless we're the latter of the two "ends" (reading
	// and writing) to finish with the connection.
	if prev == 0 {
		return
	}

	// Either reuse or close the connection.
	if reuse && prev == halfReused {
		c.raw.SetReadDeadline(time.Time{})
		c.state = 0
		c.t.putIdle(c)
	} else {
		c.Close()
	}
}
Example #9
// Flush writes the content of the buffer to a given resource and resets the
// internal state, i.e. the buffer is empty after a call to Flush.
// Writing is done in a separate goroutine to be non-blocking.
//
// The validate callback will be called after messages have been successfully
// written to the io.Writer.
// If validate returns false the buffer will not be reset (automatic retry).
// If validate is nil a return value of true is assumed (buffer reset).
//
// The onError callback will be called if the io.Writer returned an error.
// If onError returns false the buffer will not be reset (automatic retry).
// If onError is nil a return value of true is assumed (buffer reset).
func (batch *MessageBatch) Flush(resource io.Writer, validate func() bool, onError func(error) bool) {
	if batch.IsEmpty() {
		return // ### return, nothing to do ###
	}

	// Only one flush at a time
	batch.flushing.Lock()

	// Switch the buffers so writers can go on writing
	// If a previous flush failed we need to continue where we stopped

	var flushSet uint32
	if batch.activeSet&0x80000000 != 0 {
		flushSet = atomic.SwapUint32(&batch.activeSet, 0|batch.queue[0].doneCount)
	} else {
		flushSet = atomic.SwapUint32(&batch.activeSet, 0x80000000|batch.queue[1].doneCount)
	}

	flushIdx := flushSet >> 31
	writerCount := flushSet & 0x7FFFFFFF
	flushQueue := &batch.queue[flushIdx]

	// Wait for remaining writers to finish
	for writerCount != flushQueue.doneCount {
		runtime.Gosched()
	}

	// Write data and reset buffer asynchronously
	go func() {
		defer shared.RecoverShutdown()
		defer batch.flushing.Unlock()

		_, err := resource.Write(flushQueue.buffer[:flushQueue.contentLen])

		if err == nil {
			if validate == nil || validate() {
				flushQueue.reset()
			}
		} else {
			if onError == nil || onError(err) {
				flushQueue.reset()
			}
		}

		batch.Touch()
	}()
}
Example #10
File: aws.go  Project: ycaihua/gizmo
// Stop will block until the consumer has stopped consuming
// messages.
func (s *subscriber) Stop() error {
	if s.isStopped() {
		return errors.New("sqs subscriber is already stopped")
	}
	exit := make(chan error)
	s.stop <- exit
	atomic.SwapUint32(&s.stopped, uint32(1))
	return <-exit
}
Example #11
func (c *limitedConn) Close() (err error) {
	if atomic.SwapUint32(&c.closed, 1) == 1 {
		return errors.New("network connection already closed")
	}

	// Subtract 1 by adding the two's complement of -1
	numConns := atomic.AddUint64(&c.listener.numConns, ^uint64(0))
	log.Tracef("Closed a connection and left %v remaining", numConns)
	return c.Conn.Close()
}
Example #12
File: peer.go  Project: pombredanne/pbtc
func (p *Peer) startup() {
	if atomic.SwapUint32(&p.started, 1) == 1 {
		return
	}

	p.wg.Add(3)
	go p.goSend()
	go p.goReceive()
	go p.goProcess()
}
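Examples #11, #12, #14 and #18 all use the return value of SwapUint32 as a run-once guard: only the caller that swaps the flag from 0 to 1 proceeds. A minimal sketch of that idiom, with a made-up worker type:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type worker struct {
	stopped uint32
}

// Stop may be called from any number of goroutines; only the first call
// that observes the old value 0 performs the shutdown work.
func (w *worker) Stop() {
	if atomic.SwapUint32(&w.stopped, 1) == 1 {
		return // someone else already stopped the worker
	}
	fmt.Println("stopping exactly once")
}

func main() {
	var w worker
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			w.Stop()
		}()
	}
	wg.Wait()
}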
Example #13
func (batch *scribeMessageBatch) flush(scribe *scribe.ScribeClient, onError func(error)) {
	if batch.isEmpty() {
		return // ### return, nothing to do ###
	}

	// Only one flush at a time

	batch.flushing.Lock()

	// Switch the buffers so writers can go on writing

	var flushSet uint32
	if batch.activeSet&0x80000000 != 0 {
		flushSet = atomic.SwapUint32(&batch.activeSet, 0)
	} else {
		flushSet = atomic.SwapUint32(&batch.activeSet, 0x80000000)
	}

	flushIdx := flushSet >> 31
	writerCount := flushSet & 0x7FFFFFFF
	flushQueue := &batch.queue[flushIdx]

	// Wait for remaining writers to finish

	for writerCount != flushQueue.doneCount {
		// Spin
	}

	go func() {
		defer batch.flushing.Unlock()

		_, err := scribe.Log(flushQueue.buffer[:writerCount])
		flushQueue.contentLen = 0
		flushQueue.doneCount = 0
		batch.touch()

		if err != nil {
			onError(err)
		}
	}()
}
Example #14
File: peer.go  Project: pombredanne/pbtc
func (p *Peer) pushVersion() {
	if atomic.SwapUint32(&p.sent, 1) == 1 {
		return
	}

	msg := wire.NewMsgVersion(p.me, p.you, p.nonce, 0)
	msg.AddUserAgent(agentName, agentVersion)
	msg.AddrYou.Services = wire.SFNodeNetwork
	msg.Services = wire.SFNodeNetwork
	msg.ProtocolVersion = int32(wire.RejectVersion)
	p.sendQ <- msg
}
Example #15
File: scheme.go  Project: myshkin5/netspel
func (s *Scheme) startReporter() {
	s.done.Add(1)

	go func() {
		defer s.done.Done()

		ticker := time.NewTicker(s.reportCycle)
		for {
			<-ticker.C
			if s.isClosed() {
				break
			}

			report := Report{}
			report.MessageCount = atomic.SwapUint32(&s.messageCount, 0)
			report.ByteCount = atomic.SwapUint64(&s.byteCount, 0)
			report.ErrorCount = atomic.SwapUint32(&s.errorCount, 0)
			s.reporter.Report(report)
		}
	}()
}
Example #16
func (rs *RateStat32) manage() {
	for range time.Tick(rs.interval) {
		bucket := atomic.LoadInt32(&rs.curBucket)
		bucket++
		if bucket >= rs.length {
			bucket = 0
		}
		old := atomic.SwapUint32(&rs.Buckets[bucket], 0)
		atomic.StoreInt32(&rs.curBucket, bucket)
		atomic.AddUint64(&rs.curValue, -uint64(old))
	}
}
Example #17
File: worker.go  Project: vonwenm/tunny
func (wrapper *workerWrapper) Open() {
	if extWorker, ok := wrapper.worker.(TunnyExtendedWorker); ok {
		extWorker.TunnyInitialize()
	}

	wrapper.readyChan = make(chan int)
	wrapper.jobChan = make(chan interface{})
	wrapper.outputChan = make(chan interface{})

	atomic.SwapUint32(&wrapper.poolOpen, uint32(1))

	go wrapper.Loop()
}
Example #18
File: peer.go  Project: pombredanne/pbtc
func (p *Peer) shutdown() {
	if atomic.SwapUint32(&p.done, 1) == 1 {
		return
	}

	close(p.sigRecv)
	close(p.sigProcess)
	close(p.sigSend)

	p.wg.Wait()

	if p.conn != nil {
		p.conn.Close()
	}

	p.mgr.Stopped(p)
}
Example #19
func redeployServiceMaybeDeferred(serviceNameToRedeploy string, haproxyServiceName string) error {
	var err error
	var alreadyRunning bool
	for {
		alreadyRunning, err = redeployServiceRunOnceAtATime(serviceNameToRedeploy, haproxyServiceName)
		if alreadyRunning {
			atomic.SwapUint32(&redeployServiceShouldRunAgain, 1)
			log.Printf("service %s already redeploying: registered to redeploy oncy again later", serviceNameToRedeploy)
			return nil
		}
		if err != nil {
			log.Printf("redeploy service %s resulted in error: %s", serviceNameToRedeploy, err)
		}
		if !redeployServiceShouldRunAgainThenReset() {
			break
		}
		log.Printf("redeploy service %s done. Now run again...", serviceNameToRedeploy)
	}
	return err
}
Example #20
func (g *Governor) Do(name string) {
	defer func() {
		if r := recover(); r != nil {
			log.Println("governor do panic'd!", r)
		}
	}()

	// swap with 0 ensures only the first ready swap will get a 1
	if ready := atomic.SwapUint32(&g.ready, 0); ready == 0 {
		if g.blocking {
			g.ready_cond.Wait()
		}
		return
	}

	// lock
	g.Lock()
	defer func() {
		// let one in
		atomic.StoreUint32(&g.ready, 1)
		// clear the rest
		if g.blocking {
			g.ready_cond.Broadcast()
		}
		// unlock the fun
		g.Unlock()
	}()

	g.f(name)

	if last_run, exists := g.last_run[name]; !exists {
		g.last_run[name] = time.Now()
		time.Sleep(g.interval)
	} else if remainder := g.interval - time.Since(last_run); remainder > 0 {
		g.last_run[name] = time.Now()
		time.Sleep(remainder)
	}

}
Example #21
File: issue16985.go  Project: achanda/go
func main() {
	buffer := []byte("T")
	for i := 0; i < len(buffer); {
		atomic.AddUint32(&count, 1)
		_ = buffer[i]
		i++
		i++
	}

	for i := 0; i < len(buffer); {
		atomic.CompareAndSwapUint32(&count, 0, 1)
		_ = buffer[i]
		i++
		i++
	}

	for i := 0; i < len(buffer); {
		atomic.SwapUint32(&count, 1)
		_ = buffer[i]
		i++
		i++
	}
}
Example #22
// Update updates GC pauses times.
func (p *Pauses) Update() {
	p.m.Lock()
	defer p.m.Unlock()

	runtime.ReadMemStats(&p.mem)
	numGC := atomic.SwapUint32(&p.n, p.mem.NumGC)
	i := numGC % uint32(len(p.mem.PauseNs))
	j := p.mem.NumGC % uint32(len(p.mem.PauseNs))
	if p.mem.NumGC-numGC >= uint32(len(p.mem.PauseNs)) {
		for i = 0; i < uint32(len(p.mem.PauseNs)); i++ {
			p.r.Update(int64(p.mem.PauseNs[i]))
		}
	} else {
		if i > j {
			for ; i < uint32(len(p.mem.PauseNs)); i++ {
				p.r.Update(int64(p.mem.PauseNs[i]))
			}
			i = 0
		}
		for ; i < j; i++ {
			p.r.Update(int64(p.mem.PauseNs[i]))
		}
	}
}
Example #23
func (sc *SpeedCounter) Update() {
	c := atomic.SwapUint32(&sc.cnt, 0)
	sc.Spd = c / UPDATE_INTERVAL
	sc.All += uint64(c)
}
Example #24
File: atomic.go  Project: sgotti/stolon
// Swap sets the given value and returns the previous value.
func (b *Bool) Swap(new bool) bool {
	return truthy(atomic.SwapUint32(&b.v, boolToInt(new)))
}
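The truthy and boolToInt helpers are not part of the snippet above. A self-contained version of such an atomic Bool might look like the following; the helper implementations are assumptions, not necessarily what stolon uses:

package atomicutil

import "sync/atomic"

// Bool is an atomically updated boolean backed by a uint32.
type Bool struct {
	v uint32
}

func boolToInt(b bool) uint32 {
	if b {
		return 1
	}
	return 0
}

func truthy(n uint32) bool { return n != 0 }

// Swap sets the given value and returns the previous value.
func (b *Bool) Swap(new bool) bool {
	return truthy(atomic.SwapUint32(&b.v, boolToInt(new)))
}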
Example #25
File: atomic.go  Project: sgotti/stolon
// Swap atomically swaps the wrapped uint32 and returns the old value.
func (i *Uint32) Swap(n uint32) uint32 {
	return atomic.SwapUint32(&i.v, n)
}
Example #26
File: worker.go  Project: vonwenm/tunny
// Follow this with Join(), otherwise terminate isn't called on the worker
func (wrapper *workerWrapper) Close() {
	close(wrapper.jobChan)

	// Breaks the worker out of a Ready() -> false loop
	atomic.SwapUint32(&wrapper.poolOpen, uint32(0))
}
Example #27
// Test aborting a schema change backfill transaction and check that the
// backfill is completed correctly. The backfill transaction is aborted at a
// time when it thinks it has processed all the rows of the table. Later,
// before the transaction is retried, the table is populated with more rows
// that a backfill chunk, requiring the backfill to forget that it is at the
// end of its processing and needs to continue on to process two more chunks
// of data.
func TestAbortSchemaChangeBackfill(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var backfillNotification, commandsDone chan struct{}
	var dontAbortBackfill uint32
	params, _ := createTestServerParams()
	const maxValue = 100
	backfillCount := int64(0)
	retriedBackfill := int64(0)
	var retriedSpan roachpb.Span

	// Disable asynchronous schema change execution to allow synchronous path
	// to trigger start of backfill notification.
	params.Knobs = base.TestingKnobs{
		SQLExecutor: &csql.ExecutorTestingKnobs{
			// Fix the priority to guarantee that a high priority transaction
			// pushes a lower priority one.
			FixTxnPriority: true,
		},
		SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{
			RunBeforeBackfillChunk: func(sp roachpb.Span) error {
				switch atomic.LoadInt64(&backfillCount) {
				case 0:
					// Keep track of the span provided with the first backfill
					// attempt.
					retriedSpan = sp
				case 1:
					// Ensure that the second backfill attempt provides the
					// same span as the first.
					if sp.Equal(retriedSpan) {
						atomic.AddInt64(&retriedBackfill, 1)
					}
				}
				return nil
			},
			RunAfterBackfillChunk: func() {
				atomic.AddInt64(&backfillCount, 1)
				if atomic.SwapUint32(&dontAbortBackfill, 1) == 1 {
					return
				}
				// Close channel to notify that the backfill has been
				// completed but hasn't yet committed.
				close(backfillNotification)
				// Receive signal that the commands that push the backfill
				// transaction have completed; the backfill will attempt
				// to commit and will abort.
				<-commandsDone
			},
			AsyncExecNotification: asyncSchemaChangerDisabled,
			BackfillChunkSize:     maxValue,
		},
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v INT);
`); err != nil {
		t.Fatal(err)
	}

	// Bulk insert enough rows to exceed the chunk size.
	inserts := make([]string, maxValue+1)
	for i := 0; i < maxValue+1; i++ {
		inserts[i] = fmt.Sprintf(`(%d, %d)`, i, i)
	}
	if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES ` + strings.Join(inserts, ",")); err != nil {
		t.Fatal(err)
	}

	// The two drop cases (column and index) do not need to be tested here
	// because the INSERT down below will not insert an entry for a dropped
	// column or index, however, it's still nice to have them just in case
	// INSERT gets messed up.
	testCases := []struct {
		sql string
		// Each schema change adds/drops a schema element that affects the
		// number of keys representing a table row.
		expectedNumKeysPerRow int
	}{
		{"ALTER TABLE t.test ADD COLUMN x DECIMAL DEFAULT (DECIMAL '1.4')", 2},
		{"ALTER TABLE t.test DROP x", 1},
		{"CREATE UNIQUE INDEX foo ON t.test (v)", 2},
		{"DROP INDEX t.test@foo", 1},
	}

	for i, testCase := range testCases {
		t.Run(testCase.sql, func(t *testing.T) {
			// Delete two rows so that the table size is smaller than a backfill
			// chunk. The two values will be added later to make the table larger
			// than a backfill chunk after the schema change backfill is aborted.
			for i := 0; i < 2; i++ {
				if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = $1`, i); err != nil {
					t.Fatal(err)
				}
			}

			backfillNotification = make(chan struct{})
			commandsDone = make(chan struct{})
			atomic.StoreUint32(&dontAbortBackfill, 0)
			// Run the column schema change in a separate goroutine.
			var wg sync.WaitGroup
			wg.Add(1)
			go func() {
				// Start schema change that eventually runs a backfill.
				if _, err := sqlDB.Exec(testCase.sql); err != nil {
					t.Error(err)
				}

				wg.Done()
			}()

			// Wait until the schema change backfill has finished writing its
			// intents.
			<-backfillNotification

			// Delete a row that will push the backfill transaction.
			if _, err := sqlDB.Exec(`
BEGIN TRANSACTION PRIORITY HIGH;
DELETE FROM t.test WHERE k = 2;
COMMIT;
			`); err != nil {
				t.Fatal(err)
			}

			// Add missing rows so that the table exceeds the size of a
			// backfill chunk.
			for i := 0; i < 3; i++ {
				if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES($1, $2)`, i, i); err != nil {
					t.Fatal(err)
				}
			}

			// Release backfill so that it can try to commit and in the
			// process discover that it was aborted.
			close(commandsDone)

			wg.Wait() // for schema change to complete

			// Backfill retry happened.
			if count, e := atomic.SwapInt64(&retriedBackfill, 0), int64(1); count != e {
				t.Fatalf("expected = %d, found = %d", e, count)
			}
			// 1 failed + 2 retried backfill chunks.
			expectNumBackfills := int64(3)
			if i == len(testCases)-1 {
				// The DROP INDEX case: The above INSERTs do not add any index
				// entries for the inserted rows, so the index remains smaller
				// than a backfill chunk and is dropped in a single retried
				// backfill chunk.
				expectNumBackfills = 2
			}
			if count := atomic.SwapInt64(&backfillCount, 0); count != expectNumBackfills {
				t.Fatalf("expected = %d, found = %d", expectNumBackfills, count)
			}

			// Verify the number of keys left behind in the table to validate
			// schema change operations.
			tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
			tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
			tableEnd := tablePrefix.PrefixEnd()
			if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
				t.Fatal(err)
			} else if e := testCase.expectedNumKeysPerRow * (maxValue + 1); len(kvs) != e {
				t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
			}
		})
	}
}
Example #28
func SwapUint32(addr *uint32, new uint32) uint32 {
	return orig.SwapUint32(addr, new)
}
Example #29
File: atomic.go  Project: sasha-s/atomic
// Swap atomically stores the new value and returns the old one.
func (a *Uint32) Swap(v uint32) uint32 {
	return atomic.SwapUint32(&a.x, v)
}
Example #30
// Start starts a new profiling session.
// The caller should call the Stop method on the value returned
// to cleanly stop profiling.
func Start(options ...func(*profile)) interface {
	Stop()
} {
	if !atomic.CompareAndSwapUint32(&started, 0, 1) {
		log.Fatal("profile: Start() already called")
	}

	var prof profile
	for _, option := range options {
		option(&prof)
	}

	path, err := func() (string, error) {
		if p := prof.path; p != "" {
			return p, os.MkdirAll(p, 0777)
		}
		return ioutil.TempDir("", "profile")
	}()

	if err != nil {
		log.Fatalf("profile: could not create initial output directory: %v", err)
	}

	switch prof.mode {
	case cpuMode:
		fn := filepath.Join(path, "cpu.pprof")
		f, err := os.Create(fn)
		if err != nil {
			log.Fatalf("profile: could not create cpu profile %q: %v", fn, err)
		}
		if !prof.quiet {
			log.Printf("profile: cpu profiling enabled, %s", fn)
		}
		pprof.StartCPUProfile(f)
		prof.closers = append(prof.closers, func() {
			pprof.StopCPUProfile()
			f.Close()
		})

	case memMode:
		fn := filepath.Join(path, "mem.pprof")
		f, err := os.Create(fn)
		if err != nil {
			log.Fatalf("profile: could not create memory profile %q: %v", fn, err)
		}
		old := runtime.MemProfileRate
		runtime.MemProfileRate = prof.memProfileRate
		if !prof.quiet {
			log.Printf("profile: memory profiling enabled (rate %d), %s", runtime.MemProfileRate, fn)
		}
		prof.closers = append(prof.closers, func() {
			pprof.Lookup("heap").WriteTo(f, 0)
			f.Close()
			runtime.MemProfileRate = old
		})

	case blockMode:
		fn := filepath.Join(path, "block.pprof")
		f, err := os.Create(fn)
		if err != nil {
			log.Fatalf("profile: could not create block profile %q: %v", fn, err)
		}
		runtime.SetBlockProfileRate(1)
		if !prof.quiet {
			log.Printf("profile: block profiling enabled, %s", fn)
		}
		prof.closers = append(prof.closers, func() {
			pprof.Lookup("block").WriteTo(f, 0)
			f.Close()
			runtime.SetBlockProfileRate(0)
		})
	}

	if !prof.noShutdownHook {
		go func() {
			c := make(chan os.Signal, 1)
			signal.Notify(c, os.Interrupt)
			<-c

			log.Println("profile: caught interrupt, stopping profiles")
			prof.Stop()

			os.Exit(0)
		}()
	}

	prof.closers = append(prof.closers, func() {
		atomic.SwapUint32(&started, 0)
	})

	return &prof
}
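A typical call site defers the returned Stop method so that the registered closers run, including the SwapUint32 that clears the started flag. The import path below is an assumption for illustration:

package main

import "github.com/pkg/profile" // assumed import path for the package shown above

func main() {
	// Start begins profiling in the default mode; the deferred Stop runs
	// every registered closer, which swaps started back to 0.
	defer profile.Start().Stop()

	// ... application work ...
}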