Example #1
func CommandExecutor(app *global.AssemblyWithComponents) (action.Result, error) {
	var e exec.OsExecutor
	var commandWords []string
	appName := ""
	commandWords = strings.Fields(app.Command)
	log.Debug("Command Executor entry: %s\n", app)
	megam_home, ckberr := config.GetString("megam_home")
	if ckberr != nil {
		return nil, ckberr
	}
	pair, perr := global.ParseKeyValuePair(app.Inputs, "domain")
	if perr != nil {
		log.Error("Failed to get the domain value : %s", perr)
	}

	appName = app.Name + "." + pair.Value

	basePath := megam_home + "logs"
	dir := path.Join(basePath, appName)

	fileOutPath := path.Join(dir, appName+"_out")
	fileErrPath := path.Join(dir, appName+"_err")
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		log.Info("Creating directory: %s\n", dir)
		if errm := os.MkdirAll(dir, 0777); errm != nil {
			return nil, errm
		}
	}
	// open output file
	fout, outerr := os.OpenFile(fileOutPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if outerr != nil {
		return nil, outerr
	}
	defer fout.Close()
	// open Error file
	ferr, errerr := os.OpenFile(fileErrPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if errerr != nil {
		return nil, errerr
	}
	defer ferr.Close()

	foutwriter := bufio.NewWriterSize(fout, 1)
	ferrwriter := bufio.NewWriterSize(ferr, 1)
	log.Debug(commandWords)
	log.Debug("Length: %d", len(commandWords))

	defer ferrwriter.Flush()
	defer foutwriter.Flush()

	if len(commandWords) > 0 {
		if err := e.Execute(commandWords[0], commandWords[1:], nil, foutwriter, ferrwriter); err != nil {
			return nil, err
		}
	}

	return &app, nil
}
Example #2
func WriteLogsThread(filename string) {
	log.Infof("Started write logs thread to file=%s", filename)

	reopenTick := time.Tick(time.Second * 10)

	fp, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	var wr *bufio.Writer

	if err != nil {
		log.Errorf("Could not open %s: %s", filename, err.Error())
	} else {
		wr = bufio.NewWriterSize(fp, 65536)
	}

	for {
		select {
		case <-reopenTick:
			if fp != nil {
				fp.Close()
			}

			fp, err = os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
			if err != nil {
				log.Warnf("Could not reopen %s: %s", filename, err.Error())
				wr = nil
				fp = nil
			} else {
				wr = bufio.NewWriterSize(fp, 65536)
			}
		case ev := <-rqhLog:
			l := len(rqhLog)
			evs := make([]*FinishResult, 0, l+1)
			evs = append(evs, ev)

			for i := 0; i < l; i++ {
				evs = append(evs, <-rqhLog)
			}

			if wr != nil {
				encoder := json.NewEncoder(wr)

				for _, e := range evs {
					if err = encoder.Encode(e); err != nil {
						log.Errorf("Could not write to %s: %s", filename, err.Error())
					}
				}

				if err = wr.Flush(); err != nil {
					log.Errorf("Could not flush contents to %s: %s", filename, err.Error())
				}
			} else {
				log.Errorf("Failed to write %d events to rqh log because file %s could not be opened", len(evs), filename)
			}
		}

	}
}
Example #3
// handleConnection wraps the in and out connection endpoints for transfer between them.
func handleConnection(clientConn net.Conn, serverConn net.Conn, txHandlers []func([]byte), rxHandlers []func([]byte)) {
	clientReader := bufio.NewReaderSize(clientConn, 8192)
	clientWriter := bufio.NewWriterSize(clientConn, 8192)

	serverReader := bufio.NewReaderSize(serverConn, 8192)
	serverWriter := bufio.NewWriterSize(serverConn, 8192)

	go pipeReaderToWriter(clientReader, serverWriter, txHandlers)
	go pipeReaderToWriter(serverReader, clientWriter, rxHandlers)
}
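The helper pipeReaderToWriter referenced above is not part of this listing. A minimal sketch of what it might look like, assuming the handler callbacks only observe the bytes passing through (the name matches the call above, but the body and error handling are illustrative, not the original implementation):

func pipeReaderToWriter(src *bufio.Reader, dst *bufio.Writer, handlers []func([]byte)) {
	// Copy in fixed-size chunks, hand each chunk to the observers, and flush the
	// buffered writer so the peer sees the data promptly.
	buf := make([]byte, 8192)
	for {
		n, err := src.Read(buf)
		if n > 0 {
			for _, h := range handlers {
				h(buf[:n])
			}
			if _, werr := dst.Write(buf[:n]); werr != nil {
				return
			}
			if werr := dst.Flush(); werr != nil {
				return
			}
		}
		if err != nil {
			return
		}
	}
}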
Example #4
func (cmd *cmdSync) SendPSyncCmd(master, passwd string) (pipe.Reader, int64) {
	c := openNetConn(master, passwd)
	br := bufio.NewReaderSize(c, ReaderBufferSize)
	bw := bufio.NewWriterSize(c, WriterBufferSize)

	runid, offset, wait := sendPSyncFullsync(br, bw)
	log.Infof("psync runid = %s offset = %d, fullsync", runid, offset)

	var nsize int64
	for nsize == 0 {
		select {
		case nsize = <-wait:
			if nsize == 0 {
				log.Info("+")
			}
		case <-time.After(time.Second):
			log.Info("-")
		}
	}

	piper, pipew := pipe.NewSize(ReaderBufferSize)

	go func() {
		defer pipew.Close()
		p := make([]byte, 8192)
		for rdbsize := int(nsize); rdbsize != 0; {
			rdbsize -= iocopy(br, pipew, p, rdbsize)
		}
		for {
			n, err := cmd.PSyncPipeCopy(c, br, bw, offset, pipew)
			if err != nil {
				log.PanicErrorf(err, "psync runid = %s, offset = %d, pipe is broken", runid, offset)
			}
			offset += n
			for {
				time.Sleep(time.Second)
				c = openNetConnSoft(master, passwd)
				if c != nil {
					log.Infof("psync reopen connection, offset = %d", offset)
					break
				} else {
					log.Infof("psync reopen connection, failed")
				}
			}
			authPassword(c, passwd)
			br = bufio.NewReaderSize(c, ReaderBufferSize)
			bw = bufio.NewWriterSize(c, WriterBufferSize)
			sendPSyncContinue(br, bw, runid, offset)
		}
	}()
	return piper, nsize
}
Example #5
func Benchmark_PubFourQueueSub(b *testing.B) {
	b.StopTimer()
	s := runBenchServer()
	c := createClientConn(b, "localhost", PERF_PORT)
	doDefaultConnect(b, c)
	sendProto(b, c, "SUB foo group1 1\r\n")
	sendProto(b, c, "SUB foo group1 2\r\n")
	sendProto(b, c, "SUB foo group1 3\r\n")
	sendProto(b, c, "SUB foo group1 4\r\n")
	bw := bufio.NewWriterSize(c, defaultSendBufSize)
	sendOp := []byte("PUB foo 2\r\nok\r\n")
	ch := make(chan bool)
	expected := len("MSG foo 1 2\r\nok\r\n") * b.N
	go drainConnection(b, c, ch, expected)
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		_, err := bw.Write(sendOp)
		if err != nil {
			b.Fatalf("Received error on PUB write: %v\n", err)
		}
	}
	err := bw.Flush()
	if err != nil {
		b.Fatalf("Received error on FLUSH write: %v\n", err)
	}

	// Wait for connection to be drained
	<-ch

	b.StopTimer()
	c.Close()
	s.Shutdown()
}
Example #6
func (c *ClientV2) SetOutputBufferSize(desiredSize int) error {
	var size int

	switch {
	case desiredSize == -1:
		// effectively no buffer (every write will go directly to the wrapped net.Conn)
		size = 1
	case desiredSize == 0:
		// do nothing (use default)
	case desiredSize >= 64 && desiredSize <= int(c.context.nsqd.options.MaxOutputBufferSize):
		size = desiredSize
	default:
		return fmt.Errorf("output buffer size (%d) is invalid", desiredSize)
	}

	if size > 0 {
		c.Lock()
		defer c.Unlock()
		c.OutputBufferSize = size
		err := c.Writer.Flush()
		if err != nil {
			return err
		}
		c.Writer = bufio.NewWriterSize(c.Conn, size)
	}

	return nil
}
Example #7
// NewBufferedWriter allocates and returns a BufferedWriter with an internal
// buffer of MaxBlockSize bytes.  If an error occurs writing a block to w, all
// future writes will fail with the same error.  After all data has been
// written, the client should call the Flush method to guarantee all data has
// been forwarded to the underlying io.Writer.
func NewBufferedWriter(w io.Writer) *BufferedWriter {
	_w := NewWriter(w).(*writer)
	return &BufferedWriter{
		w:  _w,
		bw: bufio.NewWriterSize(_w, MaxBlockSize),
	}
}
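A short usage sketch for the BufferedWriter above, assuming it exposes the Write and Flush methods its comment describes (the wrapper function itself is purely illustrative):

func writeCompressed(dst io.Writer, payload []byte) error {
	// Write the payload through the buffered, block-compressing writer, then
	// flush so every block is forwarded to the underlying io.Writer.
	bw := NewBufferedWriter(dst)
	if _, err := bw.Write(payload); err != nil {
		return err
	}
	return bw.Flush()
}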
Example #8
// This will process a disconnect when reconnect is allowed.
// The lock should not be held on entering this function.
func (nc *Conn) processReconnect() {

	nc.mu.Lock()
	defer nc.mu.Unlock()

	if !nc.IsClosed() {
		// If we are already in the proper state, just return.
		if nc.status == RECONNECTING {
			return
		}
		nc.status = RECONNECTING
		if nc.conn != nil {
			nc.bw.Flush()
			nc.conn.Close()
		}
		nc.conn = nil
		nc.kickFlusher()

		// FIXME(dlc) - We have an issue here if we have
		// outstanding flush points (pongs) and they were not
		// sent out, but are still in the pipe.

		// Create a pending buffer to underpin the bufio Writer while
		// we are reconnecting.
		nc.pending = &bytes.Buffer{}
		nc.bw = bufio.NewWriterSize(nc.pending, defaultPendingSize)
		nc.err = nil
		go nc.doReconnect()
	}
	// Perform appropriate callback if needed for a disconnect.
	dcb := nc.Opts.DisconnectedCB
	if dcb != nil {
		go dcb(nc)
	}
}
Example #9
func NewClientV2(conn net.Conn) *ClientV2 {
	var identifier string
	if conn != nil {
		identifier, _, _ = net.SplitHostPort(conn.RemoteAddr().String())
	}

	return &ClientV2{
		Conn: conn,
		// ReadyStateChan has a buffer of 1 to guarantee that in the event
		// there is a race the state update is not lost
		ReadyStateChan:  make(chan int, 1),
		ExitChan:        make(chan int),
		ConnectTime:     time.Now(),
		ShortIdentifier: identifier,
		LongIdentifier:  identifier,
		Reader:          bufio.NewReaderSize(conn, 16*1024),
		Writer:          bufio.NewWriterSize(conn, 16*1024),
		State:           nsq.StateInit,
		SubEventChan:    make(chan *Channel, 1),

		// heartbeats are client configurable but default to 30s
		HeartbeatInterval:   nsqd.options.clientTimeout / 2,
		HeartbeatUpdateChan: make(chan time.Duration, 1),
	}
}
Example #10
// newFileWriter returns a prepared fileWriter for the driver and path. This
// could be considered similar to an "open" call on a regular filesystem.
func newFileWriter(driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) {
	fw := fileWriter{
		driver: driver,
		path:   path,
	}

	if fi, err := driver.Stat(path); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			// ignore, offset is zero
		default:
			return nil, err
		}
	} else {
		if fi.IsDir() {
			return nil, fmt.Errorf("cannot write to a directory")
		}

		fw.size = fi.Size()
	}

	buffered := bufferedFileWriter{
		fileWriter: fw,
	}
	buffered.bw = bufio.NewWriterSize(&buffered.fileWriter, fileWriterBufferSize)

	return &buffered, nil
}
Example #11
func (l *BinLog) openNewLogFile() error {
	var err error
	lastName := l.getLogFile()

	logPath := path.Join(l.path, lastName)
	if l.logFile, err = os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0666); err != nil {
		log.Error("open new logfile error %s", err.Error())
		return err
	}

	if l.cfg.MaxFileNum > 0 && len(l.logNames) == l.cfg.MaxFileNum {
		l.purge(1)
	}

	l.logNames = append(l.logNames, lastName)

	if l.logWb == nil {
		l.logWb = bufio.NewWriterSize(l.logFile, 1024)
	} else {
		l.logWb.Reset(l.logFile)
	}

	if err = l.flushIndex(); err != nil {
		return err
	}

	return nil
}
Example #12
// rotateFile closes the syncBuffer's file and starts a new one.
func (sb *syncBuffer) rotateFile(now time.Time) error {
	if sb.file != nil {
		sb.Flush()
		sb.file.Close()
	}
	var err error
	sb.file, _, err = create(severityName[sb.sev], now)
	sb.nbytes = 0
	if err != nil {
		return err
	}

	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
	_, month, day := now.Date()
	sb.createdDate = fmt.Sprintf("%02d%02d", month, day)

	// Write header.
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
	fmt.Fprintf(&buf, "Running on machine: %s\n", host)
	fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
	fmt.Fprintf(&buf, "Log line format: [DIEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
	n, err := sb.file.Write(buf.Bytes())
	sb.nbytes += uint64(n)
	return err
}
Example #13
// NewSuperBlock new a super block struct.
func NewSuperBlock(file string) (b *SuperBlock, err error) {
	b = &SuperBlock{}
	b.file = file
	b.buf = make([]byte, NeedleMaxSize)
	if b.w, err = os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0664); err != nil {
		log.Errorf("os.OpenFile(\"%s\", os.O_WRONLY|os.O_CREATE, 0664) error(%v)", file, err)
		return
	}
	b.bw = bufio.NewWriterSize(b.w, NeedleMaxSize)
	if b.r, err = os.OpenFile(file, os.O_RDONLY, 0664); err != nil {
		log.Errorf("os.OpenFile(\"%s\", os.O_RDONLY, 0664) error(%v)", file, err)
		goto failed
	}
	if err = b.init(); err != nil {
		log.Errorf("block: %s init error(%v)", file, err)
		goto failed
	}
	return
failed:
	if b.w != nil {
		b.w.Close()
	}
	if b.r != nil {
		b.r.Close()
	}
	return
}
Example #14
func newAsyncConnHdl(spec *ConnectionSpec) (async *asyncConnHdl, err Error) {
	//	here := "newAsynConnHDL";
	connHdl, err := newConnHdl(spec)
	if err == nil && connHdl != nil {
		async = new(asyncConnHdl)
		if async != nil {
			async.super = connHdl
			//			var e error
			async.writer = bufio.NewWriterSize(connHdl.conn, spec.wBufSize)

			async.pendingReqs = make(chan asyncReqPtr, spec.reqChanCap)
			async.pendingResps = make(chan asyncReqPtr, spec.rspChanCap)
			async.faults = make(chan asyncReqPtr, spec.reqChanCap) // not sure about sizing here ...

			async.reqProcCtl = make(workerCtl)
			async.rspProcCtl = make(workerCtl)
			async.heartbeatCtl = make(workerCtl)
			async.managerCtl = make(workerCtl)

			async.feedback = make(chan workerStatus)
			async.shutdown = make(chan bool, 1)

			return
		}
	}
	// fall through here on errors only
	if debug() {
		log.Println("Error creating asyncConnHdl: ", err)
		//		err =  os.NewError("Error creating asyncConnHdl");
	}
	return nil, err
}
Example #15
// Run starts the pipeline.
func (p *Pipeline) Run() (int64, error) {
	last := p.input
	for _, f := range p.filters {
		log.Printf("Link %v -> %v", last, f)
		if err := f.Link(last); err != nil {
			return 0, err
		}
		last = f
	}
	buf := bufio.NewWriterSize(p.output, *outputBuffer)
	n, err := io.Copy(buf, last)
	if err != nil {
		return n, fmt.Errorf("Couldn't pipe data: %s", err)
	}
	log.Print("copied")
	if err := buf.Flush(); err != nil {
		return n, fmt.Errorf("Couldn't flush data: %s", err)
	}
	log.Print("flushed")
	if err := p.output.Close(); err != nil {
		return n, fmt.Errorf("Couldn't close pipeline: %s", err)
	}
	log.Print("closed")
	return n, nil
}
Example #16
func main() {
	flag.Parse()
	servenv.Init("vt_binlog_player")

	if *startPosFile == "" {
		relog.Fatal("start-pos-file was not supplied.")
	}

	if *dbConfigFile == "" {
		relog.Fatal("Cannot start without db-config-file")
	}

	blp, err := initBinlogPlayer(*startPosFile, *dbConfigFile, *lookupConfigFile, *dbCredFile, *useCheckpoint, *debug, *port)
	if err != nil {
		relog.Fatal("Error in initializing binlog player - '%v'", err)
	}
	blp.txnBatch = *txnBatch
	blp.maxTxnInterval = time.Duration(*maxTxnInterval) * time.Second
	blp.execDdl = *execDdl

	if *tables != "" {
		tables := strings.Split(*tables, ",")
		blp.tables = make([]string, len(tables))
		for i, table := range tables {
			blp.tables[i] = strings.TrimSpace(table)
		}
		relog.Info("len tables %v tables %v", len(blp.tables), blp.tables)
	}

	relog.Info("BinlogPlayer client for keyrange '%v:%v' starting @ '%v'",
		blp.startPosition.KeyrangeStart,
		blp.startPosition.KeyrangeEnd,
		blp.startPosition.Position)

	if *port != 0 {
		umgmt.AddStartupCallback(func() {
			umgmt.StartHttpServer(fmt.Sprintf(":%v", *port))
		})
	}
	umgmt.AddStartupCallback(func() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGTERM)
		go func() {
			for sig := range c {
				umgmt.SigTermHandler(sig)
			}
		}()
	})
	umgmt.AddCloseCallback(func() {
		close(interrupted)
	})

	//Make a request to the server and start processing the events.
	stdout = bufio.NewWriterSize(os.Stdout, 16*1024)
	err = blp.applyBinlogEvents()
	if err != nil {
		relog.Error("Error in applying binlog events, err %v", err)
	}
	relog.Info("vt_binlog_player done")
}
Example #17
// newBufferedWriter creates a new buffered writer struct.
// bufferSize -- size of memory buffer in bytes
// flushPeriod -- period in which data flushes from memory buffer in milliseconds. 0 - turn off this functionality
func newBufferedWriter(innerWriter io.Writer, bufferSize int, flushPeriod time.Duration) (*bufferedWriter, error) {

	if innerWriter == nil {
		return nil, errors.New("Argument is nil: innerWriter")
	}
	if flushPeriod < 0 {
		return nil, fmt.Errorf("flushPeriod cannot be less than 0. Got: %d", flushPeriod)
	}

	if bufferSize <= 0 {
		return nil, fmt.Errorf("bufferSize cannot be less than or equal to 0. Got: %d", bufferSize)
	}

	buffer := bufio.NewWriterSize(innerWriter, bufferSize)

	/*if err != nil {
		return nil, err
	}*/

	newWriter := new(bufferedWriter)

	newWriter.innerWriter = innerWriter
	newWriter.buffer = buffer
	newWriter.bufferSize = bufferSize
	newWriter.flushPeriod = flushPeriod * 1e6
	newWriter.bufferMutex = new(sync.Mutex)

	if flushPeriod != 0 {
		go newWriter.flushPeriodically()
	}

	return newWriter, nil
}
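The flushPeriodically goroutine started by the constructor is not included in this listing. A minimal sketch of what it might do, assuming bufferedWriter keeps the buffer, bufferMutex, and flushPeriod fields initialized above (an illustration, not the original code):

func (w *bufferedWriter) flushPeriodically() {
	// Flush the in-memory buffer on every tick so buffered output does not sit
	// in memory indefinitely between explicit writes.
	ticker := time.NewTicker(w.flushPeriod)
	defer ticker.Stop()
	for range ticker.C {
		w.bufferMutex.Lock()
		w.buffer.Flush()
		w.bufferMutex.Unlock()
	}
}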
Example #18
// compress returns an io.ReadCloser which will supply a compressed version of
// the provided Reader. The caller must close the ReadCloser after reading the
// compressed data.
//
// Note that this function returns a reader instead of taking a writer as an
// argument so that it can be used with httpBlobWriter's ReadFrom method.
// Using httpBlobWriter's Write method would send a PATCH request for every
// Write call.
//
// The second return value is a channel that gets closed when the goroutine
// is finished. This allows the caller to make sure the goroutine finishes
// before it releases any resources connected with the reader that was
// passed in.
func compress(in io.Reader) (io.ReadCloser, chan struct{}) {
	compressionDone := make(chan struct{})

	pipeReader, pipeWriter := io.Pipe()
	// Use a bufio.Writer to avoid excessive chunking in HTTP request.
	bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize)
	compressor := gzip.NewWriter(bufWriter)

	go func() {
		_, err := io.Copy(compressor, in)
		if err == nil {
			err = compressor.Close()
		}
		if err == nil {
			err = bufWriter.Flush()
		}
		if err != nil {
			pipeWriter.CloseWithError(err)
		} else {
			pipeWriter.Close()
		}
		close(compressionDone)
	}()

	return pipeReader, compressionDone
}
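A usage sketch for compress, following the contract spelled out in its comment: read all of the compressed data, close the reader, and wait on the done channel before releasing the input (the surrounding function is illustrative):

func uploadCompressed(dst io.Writer, src io.Reader) error {
	// Stream the gzip-compressed bytes to dst, then close the pipe reader and
	// wait for the compression goroutine to finish before returning.
	rc, done := compress(src)
	_, err := io.Copy(dst, rc)
	rc.Close()
	<-done
	return err
}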
Example #19
func (sysver SystemVerificationCheck) Check() (warnings, errors []error) {
	// Create a buffered writer with a fairly large buffer (1M), assuming the output of the
	// system verification test won't exceed that limit.
	// Run the system verification check, but write to our buffered writer instead of stdout.
	bufw := bufio.NewWriterSize(os.Stdout, 1*1024*1024)
	reporter := &system.StreamReporter{WriteStream: bufw}

	var errs []error
	// All the validators we'd like to run:
	var validators = []system.Validator{
		&system.OSValidator{Reporter: reporter},
		&system.KernelValidator{Reporter: reporter},
		&system.CgroupsValidator{Reporter: reporter},
		&system.DockerValidator{Reporter: reporter},
	}

	// Run all validators
	for _, v := range validators {
		errs = append(errs, v.Validate(system.DefaultSysSpec))
	}

	err := utilerrors.NewAggregate(errs)
	if err != nil {
		// Only print the output from the system verification check if the check failed
		fmt.Println("[preflight] The system verification failed. Printing the output from the verification:")
		bufw.Flush()
		return nil, []error{err}
	}
	return nil, nil
}
Example #20
func newTimeoutWriter(conn net.Conn, chunkSize int, timeout time.Duration) *timeoutWriter {
	return &timeoutWriter{
		Timeout: timeout,
		writer:  bufio.NewWriterSize(conn, chunkSize),
		conn:    conn,
	}
}
Example #21
func newAsyncConnHdl(spec *ConnectionSpec) (async *asyncConnHdl, err error) {
	//	here := "newAsynConnHDL";
	super, err := newConnHdl(spec)
	if err == nil && super != nil {
		async = new(asyncConnHdl)
		if async != nil {
			async.super = super
			// bufio.NewWriterSize no longer returns an error in modern Go.
			async.writer = bufio.NewWriterSize(super.conn, spec.wBufSize)

			async.pendingReqs = make(chan asyncReqPtr, spec.reqChanCap)
			async.pendingResps = make(chan asyncReqPtr, spec.rspChanCap)
			async.faults = make(chan asyncReqPtr, spec.reqChanCap) // not sure about sizing here ...

			async.reqProcCtl = make(workerCtl)
			async.rspProcCtl = make(workerCtl)
			async.heartbeatCtl = make(workerCtl)
			async.managerCtl = make(workerCtl)

			async.feedback = make(chan workerStatus)
			async.shutdown = make(chan bool, 1)

			return
		}
	}
	// fall through here on errors only
	if err == nil {
		err = errors.New("Error creating asyncConnHdl")
	}
	return nil, err
}
Example #22
// Lock should be held
func (c *client) initClient() {
	s := c.srv
	c.cid = atomic.AddUint64(&s.gcid, 1)
	c.bw = bufio.NewWriterSize(c.nc, startBufSize)
	c.subs = make(map[string]*subscription)
	c.debug = (atomic.LoadInt32(&debug) != 0)
	c.trace = (atomic.LoadInt32(&trace) != 0)

	// This is a scratch buffer used for processMsg()
	// The msg header starts with "MSG ",
	// in bytes that is [77 83 71 32].
	c.msgb = [msgScratchSize]byte{77, 83, 71, 32}

	// This is to track pending clients that have data to be flushed
	// after we process inbound msgs from our own connection.
	c.pcd = make(map[*client]struct{})

	// snapshot the string version of the connection
	conn := "-"
	if ip, ok := c.nc.(*net.TCPConn); ok {
		addr := ip.RemoteAddr().(*net.TCPAddr)
		conn = fmt.Sprintf("%s:%d", addr.IP, addr.Port)
	}

	switch c.typ {
	case CLIENT:
		c.ncs = fmt.Sprintf("%s - cid:%d", conn, c.cid)
	case ROUTER:
		c.ncs = fmt.Sprintf("%s - rid:%d", conn, c.cid)
	}
}
Example #23
func NewTSMWriter(w io.Writer) (TSMWriter, error) {
	index := &directIndex{
		blocks: map[string]*indexEntries{},
	}

	return &tsmWriter{wrapped: w, w: bufio.NewWriterSize(w, 4*1024*1024), index: index}, nil
}
Example #24
// rotateFile closes the syncBuffer's file and starts a new one.
func (sb *syncBuffer) rotateFile(now time.Time) error {
	if sb.file != nil {
		if err := sb.Flush(); err != nil {
			return err
		}
		if err := sb.file.Close(); err != nil {
			return err
		}
	}
	var err error
	sb.file, _, err = create(sb.sev, now)
	sb.nbytes = 0
	if err != nil {
		return err
	}

	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)

	// Write header.
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
	fmt.Fprintf(&buf, "Running on machine: %s\n", host)
	fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
	fmt.Fprintf(&buf, "Log line format: [IWEF]yymmdd hh:mm:ss.uuuuuu file:line msg\n")
	var n int
	n, err = sb.file.Write(buf.Bytes())
	sb.nbytes += uint64(n)
	return err
}
Example #25
func (this *Learner) Save(fname string) {
	fp, err := os.OpenFile(fname, os.O_CREATE|os.O_RDWR, 0777)
	if err != nil {
		panic(err)
	}
	defer fp.Close()
	writer := bufio.NewWriterSize(fp, 4096*32)
	labelsize := len(this.LabelDict.Id2elem)
	featuresize := len(this.FtDict.Id2elem)
	writer.WriteString(fmt.Sprintf("labelsize\t%d\n", labelsize))
	writer.WriteString(fmt.Sprintf("featuresize\t%d\n", featuresize))
	for i := 0; i < labelsize; i++ {
		label := this.LabelDict.Id2elem[i]
		writer.WriteString(fmt.Sprintf("%s\n", label))
	}
	for i := 0; i < featuresize; i++ {
		ft := this.FtDict.Id2elem[i]
		writer.WriteString(fmt.Sprintf("%s\n", ft))
	}
	for labelid := 0; labelid < labelsize; labelid++ {
		w := this.Weight[labelid]
		for ftid := 0; ftid < featuresize; ftid++ {
			writer.WriteString(fmt.Sprintf("%f\n", w[ftid]))
		}
	}
	writer.Flush()
}
Example #26
func (db *DB) defrag() {
	db.DataSeq++
	if db.LogFile != nil {
		db.LogFile.Close()
		db.LogFile = nil
	}
	db.checklogfile()
	bufile := bufio.NewWriterSize(db.LogFile, 0x100000)
	used := make(map[uint32]bool, 10)
	db.Idx.browse(func(key KeyType, rec *oneIdx) bool {
		db.loadrec(rec)
		rec.datpos = uint32(db.addtolog(bufile, key, rec.Slice()))
		rec.DataSeq = db.DataSeq
		used[rec.DataSeq] = true
		rec.freerec()
		return true
	})

	// first write & flush the data file:
	bufile.Flush()
	db.LogFile.Sync()

	// now the index:
	db.Idx.writedatfile() // this will close the file

	db.cleanupold(used)
	db.Idx.ExtraSpaceUsed = 0
}
Example #27
// createConn will connect to the server and wrap the appropriate
// bufio structures. It will do the right thing when an existing
// connection is in place.
func (nc *Conn) createConn() (err error) {
	if nc.Opts.Timeout < 0 {
		return ErrBadTimeout
	}
	if _, cur := nc.currentServer(); cur == nil {
		return ErrNoServers
	} else {
		cur.lastAttempt = time.Now()
	}
	nc.conn, err = net.DialTimeout("tcp", nc.url.Host, nc.Opts.Timeout)
	if err != nil {
		return err
	}

	// No clue why, but this stalls and kills performance on Mac (Mavericks).
	// https://code.google.com/p/go/issues/detail?id=6930
	//if ip, ok := nc.conn.(*net.TCPConn); ok {
	//	ip.SetReadBuffer(defaultBufSize)
	//}

	if nc.pending != nil && nc.bw != nil {
		// Move to pending buffer.
		nc.bw.Flush()
	}
	nc.bw = bufio.NewWriterSize(nc.conn, defaultBufSize)
	return nil
}
Example #28
func (idx *QdbIndex) writedatfile() {
	idx.DatfileIndex = 1 - idx.DatfileIndex
	idx.VersionSequence++

	//f := new(bytes.Buffer)
	ff, _ := os.Create(fmt.Sprint(idx.IdxFilePath, idx.DatfileIndex))
	f := bufio.NewWriterSize(ff, 0x100000)
	binary.Write(f, binary.LittleEndian, idx.VersionSequence)
	idx.browse(func(key KeyType, rec *oneIdx) bool {
		binary.Write(f, binary.LittleEndian, key)
		binary.Write(f, binary.LittleEndian, rec.datpos)
		binary.Write(f, binary.LittleEndian, rec.datlen)
		binary.Write(f, binary.LittleEndian, rec.DataSeq)
		binary.Write(f, binary.LittleEndian, rec.flags)
		return true
	})
	f.Write([]byte{0xff, 0xff, 0xff, 0xff})
	binary.Write(f, binary.LittleEndian, idx.VersionSequence)
	f.Write([]byte("FINI"))

	//ioutil.WriteFile(fmt.Sprint(idx.IdxFilePath, idx.DatfileIndex), f.Bytes(), 0600)
	f.Flush()
	ff.Close()

	// now delete the previous log
	if idx.file != nil {
		idx.file.Close()
		idx.file = nil
	}
	os.Remove(idx.IdxFilePath + "log")
	os.Remove(fmt.Sprint(idx.IdxFilePath, 1-idx.DatfileIndex))
}
Example #29
// Lock should be held
func (c *client) initClient() {
	s := c.srv
	c.cid = atomic.AddUint64(&s.gcid, 1)
	c.bw = bufio.NewWriterSize(c.nc, defaultBufSize)
	c.subs = hashmap.New()

	// This is a scratch buffer used for processMsg()
	// The msg header starts with "MSG ",
	// in bytes that is [77 83 71 32].
	c.msgb = [msgScratchSize]byte{77, 83, 71, 32}

	// This is to track pending clients that have data to be flushed
	// after we process inbound msgs from our own connection.
	c.pcd = make(map[*client]struct{})

	// No clue why, but this stalls and kills performance on Mac (Mavericks).
	//
	//	if ip, ok := c.nc.(*net.TCPConn); ok {
	//		ip.SetReadBuffer(defaultBufSize)
	//		ip.SetWriteBuffer(2*defaultBufSize)
	//	}

	// Set the Ping timer
	c.setPingTimer()

	// Spin up the read loop.
	go c.readLoop()
}
Example #30
// Open is part of the intents.file interface. realBSONFiles need to have Open called before
// Read can be called
func (f *realBSONFile) Open() (err error) {
	if f.path == "" {
		// This should not occur normally. All realBSONFile's should have a path
		return fmt.Errorf("error creating BSON file without a path, namespace: %v",
			f.intent.Namespace())
	}
	err = os.MkdirAll(filepath.Dir(f.path), os.ModeDir|os.ModePerm)
	if err != nil {
		return fmt.Errorf("error creating directory for BSON file %v: %v",
			filepath.Dir(f.path), err)
	}

	fileName := f.path
	file, err := os.Create(fileName)
	if err != nil {
		return fmt.Errorf("error creating BSON file %v: %v", fileName, err)
	}
	var writeCloser io.WriteCloser
	if f.gzip {
		writeCloser = gzip.NewWriter(file)
	} else {
		// wrap writer in buffer to reduce load on disk
		writeCloser = writeFlushCloser{
			atomicFlusher{
				bufio.NewWriterSize(file, 32*1024),
			},
		}
	}
	f.WriteCloser = &wrappedWriteCloser{
		WriteCloser: writeCloser,
		inner:       file,
	}

	return nil
}