Example #1
func normalizePath(dst, src []byte) []byte {
	dst = dst[:0]
	dst = addLeadingSlash(dst, src)
	dst = decodeArgAppend(dst, src, false)

	// remove duplicate slashes
	b := dst
	bSize := len(b)
	for {
		n := bytes.Index(b, strSlashSlash)
		if n < 0 {
			break
		}
		b = b[n:]
		copy(b, b[1:])
		b = b[:len(b)-1]
		bSize--
	}
	dst = dst[:bSize]

	// remove /./ parts
	b = dst
	for {
		n := bytes.Index(b, strSlashDotSlash)
		if n < 0 {
			break
		}
		nn := n + len(strSlashDotSlash) - 1
		copy(b[n:], b[nn:])
		b = b[:len(b)-nn+n]
	}

	// remove /foo/../ parts
	for {
		n := bytes.Index(b, strSlashDotDotSlash)
		if n < 0 {
			break
		}
		nn := bytes.LastIndexByte(b[:n], '/')
		if nn < 0 {
			nn = 0
		}
		n += len(strSlashDotDotSlash) - 1
		copy(b[nn:], b[n:])
		b = b[:len(b)-n+nn]
	}

	// remove trailing /foo/..
	n := bytes.LastIndex(b, strSlashDotDot)
	if n >= 0 && n+len(strSlashDotDot) == len(b) {
		nn := bytes.LastIndexByte(b[:n], '/')
		if nn < 0 {
			return strSlash
		}
		b = b[:nn+1]
	}

	return b
}
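The duplicate-slash pass above deletes one byte per iteration by shifting the tail left in place. A minimal standalone sketch of the same technique (collapseSlashes is a hypothetical name; strSlashSlash in fasthttp is simply []byte("//")):

// collapseSlashes removes doubled slashes in place, one byte per pass,
// the same way the strSlashSlash loop above does.
func collapseSlashes(b []byte) []byte {
	for {
		n := bytes.Index(b, []byte("//"))
		if n < 0 {
			return b
		}
		copy(b[n:], b[n+1:]) // shift the tail left over one of the two slashes
		b = b[:len(b)-1]     // drop the now-duplicated last byte
	}
}
// collapseSlashes([]byte("/a//b///c")) yields "/a/b/c".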
Example #2
File: folders.go Project: fd/1pwd
func parseFolders(data []byte) (Folders, error) {
	var (
		idx     int
		folders Folders
	)

	idx = bytes.IndexByte(data, '{')
	if idx < 0 {
		return nil, errors.New("invalid folders data")
	}
	data = data[idx:]

	idx = bytes.LastIndexByte(data, '}')
	if idx < 0 {
		return nil, errors.New("invalid folders data")
	}
	data = data[:idx+1]

	err := json.Unmarshal(data, &folders)
	if err != nil {
		return nil, err
	}

	return folders, nil
}
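parseFolders, parseBand (#4), and parseProfile (#5) share the same preliminary step: trim the input to the outermost { ... } pair before unmarshalling, so junk before the first '{' or after the last '}' is ignored. That step could be factored out roughly as follows (trimToJSONObject is a hypothetical helper, not part of the project):

// trimToJSONObject returns the span from the first '{' to the last '}'.
func trimToJSONObject(data []byte) ([]byte, error) {
	start := bytes.IndexByte(data, '{')
	end := bytes.LastIndexByte(data, '}')
	if start < 0 || end < start {
		return nil, errors.New("no JSON object found")
	}
	return data[start : end+1], nil
}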
Example #3
File: cli.go Project: udhos/nexthop
func (c *Client) LineBufferComplete(autoComplete string, attach bool) {

	c.mutex.Lock()
	defer c.mutex.Unlock()

	buf := c.telnetLine

	if !attach {
		// overwrite current label
		spc := bytes.LastIndexByte(buf.lineBuf[:buf.linePos], ' ')
		buf.lineSize = spc + 1
	}

	// append label

	for _, b := range autoComplete {
		if buf.lineSize >= len(buf.lineBuf) {
			break // overflow
		}

		// copy byte
		buf.lineBuf[buf.lineSize] = byte(b)
		buf.lineSize++
	}
	buf.linePos = buf.lineSize
}
Example #4
File: band.go Project: fd/1pwd
func parseBand(data []byte) (Band, error) {
	var (
		idx  int
		band Band
	)

	idx = bytes.IndexByte(data, '{')
	if idx < 0 {
		return nil, errors.New("invalid band data")
	}
	data = data[idx:]

	idx = bytes.LastIndexByte(data, '}')
	if idx < 0 {
		return nil, errors.New("invalid band data")
	}
	data = data[:idx+1]

	err := json.Unmarshal(data, &band)
	if err != nil {
		return nil, err
	}

	return band, nil
}
Example #5
File: profile.go Project: fd/1pwd
func parseProfile(data []byte) (*Profile, error) {
	var (
		idx     int
		profile *Profile
	)

	idx = bytes.IndexByte(data, '{')
	if idx < 0 {
		return nil, errors.New("invalid profile data")
	}
	data = data[idx:]

	idx = bytes.LastIndexByte(data, '}')
	if idx < 0 {
		return nil, errors.New("invalid profile data")
	}
	data = data[:idx+1]

	err := json.Unmarshal(data, &profile)
	if err != nil {
		return nil, err
	}

	return profile, nil
}
Example #6
// Write writes p to the underlying writer, folding any line longer than
// maxLineLen at its last space and indenting continuation lines.
func (w *headerWriter) Write(p []byte) (int, error) {
	// TODO: logic for wrapping headers is actually pretty complex for some header types, like received headers
	var total int
	for len(p)+w.curLineLen > w.maxLineLen {
		toWrite := w.maxLineLen - w.curLineLen
		// Wrap at last space, if any
		lastSpace := bytes.LastIndexByte(p[:toWrite], byte(' '))
		if lastSpace > 0 {
			toWrite = lastSpace
		}
		written, err := w.w.Write(p[:toWrite])
		total += written
		if err != nil {
			return total, err
		}
		written, err = w.w.Write([]byte("\r\n "))
		total += written
		if err != nil {
			return total, err
		}
		p = p[toWrite:]
		w.curLineLen = 1 // Continuation lines are indented
	}
	written, err := w.w.Write(p)
	total += written
	w.curLineLen += written
	return total, err
}
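A rough usage sketch, assuming headerWriter is constructed with a destination writer and a line limit as the field references above suggest (the struct literal below is hypothetical):

var sb bytes.Buffer
hw := &headerWriter{w: &sb, maxLineLen: 20}
hw.Write([]byte("Subject: a fairly long header value"))
// sb now holds the value folded at spaces, with "\r\n " continuations.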
Example #7
func (h *RequestHeader) parseFirstLine(buf []byte) (int, error) {
	bNext := buf
	var b []byte
	var err error
	for len(b) == 0 {
		if b, bNext, err = nextLine(bNext); err != nil {
			return 0, err
		}
	}

	// parse method
	n := bytes.IndexByte(b, ' ')
	if n <= 0 {
		return 0, fmt.Errorf("cannot find http request method in %q", buf)
	}
	h.method = append(h.method[:0], b[:n]...)
	b = b[n+1:]

	// parse requestURI
	n = bytes.LastIndexByte(b, ' ')
	if n < 0 {
		h.noHTTP11 = true
		n = len(b)
	} else if n == 0 {
		return 0, fmt.Errorf("RequestURI cannot be empty in %q", buf)
	} else if !bytes.Equal(b[n+1:], strHTTP11) {
		h.noHTTP11 = true
	}
	h.requestURI = append(h.requestURI[:0], b[:n]...)

	return len(buf) - len(bNext), nil
}
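Splitting at the first space for the method and at the last space for the HTTP version means a request-URI containing spaces still parses. A condensed sketch of the same two cuts on a literal line:

line := []byte("GET /foo bar HTTP/1.1")
method := line[:bytes.IndexByte(line, ' ')] // "GET"
rest := line[len(method)+1:]
uri := rest[:bytes.LastIndexByte(rest, ' ')] // "/foo bar"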
Example #8
File: uri.go Project: hambster/fasthttp
// LastPathSegment returns the last part of uri path after '/'.
//
// Examples:
//
//    * For /foo/bar/baz.html path returns baz.html.
//    * For /foo/bar/ returns empty byte slice.
//    * For /foobar.js returns foobar.js.
func (x *URI) LastPathSegment() []byte {
	path := x.Path()
	n := bytes.LastIndexByte(path, '/')
	if n < 0 {
		return path
	}
	return path[n+1:]
}
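The method reduces to a single cut; for example:

path := []byte("/foo/bar/baz.html")
n := bytes.LastIndexByte(path, '/')
last := path[n+1:] // "baz.html"; for "/foo/bar/" this is the empty slice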
Example #9
func getOffsetXLineBack(contents []byte, lines int64) (offset int64) {
	for offset = int64(len(contents)); offset != -1 && lines > 0; lines-- {
		offset = int64(bytes.LastIndexByte(contents[:offset], newline))
	}
	if offset == -1 {
		offset = 0
	}

	return
}
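A quick call, assuming newline is the byte '\n': the file's terminating newline consumes the first step, so asking for two lines back lands on the '\n' just before the final line:

contents := []byte("one\ntwo\nthree\n")
off := getOffsetXLineBack(contents, 2)
// off == 7, the '\n' before "three"; contents[off+1:] == "three\n"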
Example #10
File: logrot.go Project: xi2/logrot
// Open opens the file at path for writing in append mode. If it does
// not exist it is created with permissions of perm.
//
// The returned WriteCloser keeps track of the size of the file and
// the position of the most recent newline. If during a call to Write
// a particular byte to be written would cause the file size to exceed
// maxSize bytes, and at least one newline has been written to the
// file already, then a rotation occurs before the byte is written. A
// rotation is the following procedure:
//
// Let N = highest n such that <path>.<n>.gz exists or zero
// otherwise. Let M = maxFiles. Starting at n = N, while n > M-2 and n
// > 0 delete <path>.<n>.gz and decrement n. Then, while n > 0, rename
// <path>.<n>.gz to <path>.<n+1>.gz and decrement n. Next, if M > 1,
// the contents of <path> up to and including the final newline are
// gzipped and saved to the file <path>.1.gz . Lastly, the contents of
// <path> beyond the final newline are copied to the beginning of the
// file and <path> is truncated to contain just those contents.
//
// It is safe to call Write/Close from multiple goroutines.
func Open(path string, perm os.FileMode, maxSize int64, maxFiles int) (io.WriteCloser, error) {
	if maxSize < 1 {
		return nil, errors.New("logrot: maxSize < 1")
	}
	if maxFiles < 1 {
		return nil, errors.New("logrot: maxFiles < 1")
	}
	// if path exists determine size and check path is a regular file.
	var size int64
	fi, err := os.Lstat(path)
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	if err == nil {
		if fi.Mode()&os.ModeType != 0 {
			return nil, fmt.Errorf("logrot: %s is not a regular file", path)
		}
		size = fi.Size()
	}
	// open path for reading/writing, creating it if necessary.
	file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, perm)
	if err != nil {
		return nil, err
	}
	// determine last newline position within file by reading backwards.
	var lastNewline int64 = -1
	const bufExp = 13 // 8KB buffer
	buf := make([]byte, 1<<bufExp)
	off := ((size - 1) >> bufExp) << bufExp
	bufSz := size - off
	for off >= 0 {
		_, err = file.ReadAt(buf[:bufSz], off)
		if err != nil {
			_ = file.Close()
			return nil, err
		}
		i := bytes.LastIndexByte(buf[:bufSz], '\n')
		if i != -1 {
			lastNewline = off + int64(i)
			break
		}
		off -= 1 << bufExp
		bufSz = 1 << bufExp
	}
	return &writeCloser{
		path:        path,
		perm:        perm,
		maxSize:     maxSize,
		maxFiles:    maxFiles,
		file:        file,
		size:        size,
		lastNewline: lastNewline,
	}, nil
}
Example #11
func parseNetstatLine(line [][]byte) (port, pid int, err error) {
	n := bytes.LastIndexByte(line[1], ':')
	if n < 0 {
		return port, pid, errors.New("parsing port")
	}
	port, err = strconv.Atoi(string(line[1][n+1:]))
	if err != nil {
		return
	}
	pid, err = strconv.Atoi(string(line[4]))
	return
}
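A usage example with a fabricated line shaped to match the indices the function expects (field 1 holds addr:port, field 4 the PID); using LastIndexByte rather than IndexByte keeps IPv6 addresses with embedded colons working:

fields := bytes.Fields([]byte("tcp 0.0.0.0:8080 0.0.0.0:* LISTEN 1234"))
port, pid, err := parseNetstatLine(fields)
// port == 8080, pid == 1234, err == nil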
Example #12
func (u *URI) updateBytes(newURI, buf []byte) []byte {
	if len(newURI) == 0 {
		return buf
	}

	n := bytes.Index(newURI, strSlashSlash)
	if n >= 0 {
		// absolute uri
		var b [32]byte
		schemeOriginal := b[:0]
		if len(u.scheme) > 0 {
			schemeOriginal = append([]byte(nil), u.scheme...)
		}
		u.Parse(nil, newURI)
		if len(schemeOriginal) > 0 && len(u.scheme) == 0 {
			u.scheme = append(u.scheme[:0], schemeOriginal...)
		}
		return buf
	}

	if newURI[0] == '/' {
		// uri without host
		buf = u.appendSchemeHost(buf[:0])
		buf = append(buf, newURI...)
		u.Parse(nil, buf)
		return buf
	}

	// relative path
	switch newURI[0] {
	case '?':
		// query string only update
		u.SetQueryStringBytes(newURI[1:])
		return append(buf[:0], u.FullURI()...)
	case '#':
		// update only hash
		u.SetHashBytes(newURI[1:])
		return append(buf[:0], u.FullURI()...)
	default:
		// update the last path part after the slash
		path := u.Path()
		n = bytes.LastIndexByte(path, '/')
		if n < 0 {
			panic("BUG: path must contain at least one slash")
		}
		buf = u.appendSchemeHost(buf[:0])
		buf = appendQuotedPath(buf, path[:n+1])
		buf = append(buf, newURI...)
		u.Parse(nil, buf)
		return buf
	}
}
Example #13
func json2Filef(url string, save_to string, maxPage string) (int, error) {
	//fmt.Println(url, maxPage)
	for {
		ll = append(ll, time.Now())
		statusCode, body, err := fasthttp.Get(nil, url)
		if err != nil {
			return 0, err
		}
		if statusCode != 200 {
			return 0, fmt.Errorf("unexpected status code: %d", statusCode)
		}

		anti_spider := "smPolicy=tmallrateweb-rate-anti_Spider-checklogin"
		idx_anti_spider := bytes.Index(body, []byte(anti_spider))
		paginator_empty := "\"paginator\":\"\""
		idx_paginator_empty := bytes.Index(body, []byte(paginator_empty))
		if idx_anti_spider >= 0 || idx_paginator_empty >= 0 {
			time.Sleep(5 * time.Second)
			continue
		}

		out := make([]byte, len(body)*2)
		_, bytesWritten, err := iconv.Convert(body, out, "gbk", "utf-8")
		if err != nil {
			return 0, err
		}

		//fmt.Println(bytesRead, bytesWritten, err)
		idx := bytes.IndexByte(out, '(')
		if idx < 0 || bytesWritten < idx+1 {
			return 0, fmt.Errorf("idx error")
		}
		out = out[idx+1 : bytesWritten]
		//fmt.Println(string(out))
		idx_end := bytes.LastIndexByte(out, ')')
		if idx_end < 0 {
			return 0, fmt.Errorf("idx_end<0, )")
		}
		out = out[:idx_end]
		fmt.Println(save_to)
		ioutil.WriteFile(save_to, out, 0666)

		time.Sleep(1 * time.Second)
		if len(maxPage) > 0 {
			return strconv.Atoi(pickString(body, maxPage, ":", ","))
		} else {
			return 0, nil
		}
	}
}
Example #14
File: engine.go Project: yzx226/scrapy
func json2Filef(t *ShopType, sellerId, itemId string) (int, error) {
	if ex := os.MkdirAll(fmt.Sprintf("%s/%s/%s/%s", root, t.Type, sellerId, itemId), 0777); ex != nil {
		fmt.Println(ex)
		return 0, ex
	}

	for p, max := 1, 1; p <= max; {
		url := fmt.Sprintf(t.RateFormat, itemId, sellerId, p)
		// fmt.Println(url)
		if body, ok := fetchData(url); ok {
			if len(body) < 100 {
				if string(body) == "jsonp({\"status\":1111,\"wait\":5})" {
					// the server asked us to wait; back off and retry this page
					time.Sleep(5 * time.Minute)
					continue
				}
			}
			if p == 1 {
				lastPage := pickString(body, "\"lastPage\":", ",\"page\"")
				fmt.Println("lastPage", lastPage)
				if v_max, err := strconv.Atoi(lastPage); err == nil {
					max = v_max
				} else {
					fmt.Println(err)
				}
			}
			out := make([]byte, len(body)*2)
			_, bytesWritten, _ := iconv.Convert(body, out, "gbk", "utf-8")
			idx := bytes.IndexByte(out, '(')
			if idx < 0 || bytesWritten < idx+1 {
				return 0, fmt.Errorf("idx error")
			}
			out = out[idx+1 : bytesWritten]
			idx_end := bytes.LastIndexByte(out, ')')
			if idx_end < 0 {
				return 0, fmt.Errorf("idx_end<0, )")
			}
			out = out[:idx_end]

			save_to := fmt.Sprintf("%s/%s/%s/%s/%s.%d.log", root, t.Type, sellerId, itemId, itemId, p)
			fmt.Printf("%s %d/%d [%d]\n%s\n", url, p, max, len(out), save_to)
			ioutil.WriteFile(save_to, out, 0666)
			time.Sleep(time.Duration(rand.Intn(5)) * time.Second)

			p++
		}
	}
	return 0, nil
}
Example #15
File: scanner.go Project: mewmew/uc
// NewFromBytes returns a new scanner lexing from input.
func NewFromBytes(input []byte) Scanner {
	// Append new line to files not ending with new line.
	appendNewLine := false
	lastNewLine := bytes.LastIndexByte(input, '\n')
	for _, r := range string(input[lastNewLine+1:]) {
		if !strings.ContainsRune(whitespace, r) {
			// Non-whitespace character located after the last new line character.
			appendNewLine = true
			break
		}
	}
	if appendNewLine {
		input = append(input, '\n')
	}

	return lexer.NewLexer(input)
}
Example #16
File: main.go Project: kaaLabs15/embello
func expectEcho(match string, immed bool, flusher func(string)) (string, bool) {
	var collected []byte
	for {
		data := readWithTimeout()
		collected = append(collected, data...)
		if bytes.HasSuffix(collected, []byte(match)) {
			bytesBefore := len(collected) - len(match)
			return string(collected[:bytesBefore]), true
		}
		if immed || len(data) == 0 {
			return string(collected), false
		}
		if n := bytes.LastIndexByte(collected, '\n'); n >= 0 {
			flusher(string(collected[:n+1]))
			collected = collected[n+1:]
		}
	}
}
Example #17
func lastIndexLines(s []byte, n *int) int64 {
	i := len(s) - 1

	for i > 0 {
		o := bytes.LastIndexByte(s[:i], '\n')
		if o < 0 {
			break
		}

		i = o
		*n--
		if *n == 0 {
			break
		}
	}

	return int64(i)
}
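Since i starts at len(s)-1, a trailing newline is excluded from the first search rather than counted as a line. A quick trace:

buf := []byte("a\nb\nc\nd\n")
n := 2
off := lastIndexLines(buf, &n)
// off == 3; buf[off+1:] == "c\nd\n", the last two lines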
Example #18
func (merger *OutputMerger) Add(name string, r io.ReadCloser) {
	merger.wg.Add(1)
	go func() {
		var pending []byte
		var buf [4 << 10]byte
		for {
			n, err := r.Read(buf[:])
			if n != 0 {
				pending = append(pending, buf[:n]...)
				if pos := bytes.LastIndexByte(pending, '\n'); pos != -1 {
					out := pending[:pos+1]
					if merger.tee != nil {
						merger.tee.Write(out)
					}
					select {
					case merger.Output <- append([]byte{}, out...):
						r := copy(pending[:], pending[pos+1:])
						pending = pending[:r]
					default:
					}
				}
			}
			if err != nil {
				if len(pending) != 0 {
					pending = append(pending, '\n')
					if merger.tee != nil {
						merger.tee.Write(pending)
					}
					select {
					case merger.Output <- pending:
					default:
					}
				}
				r.Close()
				select {
				case merger.Err <- fmt.Errorf("failed to read from %v: %v", name, err):
				default:
				}
				merger.wg.Done()
				return
			}
		}
	}()
}
Example #19
File: multipart.go Project: achanda/go
// scanUntilBoundary scans buf to identify how much of it can be safely
// returned as part of the Part body.
// dashBoundary is "--boundary".
// nlDashBoundary is "\r\n--boundary" or "\n--boundary", depending on what mode we are in.
// The comments below (and the name) assume "\n--boundary", but either is accepted.
// total is the number of bytes read out so far. If total == 0, then a leading "--boundary" is recognized.
// readErr is the read error, if any, that followed reading the bytes in buf.
// scanUntilBoundary returns the number of data bytes from buf that can be
// returned as part of the Part body and also the error to return (if any)
// once those data bytes are done.
func scanUntilBoundary(buf, dashBoundary, nlDashBoundary []byte, total int64, readErr error) (int, error) {
	if total == 0 {
		// At beginning of body, allow dashBoundary.
		if bytes.HasPrefix(buf, dashBoundary) {
			switch matchAfterPrefix(buf, dashBoundary, readErr) {
			case -1:
				return len(dashBoundary), nil
			case 0:
				return 0, nil
			case +1:
				return 0, io.EOF
			}
		}
		if bytes.HasPrefix(dashBoundary, buf) {
			return 0, readErr
		}
	}

	// Search for "\n--boundary".
	if i := bytes.Index(buf, nlDashBoundary); i >= 0 {
		switch matchAfterPrefix(buf[i:], nlDashBoundary, readErr) {
		case -1:
			return i + len(nlDashBoundary), nil
		case 0:
			return i, nil
		case +1:
			return i, io.EOF
		}
	}
	if bytes.HasPrefix(nlDashBoundary, buf) {
		return 0, readErr
	}

	// Otherwise, anything up to the final \n is not part of the boundary
	// and so must be part of the body.
	// Also if the section from the final \n onward is not a prefix of the boundary,
	// it too must be part of the body.
	i := bytes.LastIndexByte(buf, nlDashBoundary[0])
	if i >= 0 && bytes.HasPrefix(nlDashBoundary, buf[i:]) {
		return i, nil
	}
	return len(buf), readErr
}
Example #20
File: logs.go Project: vmware/vic
// Find the offset we want to start tailing from.
// This should either be beginning-of-file or tailLines
// newlines from the EOF.
func findSeekPos(f *os.File) int64 {
	defer trace.End(trace.Begin(""))
	nlines := tailLines
	readPos, err := f.Seek(0, 2)
	// If for some reason we can't seek, we will just start tailing from beginning-of-file
	if err != nil {
		return int64(0)
	}

	// Buffer so we can seek nBytes (default: 1k) at a time
	buf := make([]byte, nBytes)

	for readPos > 0 {
		// Go back nBytes from the last readPos we've seen (stopping at beginning-of-file)
		// and read the next nBytes
		readPos -= int64(len(buf))
		if readPos < 0 {
			// We don't want to overlap our read with previous reads...
			buf = buf[:(int(readPos) + nBytes)]
			readPos = 0
		}
		bufend, err := f.ReadAt(buf, readPos)

		// It's OK to get io.EOF here.  Anything else is bad.
		if err != nil && err != io.EOF {
			log.Errorf("Error reading from file %s: %s", f.Name(), err)
			return 0
		}

		// Start from the end of the buffer and start looking for newlines
		for bufend > 0 {
			bufend = bytes.LastIndexByte(buf[:bufend], '\n')
			if bufend < 0 {
				break
			}
			nlines--
			if nlines < 0 {
				return readPos + int64(bufend) + 1
			}
		}
	}
	return 0
}
Example #21
File: jsonp.go Project: fishedee/fishgo
func DecodeJsonp(data []byte, value interface{}) (string, error) {
	leftIndex := bytes.IndexByte(data, '(')
	rightIndex := bytes.LastIndexByte(data, ')')
	if leftIndex == -1 || rightIndex == -1 || leftIndex >= rightIndex {
		return "", errors.New("invalid jsonp format " + string(data))
	}
	functionName := string(data[:leftIndex])
	data = data[leftIndex+1 : rightIndex]
	var valueDynamic interface{}
	err := json.Unmarshal(data, &valueDynamic)
	if err != nil {
		return "", err
	}
	err = MapToArray(valueDynamic, value, "jsonp")
	if err != nil {
		return "", err
	}
	return strings.Trim(functionName, " "), nil
}
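The extraction core is two cuts, and taking the last ')' means a trailing ";" after the callback is tolerated. A sketch with a made-up payload:

data := []byte(`cb({"name":"go"});`)
l, r := bytes.IndexByte(data, '('), bytes.LastIndexByte(data, ')')
payload := data[l+1 : r]                    // {"name":"go"}
name := strings.Trim(string(data[:l]), " ") // cb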
Example #22
func parseResponseStatus(s []byte) (uint16, []byte, error) {
	if isDebug {
		debugf("parseResponseStatus: %s", s)
	}

	p := bytes.IndexByte(s, ' ')
	if p == -1 {
		return 0, nil, errors.New("Not able to identify status code")
	}

	code, _ := parseInt(s[0:p])

	p = bytes.LastIndexByte(s, ' ')
	if p == -1 {
		return uint16(code), nil, errors.New("Not able to identify status phrase")
	}
	phrase := s[p+1:]
	return uint16(code), phrase, nil
}
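One caveat this shows when called directly: the phrase is taken after the last space, so only the final word of a multi-word reason phrase survives:

code, phrase, _ := parseResponseStatus([]byte("200 OK"))
// code == 200, string(phrase) == "OK"
code, phrase, _ = parseResponseStatus([]byte("404 Not Found"))
// code == 404, string(phrase) == "Found" (the "Not" is lost)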
Example #23
// Refresh reloads all the data associated with this process.
func (p *UnixProcess) Refresh() error {
	var (
		procPath = fmt.Sprintf("/proc/%d/", p.pid)
		data     []byte
		err      error
	)

	data, err = ioutil.ReadFile(procPath + "stat")
	if err != nil {
		return err
	}

	data = data[bytes.IndexRune(data, ')')+2:]
	_, err = fmt.Fscanf(bytes.NewReader(data), "%c %d", new(rune), &p.ppid)
	if err != nil {
		return err
	}

	data, err = ioutil.ReadFile(procPath + "cmdline")
	if err != nil {
		return err
	}
	if len(data) == 0 {
		return errors.New("empty name")
	}

	// Remove arguments
	if ind := bytes.IndexByte(data, 0); ind >= 0 {
		data = data[:ind]
	}
	if ind := bytes.IndexByte(data, ' '); ind >= 0 {
		data = data[:ind]
	}
	// Remove path to the executable
	if ind := bytes.LastIndexByte(data, '/'); ind >= 0 {
		data = data[ind+1:]
	}

	p.binary = string(data)

	return nil
}
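The command-line trimming also works standalone: /proc/<pid>/cmdline stores argv NUL-separated, so the name is whatever precedes the first NUL, minus its directory prefix. A sketch with a made-up cmdline:

data := []byte("/usr/bin/python3\x00-m\x00http.server\x00")
if i := bytes.IndexByte(data, 0); i >= 0 {
	data = data[:i] // "/usr/bin/python3"
}
if i := bytes.LastIndexByte(data, '/'); i >= 0 {
	data = data[i+1:] // "python3"
}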
Example #24
File: uri.go Project: hambster/fasthttp
func (x *URI) updateBytes(newURI, buf []byte) []byte {
	if len(newURI) == 0 {
		return buf
	}
	if newURI[0] == '/' {
		// uri without host
		buf = x.appendSchemeHost(buf[:0])
		buf = append(buf, newURI...)
		x.Parse(nil, buf)
		return buf
	}

	n := bytes.Index(newURI, strColonSlashSlash)
	if n >= 0 {
		// absolute uri
		x.Parse(nil, newURI)
		return buf
	}

	// relative path
	if newURI[0] == '?' {
		// query string only update
		x.SetQueryStringBytes(newURI[1:])
		return buf
	}

	path := x.Path()
	n = bytes.LastIndexByte(path, '/')
	if n < 0 {
		panic("BUG: path must contain at least one slash")
	}
	buf = x.appendSchemeHost(buf[:0])
	buf = appendQuotedPath(buf, path[:n+1])
	buf = append(buf, newURI...)
	x.Parse(nil, buf)
	return buf
}
Example #25
File: main.go Project: flier/gohs
/**
 * This is the function that will be called for each match that occurs. @a ctx
 * is to allow you to have some application-specific state that you will get
 * access to for each match. In our simple example we're just going to use it
 * to pass in the pattern that was being searched for so we can print it out.
 */
func eventHandler(id uint, from, to uint64, flags uint, context interface{}) error {
	inputData := context.([]byte)

	start := bytes.LastIndexByte(inputData[:from], '\n')
	if start == -1 {
		start = 0
	} else {
		start++
	}

	// Check the IndexByte result before adding the offset: when no newline
	// follows the match, IndexByte returns -1, which must map to end-of-input.
	end := bytes.IndexByte(inputData[to:], '\n')
	if end == -1 {
		end = len(inputData)
	} else {
		end += int(to)
	}

	if *flagByteOffset {
		fmt.Printf("%d: ", start)
	}

	fmt.Printf("%s%s%s\n", inputData[start:from], theme(string(inputData[from:to])), inputData[to:end])

	return nil
}
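The same boundary expansion as a self-contained sketch, with the -1 sentinel from IndexByte checked before the offset is added:

data := []byte("alpha\nbeta gamma\ndelta\n")
from, to := 11, 16 // the match "gamma"
start := bytes.LastIndexByte(data[:from], '\n') + 1 // 0 when no newline precedes
end := bytes.IndexByte(data[to:], '\n')
if end < 0 {
	end = len(data)
} else {
	end += to
}
// data[start:end] == "beta gamma"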
Example #26
File: slog.go Project: polydawn/go-slog
// must be full lines or we will have a sad time
func (s *Slog) write(msg []byte) {
	s.mu.Lock()
	defer s.mu.Unlock()
	// scan for the last (or any) cuts so we can decide what to do
	lastBreak := bytes.LastIndexByte(msg, '\n') + 1
	//	fmt.Printf("::> %q ; %d/%d <::\n", string(msg), lastBreak, len(msg))
	if lastBreak <= 0 {
		s.partial.Write(msg)
		return
	}
	// roll back over our previous banner print
	s.retract(s.footprint)
	// if we have any buffered partial lines, flush that first
	if s.partial.Len() > 0 {
		io.Copy(s.wr, s.partial)
		s.partial.Reset()
	}
	// now flush all other complete lines
	io.Copy(s.wr, bytes.NewBuffer(msg[0:lastBreak]))
	// and the banner again
	io.Copy(s.wr, bytes.NewBuffer(s.bannerMemo))
	// retain anything after lastbreak in the partial line buffer
	s.partial.Write(msg[lastBreak:])
}
Example #27
func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool {
	if len(data) < MinPktLength {
		return false
	}
	if !p.Encless && len(data) > len(p.bufR)-S20BS {
		return false
	}
	var out []byte
	p.BusyR.Lock()
	if p.Encless {
		var err error
		out, err = EnclessDecode(
			p.Key,
			data[len(data)-NonceSize:],
			data[:len(data)-NonceSize],
		)
		if err != nil {
			p.FramesUnauth++
			p.BusyR.Unlock()
			return false
		}
	} else {
		for i := 0; i < SSize; i++ {
			p.bufR[i] = 0
		}
		copy(p.bufR[S20BS:], data[TagSize:])
		salsa20.XORKeyStream(
			p.bufR[:S20BS+len(data)-TagSize-NonceSize],
			p.bufR[:S20BS+len(data)-TagSize-NonceSize],
			data[len(data)-NonceSize:],
			p.Key,
		)
		copy(p.keyAuthR[:], p.bufR[:SSize])
		copy(p.tagR[:], data[:TagSize])
		if !poly1305.Verify(p.tagR, data[TagSize:], p.keyAuthR) {
			p.FramesUnauth++
			p.BusyR.Unlock()
			return false
		}
		out = p.bufR[S20BS : S20BS+len(data)-TagSize-NonceSize]
	}

	// Check if received nonce is known to us in either of two buckets.
	// If yes, then this is ignored duplicate.
	// Check from the oldest bucket, as in most cases this will result
	// in constant time check.
	// If Bucket0 is filled, then it becomes Bucket1.
	p.NonceCipher.Decrypt(
		data[len(data)-NonceSize:],
		data[len(data)-NonceSize:],
	)
	p.nonceRecv = binary.BigEndian.Uint64(data[len(data)-NonceSize:])
	if reorderable {
		_, p.nonceFound0 = p.nonceBucket0[p.nonceRecv]
		_, p.nonceFound1 = p.nonceBucket1[p.nonceRecv]
		if p.nonceFound0 || p.nonceFound1 || p.nonceRecv+2*NonceBucketSize < p.nonceLatest {
			p.FramesDup++
			p.BusyR.Unlock()
			return false
		}
		p.nonceBucket0[p.nonceRecv] = struct{}{}
		p.nonceBucketN++
		if p.nonceBucketN == NonceBucketSize {
			p.nonceBucket1 = p.nonceBucket0
			p.nonceBucket0 = make(map[uint64]struct{}, NonceBucketSize)
			p.nonceBucketN = 0
		}
	} else {
		if p.nonceRecv != p.NonceExpect {
			p.FramesDup++
			p.BusyR.Unlock()
			return false
		}
		p.NonceExpect += 2
	}
	if p.nonceRecv > p.nonceLatest {
		p.nonceLatest = p.nonceRecv
	}

	p.FramesIn++
	atomic.AddUint64(&p.BytesIn, uint64(len(data)))
	p.LastPing = time.Now()
	p.pktSizeR = bytes.LastIndexByte(out, PadByte)
	if p.pktSizeR == -1 {
		p.BusyR.Unlock()
		return false
	}
	// Validate the pad
	for i := p.pktSizeR + 1; i < len(out); i++ {
		if out[i] != 0 {
			p.BusyR.Unlock()
			return false
		}
	}

	if p.pktSizeR == 0 {
		p.HeartbeatRecv++
		p.BusyR.Unlock()
		return true
	}
	p.BytesPayloadIn += uint64(p.pktSizeR)
	tap.Write(out[:p.pktSizeR])
	p.BusyR.Unlock()
	return true
}
Example #28
func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
	// Check that the content length is not too large for us to handle.
	if req.ContentLength > h.MaxBodySize {
		tooLarge(res)
		return
	}
	now := time.Now()

	// Handle gzip request bodies
	body := req.Body
	var err error
	if req.Header.Get("Content-Encoding") == "gzip" {
		body, err = gzip.NewReader(req.Body)
		defer body.Close()
		if err != nil {
			log.Println("E! " + err.Error())
			badRequest(res)
			return
		}
	}
	body = http.MaxBytesReader(res, body, h.MaxBodySize)

	var return400 bool
	var hangingBytes bool
	buf := h.pool.get()
	defer h.pool.put(buf)
	bufStart := 0
	for {
		n, err := io.ReadFull(body, buf[bufStart:])
		if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
			log.Println("E! " + err.Error())
			// problem reading the request body
			badRequest(res)
			return
		}

		if err == io.EOF {
			if return400 {
				badRequest(res)
			} else {
				res.WriteHeader(http.StatusNoContent)
			}
			return
		}

		if hangingBytes {
			i := bytes.IndexByte(buf, '\n')
			if i == -1 {
				// still didn't find a newline, keep scanning
				continue
			}
			// rotate the bit remaining after the first newline to the front of the buffer
			i++ // start copying after the newline
			bufStart = len(buf) - i
			if bufStart > 0 {
				copy(buf, buf[i:])
			}
			hangingBytes = false
			continue
		}

		if err == io.ErrUnexpectedEOF {
			// finished reading the request body
			if err := h.parse(buf[:n+bufStart], now); err != nil {
				log.Println("E! " + err.Error())
				return400 = true
			}
			if return400 {
				badRequest(res)
			} else {
				res.WriteHeader(http.StatusNoContent)
			}
			return
		}

		// if we got down here it means that we filled our buffer, and there
		// are still bytes remaining to be read. So we will parse up until the
		// final newline, then push the rest of the bytes into the next buffer.
		i := bytes.LastIndexByte(buf, '\n')
		if i == -1 {
			// drop any line longer than the max buffer size
			log.Printf("E! http_listener received a single line longer than the maximum of %d bytes",
				len(buf))
			hangingBytes = true
			return400 = true
			bufStart = 0
			continue
		}
		if err := h.parse(buf[:i], now); err != nil {
			log.Println("E! " + err.Error())
			return400 = true
		}
		// rotate the bit remaining after the last newline to the front of the buffer
		i++ // start copying after the newline
		bufStart = len(buf) - i
		if bufStart > 0 {
			copy(buf, buf[i:])
		}
	}
}
Example #29
func (t *Follower) follow() error {
	_, err := t.file.Seek(t.config.Offset, t.config.Whence)
	if err != nil {
		return err
	}

	var (
		eventChan = make(chan fsnotify.Event)
		errChan   = make(chan error)
	)

	t.watcher, err = fsnotify.NewWatcher()
	if err != nil {
		return err
	}

	defer t.watcher.Close()
	go t.watchFileEvents(eventChan, errChan)

	t.watcher.Add(t.filename)

	for {
		for {
			// discard leading NUL bytes
			var discarded int

			for {
				b, _ := t.reader.Peek(peekSize)
				i := bytes.LastIndexByte(b, '\x00')

				if i > 0 {
					n, _ := t.reader.Discard(i + 1)
					discarded += n
				}

				if i+1 < peekSize {
					break
				}
			}

			s, err := t.reader.ReadBytes('\n')
			if err != nil && err != io.EOF {
				return err
			}

			// if we encounter EOF before a line delimiter,
			// ReadBytes() will return the remaining bytes,
			// so push them back onto the buffer, rewind
			// our seek position, and wait for further file changes.
			// we also have to save our dangling byte count in the event
			// that we want to re-open the file and seek to the end
			if err == io.EOF {
				l := len(s)

				t.offset, err = t.file.Seek(-int64(l), io.SeekCurrent)
				if err != nil {
					return err
				}

				t.reader.Reset(t.file)
				break
			}

			t.sendLine(s, discarded)
		}

		// we're now at EOF, so wait for changes
		select {
		case evt := <-eventChan:
			switch evt.Op {

			// as soon as something is written, go back and read until EOF.
			case fsnotify.Chmod:
				fallthrough

			case fsnotify.Write:
				fi, err := t.file.Stat()
				if err != nil {
					if !os.IsNotExist(err) {
						return err
					}

					// it's possible that an unlink can cause fsnotify.Chmod,
					// so attempt to rewatch if the file is missing
					if err := t.rewatch(); err != nil {
						return err
					}

					continue
				}

				// file was truncated, seek to the beginning
				if t.offset > fi.Size() {
					t.offset, err = t.file.Seek(0, io.SeekStart)
					if err != nil {
						return err
					}

					t.reader.Reset(t.file)
				}

				continue

			// if a file is removed or renamed
			// and re-opening is desired, see if it appears
			// again within a 1 second deadline. this should be enough time
			// to see the file again for log rotation programs with this behavior
			default:
				if !t.config.Reopen {
					return nil
				}

				if err := t.rewatch(); err != nil {
					return err
				}

				continue
			}

		// any errors that come from fsnotify
		case err := <-errChan:
			return err

		// a request to stop
		case <-t.closeCh:
			t.watcher.Remove(t.filename)
			return nil

		// fall back to 10 second polling if we haven't received any fsevents
		// stat the file, if it's still there, just continue and try to read bytes
		// if not, go through our re-opening routine
		case <-time.After(10 * time.Second):
			_, err := t.file.Stat()
			if err == nil {
				continue
			}

			if !os.IsNotExist(err) {
				return err
			}

			if err := t.rewatch(); err != nil {
				return err
			}

			continue
		}
	}
}