Example 1
package main

import (
	"bytes"
	"fmt"
)

func main() {
	s := []byte("123456789")
	fmt.Println(bytes.LastIndexAny(s, "789")) // 8
	fmt.Println(bytes.LastIndexAny(s, "987")) // 8
	fmt.Println(bytes.LastIndexAny(s, "0"))   // -1
	fmt.Println(bytes.LastIndexAny(s, ""))    // -1
}
Example 2
// ParseBlocklistP2PLine parses a line of the PeerGuardian Text Lists (P2P)
// format. It returns !ok but no error for lines that don't contain a range
// but aren't erroneous, such as comment and blank lines.
func ParseBlocklistP2PLine(l []byte) (r Range, ok bool, err error) {
	l = bytes.TrimSpace(l)
	if len(l) == 0 || bytes.HasPrefix(l, []byte("#")) {
		return
	}
	// TODO: Check this when IPv6 blocklists are available.
	colon := bytes.LastIndexAny(l, ":")
	if colon == -1 {
		err = errors.New("missing colon")
		return
	}
	hyphen := bytes.IndexByte(l[colon+1:], '-')
	if hyphen == -1 {
		err = errors.New("missing hyphen")
		return
	}
	hyphen += colon + 1
	r.Description = string(l[:colon])
	r.First = net.ParseIP(string(l[colon+1 : hyphen]))
	minifyIP(&r.First)
	r.Last = net.ParseIP(string(l[hyphen+1:]))
	minifyIP(&r.Last)
	if r.First == nil || r.Last == nil || len(r.First) != len(r.Last) {
		err = errors.New("bad IP range")
		return
	}
	ok = true
	return
}
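The snippet below is a minimal, hypothetical usage sketch of the parser above: the sample line, the exampleParseP2P name, and the log output are made up for illustration; only the ParseBlocklistP2PLine signature and the Range fields come from the code shown here.
func exampleParseP2P() {
	// Hypothetical P2P-format line: "description:first-last".
	line := []byte("Some organization:1.2.3.0-1.2.3.255")
	r, ok, err := ParseBlocklistP2PLine(line)
	if err != nil {
		log.Fatal(err)
	}
	if !ok {
		return // comment or blank line, nothing to parse
	}
	log.Printf("%s: %s - %s", r.Description, r.First, r.Last)
}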
Example 3
func lastIndexAny(s []byte, chars string) {
	if i := bytes.LastIndexAny(s, chars); i == -1 {
		log.Printf("No unicode characters in %q appear in %s", chars, s)
	} else {
		log.Printf("A unicode character in %q appears last at index %d in %s", chars, i, s)
	}
}
Example 4
func linkWikiWords(content []byte) []byte {
	buf := make([]byte, 0, len(content)*5/4)
	for {
		m := pageLinkPat.FindSubmatchIndex(content)
		if m == nil {
			buf = append(buf, content...)
			break // no more matches
		}

		// don't link inside a code block--blackfriday can't handle it
		i := bytes.LastIndexAny(content[:m[0]], "\r\n")
		if i < 0 {
			i = 0
		} else {
			i++
		}
		if bytes.Equal(content[i:i+4], []byte("    ")) {
			buf = append(buf, content...)
			break
		}

		buf = append(buf, content[0:m[0]]...)
		// m[0] - start of [[, m[1] - after ]]
		// m[2],m[3] - ! (turning off linking)
		// m[4],m[5] - link text
		// m[6],m[7] - page
		// m[8],m[9] - WikiWord
		if m[2] > 0 { // don't link
			buf = append(buf, content[m[6]:m[7]]...)
		} else if m[6] > 0 { // double-bracketed page link
			page := content[m[6]:m[7]]
			var linktext []byte
			if m[4] > 0 { // replacement link text
				linktext = content[m[4]:m[5]]
			} else {
				linktext = page
			}
			page = bytes.Replace(page, []byte(" "), []byte("-"), -1)
			buf = linkWikiPage(buf, page, linktext)
		} else { // WikiWord
			page := content[m[8]:m[9]]
			if allUpperCase(page) { // don't link acronyms
				buf = append(buf, page...)
			} else {
				buf = linkWikiPage(buf, page, nil)
			}
		}

		content = content[m[1]:]
	}
	return buf
}
Example 5
func linesSurrounding(b []byte, start int, end int) string {
	startOfFirstLine := bytes.LastIndexAny(b[0:start], "\r\n")
	if startOfFirstLine == -1 {
		startOfFirstLine = 0
	} else {
		startOfFirstLine += 1 // Go past that newline
	}
	endOfLastLine := bytes.IndexAny(b[end:], "\r\n")
	if endOfLastLine == -1 {
		endOfLastLine = len(b)
	} else {
		endOfLastLine += end // Convert back to an index into b
	}
	return string(b[startOfFirstLine:endOfLastLine])
}
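For a quick illustration of how the helper above widens a byte range to whole lines, here is a hypothetical call (the input and the indexes 11 and 17, which cover just the word "second", are made up for the example):
func exampleLinesSurrounding() {
	b := []byte("first line\nsecond line\nthird line")
	fmt.Println(linesSurrounding(b, 11, 17)) // second line
}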
Example 6
func (self *ProxyServer) ProcessRequest(response http.ResponseWriter, request *http.Request) *ProxyError {

	var (
		proxyConfig  *ProxyConfig
		origin, edge string
		edgePort     string
		readErr      error
		inputStream  io.Reader
		outputStream io.Writer
	)

	// We need the proxy config to handle the inbound hostname.
	if len(request.Host) > 0 {
		hostParts := strings.Split(request.Host, ":")
		if len(hostParts) > 1 {
			edgePort = ":" + hostParts[1]
		}
		proxyConfig, edge = self.GetConfiguration(hostParts[0])

		// Some string manipulation to preserve the requested subdomains.
		// e.g. edge=foobar.com, request.Host=images.foobar.com:8080, proxyConfig.Origin=examples.com
		// images.foobar.com:8080 -> images.examples.com
		hostParts = strings.Split(request.Host, edge+edgePort)
		origin = strings.Join(hostParts, proxyConfig.Origin)
	}

	// No proxy config, no response.
	if proxyConfig == nil {
		return &ProxyError{
			Code: 501,
			Msg:  "Unable to identify requested service.",
			Err:  fmt.Errorf("Did not know the requested host: %s", request.Host),
		}
	}

	// Check to make sure we are dealing with a human before we waste any more resources.
	if HumanCookieCheck {
		if humanCookie, err := request.Cookie(HumanCookieName); err != nil || !self.Captcha.ValidToken(humanCookie.Value) {
			return self.Captcha.Challenge(response, request, edge)
		}
	}

	// Clean up the request for passing on to the next server.
	request.URL.Scheme = "http"
	if request.TLS != nil || proxyConfig.OriginProto == "https" {
		request.URL.Scheme = "https"
	}
	request.URL.Host = origin
	request.RequestURI = ""
	request.Host = ""

	if tmp := proxyConfig.Inbound.Replace(request.Referer()); tmp != request.Referer() {
		if proxyConfig.OriginProto == "https" {
			parts := strings.Split(tmp, "http")
			tmp = strings.Join(parts, "https")
		}
		request.Header["Referer"][0] = tmp
	}

	// Do the request to the remote server
	resp, err := self.Client.Do(request)
	if err != nil {
		// Unwind the redirect prevention.
		if urlErr, ok := err.(*url.Error); ok {
			if _, ok = urlErr.Err.(*NoRedirectError); ok {
				err = nil
			}
		}
		if err != nil {
			return &ProxyError{
				Code: 502,
				Msg:  "Unable to contact upstream service.",
				Err:  err,
			}
		}
	}

	// Process the Headers.
	for k, values := range resp.Header {
		for _, v := range values {
			// We single out Location here because the port matters for it.
			// Headers like Set-Cookie and others don't care about ports.
			// Depending on which way is more common, this should evolve.
			if k == "Location" {
				parts := strings.Split(v, proxyConfig.Origin)
				v = strings.Join(parts, edge+edgePort)
				if proxyConfig.OriginProto == "https" {
					parts = strings.Split(v, "https://")
					v = strings.Join(parts, "http://")
				}
			} else {
				v = proxyConfig.Outbound.Replace(v)
			}

			response.Header().Add(k, v)
		}
	}
	replaceContent := strings.Contains(resp.Header.Get("Content-Type"), "text") ||
		strings.Contains(resp.Header.Get("Content-Type"), "application/javascript")
	gzipContent := strings.Contains(resp.Header.Get("Content-Encoding"), "gzip")

	if replaceContent {
		response.Header().Del("Content-Length")
	}

	// Flush the headers down stream.
	// NOTE: This means errors after here are going to look like truncated content.
	response.WriteHeader(resp.StatusCode)

	if gzipContent && replaceContent {
		// Close doesn't exist on io.Reader and io.Writer
		// so using the specific types to register the defer.
		gzp := self.takeGzip(resp.Body, response)
		defer self.giveGzip(gzp)
		inputStream = gzp.Reader
		outputStream = gzp.Writer
	} else {
		inputStream = resp.Body
		outputStream = response
	}

	// Buffer management
	pos, gcount := 0, 0
	buffer := self.takeBuffer()
	defer self.giveBuffer(buffer)

	readErr = nil
	for readErr == nil && err == nil {
		// Read from server and write to client.
		gcount, readErr = inputStream.Read(buffer[pos:])

		if gcount > 0 || pos+gcount > 0 {
			if replaceContent {
				// Splitting a slice shouldn't copy any data.
				pieces := bytes.Split(buffer[0:pos+gcount], []byte(proxyConfig.Origin))
				if len(pieces) > 1 {
					for _, chunk := range pieces[:len(pieces)-1] {
						if proxyConfig.OriginProto == "https" {
							protoOffset := bytes.LastIndex(chunk, []byte("https://"))
							if protoOffset > -1 {
								// The existence of any of these characters
								// between the proto and the domain name
								// implies we have gone past a single element
								abortOffset := bytes.LastIndexAny(chunk[protoOffset:], "='\" <>")
								if abortOffset == -1 {
									// Getting pretty indented; should move this to a function.
									gcount, err = outputStream.Write(chunk[:protoOffset+4])
									chunk = chunk[protoOffset+5:]
								}
							}
						}
						gcount, err = outputStream.Write(chunk)
						gcount, err = outputStream.Write([]byte(edge))
						gcount, err = outputStream.Write([]byte(edgePort))
					}
					lastChunk := pieces[len(pieces)-1]
					if readErr != nil {
						gcount, err = outputStream.Write(lastChunk)
					} else {
						pos = copy(buffer, lastChunk)
					}
				} else {
					if readErr != nil {
						gcount, err = outputStream.Write(buffer[:pos+gcount])
						pos = 0
					} else if pos+gcount > len(proxyConfig.Origin) {
						remainder := buffer[pos+gcount-len(proxyConfig.Origin) : pos+gcount]
						gcount, err = outputStream.Write(buffer[:pos+gcount-len(proxyConfig.Origin)])
						pos = copy(buffer, remainder)
					} else {
						pos += gcount
					}
				}
			} else {
				gcount, err = outputStream.Write(buffer[:gcount])
			}
		}
		if f, ok := response.(http.Flusher); ok {
			f.Flush()
		}
	}
	return nil
}
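The subdomain-preserving host rewrite near the top of ProcessRequest is easy to miss among everything else the handler does, so here is a standalone sketch of just that trick; the host names and the exampleHostRewrite name are hypothetical, only the Split/Join technique comes from the code above.
func exampleHostRewrite() {
	host := "images.foobar.com:8080" // request.Host
	edge, edgePort := "foobar.com", ":8080"
	origin := "examples.com" // proxyConfig.Origin

	// Splitting on edge+edgePort and joining with origin swaps the edge
	// host for the origin while keeping the "images." prefix intact.
	parts := strings.Split(host, edge+edgePort)
	fmt.Println(strings.Join(parts, origin)) // images.examples.com
}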
Example 7
// loadReferences handles cross-references
//
// 	1. Cross-Reference Table (§7.5.4) and File Trailer (§7.5.5)
// 	2. Cross-Reference Streams (§7.5.8) (since PDF-1.5)
// 	3. Hybrid (§7.5.8.4) (since PDF-1.5)
//
// The method used can be determined by following the
// startxref reference. If the referenced position is an
// indirect object, then method 2 is used. Otherwise if the
// trailer has an XRefStm entry, then method 3 is used.
// Otherwise method 1 is used.
func (file *File) loadReferences() error {
	// find EOF tag to ignore junk in the file after it
	eofOffset := bytes.LastIndex(file.mmap, []byte("%%EOF"))
	if eofOffset == -1 {
		return errors.New("file does not have PDF ending")
	}

	// find last startxref
	startxrefOffset := bytes.LastIndex(file.mmap, []byte("startxref"))
	if startxrefOffset == -1 {
		return errors.New("could not find startxref")
	}

	digits := "0123456789"
	xrefStart := bytes.IndexAny(file.mmap[startxrefOffset:], digits)
	if xrefStart == -1 {
		return errors.New("could not find beginning of startxref reference")
	}
	xrefStart += startxrefOffset
	xrefEnd := bytes.LastIndexAny(file.mmap[xrefStart:eofOffset], digits)
	if xrefEnd == -1 {
		return errors.New("could not find end of startxref reference")
	}
	xrefEnd += xrefStart + 1

	xrefOffset64, err := strconv.ParseUint(string(file.mmap[xrefStart:xrefEnd]), 10, 64)
	if err != nil {
		return err
	}
	xrefOffset := int(xrefOffset64)

	refs, trailer, err := file.parseReferences(xrefOffset)
	if err != nil {
		return err
	}

	file.prev = Integer(xrefOffset)
	file.objects = refs
	if size, ok := trailer[Name("Size")].(Integer); !ok {
		return fmt.Errorf("trailer %q=%v", Name("Size"), trailer[Name("Size")])
	} else {
		file.size = uint(size)
	}

	// fill in values from the trailer
	if root, ok := trailer[Name("Root")]; ok {
		file.Root = root.(ObjectReference)
	}

	if encrypt, ok := trailer[Name("Encrypt")]; ok {
		file.Encrypt = encrypt.(Dictionary)
	}

	if info, ok := trailer[Name("Info")]; ok {
		file.Info = info.(ObjectReference)
	}

	if id, ok := trailer[Name("ID")]; ok {
		file.ID = id.(Array)
	}

	// println(string(file.mmap[xrefStart:xrefEnd]), xrefOffset)
	// println(string(file.mmap[xrefOffset : xrefOffset+200]))

	return nil
}
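As a sanity check on the index arithmetic in loadReferences, here is a self-contained sketch against a fabricated PDF tail; the tail bytes and the exampleStartxref name are made up, while the bytes and strconv calls mirror the function above.
func exampleStartxref() {
	// Fabricated tail of a PDF file: startxref, the byte offset of the
	// cross-reference section, then the %%EOF marker.
	tail := []byte("...\nstartxref\n116\n%%EOF\n")

	eofOffset := bytes.LastIndex(tail, []byte("%%EOF"))
	startxrefOffset := bytes.LastIndex(tail, []byte("startxref"))

	digits := "0123456789"
	xrefStart := startxrefOffset + bytes.IndexAny(tail[startxrefOffset:], digits)
	xrefEnd := xrefStart + bytes.LastIndexAny(tail[xrefStart:eofOffset], digits) + 1

	offset, _ := strconv.ParseUint(string(tail[xrefStart:xrefEnd]), 10, 64)
	fmt.Println(offset) // 116
}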