Example #1: semaLimiter.Release — releases a slot in a channel-based semaphore limiter, annotating the trace if one is present.
func (l *semaLimiter) Release(tr trace.Trace) {
	if tr != nil {
		tr.LazyPrintf("releasing semalimiter")
	}
	<-l.sem
	if tr != nil {
		tr.LazyPrintf("semalimiter released")
	}
}
Example #2: semaLimiter.Acquire — the matching acquire, which logs before and after taking a semaphore slot.
func (l *semaLimiter) Acquire(tr trace.Trace) {
	if tr != nil {
		tr.LazyPrintf("Acquiring semalimiter out of %d", l.Limit())
	}
	l.sem <- struct{}{}
	if tr != nil {
		tr.LazyPrintf("semalimiter acquired")
	}
}
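Both methods above assume a limiter backed by a buffered channel, but the example does not show the type itself. A minimal sketch of what such a type might look like; the newSemaLimiter constructor name is an assumption, not part of the original code:

// Hypothetical definition of the limiter used above: a buffered channel whose
// capacity is the concurrency limit. Acquire sends into the channel, Release
// receives from it.
type semaLimiter struct {
	sem chan struct{}
}

// newSemaLimiter is an assumed constructor; the original example does not show one.
func newSemaLimiter(n int) *semaLimiter {
	return &semaLimiter{sem: make(chan struct{}, n)}
}

// Limit reports the maximum number of concurrent holders.
func (l *semaLimiter) Limit() int {
	return cap(l.sem)
}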
Example #3: TraceAnswer — logs a DNS reply's response code and answer records to a trace.
func TraceAnswer(tr trace.Trace, m *dns.Msg) {
	if m.Rcode != dns.RcodeSuccess {
		rcode := dns.RcodeToString[m.Rcode]
		tr.LazyPrintf("%s", rcode)
	}
	for _, rr := range m.Answer {
		tr.LazyPrintf("%s", rr.String())
	}
}
Example #4: cachingResolver.Query — a caching DNS resolver that traces cache hits, misses, and bypasses.
func (c *cachingResolver) Query(r *dns.Msg, tr trace.Trace) (*dns.Msg, error) {
	stats.cacheTotal.Add(1)

	// To keep it simple we only cache single-question queries.
	if len(r.Question) != 1 {
		tr.LazyPrintf("cache bypass: multi-question query")
		stats.cacheBypassed.Add(1)
		return c.back.Query(r, tr)
	}

	question := r.Question[0]

	c.mu.RLock()
	answer, hit := c.answer[question]
	c.mu.RUnlock()

	if hit {
		tr.LazyPrintf("cache hit")
		stats.cacheHits.Add(1)

		reply := &dns.Msg{
			MsgHdr: dns.MsgHdr{
				Id:            r.Id,
				Response:      true,
				Authoritative: false,
				Rcode:         dns.RcodeSuccess,
			},
			Question: r.Question,
			Answer:   answer,
		}

		return reply, nil
	}

	tr.LazyPrintf("cache miss")
	stats.cacheMisses.Add(1)

	reply, err := c.back.Query(r, tr)
	if err != nil {
		return reply, err
	}

	if err = wantToCache(question, reply); err != nil {
		tr.LazyPrintf("cache not recording reply: %v", err)
		return reply, nil
	}

	answer = reply.Answer
	ttl := limitTTL(answer)

	// Only store answers if they're going to stay around for a bit,
	// there's not much point in caching things we have to expire quickly.
	if ttl < minTTL {
		return reply, nil
	}

	// Store the answer in the cache, but don't exceed 2k entries.
	// TODO: Do usage based eviction when we're approaching ~1.5k.
	c.mu.Lock()
	if len(c.answer) < maxCacheSize {
		setTTL(answer, ttl)
		c.answer[question] = answer
		stats.cacheRecorded.Add(1)
	}
	c.mu.Unlock()

	return reply, nil
}
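The resolver above relies on helpers (wantToCache, limitTTL, setTTL) and constants (minTTL, maxCacheSize) that are not shown. The following is only a hypothetical sketch of the two TTL helpers, consistent with how they are used above but not necessarily matching the original project (it assumes the time and github.com/miekg/dns packages, plus a maxTTL constant):

// limitTTL returns the smallest TTL among the answer records (capped at an
// assumed maxTTL), so the whole cached answer expires when its shortest-lived
// record would. Hypothetical implementation.
func limitTTL(answer []dns.RR) time.Duration {
	ttl := maxTTL // assumed package constant, e.g. 1 * time.Hour
	for _, rr := range answer {
		t := time.Duration(rr.Header().Ttl) * time.Second
		if t < ttl {
			ttl = t
		}
	}
	return ttl
}

// setTTL overwrites every record's TTL with the cached value, so all answers
// in the entry expire together. Hypothetical implementation.
func setTTL(answer []dns.RR, ttl time.Duration) {
	for _, rr := range answer {
		rr.Header().Ttl = uint32(ttl.Seconds())
	}
}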
Example #5: httpsResolver.Query — a DNS-over-HTTPS resolver that traces the upstream GET and response status.
func (r *httpsResolver) Query(req *dns.Msg, tr trace.Trace) (*dns.Msg, error) {
	// Only answer single-question queries.
	// In practice, these are all we get, and almost no server supports
	// multi-question requests anyway.
	if len(req.Question) != 1 {
		return nil, fmt.Errorf("multi-question query")
	}

	question := req.Question[0]
	// Only answer IN-class queries, which are the ones used in practice.
	if question.Qclass != dns.ClassINET {
		return nil, fmt.Errorf("query class != IN")
	}

	// Build the query and send the request.
	v := url.Values{}
	v.Set("name", question.Name)
	v.Set("type", dns.TypeToString[question.Qtype])
	// TODO: add random_padding.

	url := r.Upstream + "?" + v.Encode()
	if glog.V(3) {
		tr.LazyPrintf("GET %q", url)
	}

	hr, err := r.client.Get(url)
	if err != nil {
		return nil, fmt.Errorf("GET failed: %v", err)
	}
	tr.LazyPrintf("%s  %s", hr.Proto, hr.Status)
	defer hr.Body.Close()

	if hr.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Response status: %s", hr.Status)
	}

	// Read the HTTPS response, and parse the JSON.
	body, err := ioutil.ReadAll(hr.Body)
	if err != nil {
		return nil, fmt.Errorf("Failed to read body: %v", err)
	}

	jr := &jsonResponse{}
	err = json.Unmarshal(body, jr)
	if err != nil {
		return nil, fmt.Errorf("Failed to unmarshall: %v", err)
	}

	if len(jr.Question) != 1 {
		return nil, fmt.Errorf("Wrong number of questions in the response")
	}

	// Build the DNS response.
	resp := &dns.Msg{
		MsgHdr: dns.MsgHdr{
			Id:       req.Id,
			Response: true,
			Opcode:   req.Opcode,
			Rcode:    jr.Status,

			Truncated:          jr.TC,
			RecursionDesired:   jr.RD,
			RecursionAvailable: jr.RA,
			AuthenticatedData:  jr.AD,
			CheckingDisabled:   jr.CD,
		},
		Question: []dns.Question{
			{
				Name:   jr.Question[0].Name,
				Qtype:  jr.Question[0].Type,
				Qclass: dns.ClassINET,
			}},
	}

	for _, answer := range jr.Answer {
		// TODO: This "works" but is quite hacky. Is there a better way,
		// without doing lots of data parsing?
		s := fmt.Sprintf("%s %d IN %s %s",
			answer.Name, answer.TTL,
			dns.TypeToString[answer.Type], answer.Data)
		rr, err := dns.NewRR(s)
		if err != nil {
			return nil, fmt.Errorf("Error parsing answer: %v", err)
		}

		resp.Answer = append(resp.Answer, rr)
	}

	return resp, nil
}
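The jsonResponse type is not included in the example, but the fields it needs can be read off the code above. A sketch consistent with that usage and with the common DNS-over-HTTPS JSON format; the struct tags are an assumption:

// jsonResponse mirrors the JSON body returned by the upstream resolver, as
// implied by the fields the code above reads. The json tags follow the usual
// dns-json key names, which is an assumption here.
type jsonResponse struct {
	Status int  `json:"Status"` // maps to the DNS Rcode
	TC     bool `json:"TC"`
	RD     bool `json:"RD"`
	RA     bool `json:"RA"`
	AD     bool `json:"AD"`
	CD     bool `json:"CD"`

	Question []struct {
		Name string `json:"name"`
		Type uint16 `json:"type"`
	} `json:"Question"`

	Answer []struct {
		Name string `json:"name"`
		Type uint16 `json:"type"`
		TTL  uint32 `json:"TTL"`
		Data string `json:"data"`
	} `json:"Answer"`
}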
Example #6: logAttrs — logs FUSE inode attributes (size, mode, times) to a trace.
func logAttrs(r trace.Trace, a fuseops.InodeAttributes) {
	r.LazyPrintf(
		"res: size=%d, mode=%s atime=%s mtime=%s", a.Size, a.Mode, a.Atime, a.Mtime,
	)
}
Example #7: netTraceIntegrator — a basictracer span event listener that mirrors span events into net/trace.
// netTraceIntegrator is passed into basictracer as NewSpanEventListener
// and causes all traces to be registered with the net/trace endpoint.
func netTraceIntegrator() func(basictracer.SpanEvent) {
	var tr trace.Trace
	return func(e basictracer.SpanEvent) {
		switch t := e.(type) {
		case basictracer.EventCreate:
			tr = trace.New("tracing", t.OperationName)
			tr.SetMaxEvents(maxLogsPerSpan)
		case basictracer.EventFinish:
			tr.Finish()
		case basictracer.EventTag:
			tr.LazyPrintf("%s:%v", t.Key, t.Value)
		case basictracer.EventLogFields:
			// TODO(radu): when LightStep supports arbitrary fields, we should make
			// the formatting of the message consistent with that. Until then we treat
			// legacy events that just have an "event" key specially.
			if len(t.Fields) == 1 && t.Fields[0].Key() == "event" {
				tr.LazyPrintf("%s", t.Fields[0].Value())
			} else {
				var buf bytes.Buffer
				for i, f := range t.Fields {
					if i > 0 {
						buf.WriteByte(' ')
					}
					fmt.Fprintf(&buf, "%s:%v", f.Key(), f.Value())
				}

				tr.LazyPrintf("%s", buf.String())
			}
		case basictracer.EventLog:
			panic("EventLog is deprecated")
		}
	}
}
Example #8: netTraceIntegrator (alternative version) — the same listener, here still handling the legacy EventLog case.
// netTraceIntegrator is passed into basictracer as NewSpanEventListener
// and causes all traces to be registered with the net/trace endpoint.
func netTraceIntegrator() func(basictracer.SpanEvent) {
	var tr trace.Trace
	return func(e basictracer.SpanEvent) {
		switch t := e.(type) {
		case basictracer.EventCreate:
			tr = trace.New("tracing", t.OperationName)
			tr.SetMaxEvents(maxLogsPerSpan)
		case basictracer.EventFinish:
			tr.Finish()
		case basictracer.EventTag:
			tr.LazyPrintf("%s:%v", t.Key, t.Value)
		case basictracer.EventLogFields:
			var buf bytes.Buffer
			for i, f := range t.Fields {
				if i > 0 {
					buf.WriteByte(' ')
				}
				fmt.Fprintf(&buf, "%s:%v", f.Key(), f.Value())
			}

			tr.LazyPrintf("%s", buf.String())
		case basictracer.EventLog:
			if t.Payload != nil {
				tr.LazyPrintf("%s (payload %v)", t.Event, t.Payload)
			} else {
				tr.LazyPrintf("%s", t.Event)
			}
		}
	}
}
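Both versions of netTraceIntegrator are meant to be installed as basictracer's span event listener, so that every span created through OpenTracing also shows up on the net/trace pages. A rough wiring sketch; the in-memory recorder is only a placeholder and newTracer is an illustrative name:

// newTracer builds an OpenTracing tracer whose span events are mirrored to
// net/trace (visible under /debug/requests). Sketch only; real code would use
// a proper SpanRecorder.
func newTracer() opentracing.Tracer {
	opts := basictracer.DefaultOptions()
	opts.Recorder = basictracer.NewInMemoryRecorder() // placeholder recorder
	opts.NewSpanEventListener = netTraceIntegrator
	return basictracer.NewWithOptions(opts)
}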
Example #9: search — a package-search routine that traces hit counts and each sorting and reranking stage.
func search(tr trace.Trace, db database, q string) (*SearchResult, stringsp.Set, error) {
	tokens := gcse.AppendTokens(nil, []byte(q))
	tokenList := tokens.Elements()
	log.Printf("tokens for query %s: %v", q, tokens)

	var hits []*Hit

	N := db.PackageCount()
	textIdfs := make([]float64, len(tokenList))
	nameIdfs := make([]float64, len(tokenList))
	for i := range textIdfs {
		textIdfs[i] = idf(db.PackageCountOfToken(gcse.IndexTextField, tokenList[i]), N)
		nameIdfs[i] = idf(db.PackageCountOfToken(gcse.IndexNameField, tokenList[i]), N)
	}

	db.Search(map[string]stringsp.Set{gcse.IndexTextField: tokens},
		func(docID int32, data interface{}) error {
			hit := &Hit{}
			var ok bool
			hit.HitInfo, ok = data.(gcse.HitInfo)
			if !ok {
				log.Printf("search: unexpected hit data type %T", data)
			}

			hit.MatchScore = gcse.CalcMatchScore(&hit.HitInfo, tokenList, textIdfs, nameIdfs)
			hit.Score = math.Max(hit.StaticScore, hit.TestStaticScore) * hit.MatchScore

			hits = append(hits, hit)
			return nil
		})
	tr.LazyPrintf("Got %d hits for query %q", len(hits), q)

	swapHits := func(i, j int) {
		hits[i], hits[j] = hits[j], hits[i]
	}
	sortp.SortF(len(hits), func(i, j int) bool {
		// true if doc i is before doc j
		ssi, ssj := hits[i].Score, hits[j].Score
		if ssi > ssj {
			return true
		}
		if ssi < ssj {
			return false
		}
		sci, scj := hits[i].StarCount, hits[j].StarCount
		if sci > scj {
			return true
		}
		if sci < scj {
			return false
		}
		pi, pj := hits[i].Package, hits[j].Package
		if len(pi) < len(pj) {
			return true
		}
		if len(pi) > len(pj) {
			return false
		}
		return pi < pj
	}, swapHits)

	tr.LazyPrintf("Results sorted")

	if len(hits) < 5000 {
		// Adjust Score by down ranking duplicated packages
		pkgCount := make(map[string]int)
		for _, hit := range hits {
			cnt := pkgCount[hit.Name] + 1
			pkgCount[hit.Name] = cnt
			if cnt > 1 && hit.ImportedLen == 0 && hit.TestImportedLen == 0 {
				hit.Score /= float64(cnt)
			}
		}
		// Re-sort
		sortp.BubbleF(len(hits), func(i, j int) bool {
			return hits[i].Score > hits[j].Score
		}, swapHits)
		tr.LazyPrintf("Results reranked")
	}
	return &SearchResult{
		TotalResults: len(hits),
		Hits:         hits,
	}, tokens, nil
}
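All of the examples above receive a trace.Trace that was created and finished elsewhere. For reference, the surrounding lifecycle from golang.org/x/net/trace typically looks like the sketch below; resolveWithTrace and doResolve are illustrative names, not part of any example:

// resolveWithTrace shows the caller-side pattern: create a trace per request,
// annotate it with LazyPrintf, mark errors, and Finish it so it appears under
// the /debug/requests page that importing the trace package registers.
func resolveWithTrace(q string) error {
	tr := trace.New("example.Resolve", q) // family and title are illustrative
	defer tr.Finish()

	tr.LazyPrintf("resolving %q", q)
	if err := doResolve(q); err != nil { // doResolve is a placeholder
		tr.LazyPrintf("error: %v", err)
		tr.SetError() // highlights the trace as failed in the UI
		return err
	}
	return nil
}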