// protoToRecord converts a RequestLog, the internal Protocol Buffer
// representation of a single request-level log, to a Record, its
// corresponding external representation.
func protoToRecord(rl *log_proto.RequestLog) *Record {
	return &Record{
		AppID:             *rl.AppId,
		VersionID:         *rl.VersionId,
		RequestID:         *rl.RequestId,
		IP:                *rl.Ip,
		Nickname:          proto.GetString(rl.Nickname),
		StartTime:         *rl.StartTime,
		EndTime:           *rl.EndTime,
		Latency:           *rl.Latency,
		MCycles:           *rl.Mcycles,
		Method:            *rl.Method,
		Resource:          *rl.Resource,
		HTTPVersion:       *rl.HttpVersion,
		Status:            *rl.Status,
		ResponseSize:      *rl.ResponseSize,
		Referrer:          proto.GetString(rl.Referrer),
		UserAgent:         proto.GetString(rl.UserAgent),
		URLMapEntry:       *rl.UrlMapEntry,
		Combined:          *rl.Combined,
		APIMCycles:        proto.GetInt64(rl.ApiMcycles),
		Host:              proto.GetString(rl.Host),
		Cost:              proto.GetFloat64(rl.Cost),
		TaskQueueName:     proto.GetString(rl.TaskQueueName),
		TaskName:          proto.GetString(rl.TaskName),
		WasLoadingRequest: proto.GetBool(rl.WasLoadingRequest),
		PendingTime:       proto.GetInt64(rl.PendingTime),
		Finished:          proto.GetBool(rl.Finished),
		AppLogs:           protoToAppLogs(rl.Line),
	}
}
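Note the split between the two access styles above: fields that are required in the RequestLog schema are dereferenced directly, while optional fields go through proto.GetString and its siblings, which tolerate a nil pointer. A minimal stand-in for that accessor (a hypothetical helper, not part of the package) makes the contract explicit:

// getStr is a stand-in with the same contract as proto.GetString:
// dereference the optional field if it was set, otherwise return the
// type's zero value instead of panicking on a nil pointer.
func getStr(p *string) string {
	if p == nil {
		return "" // optional field never set
	}
	return *p
}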
func (t *Iterator) next() (*Key, *pb.EntityProto, error) {
	if t.err != nil {
		return nil, nil, t.err
	}

	// Issue datastore_v3/Next RPCs as necessary.
	for len(t.res.Result) == 0 {
		if !proto.GetBool(t.res.MoreResults) {
			t.err = Done
			return nil, nil, t.err
		}
		t.offset -= proto.GetInt32(t.res.SkippedResults)
		if t.offset < 0 {
			t.offset = 0
		}
		if err := callNext(t.c, &t.res, t.offset, t.limit, zeroLimitMeansUnlimited); err != nil {
			t.err = err
			return nil, nil, t.err
		}
		// For an Iterator, a zero limit means unlimited.
		if t.limit == 0 {
			continue
		}
		t.limit -= int32(len(t.res.Result))
		if t.limit > 0 {
			continue
		}
		t.limit = 0
		if proto.GetBool(t.res.MoreResults) {
			t.err = errors.New("datastore: internal error: limit exhausted but more_results is true")
			return nil, nil, t.err
		}
	}

	// Pop the EntityProto from the front of t.res.Result and
	// extract its key.
	var e *pb.EntityProto
	e, t.res.Result = t.res.Result[0], t.res.Result[1:]
	if e.Key == nil {
		return nil, nil, errors.New("datastore: internal error: server did not return a key")
	}
	k, err := protoToKey(e.Key)
	if err != nil || k.Incomplete() {
		return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
	}
	if proto.GetBool(t.res.KeysOnly) {
		return k, nil, nil
	}
	return k, e, nil
}
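Callers never see next directly; they drain the iterator through the exported Next method until the Done sentinel comes back. A minimal consumer sketch, assuming the classic appengine/datastore API and a hypothetical Entity struct for the caller's kind:

// Iterate over all results of a query. Done marks normal exhaustion;
// any other error aborts the loop.
it := q.Run(c) // q is a *datastore.Query, c an appengine.Context
for {
	var x Entity // placeholder struct matching the stored kind
	key, err := it.Next(&x)
	if err == datastore.Done {
		break // no more results
	}
	if err != nil {
		return err
	}
	_ = key // use key and x here
}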
func (t *Iterator) next() (*Key, *pb.EntityProto, os.Error) {
	if t.err != nil {
		return nil, nil, t.err
	}

	// Issue an RPC if necessary.
	call := false
	if t.res == nil {
		call = true
		t.res = &pb.QueryResult{}
	} else if len(t.res.Result) == 0 {
		if !proto.GetBool(t.res.MoreResults) {
			t.err = Done
			return nil, nil, t.err
		}
		call = true
		t.res.Reset()
	}
	if call {
		if t.offset != 0 {
			if t.offset < 0 || t.offset > math.MaxInt32 {
				t.err = os.NewError("datastore: query offset overflow")
				return nil, nil, t.err
			}
			if t.req.Offset == nil {
				t.req.Offset = new(int32)
			}
			*t.req.Offset = int32(t.offset)
		}
		t.err = t.c.Call("datastore_v3", "RunQuery", t.req, t.res)
		if t.err != nil {
			return nil, nil, t.err
		}
	}
	if len(t.res.Result) == 0 {
		// TODO: This code is probably broken for
		// queries with offset > 1000.
		t.err = Done
		return nil, nil, t.err
	}
	t.offset++

	// Pop the EntityProto from the front of t.res.Result and
	// extract its key.
	var e *pb.EntityProto
	e, t.res.Result = t.res.Result[0], t.res.Result[1:]
	if e.Key == nil {
		return nil, nil, os.NewError("datastore: internal error: server did not return a key")
	}
	k, err := protoToKey(e.Key)
	if err != nil || k.Incomplete() {
		return nil, nil, os.NewError("datastore: internal error: server returned an invalid key")
	}
	if t.keysOnly {
		return k, nil, nil
	}
	return k, e, nil
}
// loadMapEntry converts a Property into an entry of an existing Map,
// or into an element of a slice-valued Map entry.
func loadMapEntry(m Map, k *Key, p *pb.Property) os.Error {
	var (
		result    interface{}
		sliceType reflect.Type
	)
	switch {
	case p.Value.Int64Value != nil:
		if p.Meaning != nil && *p.Meaning == pb.Property_GD_WHEN {
			result = Time(*p.Value.Int64Value)
			sliceType = reflect.TypeOf([]Time(nil))
		} else {
			result = *p.Value.Int64Value
			sliceType = reflect.TypeOf([]int64(nil))
		}
	case p.Value.BooleanValue != nil:
		result = *p.Value.BooleanValue
		sliceType = reflect.TypeOf([]bool(nil))
	case p.Value.StringValue != nil:
		if p.Meaning != nil && *p.Meaning == pb.Property_BLOB {
			result = []byte(*p.Value.StringValue)
			sliceType = reflect.TypeOf([][]byte(nil))
		} else if p.Meaning != nil && *p.Meaning == pb.Property_BLOBKEY {
			result = appengine.BlobKey(*p.Value.StringValue)
			sliceType = reflect.TypeOf([]appengine.BlobKey(nil))
		} else {
			result = *p.Value.StringValue
			sliceType = reflect.TypeOf([]string(nil))
		}
	case p.Value.DoubleValue != nil:
		result = *p.Value.DoubleValue
		sliceType = reflect.TypeOf([]float64(nil))
	case p.Value.Referencevalue != nil:
		key, err := referenceValueToKey(p.Value.Referencevalue)
		if err != nil {
			return err
		}
		result = key
		sliceType = reflect.TypeOf([]*Key(nil))
	default:
		return nil
	}

	name := proto.GetString(p.Name)
	if proto.GetBool(p.Multiple) {
		var s reflect.Value
		if x := m[name]; x != nil {
			s = reflect.ValueOf(x)
		} else {
			s = reflect.MakeSlice(sliceType, 0, 0)
		}
		s = reflect.Append(s, reflect.ValueOf(result))
		m[name] = s.Interface()
	} else {
		m[name] = result
	}
	return nil
}
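The multiple-valued branch relies on reflect.MakeSlice and reflect.Append to grow a slice whose element type is only known at runtime, while the map itself only ever holds interface{} values. A stripped-down, self-contained sketch of that pattern, outside the datastore context:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Values arrive as interface{}, but we want them accumulated in a
	// concretely typed []int64 stored under a map key.
	m := map[string]interface{}{}
	sliceType := reflect.TypeOf([]int64(nil))

	for _, v := range []int64{3, 5, 8} {
		var s reflect.Value
		if x := m["count"]; x != nil {
			s = reflect.ValueOf(x) // reuse the existing []int64
		} else {
			s = reflect.MakeSlice(sliceType, 0, 0) // fresh, empty []int64
		}
		s = reflect.Append(s, reflect.ValueOf(v))
		m["count"] = s.Interface()
	}
	fmt.Printf("%#v\n", m["count"]) // []int64{3, 5, 8}
}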
func (c *CheckResult) stringMap() (smap map[string]string) {
	smap = map[string]string{
		"Hostname":    fmt.Sprintf("%s", proto.GetString(c.Hostname)),
		"ServiceName": fmt.Sprintf("%s", proto.GetString(c.ServiceName)),
		"Status":      fmt.Sprintf("%d", int32(*c.Status)),
		"CheckPassive": fmt.Sprintf("%d", func() (i int32) {
			if proto.GetBool(c.CheckPassive) {
				i = 1
			} else {
				i = 0
			}
			return i
		}()),
		"CheckOutput":    fmt.Sprintf("%s", strings.Trim(strconv.Quote(proto.GetString(c.CheckOutput)), "\"")),
		"StartTimestamp": fmt.Sprintf("%f", float64(proto.GetInt64(c.StartTimestamp))/1000000000),
		"EndTimestamp":   fmt.Sprintf("%f", float64(proto.GetInt64(c.EndTimestamp))/1000000000),
		"TimeNow":        fmt.Sprintf("%d", time.Seconds()),
	}
	return smap
}
// RoundTrip issues a single HTTP request and returns its response. Per the
// http.RoundTripper interface, RoundTrip only returns an error if there
// was a problem with the request being malformed
// (ErrInvalidFetchRequest) or the URL Fetch proxy fails (ErrFetch).
// Note that HTTP response codes such as 403, 404, and 5xx are not
// errors as far as the transport is concerned and are returned
// with err set to nil.
func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err os.Error) {
	methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
	if !ok {
		return nil, &ErrInvalidFetchRequest{"Unsupported method: " + req.Method, nil}
	}

	method := pb.URLFetchRequest_RequestMethod(methNum)

	freq := &pb.URLFetchRequest{
		Method:                        &method,
		Url:                           proto.String(req.URL.String()),
		FollowRedirects:               proto.Bool(false), // http.Client's responsibility
		MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
	}
	if t.DeadlineSeconds != 0 {
		freq.Deadline = proto.Float64(t.DeadlineSeconds)
	}

	for k, vals := range req.Header {
		for _, val := range vals {
			freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
				Key:   proto.String(k),
				Value: proto.String(val),
			})
		}
	}
	if methodAcceptsRequestBody[req.Method] {
		freq.Payload, err = ioutil.ReadAll(req.Body)
		if err != nil {
			return nil, &ErrInvalidFetchRequest{"Failed to read body", err}
		}
	}

	fres := &pb.URLFetchResponse{}
	if err := t.Context.Call("urlfetch", "Fetch", freq, fres); err != nil {
		return nil, &ErrFetch{err.String()}
	}

	res = &http.Response{}
	res.StatusCode = int(*fres.StatusCode)
	res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
	res.Header = http.Header(make(map[string][]string))
	res.RequestMethod = req.Method

	// Faked:
	res.ProtoMajor = 1
	res.ProtoMinor = 1
	res.Proto = "HTTP/1.1"
	res.Close = true

	for _, h := range fres.Header {
		hkey := http.CanonicalHeaderKey(*h.Key)
		hval := *h.Value
		if hkey == "Content-Length" {
			// Will get filled in below for all but HEAD requests.
			if req.Method == "HEAD" {
				res.ContentLength, _ = strconv.Atoi64(hval)
			}
			continue
		}
		res.Header.Add(hkey, hval)
	}

	if req.Method != "HEAD" {
		res.ContentLength = int64(len(fres.Content))
	}

	truncated := proto.GetBool(fres.ContentWasTruncated)
	res.Body = &bodyReader{content: fres.Content, truncated: truncated}
	return
}
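In practice the transport is rarely used directly; it is handed to an http.Client, which layers redirect handling on top (hence FollowRedirects being forced to false above). A minimal usage sketch, assuming the Transport fields shown in this snippet and an appengine.Context c for the current request:

// Build a client whose requests are proxied through the URL Fetch service.
client := &http.Client{
	Transport: &Transport{
		Context:         c,  // appengine.Context for the current request
		DeadlineSeconds: 30, // optional; zero leaves the service default in place
	},
}
resp, err := client.Get("https://example.com/")
if err != nil {
	// ErrFetch, ErrInvalidFetchRequest, or a redirect error from http.Client.
	return err
}
defer resp.Body.Close()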
// loadStructField converts a Property into a field of an existing struct,
// or into an element of a slice-typed struct field.
// It returns an error message, or "" for success.
func loadStructField(sv reflect.Value, p *pb.Property) string {
	fieldName := proto.GetString(p.Name)
	v := sv.FieldByName(fieldName)
	if !v.IsValid() {
		return "no such struct field"
	}
	if unexported(fieldName) {
		return "unexported struct field"
	}

	var slice reflect.Value
	if proto.GetBool(p.Multiple) {
		if v.Kind() != reflect.Slice {
			return "multiple-valued property requires a slice field type"
		}
		if v.Len() > maxSliceFieldLen-1 {
			return "slice is too long"
		}
		slice = v
		v = reflect.New(v.Type().Elem()).Elem()
	}

	switch v.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		if p.Value.Int64Value == nil {
			return typeMismatchReason(p, v)
		}
		x := *p.Value.Int64Value
		if v.OverflowInt(x) {
			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
		}
		v.SetInt(x)
	case reflect.Bool:
		if p.Value.BooleanValue == nil {
			return typeMismatchReason(p, v)
		}
		v.SetBool(*p.Value.BooleanValue)
	case reflect.String:
		if p.Value.StringValue == nil {
			return typeMismatchReason(p, v)
		}
		v.SetString(*p.Value.StringValue)
	case reflect.Float32, reflect.Float64:
		if p.Value.DoubleValue == nil {
			return typeMismatchReason(p, v)
		}
		x := *p.Value.DoubleValue
		if v.OverflowFloat(x) {
			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
		}
		v.SetFloat(x)
	case reflect.Ptr:
		if _, ok := v.Interface().(*Key); !ok {
			return typeMismatchReason(p, v)
		}
		if p.Value.Referencevalue == nil {
			return typeMismatchReason(p, v)
		}
		k, err := referenceValueToKey(p.Value.Referencevalue)
		if err != nil {
			return "stored key was invalid"
		}
		v.Set(reflect.ValueOf(k))
	case reflect.Slice:
		if _, ok := v.Interface().([]byte); !ok {
			return typeMismatchReason(p, v)
		}
		if p.Value.StringValue == nil {
			return typeMismatchReason(p, v)
		}
		b := []byte(*p.Value.StringValue)
		v.Set(reflect.ValueOf(b))
	default:
		return typeMismatchReason(p, v)
	}

	if slice.IsValid() {
		slice.Set(reflect.Append(slice, v))
	}
	return ""
}
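The multiple-valued path uses a common reflection idiom: allocate a fresh, addressable value of the slice's element type, fill it in through the same switch used for scalar fields, then append it back onto the slice field. A self-contained sketch of that idiom with a hypothetical entity struct:

package main

import (
	"fmt"
	"reflect"
)

type entity struct {
	Scores []int64
}

func main() {
	e := &entity{}
	field := reflect.ValueOf(e).Elem().FieldByName("Scores")

	for _, x := range []int64{7, 42} {
		// New addressable element of the slice's element type (int64 here).
		v := reflect.New(field.Type().Elem()).Elem()
		if v.OverflowInt(x) { // same overflow check as loadStructField
			panic("value overflows element type")
		}
		v.SetInt(x)
		field.Set(reflect.Append(field, v))
	}
	fmt.Println(e.Scores) // [7 42]
}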
// Count returns the number of results for the query.
func (q *Query) Count(c appengine.Context) (int, error) {
	// Check that the query is well-formed.
	if q.err != nil {
		return 0, q.err
	}

	// Run a copy of the query, with keysOnly true, and an adjusted offset.
	// We also set the limit to zero, as we don't want any actual entity data,
	// just the number of skipped results.
	newQ := *q
	newQ.keysOnly = true
	newQ.limit = 0
	if q.limit == 0 {
		// If the original query was unlimited, set the new query's offset to maximum.
		newQ.offset = math.MaxInt32
	} else {
		newQ.offset = q.offset + q.limit
		if newQ.offset < 0 {
			// Do the best we can, in the presence of overflow.
			newQ.offset = math.MaxInt32
		}
	}
	req := &pb.Query{}
	if err := newQ.toProto(req, c.FullyQualifiedAppID(), zeroLimitMeansZero); err != nil {
		return 0, err
	}
	res := &pb.QueryResult{}
	if err := c.Call("datastore_v3", "RunQuery", req, res, nil); err != nil {
		return 0, err
	}

	// n is the count we will return. For example, suppose that our original
	// query had an offset of 4 and a limit of 2008: the count will be 2008,
	// provided that there are at least 2012 matching entities. However, the
	// RPCs will only skip 1000 results at a time. The RPC sequence is:
	//   call RunQuery with (offset, limit) = (2012, 0)  // 2012 == newQ.offset
	//   response has (skippedResults, moreResults) = (1000, true)
	//   n += 1000  // n == 1000
	//   call Next with (offset, limit) = (1012, 0)  // 1012 == newQ.offset - n
	//   response has (skippedResults, moreResults) = (1000, true)
	//   n += 1000  // n == 2000
	//   call Next with (offset, limit) = (12, 0)  // 12 == newQ.offset - n
	//   response has (skippedResults, moreResults) = (12, false)
	//   n += 12    // n == 2012
	//   // exit the loop
	//   n -= 4     // n == 2008
	var n int32
	for {
		// The QueryResult should have no actual entity data, just skipped results.
		if len(res.Result) != 0 {
			return 0, errors.New("datastore: internal error: Count request returned too much data")
		}
		n += proto.GetInt32(res.SkippedResults)
		if !proto.GetBool(res.MoreResults) {
			break
		}
		if err := callNext(c, res, newQ.offset-n, 0, zeroLimitMeansZero); err != nil {
			return 0, err
		}
	}
	n -= q.offset
	if n < 0 {
		// If the offset was greater than the number of matching entities,
		// return 0 instead of negative.
		n = 0
	}
	return int(n), nil
}
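For context, a typical call site looks like the following sketch, assuming the public appengine/datastore query API; the kind name and filter are illustrative only:

// Count how many Employee entities have a salary above a threshold.
q := datastore.NewQuery("Employee").Filter("Salary >", 100000)
n, err := q.Count(c) // c is an appengine.Context
if err != nil {
	return err
}
c.Infof("matched %d entities", n)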