func request_get_NEAREST(q *btrdb.Quasar, w http.ResponseWriter, r *http.Request) { atomic.AddInt32(&outstandingHttpReqs, 1) defer func() { atomic.AddInt32(&outstandingHttpReqs, -1) }() r.ParseForm() ids := r.Form.Get(":uuid") id := uuid.Parse(ids) if id == nil { doError(w, "malformed uuid") return } t, ok, msg := parseInt(r.Form.Get("time"), -(16 << 56), (48 << 56)) if !ok { doError(w, "bad time: "+msg) return } bws := r.Form.Get("backwards") bw := bws != "" rec, _, err := q.QueryNearestValue(id, t, bw, btrdb.LatestGeneration) if err != nil { doError(w, "Bad query: "+err.Error()) return } w.Write([]byte(fmt.Sprintf("[%d, %f]", rec.Time, rec.Val))) }
func request_post_INSERT(q *btrdb.Quasar, w http.ResponseWriter, r *http.Request) { atomic.AddInt32(&outstandingHttpReqs, 1) defer func() { atomic.AddInt32(&outstandingHttpReqs, -1) }() then := time.Now() dec := json.NewDecoder(r.Body) var ins insert_t dec.UseNumber() err := dec.Decode(&ins) if err != nil { doError(w, "malformed quasar HTTP insert") return } id := uuid.Parse(ins.Uuid) if id == nil { doError(w, "malformed uuid") return } //log.Printf("Got %+v", ins) recs := make([]qtree.Record, len(ins.Readings)) //Check the format of the insert and copy to Record for i := 0; i < len(ins.Readings); i++ { if len(ins.Readings[i]) != 2 { doError(w, fmt.Sprintf("reading %d is malformed", i)) return } t, ok, msg := parseInt(string(ins.Readings[i][0].(json.Number)), btrdb.MinimumTime, btrdb.MaximumTime) if !ok { doError(w, fmt.Sprintf("reading %d time malformed: %s", i, msg)) return } val, err := strconv.ParseFloat(string(ins.Readings[i][1].(json.Number)), 64) if err != nil { doError(w, fmt.Sprintf("value %d malformed: %s", i, err)) return } recs[i].Time = t recs[i].Val = val } q.InsertValues(id, recs) //log.Printf("got %+v", recs) delta := time.Now().Sub(then) w.Write([]byte(fmt.Sprintf("OK %d records, %.2f ms\n", len(recs), float64(delta.Nanoseconds()/1000)/1000))) }
// dispatchCommands services one Cap'n Proto (cpnp) client connection.
// It reads capnp request segments off conn in a loop and handles each one
// in its own goroutine. rmtx serializes reads from conn; wmtx serializes
// response writes so concurrent handlers do not interleave frames.
//
// NOTE(review): on a read error the loop breaks while rmtx is still held.
// Harmless today (the loop and its only reader exit), but worth confirming
// nothing else ever takes rmtx.
func (c *CPInterface) dispatchCommands(q *btrdb.Quasar, conn net.Conn) {
	//This governs the stream
	rmtx := sync.Mutex{}
	wmtx := sync.Mutex{}
	log.Info("cpnp connection")
	for !c.isShuttingDown {
		rmtx.Lock()
		seg, err := capn.ReadFromStream(conn, nil)
		if err != nil {
			log.Warning("ERR (%v) :: %v", conn.RemoteAddr(), err)
			conn.Close()
			break
		}
		rmtx.Unlock()
		go func() {
			// Re-bind so this goroutine keeps its own copy of the segment.
			seg := seg
			req := ReadRootRequest(seg)
			// mkresp allocates a fresh response segment echoing the request tag.
			mkresp := func() (Response, *capn.Segment) {
				rvseg := capn.NewBuffer(nil)
				resp := NewRootResponse(rvseg)
				resp.SetEchoTag(req.EchoTag())
				return resp, rvseg
			}
			// sendresp frames one response onto the shared connection under wmtx.
			sendresp := func(seg *capn.Segment) {
				wmtx.Lock()
				seg.WriteTo(conn)
				wmtx.Unlock()
			}
			switch req.Which() {
			case REQUEST_QUERYSTANDARDVALUES:
				// Streamed raw-value query: drain the record channel into
				// 4096-record batches, emitting one response segment per batch.
				//log.Info("QSV\n")
				st := req.QueryStandardValues().StartTime()
				et := req.QueryStandardValues().EndTime()
				uuid := uuid.UUID(req.QueryStandardValues().Uuid())
				ver := req.QueryStandardValues().Version()
				//log.Info("[REQ=QsV] st=%v, et=%v, uuid=%v, gen=%v", st, et, uuid, ver)
				if ver == 0 {
					ver = btrdb.LatestGeneration
				}
				recordc, errorc, gen := q.QueryValuesStream(uuid, st, et, ver)
				if recordc == nil {
					// NOTE(review): err here is the (nil) read error from the
					// outer scope, not the query failure — the log line is
					// almost certainly logging the wrong value; confirm intent.
					log.Warning("RESPONDING ERR: %v", err)
					resp, rvseg := mkresp()
					resp.SetStatusCode(STATUSCODE_INTERNALERROR)
					resp.SetFinal(true)
					sendresp(rvseg)
					return
				} else {
					bufarr := make([]qtree.Record, 0, 4096)
					for {
						resp, rvseg := mkresp()
						fail := false
						fin := false
						// Fill one batch: stop on error, end-of-stream, or a
						// full buffer.
						for {
							select {
							case _, ok := <-errorc:
								if ok {
									fin = true
									fail = true
									goto donestandard
								}
							case r, ok := <-recordc:
								if !ok {
									fin = true
									goto donestandard
								}
								bufarr = append(bufarr, r)
								if len(bufarr) == cap(bufarr) {
									goto donestandard
								}
							}
						}
					donestandard:
						if fail {
							resp.SetStatusCode(STATUSCODE_INTERNALERROR)
							resp.SetFinal(true)
							//consume channels
							// Drain both channels so the producer goroutine
							// in the query engine is not blocked forever.
							go func() {
								for _ = range recordc {
								}
							}()
							go func() {
								for _ = range errorc {
								}
							}()
							sendresp(rvseg)
							return
						}
						records := NewRecords(rvseg)
						rl := NewRecordList(rvseg, len(bufarr))
						rla := rl.ToArray()
						for i, v := range bufarr {
							rla[i].SetTime(v.Time)
							rla[i].SetValue(v.Val)
						}
						records.SetVersion(gen)
						records.SetValues(rl)
						resp.SetRecords(records)
						resp.SetStatusCode(STATUSCODE_OK)
						if fin {
							resp.SetFinal(true)
						}
						sendresp(rvseg)
						// Reuse the batch buffer's backing storage.
						bufarr = bufarr[:0]
						if fin {
							return
						}
					}
				}
			case REQUEST_QUERYSTATISTICALVALUES:
				// Streamed statistical query: identical batching protocol to
				// QUERYSTANDARDVALUES but with StatRecord payloads.
				st := req.QueryStatisticalValues().StartTime()
				et := req.QueryStatisticalValues().EndTime()
				uuid := uuid.UUID(req.QueryStatisticalValues().Uuid())
				pw := req.QueryStatisticalValues().PointWidth()
				ver := req.QueryStatisticalValues().Version()
				if ver == 0 {
					ver = btrdb.LatestGeneration
				}
				recordc, errorc, gen := q.QueryStatisticalValuesStream(uuid, st, et, ver, pw)
				if recordc == nil {
					// NOTE(review): same stale-err logging as above.
					log.Warning("RESPONDING ERR: %v", err)
					resp, rvseg := mkresp()
					resp.SetStatusCode(STATUSCODE_INTERNALERROR)
					resp.SetFinal(true)
					sendresp(rvseg)
					return
				} else {
					bufarr := make([]qtree.StatRecord, 0, 4096)
					for {
						resp, rvseg := mkresp()
						fail := false
						fin := false
						for {
							select {
							case _, ok := <-errorc:
								if ok {
									fin = true
									fail = true
									goto donestat
								}
							case r, ok := <-recordc:
								if !ok {
									fin = true
									goto donestat
								}
								bufarr = append(bufarr, r)
								if len(bufarr) == cap(bufarr) {
									goto donestat
								}
							}
						}
					donestat:
						if fail {
							resp.SetStatusCode(STATUSCODE_INTERNALERROR)
							resp.SetFinal(true)
							//consume channels
							go func() {
								for _ = range recordc {
								}
							}()
							go func() {
								for _ = range errorc {
								}
							}()
							sendresp(rvseg)
							return
						}
						records := NewStatisticalRecords(rvseg)
						rl := NewStatisticalRecordList(rvseg, len(bufarr))
						rla := rl.ToArray()
						for i, v := range bufarr {
							rla[i].SetTime(v.Time)
							rla[i].SetCount(v.Count)
							rla[i].SetMin(v.Min)
							rla[i].SetMean(v.Mean)
							rla[i].SetMax(v.Max)
						}
						records.SetVersion(gen)
						records.SetValues(rl)
						resp.SetStatisticalRecords(records)
						resp.SetStatusCode(STATUSCODE_OK)
						if fin {
							resp.SetFinal(true)
						}
						sendresp(rvseg)
						bufarr = bufarr[:0]
						if fin {
							return
						}
					}
				}
			case REQUEST_QUERYVERSION:
				// Batch version lookup: one (uuid, generation) pair per input uuid.
				//ul := req.
				ul := req.QueryVersion().Uuids()
				ull := ul.ToArray()
				resp, rvseg := mkresp()
				rvers := NewVersions(rvseg)
				vlist := rvseg.NewUInt64List(len(ull))
				ulist := rvseg.NewDataList(len(ull))
				for i, v := range ull {
					ver, err := q.QueryGeneration(uuid.UUID(v))
					if err != nil {
						resp.SetStatusCode(STATUSCODE_INTERNALERROR)
						resp.SetFinal(true)
						sendresp(rvseg)
						return
					}
					//I'm not sure that the array that sits behind the uuid slice will stick around
					//so I'm copying it.
					uuid := make([]byte, 16)
					copy(uuid, v)
					vlist.Set(i, ver)
					ulist.Set(i, uuid)
				}
				resp.SetStatusCode(STATUSCODE_OK)
				rvers.SetUuids(ulist)
				rvers.SetVersions(vlist)
				resp.SetVersionList(rvers)
				resp.SetFinal(true)
				sendresp(rvseg)
			case REQUEST_QUERYNEARESTVALUE:
				// Single nearest-point query; maps ErrNoSuchPoint to its own
				// status code so clients can distinguish "empty" from "broken".
				resp, rvseg := mkresp()
				t := req.QueryNearestValue().Time()
				id := uuid.UUID(req.QueryNearestValue().Uuid())
				ver := req.QueryNearestValue().Version()
				if ver == 0 {
					ver = btrdb.LatestGeneration
				}
				back := req.QueryNearestValue().Backward()
				rv, gen, err := q.QueryNearestValue(id, t, back, ver)
				switch err {
				case nil:
					resp.SetStatusCode(STATUSCODE_OK)
					records := NewRecords(rvseg)
					rl := NewRecordList(rvseg, 1)
					rla := rl.ToArray()
					rla[0].SetTime(rv.Time)
					rla[0].SetValue(rv.Val)
					records.SetVersion(gen)
					records.SetValues(rl)
					resp.SetRecords(records)
				case qtree.ErrNoSuchPoint:
					resp.SetStatusCode(STATUSCODE_NOSUCHPOINT)
				default:
					resp.SetStatusCode(STATUSCODE_INTERNALERROR)
				}
				resp.SetFinal(true)
				sendresp(rvseg)
			case REQUEST_QUERYCHANGEDRANGES:
				// Changed-range query between two generations; ToGeneration 0
				// means "latest".
				resp, rvseg := mkresp()
				id := uuid.UUID(req.QueryChangedRanges().Uuid())
				sgen := req.QueryChangedRanges().FromGeneration()
				egen := req.QueryChangedRanges().ToGeneration()
				if egen == 0 {
					egen = btrdb.LatestGeneration
				}
				resolution := req.QueryChangedRanges().Resolution()
				rv, ver, err := q.QueryChangedRanges(id, sgen, egen, resolution)
				switch err {
				case nil:
					resp.SetStatusCode(STATUSCODE_OK)
					ranges := NewRanges(rvseg)
					ranges.SetVersion(ver)
					crl := NewChangedRangeList(rvseg, len(rv))
					crla := crl.ToArray()
					for i := 0; i < len(rv); i++ {
						crla[i].SetStartTime(rv[i].Start)
						crla[i].SetEndTime(rv[i].End)
					}
					ranges.SetValues(crl)
					resp.SetChangedRngList(ranges)
				default:
					// NOTE(review): Println-style args passed to a printf-style
					// logger; other call sites use a format string — confirm.
					log.Critical("qcr error: ", err)
					resp.SetStatusCode(STATUSCODE_INTERNALERROR)
				}
				resp.SetFinal(true)
				sendresp(rvseg)
			case REQUEST_INSERTVALUES:
				// Bulk insert; optionally flushes the stream when Sync is set.
				resp, rvseg := mkresp()
				uuid := uuid.UUID(req.InsertValues().Uuid())
				rl := req.InsertValues().Values()
				rla := rl.ToArray()
				if len(rla) != 0 {
					qtr := make([]qtree.Record, len(rla))
					for i, v := range rla {
						qtr[i] = qtree.Record{Time: v.Time(), Val: v.Value()}
					}
					q.InsertValues(uuid, qtr)
				}
				if req.InsertValues().Sync() {
					q.Flush(uuid)
				}
				resp.SetFinal(true)
				resp.SetStatusCode(STATUSCODE_OK)
				sendresp(rvseg)
			case REQUEST_DELETEVALUES:
				// Range delete on one stream.
				resp, rvseg := mkresp()
				id := uuid.UUID(req.DeleteValues().Uuid())
				stime := req.DeleteValues().StartTime()
				etime := req.DeleteValues().EndTime()
				err := q.DeleteRange(id, stime, etime)
				switch err {
				case nil:
					resp.SetStatusCode(STATUSCODE_OK)
				default:
					resp.SetStatusCode(STATUSCODE_INTERNALERROR)
				}
				resp.SetFinal(true)
				sendresp(rvseg)
			default:
				// Unknown request discriminator: logged and dropped (no
				// response is sent back to the client).
				log.Critical("weird segment")
			}
		}()
	}
}
func request_post_BRACKET(q *btrdb.Quasar, w http.ResponseWriter, r *http.Request) { atomic.AddInt32(&outstandingHttpReqs, 1) defer func() { atomic.AddInt32(&outstandingHttpReqs, -1) }() dec := json.NewDecoder(r.Body) req := bracket_req{} err := dec.Decode(&req) if err != nil { doError(w, "bad request") return } if len(req.UUIDS) == 0 { doError(w, "no uuids") return } rv := bracket_resp{} rv.Brackets = make([][]int64, len(req.UUIDS)) var min, max int64 var minset, maxset bool for i, u := range req.UUIDS { uid := uuid.Parse(u) if uid == nil { doError(w, "malformed uuid") return } rec, _, err := q.QueryNearestValue(uid, btrdb.MinimumTime+1, false, btrdb.LatestGeneration) if err == qtree.ErrNoSuchStream { rv.Brackets[i] = make([]int64, 2) rv.Brackets[i][0] = -1 rv.Brackets[i][1] = -1 continue } if err != nil { doError(w, "Bad query: "+err.Error()) return } start := rec.Time if !minset || start < min { min = start minset = true } rec, _, err = q.QueryNearestValue(uid, btrdb.MaximumTime-1, true, btrdb.LatestGeneration) if err != nil { doError(w, "Bad query: "+err.Error()) return } end := rec.Time if !maxset || end > max { max = end maxset = true } rv.Brackets[i] = make([]int64, 2) rv.Brackets[i][0] = start rv.Brackets[i][1] = end } rv.Merged = make([]int64, 2) if minset && maxset { rv.Merged[0] = min rv.Merged[1] = max } else { doError(w, "Bad query: none of those streams exist") return } err = json.NewEncoder(w).Encode(rv) if err != nil { doError(w, "JSON error: "+err.Error()) return } return }
func request_get_CSV(q *btrdb.Quasar, w http.ResponseWriter, r *http.Request) { atomic.AddInt32(&outstandingHttpReqs, 1) defer func() { atomic.AddInt32(&outstandingHttpReqs, -1) }() r.ParseForm() ids := r.Form.Get(":uuid") id := uuid.Parse(ids) if id == nil { log.Critical("ids: '%v'", ids) doError(w, "malformed uuid") return } st, ok, msg := parseInt(r.Form.Get("starttime"), -(16 << 56), (48 << 56)) if !ok { doError(w, "bad start time: "+msg) return } et, ok, msg := parseInt(r.Form.Get("endtime"), -(16 << 56), (48 << 56)) if !ok { doError(w, "bad end time: "+msg) return } if et <= st { doError(w, "end time <= start time") return } versions := r.Form.Get("ver") if versions == "" { versions = "0" } //Technically this is incorrect, but I doubt we will overflow this versioni, ok, msg := parseInt(versions, 0, 1<<63-1) version := uint64(versioni) if !ok { doError(w, "malformed version: "+msg) return } if version == 0 { version = btrdb.LatestGeneration } unitoftime := r.Form.Get("unitoftime") divisor := int64(1) switch unitoftime { case "": fallthrough case "ms": divisor = 1000000 //ns to ms case "ns": divisor = 1 case "us": divisor = 1000 //ns to us case "s": divisor = 1000000000 //ns to s default: doError(w, "unitoftime must be 'ns', 'ms', 'us' or 's'") return } if st >= btrdb.MaximumTime/divisor || st <= btrdb.MinimumTime/divisor { doError(w, "start time out of bounds") return } if et >= btrdb.MaximumTime/divisor || et <= btrdb.MinimumTime/divisor { doError(w, "end time out of bounds") return } st *= divisor et *= divisor pws := r.Form.Get("pw") pw := uint8(0) if pws != "" { pwl, ok, msg := parseInt(pws, 0, 63) if !ok { doError(w, "bad point width: "+msg) return } if divisor != 1 { doError(w, "statistical results require unitoftime=ns") return } pw = uint8(pwl) } logh("QSVSn", fmt.Sprintf("u=%s st=%v et=%v pw=%v", id.String(), st, et, pw), r) rvchan, echan, _ := q.QueryStatisticalValuesStream(id, st, et, version, pw) w.WriteHeader(200) 
w.Write([]byte("Time[ns],Mean,Min,Max,Count\n")) for { select { case v, ok := <-rvchan: if ok { w.Write([]byte(fmt.Sprintf("%d,%f,%f,%f,%d\n", v.Time, v.Mean, v.Min, v.Max, v.Count))) } else { //Done return } case err, ok := <-echan: if ok { w.Write([]byte(fmt.Sprintf("!ABORT ERROR: %v", err))) return } } } return }
func request_get_VRANGE(q *btrdb.Quasar, w http.ResponseWriter, r *http.Request) { atomic.AddInt32(&outstandingHttpReqs, 1) defer func() { atomic.AddInt32(&outstandingHttpReqs, -1) }() r.ParseForm() ids := r.Form.Get(":uuid") id := uuid.Parse(ids) if id == nil { log.Critical("ids: '%v'", ids) doError(w, "malformed uuid") return } st, ok, msg := parseInt(r.Form.Get("starttime"), -(16 << 56), (48 << 56)) if !ok { doError(w, "bad start time: "+msg) return } et, ok, msg := parseInt(r.Form.Get("endtime"), -(16 << 56), (48 << 56)) if !ok { doError(w, "bad end time: "+msg) return } if et <= st { doError(w, "end time <= start time") return } versions := r.Form.Get("ver") if versions == "" { versions = "0" } //Technically this is incorrect, but I doubt we will overflow this versioni, ok, msg := parseInt(versions, 0, 1<<63-1) version := uint64(versioni) if !ok { doError(w, "malformed version: "+msg) return } if version == 0 { version = btrdb.LatestGeneration } unitoftime := r.Form.Get("unitoftime") uot := struct { UnitofTime string }{unitoftime} divisor := int64(1) switch unitoftime { case "": fallthrough case "ms": divisor = 1000000 //ns to ms case "ns": divisor = 1 case "us": divisor = 1000 //ns to us case "s": divisor = 1000000000 //ns to s default: doError(w, "unitoftime must be 'ns', 'ms', 'us' or 's'") return } if st >= btrdb.MaximumTime/divisor || st <= btrdb.MinimumTime/divisor { doError(w, "start time out of bounds") return } if et >= btrdb.MaximumTime/divisor || et <= btrdb.MinimumTime/divisor { doError(w, "end time out of bounds") return } st *= divisor et *= divisor pws := r.Form.Get("pw") pw := uint8(0) if pws != "" { pwl, ok, msg := parseInt(pws, 0, 63) if !ok { doError(w, "bad point width: "+msg) return } if divisor != 1 { doError(w, "statistical results require unitoftime=ns") return } pw = uint8(pwl) } if pws != "" { //log.Info("HTTP REQ id=%s pw=%v", id.String(), pw) logh("QSV", fmt.Sprintf("st=%v et=%v pw=%v u=%s", st, et, pw, id.String()), r) res, rgen, 
err := q.QueryStatisticalValues(id, st, et, version, pw) if err != nil { doError(w, "query error: "+err.Error()) return } resf := make([][]interface{}, len(res)) contents := make([]interface{}, len(res)*6) for i := 0; i < len(res); i++ { resf[i] = contents[i*6 : (i+1)*6] resf[i][0] = res[i].Time / 1000000 //ms since epoch resf[i][1] = res[i].Time % 1000000 //nanoseconds left over resf[i][2] = res[i].Min resf[i][3] = res[i].Mean resf[i][4] = res[i].Max resf[i][5] = res[i].Count } rv := []struct { Uuid string `json:"uuid"` XReadings [][]interface{} Version uint64 `json:"version"` }{ {id.String(), resf, rgen}, } err = json.NewEncoder(w).Encode(rv) if err != nil { doError(w, "JSON error: "+err.Error()) return } return } else { logh("QV", fmt.Sprintf("st=%v et=%v pw=%v u=%s", st, et, pw, id.String()), r) res, rgen, err := q.QueryValues(id, st, et, version) if err != nil { doError(w, "query error: "+err.Error()) return } resf := make([][]interface{}, len(res)) contents := make([]interface{}, len(res)*2) for i := 0; i < len(res); i++ { resf[i] = contents[i*2 : (i+1)*2] resf[i][0] = res[i].Time / divisor resf[i][1] = res[i].Val } //props := struct{Uot string `json:"UnitofTime"`}{"foo"} rv := []struct { Uuid string `json:"uuid"` Readings [][]interface{} Version uint64 `json:"version"` Properties interface{} `json:"Properties"` }{ {id.String(), resf, rgen, uot}, } err = json.NewEncoder(w).Encode(rv) if err != nil { doError(w, "JSON error: "+err.Error()) return } return } //res, err := q. }
func request_post_MULTICSV_IMPL(q *btrdb.Quasar, w http.ResponseWriter, bdy io.Reader, r *http.Request) { dec := json.NewDecoder(bdy) req := multi_csv_req{} err := dec.Decode(&req) if err != nil { doError(w, "bad request") return } if len(req.UUIDS) != len(req.Labels) { doError(w, "UUIDS and Labels must be the same length") return } uids := make([]uuid.UUID, len(req.UUIDS)) for i := 0; i < len(uids); i++ { uids[i] = uuid.Parse(req.UUIDS[i]) if uids[i] == nil { doError(w, "UUID "+string(i)+" is malformed") return } } unitoftime := req.UnitofTime divisor := int64(1) switch unitoftime { case "": fallthrough case "ms": divisor = 1000000 //ns to ms case "ns": divisor = 1 case "us": divisor = 1000 //ns to us case "s": divisor = 1000000000 //ns to s default: doError(w, "unitoftime must be 'ns', 'ms', 'us' or 's'") return } if req.StartTime >= btrdb.MaximumTime/divisor || req.StartTime <= btrdb.MinimumTime/divisor { doError(w, "start time out of bounds") return } if req.EndTime >= btrdb.MaximumTime/divisor || req.EndTime <= btrdb.MinimumTime/divisor { doError(w, "end time out of bounds") return } st := req.StartTime * divisor et := req.EndTime * divisor if req.PointWidth < 0 || req.PointWidth >= 63 { doError(w, "PointWidth must be between 0 and 63") return } pw := uint8(req.PointWidth) chanVs := make([]chan qtree.StatRecord, len(uids)) chanEs := make([]chan error, len(uids)) chanBad := make([]bool, len(uids)) chanHead := make([]qtree.StatRecord, len(uids)) for i := 0; i < len(uids); i++ { logh("QSVS", fmt.Sprintf("u=%v st=%v et=%v pw=%v", uids[i].String(), st, et, pw), r) chanVs[i], chanEs[i], _ = q.QueryStatisticalValuesStream(uids[i], st, et, btrdb.LatestGeneration, pw) } reload := func(c int) { select { case v, ok := <-chanVs[c]: if ok { chanHead[c] = v } else { chanBad[c] = true } case e, ok := <-chanEs[c]: if ok { log.Critical("MultiCSV error: ", e) chanBad[c] = true } } } emit := func(r qtree.StatRecord) { w.Write([]byte(fmt.Sprintf(",%d,%f,%f,%f", r.Count, r.Min, 
r.Mean, r.Max))) } emitb := func() { w.Write([]byte(",,,,")) } emitt := func(t int64) { w.Write([]byte(fmt.Sprintf("%d", t))) } emitnl := func() { w.Write([]byte("\n")) } //Prime the first results for i := 0; i < len(uids); i++ { reload(i) } w.Header().Set("Content-Disposition", "attachment; filename=\"quasar_results.csv\"") //Print the headers w.Write([]byte("Time[ns]")) for i := 0; i < len(uids); i++ { w.Write([]byte(fmt.Sprintf(",%s(cnt),%s(min),%s(mean),%s(max)", req.Labels[i], req.Labels[i], req.Labels[i], req.Labels[i]))) } w.Write([]byte("\n")) //Now merge out the results st = st &^ ((1 << pw) - 1) for t := st; t < et; t += (1 << pw) { //First locate the min time minset := false min := int64(0) for i := 0; i < len(uids); i++ { for !chanBad[i] && chanHead[i].Time < t { log.Warning("discarding duplicate time %v:%v", i, chanHead[i].Time) reload(i) } if !chanBad[i] && (!minset || chanHead[i].Time < min) { minset = true min = chanHead[i].Time } } if minset == false { //We are done. There are no more live streams return } //If the min time is later than t, emit blank lines until we catch up for ; t < min; t += (1 << pw) { emitt(t) emitnl() } if t != min { log.Critical("WTF t=%v, min=%v, pw=%v, dt=%v, dm=%v delte=%v", t, min, 1<<pw, t&((1<<pw)-1), min&((1<<pw)-1), min-t) log.Panic("wellfuck") } //Now emit all values at that time emitt(t) for i := 0; i < len(uids); i++ { if !chanBad[i] && chanHead[i].Time == min { emit(chanHead[i]) reload(i) } else { emitb() } } emitnl() } }
func request_post_LEGACYINSERT(q *btrdb.Quasar, w http.ResponseWriter, r *http.Request) { atomic.AddInt32(&outstandingHttpReqs, 1) defer func() { atomic.AddInt32(&outstandingHttpReqs, -1) }() then := time.Now() records, err := processJSON(r.Body) if err != nil { doError(w, "malformed body") return } for _, r := range records { if r.UUID == "" { continue } id := uuid.Parse(r.UUID) if id == nil { doError(w, "malformed uuid") return } recs := make([]qtree.Record, len(r.Readings)) //Check the format of the insert and copy to Record for i := 0; i < len(r.Readings); i++ { uot := r.Properties.UnitofTime var uotmult int64 switch uot { default: fallthrough case "s": uotmult = 1000000000 case "ms": uotmult = 1000000 case "us": uotmult = 1000 case "ns": uotmult = 1 } if len(r.Readings[i]) != 2 { doError(w, fmt.Sprintf("reading %d of record %v is malformed", i, r.UUID)) return } t, ok, msg := parseInt(string(r.Readings[i][0].(json.Number)), btrdb.MinimumTime, btrdb.MaximumTime) if !ok { doError(w, fmt.Sprintf("reading %d time malformed: %s", i, msg)) return } if t >= (btrdb.MaximumTime/uotmult) || t <= (btrdb.MinimumTime/uotmult) { doError(w, fmt.Sprintf("reading %d time out of range", i)) return } t *= uotmult var val float64 bval, ok := r.Readings[i][1].(bool) if ok { if bval { val = 1 } else { val = 0 } } else { val, err = strconv.ParseFloat(string(r.Readings[i][1].(json.Number)), 64) if err != nil { doError(w, fmt.Sprintf("value %d malformed: %s", i, err)) return } } recs[i].Time = t recs[i].Val = val } q.InsertValues(id, recs) } delta := time.Now().Sub(then) w.Write([]byte(fmt.Sprintf("OK %.2f ms\n", float64(delta.Nanoseconds()/1000)/1000))) }