// randmatstat runs t trials: it builds random block matrices P and Q,
// accumulates the traces of (P'P)^4 and (Q'Q)^4 in two stats.Stats
// accumulators, and returns stddev/count/mean for each trace series.
func randmatstat(t int) (float64, float64) {
    n := 5
    var v stats.Stats
    var w stats.Stats
    for i := 0; i < t; i++ {
        // Four n x n matrices of standard normal deviates.
        a := matrix.Zeros(n, n)
        b := matrix.Zeros(n, n)
        c := matrix.Zeros(n, n)
        d := matrix.Zeros(n, n)
        for j := 0; j < n; j++ {
            for k := 0; k < n; k++ {
                a.Set(j, k, rand.NormFloat64())
                b.Set(j, k, rand.NormFloat64())
                c.Set(j, k, rand.NormFloat64())
                d.Set(j, k, rand.NormFloat64())
            }
        }
        // P = [a b c d]: horizontal concatenation, n x 4n.
        P := matrix.Zeros(n, 4*n)
        for j := 0; j < n; j++ {
            for k := 0; k < n; k++ {
                P.Set(j, k, a.Get(j, k))
                P.Set(j, n+k, b.Get(j, k))
                P.Set(j, 2*n+k, c.Get(j, k))
                P.Set(j, 3*n+k, d.Get(j, k))
            }
        }
        // Q = [a b; c d]: 2x2 block matrix, 2n x 2n.
        Q := matrix.Zeros(2*n, 2*n)
        for j := 0; j < n; j++ {
            for k := 0; k < n; k++ {
                Q.Set(j, k, a.Get(j, k))
                Q.Set(j, n+k, b.Get(j, k))
                Q.Set(n+j, k, c.Get(j, k))
                Q.Set(n+j, n+k, d.Get(j, k))
            }
        }
        // Form P'P and Q'Q, square each twice, and record the traces.
        P = matrix.Product(matrix.Transpose(P), P)
        P = matrix.Product(P, P)
        P = matrix.Product(P, P)
        Q = matrix.Product(matrix.Transpose(Q), Q)
        Q = matrix.Product(Q, Q)
        Q = matrix.Product(Q, Q)
        v.Update(P.Trace())
        w.Update(Q.Trace())
    }
    return v.PopulationStandardDeviation() / float64(v.Count()) / v.Mean(),
        w.PopulationStandardDeviation() / float64(w.Count()) / w.Mean()
}
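A minimal sketch of how randmatstat might be driven, assuming the function above is compiled in the same package; the trial count and the printing are illustrative and not taken from the original benchmark harness.

// Hypothetical driver for the benchmark above. It assumes the surrounding
// file already imports "fmt" plus the matrix and stats packages used by
// randmatstat; only the call shape is shown here.
func runRandmatstat() {
    // 1000 trials is an arbitrary example value.
    v, w := randmatstat(1000)
    fmt.Printf("randmatstat: %.6f %.6f\n", v, w)
}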
// handleExecuteQuery decodes a query request, matches and sorts records,
// summarizes each feature column concurrently, and writes the JSON response.
func handleExecuteQuery(rw http.ResponseWriter, req *http.Request) {
    startTime := time.Now()

    db, err := sql.Open("sqlite3", *dataSrc)
    if err != nil {
        http.Error(rw, err.Error(), http.StatusInternalServerError)
        return
    }
    defer db.Close()

    var (
        request struct {
            Features    map[string]float64 `json:"features"`
            Geo         *geoData           `json:"geo"`
            MaxResults  int                `json:"maxResults"`
            MinScore    float64            `json:"minScore"`
            Modes       map[string]string  `json:"modes"`
            Profile     map[string]float64 `json:"profile"`
            Resolution  int                `json:"resolution"`
            SortAsc     bool               `json:"sortAsc"`
            SortKey     string             `json:"sortKey"`
            WalkingDist float64            `json:"walkingDist"`
        }

        response struct {
            Columns     map[string]*column `json:"columns"`
            Count       int                `json:"count"`
            MinScore    float64            `json:"minScore"`
            Records     []record           `json:"records"`
            ElapsedTime int64              `json:"elapsedTime"`
        }
    )

    if err := json.NewDecoder(req.Body).Decode(&request); err != nil {
        http.Error(rw, err.Error(), http.StatusInternalServerError)
        return
    }

    var geo *geoData
    if request.Geo != nil {
        geo = &geoData{request.Geo.Latitude, request.Geo.Longitude}
    }

    allEntries, err := fetchRecords(db, queryContext{geo, request.Profile, request.WalkingDist})
    if err != nil {
        http.Error(rw, err.Error(), http.StatusInternalServerError)
        return
    }

    features := fixFeatures(request.Features)
    modes := fixModes(request.Modes)

    matchedEntries := findRecords(allEntries, features, modes, request.MinScore)
    sorter := recordSorter{entries: matchedEntries, key: request.SortKey, ascending: request.SortAsc}
    sorter.sort()

    var wg sync.WaitGroup
    wg.Add(len(features))

    // Build one column summary per feature; each summary is computed in its
    // own goroutine.
    response.Columns = make(map[string]*column)
    for name := range features {
        response.Columns[name] = new(column)
        // Capture the column pointer before launching the goroutine so the
        // Columns map is never read concurrently with writes from later
        // iterations of this loop.
        col := response.Columns[name]

        go func(name string, col *column) {
            defer wg.Done()

            col.Bracket = bracket{Max: -1.0, Min: 1.0}
            col.Hints = project(allEntries, features, modes, name, request.MinScore, request.Resolution)
            col.Mode = modes[name].String()
            col.Steps = request.Resolution
            col.Value = features[name]

            // Accumulate the matched entries' values for this feature and
            // bracket them at three sample standard deviations of the mean,
            // clamped to the observed min and max.
            var d stats.Stats
            for _, record := range matchedEntries {
                if feature, ok := record.features[name]; ok {
                    d.Update(feature)
                }
            }

            if d.Count() > 0 {
                var dev float64
                if d.Count() > 1 {
                    dev = d.SampleStandardDeviation() * 3
                }

                mean := d.Mean()
                col.Bracket.Max = math.Min(mean+dev, d.Max())
                col.Bracket.Min = math.Max(mean-dev, d.Min())
            }
        }(name, col)
    }
    wg.Wait()

    response.Count = len(matchedEntries)
    response.MinScore = request.MinScore
    response.ElapsedTime = time.Since(startTime).Nanoseconds()

    if len(matchedEntries) > request.MaxResults {
        response.Records = matchedEntries[:request.MaxResults]
    } else {
        response.Records = matchedEntries
    }

    js, err := json.Marshal(response)
    if err != nil {
        http.Error(rw, err.Error(), http.StatusInternalServerError)
        return
    }

    rw.Header().Set("Content-Type", "application/json")
    rw.Write(js)
}
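A sketch of how the handler above could be wired into a server, assuming only the standard net/http package; the "/query" route and the listen address are illustrative, not taken from the original project.

// Hypothetical server setup for handleExecuteQuery. Route and port are
// placeholders chosen for the example.
func startServer() error {
    mux := http.NewServeMux()
    mux.HandleFunc("/query", handleExecuteQuery)
    return http.ListenAndServe(":8080", mux)
}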
// CollectOnce pings the configured target, folds every observed round-trip
// time into a stats.Stats accumulator, and emits min/max/avg/mdev, the
// resolved IP, and the packet-loss percentage as data points.
func (c ping_collector) CollectOnce() newcore.CollectResult {
    var (
        md       newcore.MultiDataPoint
        d        stats.Stats
        p        = fastping.NewPinger()
        rtt_chan = make(chan float64)
    )

    ip, err := net.ResolveIPAddr("ip4:icmp", c.config.Target)
    if err != nil {
        logging.Errorf("ping_collector: DNS resolve error: %v", err)
        return newcore.CollectResult{
            Collected: nil,
            Next:      time.Now().Add(c.interval),
            Err:       fmt.Errorf("ping_collector: DNS resolve error: %v", err),
        }
    }

    p.MaxRTT = c.timeout
    p.AddIPAddr(ip)
    p.OnRecv = func(addr *net.IPAddr, rtt time.Duration) {
        // Report the round-trip time in (truncated) milliseconds.
        rtt_chan <- float64(rtt.Nanoseconds() / 1000 / 1000)
    }

    go func() {
        for i := 0; i < c.config.Packets; i++ {
            if err := p.Run(); err != nil {
                logging.Errorf("ping_collector run err: %v", err)
            }
        }
        close(rtt_chan)
    }()

    // Feed every received round-trip time into the accumulator.
    for rtt := range rtt_chan {
        d.Update(rtt)
    }

    md = append(md, newcore.NewDP(c.prefix, fmt.Sprintf("%s.%s", c.config.Metric, "time_min"), d.Min(), c.tags, "", "", ""))
    md = append(md, newcore.NewDP(c.prefix, fmt.Sprintf("%s.%s", c.config.Metric, "time_max"), d.Max(), c.tags, "", "", ""))
    md = append(md, newcore.NewDP(c.prefix, fmt.Sprintf("%s.%s", c.config.Metric, "time_avg"), d.Mean(), c.tags, "", "", ""))

    std := d.SampleStandardDeviation()
    if math.IsNaN(std) {
        std = 0
    }
    md = append(md, newcore.NewDP(c.prefix, fmt.Sprintf("%s.%s", c.config.Metric, "time_mdev"), std, c.tags, "", "", ""))
    md = append(md, newcore.NewDP(c.prefix, fmt.Sprintf("%s.%s", c.config.Metric, "ip"), ip.IP.String(), c.tags, "", "", ""))

    // Convert to float64 before dividing so partial packet loss is not
    // truncated to zero by integer division.
    lost_pct := float64(c.config.Packets-d.Count()) / float64(c.config.Packets) * 100
    md = append(md, newcore.NewDP(c.prefix, fmt.Sprintf("%s.%s", c.config.Metric, "lost_pct"), lost_pct, c.tags, "", "", ""))

    return newcore.CollectResult{
        Collected: md,
        Next:      time.Now().Add(c.interval),
        Err:       nil,
    }
}
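All three examples share the same accumulator pattern from the stats package: declare a zero-value stats.Stats, feed observations through Update, then read the summaries. A minimal sketch of that pattern in isolation, using only the methods already exercised above:

// summarize illustrates the stats.Stats usage pattern shared by the
// examples in this section. Only methods seen above are called.
func summarize(samples []float64) (min, max, mean, stddev float64) {
    var d stats.Stats
    for _, s := range samples {
        d.Update(s)
    }
    if d.Count() == 0 {
        return 0, 0, 0, 0
    }
    stddev = d.SampleStandardDeviation()
    if math.IsNaN(stddev) { // a single sample has no sample deviation
        stddev = 0
    }
    return d.Min(), d.Max(), d.Mean(), stddev
}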