func AllSorted(c string, o string) []*User {
	users := make([]*User, 0)
	column, order := sortingKeys(c, o)
	query := fmt.Sprintf("%s ORDER BY %s %s", findAllSQL, column, order)
	rows, err := db.DB.Query(query)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		u := &User{}
		err := scan(u, rows)
		if err != nil {
			log.Fatal(err)
		}
		users = append(users, u)
	}
	err = rows.Err()
	if err != nil {
		if err == sql.ErrNoRows {
			return users
		}
		// TODO: logging
		log.Fatal(err)
	}
	if column == "email" {
		i := sort.Interface(byEmailDomain(users))
		if order == "DESC" {
			i = sort.Reverse(byEmailDomain(users))
		}
		sort.Sort(i)
	}
	if column == "company" {
		i := sort.Interface(byCompany(users))
		if order == "DESC" {
			i = sort.Reverse(byCompany(users))
		}
		sort.Sort(i)
	}
	return users
}
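byEmailDomain and byCompany are defined elsewhere in that package; purely as an illustration, a sort.Interface implementation along these lines would fit the conversions above (the Email field and the domainOf helper are assumptions, not the original code):

// Hypothetical sketch of byEmailDomain; the real implementation is not shown above.
// Assumes User has an Email string field of the form "local@domain".
type byEmailDomain []*User

func (b byEmailDomain) Len() int      { return len(b) }
func (b byEmailDomain) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byEmailDomain) Less(i, j int) bool {
	return domainOf(b[i].Email) < domainOf(b[j].Email)
}

// domainOf returns everything after the last '@' (illustrative helper, stdlib-free).
func domainOf(email string) string {
	for i := len(email) - 1; i >= 0; i-- {
		if email[i] == '@' {
			return email[i+1:]
		}
	}
	return email
}

byCompany would presumably follow the same shape, comparing a company field instead of the email domain.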
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	if len(ms) == 0 {
		ms = append(ms, "cpu")
	}

	cfg := &runner.Config{
		BatchSize:     *batchSize,
		Measurements:  ms,
		SeriesCount:   *seriesCount,
		PointCount:    *pointCount,
		Concurrency:   *concurrency,
		BatchInterval: *batchInterval,
		Database:      *database,
		Address:       *address,
		Precision:     *precision,
	}

	totalPoints, failedRequests, responseTimes, timer := runner.Run(cfg)

	sort.Sort(sort.Reverse(sort.Interface(responseTimes)))

	total := int64(0)
	for _, t := range responseTimes {
		total += int64(t.Value)
	}
	mean := total / int64(len(responseTimes))

	fmt.Printf("Wrote %d points at average rate of %.0f\n", totalPoints, float64(totalPoints)/timer.Elapsed().Seconds())
	fmt.Printf("%d requests failed for %d total points that didn't get posted.\n", failedRequests, failedRequests * *batchSize)
	fmt.Println("Average response time: ", time.Duration(mean))
	fmt.Println("Slowest response times:")
	for _, r := range responseTimes[:100] {
		fmt.Println(time.Duration(r.Value))
	}
}
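The sort.Sort(sort.Reverse(...)) call above only compiles because the runner package's response-time slice already implements sort.Interface; the explicit sort.Interface conversion is redundant but harmless. A self-contained illustration of the same descending-sort pattern follows; the responseTime/responseTimes types here are hypothetical stand-ins, not the runner package's actual types:

package main

import (
	"fmt"
	"sort"
)

// responseTime is a stand-in for the runner package's element type (assumption).
type responseTime struct{ Value int }

// responseTimes implements sort.Interface, ordered by Value ascending.
type responseTimes []responseTime

func (r responseTimes) Len() int           { return len(r) }
func (r responseTimes) Less(i, j int) bool { return r[i].Value < r[j].Value }
func (r responseTimes) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }

func main() {
	rts := responseTimes{{300}, {100}, {200}}
	// sort.Reverse flips Less, yielding a descending sort; the sort.Interface
	// conversion mirrors the snippet above but could be dropped.
	sort.Sort(sort.Reverse(sort.Interface(rts)))
	fmt.Println(rts) // [{300} {200} {100}]
}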
func Sorter(i interface{}) {
	switch i.(type) {
	case []string:
		r, ok := i.([]string) // type assertion
		if !ok {
			break
		}
		sort.StringSlice(r).Sort()
	case []int:
		r, ok := i.([]int)
		if !ok {
			break
		}
		sort.Sort(sort.IntSlice(r))
	case People:
		r, ok := i.(People)
		if !ok {
			break
		}
		sort.Sort(sort.Interface(r))
	}
}
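A brief usage sketch, assuming People is a slice type that implements sort.Interface (its definition is not shown above):

names := []string{"carol", "alice", "bob"}
Sorter(names) // names is now ["alice", "bob", "carol"]

ages := []int{42, 7, 19}
Sorter(ages) // ages is now [7, 19, 42]

// A People value hits the third case and is sorted through its own
// Len/Less/Swap methods; any type not listed in the switch is left untouched.

Because slices share their backing array, sorting in place inside Sorter is visible to the caller even though the argument is passed as interface{}.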
limitations under the License.
*/

package shuffle

import (
	"fmt"
	"math/rand"
	"sort"
	"strings"
	"testing"
	"time"
)

// shuffle.Interface is a subset of sort.Interface
var _ Interface = sort.Interface(nil)

func fillAZ(az []string) {
	b := []byte{65}
	for i := 0; i < 26; i++ {
		az[i] = string(b)
		b[0]++
	}
}

func makeAZ() []string {
	var az [26]string
	fillAZ(az[:])
	return az[:]
}
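The blank-identifier assignment documents that any sort.Interface value also satisfies shuffle.Interface, i.e. Len and Swap are all that shuffling needs. As an illustration of why that subset suffices, here is a self-contained Fisher-Yates sketch written against those two methods; the Shuffleable interface and fisherYates function are illustrative only, not the shuffle package's actual API:

// Shuffleable is the Len/Swap subset that shuffling needs (illustrative only).
type Shuffleable interface {
	Len() int
	Swap(i, j int)
}

// fisherYates randomizes data in place using only Len and Swap,
// so any sort.Interface implementation can be passed to it.
func fisherYates(data Shuffleable, rng *rand.Rand) {
	for i := data.Len() - 1; i > 0; i-- {
		j := rng.Intn(i + 1)
		data.Swap(i, j)
	}
}

For example, fisherYates(sort.StringSlice(makeAZ()), rand.New(rand.NewSource(time.Now().UnixNano()))) would shuffle the A-Z slice built by the helpers above.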
	list []item
}

func (v vector) Len() int           { return len(v.list) }
func (v vector) Less(i, j int) bool { return v.list[i].id < v.list[j].id }
func (v vector) Swap(i, j int)      { v.list[i], v.list[j] = v.list[j], v.list[i] }

var _ = sort.Interface(&vector{})

func (v vector) String() string {
	buf := []byte{'{'}
	for i, x := range v.list {
		if i > 0 {
			buf = append(buf, ' ')
		}
		buf = strconv.AppendUint(buf, uint64(x.id), 10)
		buf = append(buf, ':')
		buf = strconv.AppendUint(buf, uint64(x.t), 10)
	}
	buf = append(buf, '}')
	return string(buf)
}
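The item element type is not shown above; judging from Less and the two AppendUint calls, it presumably carries numeric id and t fields, roughly:

// Inferred, hypothetical definition of item; the field widths are a guess
// consistent with the uint64 conversions above, not the original source.
type item struct {
	id uint32 // sort key compared in Less
	t  uint32 // value printed after the ':' in String
}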
func main() {
	var cfg *runner.Config
	var err error

	runtime.GOMAXPROCS(runtime.NumCPU())
	flag.Parse()

	if *test == "" {
		fmt.Println("'-test' flag is required")
		return
	}

	cfg, err = runner.DecodeFile(*test)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Command-line flags override values from the config file.
	if *batchSize != 0 {
		cfg.Write.BatchSize = *batchSize
	}
	if *concurrency != 0 {
		cfg.Write.Concurrency = *concurrency
	}
	if *batchInterval != 0*time.Second {
		cfg.Write.BatchInterval = batchInterval.String()
	}
	if *database != "" {
		cfg.Write.Database = *database
	}
	if *address != "" {
		cfg.Write.Address = *address
	}
	if *precision != "" {
		cfg.Write.Precision = *precision
	}

	d := make(chan struct{})
	seriesQueryResults := make(chan runner.QueryResults)
	if cfg.SeriesQuery.Enabled {
		go runner.SeriesQuery(cfg, d, seriesQueryResults)
	}

	measurementQueryResults := make(chan runner.QueryResults)
	ts := make(chan time.Time)
	if cfg.MeasurementQuery.Enabled {
		go runner.MeasurementQuery(cfg, ts, measurementQueryResults)
	}

	// Get the stress results
	totalPoints, failedRequests, responseTimes, timer := runner.Run(cfg, d, ts)

	sort.Sort(sort.Reverse(sort.Interface(responseTimes)))

	total := int64(0)
	for _, t := range responseTimes {
		total += int64(t.Value)
	}
	mean := total / int64(len(responseTimes))

	fmt.Printf("Wrote %d points at average rate of %.0f\n", totalPoints, float64(totalPoints)/timer.Elapsed().Seconds())
	fmt.Printf("%d requests failed for %d total points that didn't get posted.\n", failedRequests, failedRequests * *batchSize)
	fmt.Println("Average response time: ", time.Duration(mean))
	fmt.Println("Slowest response times:")
	for _, r := range responseTimes[:100] {
		fmt.Println(time.Duration(r.Value))
	}

	// Get series query results
	if cfg.SeriesQuery.Enabled {
		qrs := <-seriesQueryResults
		queryTotal := int64(0)
		for _, qt := range qrs.ResponseTimes {
			queryTotal += int64(qt.Value)
		}
		seriesQueryMean := queryTotal / int64(len(qrs.ResponseTimes))
		fmt.Printf("Queried Series %d times with an average response time of %v milliseconds\n", qrs.TotalQueries, time.Duration(seriesQueryMean).Seconds()*1000)
	}

	// Get measurement query results
	if cfg.MeasurementQuery.Enabled {
		qrs := <-measurementQueryResults
		queryTotal := int64(0)
		for _, qt := range qrs.ResponseTimes {
			queryTotal += int64(qt.Value)
		}
		measurementQueryMean := queryTotal / int64(len(qrs.ResponseTimes))
		fmt.Printf("Queried Measurement %d times with an average response time of %v milliseconds\n", qrs.TotalQueries, time.Duration(measurementQueryMean).Seconds()*1000)
	}

	return
}
package translation

import (
	"sort"
	"testing"
)

// Check this here to avoid unnecessary import of sort package.
var _ = sort.Interface(make(SortableByID, 0, 0))

func TestNewSingleTranslation(t *testing.T) {
	t.Skipf("not implemented")
}

func TestNewPluralTranslation(t *testing.T) {
	t.Skipf("not implemented")
}
func main() {
	var cfg *runner.Config
	var err error

	runtime.GOMAXPROCS(runtime.NumCPU())
	flag.Parse()

	cfg = runner.NewConfig()

	if len(ms) == 0 {
		ms = append(ms, "cpu")
	}
	for _, m := range ms {
		cfg.Series = append(cfg.Series, runner.NewSeries(m, 100, 100000))
	}

	if *test != "" {
		cfg, err = runner.DecodeFile(*test)
		if err != nil {
			fmt.Println(err)
			return
		}
	}

	d := make(chan struct{})
	seriesQueryResults := make(chan runner.QueryResults)
	if cfg.SeriesQuery.Enabled {
		go runner.SeriesQuery(cfg, d, seriesQueryResults)
	}

	measurementQueryResults := make(chan runner.QueryResults)
	ts := make(chan time.Time)
	if cfg.MeasurementQuery.Enabled {
		go runner.MeasurementQuery(cfg, ts, measurementQueryResults)
	}

	// Get the stress results
	totalPoints, failedRequests, responseTimes, timer := runner.Run(cfg, d, ts)

	sort.Sort(sort.Reverse(sort.Interface(responseTimes)))

	total := int64(0)
	for _, t := range responseTimes {
		total += int64(t.Value)
	}
	mean := total / int64(len(responseTimes))

	fmt.Printf("Wrote %d points at average rate of %.0f\n", totalPoints, float64(totalPoints)/timer.Elapsed().Seconds())
	fmt.Printf("%d requests failed for %d total points that didn't get posted.\n", failedRequests, failedRequests * *batchSize)
	fmt.Println("Average response time: ", time.Duration(mean))
	fmt.Println("Slowest response times:")
	for _, r := range responseTimes[:100] {
		fmt.Println(time.Duration(r.Value))
	}

	// Get series query results
	if cfg.SeriesQuery.Enabled {
		qrs := <-seriesQueryResults
		queryTotal := int64(0)
		for _, qt := range qrs.ResponseTimes {
			queryTotal += int64(qt.Value)
		}
		seriesQueryMean := queryTotal / int64(len(qrs.ResponseTimes))
		fmt.Printf("Queried Series %d times with an average response time of %v milliseconds\n", qrs.TotalQueries, time.Duration(seriesQueryMean).Seconds()*1000)
	}

	// Get measurement query results
	if cfg.MeasurementQuery.Enabled {
		qrs := <-measurementQueryResults
		queryTotal := int64(0)
		for _, qt := range qrs.ResponseTimes {
			queryTotal += int64(qt.Value)
		}
		measurementQueryMean := queryTotal / int64(len(qrs.ResponseTimes))
		fmt.Printf("Queried Measurement %d times with an average response time of %v milliseconds\n", qrs.TotalQueries, time.Duration(measurementQueryMean).Seconds()*1000)
	}

	return
}