func GetIndexHostNode(indexName, bucketName, serverUserName, serverPassword, hostaddress string) (string, error) { client := &http.Client{} address := "http://" + hostaddress + "/indexStatus" req, _ := http.NewRequest("GET", address, nil) req.SetBasicAuth(serverUserName, serverPassword) req.Header.Add("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8") resp, err := client.Do(req) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted { log.Printf(address) log.Printf("%v", req) log.Printf("%v", resp) log.Printf("Get indexStatus failed") } // todo : error out if response is error tc.HandleError(err, "Get Stats") defer resp.Body.Close() response := make(map[string]interface{}) body, _ := ioutil.ReadAll(resp.Body) err = json.Unmarshal(body, &response) if err != nil { tc.HandleError(err, "Get IndexStatus :: Unmarshal of response body") return "", nil } c, e := CreateClient(hostaddress, "2itest") if e != nil { return "", e } defer c.Close() defnID, _ := GetDefnID(c, bucketName, indexName) indexes := response["indexes"].([]interface{}) for _, index := range indexes { i := index.(map[string]interface{}) if i["id"].(float64) == float64(defnID) { hosts := i["hosts"].([]interface{}) return hosts[0].(string), nil } } return "", errors.New("Index not found in /indexStatus") }
// RunCommands runs the scan workload described by cfg against the given
// cluster, writing periodic stats to statsW, and returns the aggregated
// per-spec results. It builds a pool of cfg.Clients GSI clients, fans jobs
// out to cfg.Concurrency Worker goroutines over jobQ, and collects their
// JobResults through aggrQ into a single ResultAggregator goroutine.
func RunCommands(cluster string, cfg *Config, statsW io.Writer) (*Result, error) {
	t0 := time.Now() // start of warmup; used for WarmupDuration below
	var result Result
	var clients []*qclient.GsiClient
	var jobQ chan *Job
	var aggrQ chan *JobResult
	var wg1, wg2 sync.WaitGroup // wg1: Workers, wg2: ResultAggregator

	// Fall back to the package default histogram buckets when none configured.
	if len(cfg.LatencyBuckets) == 0 {
		cfg.LatencyBuckets = defaultLatencyBuckets
	}

	// Size the queryport connection pool to match the requested concurrency.
	config := c.SystemConfig.SectionConfig("queryport.client.", true)
	config.SetValue("settings.poolSize", int(cfg.Concurrency))
	client, err := qclient.NewGsiClient(cluster, config)
	if err != nil {
		return nil, err
	}
	defer client.Close()

	// Refresh the index metadata snapshot used to resolve spec -> DefnId.
	indexes, err := client.Refresh()
	if err != nil {
		return nil, err
	}

	// One GSI client per configured client slot; workers share them
	// round-robin (i % cfg.Clients) below. All are closed on return.
	clients = make([]*qclient.GsiClient, cfg.Clients)
	for i := 0; i < cfg.Clients; i++ {
		c, err := qclient.NewGsiClient(cluster, config)
		if err != nil {
			return nil, err
		}
		defer c.Close()
		clients[i] = c
	}

	// Deep buffers (concurrency*1000) so the scheduling loop below rarely
	// blocks on submission.
	jobQ = make(chan *Job, cfg.Concurrency*1000)
	aggrQ = make(chan *JobResult, cfg.Concurrency*1000)
	for i := 0; i < cfg.Concurrency; i++ {
		wg1.Add(1)
		go Worker(jobQ, clients[i%cfg.Clients], aggrQ, &wg1)
	}
	wg2.Add(1)
	go ResultAggregator(aggrQ, statsW, &wg2)

	// Resolve each spec's index definition id and set up its result record.
	for i, spec := range cfg.ScanSpecs {
		if spec.Id == 0 {
			spec.Id = uint64(i) // default spec id: its position in the list
		}
		for _, index := range indexes {
			if index.Definition.Bucket == spec.Bucket && index.Definition.Name == spec.Index {
				spec.DefnId = uint64(index.Definition.DefnId)
			}
		}
		// Histogram bucket labeler: MinInt64/MaxInt64 are the open-ended
		// sentinel bounds; everything else is rendered as a duration.
		hFn := func(v int64) string {
			if v == math.MinInt64 {
				return "0"
			} else if v == math.MaxInt64 {
				return "inf"
			}
			return fmt.Sprint(time.Nanosecond * time.Duration(v))
		}
		res := new(ScanResult)
		res.ErrorCount = platform.NewAlignedUint64(0)
		res.LatencyHisto.Init(cfg.LatencyBuckets, hFn)
		res.Id = spec.Id
		result.ScanResults = append(result.ScanResults, res)
	}

	// warming up GsiClient
	// Run one job (result discarded) on every client so connection setup cost
	// is not charged to the measured runs. The inner break means only the
	// first spec is used per client.
	for _, client := range clients {
		for _, spec := range cfg.ScanSpecs {
			job := &Job{spec: spec, result: nil}
			RunJob(client, job, nil)
			break
		}
	}
	fmt.Println("GsiClients warmed up ...")
	result.WarmupDuration = float64(time.Since(t0).Nanoseconds()) / float64(time.Second)

	// Round robin scheduling of jobs
	var allFinished bool

loop:
	for {
		allFinished = true
		// Submit at most one job per spec per pass until every spec has been
		// enqueued Repeat+1 times; spec.iteration is read/advanced atomically
		// since workers presumably touch it too (platform atomics).
		for i, spec := range cfg.ScanSpecs {
			if iter := platform.LoadUint32(&spec.iteration); iter < spec.Repeat+1 {
				j := &Job{
					spec:   spec,
					result: result.ScanResults[i],
				}
				jobQ <- j
				platform.AddUint32(&spec.iteration, 1)
				allFinished = false
			}
		}
		if allFinished {
			break loop
		}
	}

	// Shutdown order matters: close jobQ so workers drain and exit, wait for
	// them (the only aggrQ senders), then close aggrQ and wait for the
	// aggregator to flush.
	close(jobQ)
	wg1.Wait()
	close(aggrQ)
	wg2.Wait()
	return &result, err
}