func (as *AgaveSender) handleOneItem(subgroup string, values string) { var url bytes.Buffer if err := URLTEMPLATE.Execute(&url, struct { Group string Values string Time int64 Template string Title string Graphname string Step int64 }{ subgroup, values, time.Now().Unix(), as.GraphTemplate, as.GraphName, as.GraphName, as.Step, }); err != nil { logger.Errf("%s unable to generate template %s", as.Id, err) return } as.sendPoint(url.String()) }
func (as *AgaveSender) sendPoint(url string) { for _, host := range as.Hosts { req, _ := http.NewRequest("GET", fmt.Sprintf("http://%s%s", host, url), nil) req.Header = DEFAULT_HEADERS logger.Debugf("%s %s", as.Id, req.URL) resp, err := AgaveHttpClient.Do(req) if err != nil { logger.Errf("%s Unable to do request %s", as.Id, err) continue } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { logger.Errf("%s %s %d %s", as.Id, req.URL, resp.StatusCode, err) continue } logger.Infof("%s %s %d %s", as.Id, req.URL, resp.StatusCode, body) } }
func Send(request *cocaine.Request, response *cocaine.Response) { raw := <-request.Read() var task Task err := common.Unpack(raw, &task) if err != nil { response.ErrorMsg(-100, err.Error()) return } logger.Debugf("%s Task: %v", task.Id, task) task.Config.Id = task.Id task.Config.Hosts, err = getAgaveHosts() if err != nil { response.ErrorMsg(-100, err.Error()) return } if len(task.Config.Fields) == 0 { task.Config.Fields = DEFAULT_FIELDS } if task.Config.Step == 0 { task.Config.Step = DEFAULT_STEP } logger.Debugf("%s Fields: %v Step: %d", task.Id, task.Config.Fields, task.Config.Step) as, err := agave.NewAgaveSender(task.Config) if err != nil { logger.Errf("%s Unexpected error %s", task.Id, err) response.ErrorMsg(-100, err.Error()) response.Close() return } as.Send(task.Data) response.Write("OK") response.Close() }
func (r *RazladkiSender) Send(data tasks.DataType, timestamp uint64) error { res, err := r.send(data, timestamp) if err != nil { return err } var buffer = new(bytes.Buffer) if err = json.NewEncoder(buffer).Encode(res); err != nil { return err } url := fmt.Sprintf("http://%s/save_new_data_json/%s", r.Host, r.Project) logger.Infof("%s send to url %s, data %s", r.id, url, buffer.Bytes()) req, err := http.NewRequest("POST", url, buffer) if err != nil { return err } resp, err := RazladkiHttpClient.Do(req) if err != nil { logger.Errf("%s unable to do http request: %v", r.id, err) return err } defer resp.Body.Close() logger.Infof("%s response status %d %s", r.id, resp.StatusCode, resp.Status) if resp.StatusCode != http.StatusOK { b, err := ioutil.ReadAll(resp.Body) if err != nil { return fmt.Errorf("bad response code %d: %s", resp.StatusCode, resp.Status) } return fmt.Errorf("bad response code %d %s: %s", resp.StatusCode, resp.Status, b) } return nil }
// Parsing runs one parsing task end to end: fetch raw data from the target,
// optionally push it through the parser app, optionally stage it in the
// datagrid (replacing the payload with a token), then fan the payload out to
// every configured aggregator concurrently and collect results into a
// ParsingResult.
func Parsing(ctx *ParsingContext, task tasks.ParsingTask) (*ParsingResult, error) {
	logger.Infof("%s start parsing", task.Id)

	var (
		blob    []byte
		err     error
		payload interface{}
		wg      sync.WaitGroup
	)

	blob, err = fetchDataFromTarget(&task)
	if err != nil {
		logger.Errf("%s error `%v` occured while fetching data", task.Id, err)
		return nil, err
	}

	// Stage 1 (optional): hand the raw blob to the parser app; the parsed
	// result is written back into blob.
	if !task.ParsingConfig.SkipParsingStage() {
		logger.Infof("%s Send data to parsing", task.Id)
		parsingApp, err := ctx.Resolver.Resolve(ctx.Ctx, common.PARSINGAPP)
		if err != nil {
			logger.Errf("%s error `%v` occured while resolving %s", task.Id, err, common.PARSINGAPP)
			return nil, err
		}
		// Pack error deliberately ignored; the downstream Wait reports failures.
		taskToParser, _ := common.Pack([]interface{}{task.Id, task.ParsingConfig.Parser, blob})
		if err := parsingApp.Do("enqueue", "parse", taskToParser).Wait(ctx.Ctx, &blob); err != nil {
			logger.Errf("%s error `%v` occured while parsing data", task.Id, err)
			return nil, err
		}
	}

	// Stage 2 (optional): stash the blob in the datagrid and pass aggregators
	// an opaque token instead of the full data.
	payload = blob
	if !task.ParsingConfig.Raw {
		logger.Debugf("%s Use %s for handle data", task.Id, common.DATABASEAPP)
		datagrid, err := ctx.Resolver.Resolve(ctx.Ctx, common.DATABASEAPP)
		if err != nil {
			logger.Errf("%s unable to get DG %v", task.Id, err)
			return nil, err
		}
		var token string
		if err := datagrid.Do("enqueue", "put", blob).Wait(ctx.Ctx, &token); err != nil {
			logger.Errf("%s unable to put data to DG %v", task.Id, err)
			return nil, err
		}
		// Deferred on purpose: the staged table must outlive the aggregator
		// goroutines below, so the drop fires only after wg.Wait() at return.
		defer func() {
			taskToDatagrid, _ := common.Pack([]interface{}{token})
			datagrid.Do("enqueue", "drop", taskToDatagrid)
			logger.Debugf("%s Drop table", task.Id)
		}()
		payload = token
	}

	// Stage 3: fan out one goroutine per (aggregation config, data item);
	// individual failures are logged and skipped, not propagated.
	pr := NewParsingResult()
	for aggLogName, aggCfg := range task.AggregationConfigs {
		for k, v := range aggCfg.Data {
			aggType, err := v.Type()
			if err != nil {
				logger.Errf("no type in configuration: %s %s %v", aggLogName, k, v)
				return nil, err
			}
			logger.Debugf("%s Send to %s %s type %s %v", task.Id, aggLogName, k, aggType, v)
			wg.Add(1)
			// Loop variables are passed as arguments to avoid capture issues.
			go func(name string, dataName string, v interface{}, configName string) {
				defer wg.Done()
				app, err := ctx.Resolver.Resolve(ctx.Ctx, name)
				if err != nil {
					logger.Errf("%s %s %s", task.Id, name, err)
					return
				}
				// Task structure
				t, _ := common.Pack(map[string]interface{}{
					"config":   v,
					"token":    payload,
					"prevtime": task.PrevTime,
					"currtime": task.CurrTime,
					"id":       task.Id,
				})
				var rawRes []byte
				if err := app.Do("enqueue", "aggregate_host", t).Wait(ctx.Ctx, &rawRes); err != nil {
					logger.Errf("%s Failed task: %v", task.Id, err)
					return
				}
				logger.Debugf("result for %s %s: %v", configName, dataName, rawRes)
				// pr.Put is assumed to be safe for concurrent use — TODO confirm.
				pr.Put(configName, dataName, rawRes)
			}(aggType, k, v, aggLogName)
		}
	}
	wg.Wait()
	logger.Infof("%s Done", task.Id)
	return pr, nil
}
func (as *AgaveSender) send(data tasks.DataType) (map[string][]string, error) { // Repack data by subgroups logger.Debugf("%s Data to send: %v", as.Id, data) var repacked map[string][]string = make(map[string][]string) for _, aggname := range as.Items { var root, metricname string items := strings.SplitN(aggname, ".", 2) if len(items) > 1 { root, metricname = items[0], items[1] } else { root = items[0] } for subgroup, value := range data[root] { rv := reflect.ValueOf(value) switch rv.Kind() { case reflect.Slice, reflect.Array: if len(metricname) != 0 { // we expect neted map here continue } if len(as.Fields) == 0 || len(as.Fields) != rv.Len() { logger.Errf("%s Unable to send a slice. Fields len %d, len of value %d", as.Id, len(as.Fields), rv.Len()) continue } forJoin := make([]string, 0, len(as.Fields)) for i, field := range as.Fields { forJoin = append(forJoin, fmt.Sprintf("%s:%s", field, common.InterfaceToString(rv.Index(i).Interface()))) } repacked[subgroup] = append(repacked[subgroup], strings.Join(forJoin, "+")) case reflect.Map: if len(metricname) == 0 { continue } key := reflect.ValueOf(metricname) mapVal := rv.MapIndex(key) if !mapVal.IsValid() { continue } value := reflect.ValueOf(mapVal.Interface()) switch value.Kind() { case reflect.Slice, reflect.Array: if len(as.Fields) == 0 || len(as.Fields) != value.Len() { logger.Errf("%s Unable to send a slice. 
Fields len %d, len of value %d", as.Id, len(as.Fields), rv.Len()) continue } forJoin := make([]string, 0, len(as.Fields)) for i, field := range as.Fields { forJoin = append(forJoin, fmt.Sprintf("%s:%s", field, common.InterfaceToString(value.Index(i).Interface()))) } repacked[subgroup] = append(repacked[subgroup], strings.Join(forJoin, "+")) case reflect.Map: //unsupported default: repacked[subgroup] = append(repacked[subgroup], fmt.Sprintf("%s:%s", metricname, common.InterfaceToString(value.Interface()))) } // } default: if len(metricname) != 0 { // we expect neted map here continue } repacked[subgroup] = append(repacked[subgroup], fmt.Sprintf("%s:%s", root, common.InterfaceToString(value))) } } } return repacked, nil }