func (c *ElasticSearchStorage) SearchFlows(fsq flow.FlowSearchQuery) (*flow.FlowSet, error) { if !c.client.Started() { return nil, errors.New("ElasticSearchStorage is not yet started") } request, err := c.requestFromQuery(fsq) if err != nil { return nil, err } var query map[string]interface{} if fsq.Filter != nil { query = c.formatFilter(fsq.Filter) } request["query"] = query if fsq.Sort { request["sort"] = map[string]interface{}{ "Metric.Last": map[string]string{ "order": "desc", }, } } out, err := c.sendRequest("flow", request) if err != nil { return nil, err } flowset := flow.NewFlowSet() if out.Hits.Len() > 0 { for _, d := range out.Hits.Hits { f := new(flow.Flow) if err := json.Unmarshal([]byte(*d.Source), f); err != nil { return nil, err } flowset.Flows = append(flowset.Flows, f) } } if fsq.Dedup { if err := flowset.Dedup(fsq.DedupBy); err != nil { return nil, err } } return flowset, nil }
func (c *OrientDBStorage) SearchFlows(fsq flow.FlowSearchQuery) (*flow.FlowSet, error) { interval := fsq.PaginationRange filter := fsq.Filter sql := "SELECT FROM Flow" if conditional := filter.Expression(""); conditional != "" { sql += " WHERE " + conditional } if interval != nil { sql += fmt.Sprintf(" LIMIT %d, %d", interval.To-interval.From, interval.From) } if fsq.Sort { sql += " ORDER BY Metric.Last" } docs, err := c.client.Sql(sql) if err != nil { return nil, err } flowset := flow.NewFlowSet() for _, doc := range docs { flow, err := documentToFlow(doc) if err != nil { return nil, err } flowset.Flows = append(flowset.Flows, flow) } if fsq.Dedup { if err := flowset.Dedup(fsq.DedupBy); err != nil { return nil, err } } return flowset, nil }
func TestFlowQuery(t *testing.T) { delay := 500 * time.Second al := flow.NewTableAllocator(delay, delay) f := func(flows []*flow.Flow) {} ft1 := al.Alloc(f) flow.GenerateTestFlows(t, ft1, 1, "probe-tid1") flows1 := flow.GenerateTestFlows(t, ft1, 2, "probe-tid2") ft2 := al.Alloc(f) flows2 := flow.GenerateTestFlows(t, ft2, 3, "probe-tid2") ft1.Start() ft2.Start() time.Sleep(time.Second) obj, _ := proto.Marshal(&flow.FlowSearchQuery{ Filter: &flow.Filter{ BoolFilter: &flow.BoolFilter{ Op: flow.BoolFilterOp_OR, Filters: []*flow.Filter{ &flow.Filter{ TermStringFilter: &flow.TermStringFilter{Key: "NodeTID", Value: "probe-tid2"}, }, &flow.Filter{ TermStringFilter: &flow.TermStringFilter{Key: "ANodeTID", Value: "probe-tid2"}, }, &flow.Filter{ TermStringFilter: &flow.TermStringFilter{Key: "BNodeTID", Value: "probe-tid2"}, }, }, }, }, }) query := &flow.TableQuery{ Type: "FlowSearchQuery", Obj: obj, } reply := al.QueryTable(query) ft1.Stop() ft2.Stop() flowset := flow.NewFlowSet() for _, r := range reply.Obj { var fsr flow.FlowSearchReply if err := proto.Unmarshal(r, &fsr); err != nil { t.Fatal(err.Error()) } flowset.Merge(fsr.FlowSet, flow.MergeContext{}) } if len(flowset.Flows) != len(flows1)+len(flows2) { t.Fatalf("FlowQuery should return at least one flow") } for _, flow := range flowset.Flows { if flow.NodeTID != "probe-tid2" { t.Fatalf("FlowQuery should only return flows with probe-tid2, got: %s", flow) } } }
// Exec runs the Flows() Gremlin step. Depending on the previous step it
// either looks up all flows (from a *GraphTraversal) or only flows attached
// to the traversed nodes (from a *GraphTraversalV). When the graph context
// carries a time and a storage backend is available, flows come from storage;
// otherwise they are fetched live through the table client.
func (s *FlowGremlinTraversalStep) Exec(last traversal.GraphTraversalStep) (traversal.GraphTraversalStep, error) {
	var graphTraversal *traversal.GraphTraversal
	var err error
	var paramsFilter *flow.Filter

	// Optional step parameters are converted into a flow filter.
	if len(s.context.Params) > 0 {
		if paramsFilter, err = paramsToFilter(s.context.Params...); err != nil {
			return nil, err
		}
	}

	// The lookup interval starts at 0 rather than Range[0]: the real
	// offset is applied afterwards with flowset.Slice, so the backend must
	// return everything up to the upper bound.
	var interval *flow.Range
	if s.context.StepContext.Range != nil {
		interval = &flow.Range{From: 0, To: s.context.StepContext.Range[1]}
	}

	flowset := flow.NewFlowSet()

	switch tv := last.(type) {
	case *traversal.GraphTraversal:
		graphTraversal = tv

		if context := graphTraversal.Graph.GetContext(); context.Time != nil && s.Storage != nil {
			// Time-contexted traversal: read historical flows from storage.
			var flows []*flow.Flow
			if flows, err = storage.LookupFlows(s.Storage, context, paramsFilter, interval); err == nil {
				flowset.Flows = append(flowset.Flows, flows...)
			}
		} else {
			// Live traversal: query the flow tables directly.
			flowSearchQuery := &flow.FlowSearchQuery{
				Filter: paramsFilter,
				Range:  interval,
				Sort:   s.context.StepContext.Sort,
				Dedup:  s.context.StepContext.Dedup,
			}
			flowset, err = s.TableClient.LookupFlows(flowSearchQuery)
		}

		// Apply the requested pagination window on the merged result.
		if r := s.context.StepContext.Range; r != nil {
			flowset.Slice(int(r[0]), int(r[1]))
		}
	case *traversal.GraphTraversalV:
		graphTraversal = tv.GraphTraversal

		// Build the host -> node IDs map of the traversed nodes, skipping
		// nodes whose type cannot carry a capture.
		hnmap := make(flow.HostNodeIDMap)
		graphTraversal.Graph.RLock()
		for _, v := range tv.Values() {
			node := v.(*graph.Node)
			if t, ok := node.Metadata()["Type"]; !ok || !common.IsCaptureAllowed(t.(string)) {
				continue
			}
			hnmap[node.Host()] = append(hnmap[node.Host()], string(node.ID))
		}
		graphTraversal.Graph.RUnlock()

		if context := graphTraversal.Graph.GetContext(); context.Time != nil && s.Storage != nil {
			// Time-contexted traversal: historical flows restricted to the
			// selected nodes.
			var flows []*flow.Flow
			if flows, err = storage.LookupFlowsByNodes(s.Storage, context, hnmap, paramsFilter, interval); err == nil {
				flowset.Flows = append(flowset.Flows, flows...)
			}
		} else {
			// Live traversal restricted to the selected nodes.
			flowSearchQuery := &flow.FlowSearchQuery{
				Filter: paramsFilter,
				Range:  interval,
				Sort:   s.context.StepContext.Sort,
				Dedup:  s.context.StepContext.Dedup,
			}
			flowset, err = s.TableClient.LookupFlowsByNodes(hnmap, flowSearchQuery)
		}

		// Apply the requested pagination window on the merged result.
		if r := s.context.StepContext.Range; r != nil {
			flowset.Slice(int(r[0]), int(r[1]))
		}
	default:
		return nil, traversal.ExecutionError
	}

	// Any lookup error from either branch is reported here, once.
	if err != nil {
		logging.GetLogger().Errorf("Error while looking for flows: %s", err.Error())
		return nil, err
	}

	return &FlowTraversalStep{GraphTraversal: graphTraversal, flowset: flowset}, nil
}