Example #1
func TestPipeTimeout(t *testing.T) {
	router := mux.NewRouter()
	pr := NewLocalPipeRouter().(*localPipeRouter)
	RegisterPipeRoutes(router, pr)
	pr.Stop() // we don't want the loop running in the background

	mtime.NowForce(time.Now())
	defer mtime.NowReset()

	// create a new pipe.
	id := "foo"
	ctx := context.Background()
	pipe, _, err := pr.Get(ctx, id, UIEnd)
	if err != nil {
		t.Fatalf("not ok: %v", err)
	}

	// move time forward such that the new pipe should time out
	mtime.NowForce(mtime.Now().Add(pipeTimeout))
	pr.timeout()
	if !pipe.Closed() {
		t.Fatalf("pipe didn't timeout")
	}

	// move time forward such that the pipe should be GCd
	mtime.NowForce(mtime.Now().Add(gcTimeout))
	pr.garbageCollect()
	if _, ok := pr.pipes[id]; ok {
		t.Fatalf("pipe not gc'd")
	}
}
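The test above leans on mtime's test clock to make timeout behaviour deterministic. A minimal sketch of the pattern, using only the NowForce/NowReset/Now calls that already appear in the snippet (the test name and durations are invented for illustration):

func TestFrozenClock(t *testing.T) {
	t0 := time.Now()
	mtime.NowForce(t0) // pin the clock: mtime.Now() now returns t0
	defer mtime.NowReset()

	if !mtime.Now().Equal(t0) {
		t.Fatalf("expected pinned clock to return %v, got %v", t0, mtime.Now())
	}

	// Advance time explicitly instead of sleeping.
	mtime.NowForce(t0.Add(time.Minute))
	if got := mtime.Now().Sub(t0); got != time.Minute {
		t.Fatalf("expected the clock to have advanced by 1m, got %v", got)
	}
}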
Example #2
// AddTable appends arbitrary key-value pairs to the Node, returning a new node.
// At most MaxTableRows pairs are added; the number of pairs dropped is recorded
// under TruncationCountPrefix+prefix.
func (node Node) AddTable(prefix string, labels map[string]string) Node {
	count := 0
	for key, value := range labels {
		if count >= MaxTableRows {
			break
		}
		node = node.WithLatest(prefix+key, mtime.Now(), value)
		count++
	}
	if len(labels) > MaxTableRows {
		truncationCount := fmt.Sprintf("%d", len(labels)-MaxTableRows)
		node = node.WithLatest(TruncationCountPrefix+prefix, mtime.Now(), truncationCount)
	}
	return node
}
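A hypothetical caller illustrating the truncation behaviour of AddTable; the helper name and prefix are invented, and the sketch assumes it sits in the same package as the snippet so that Node, MaxTableRows and TruncationCountPrefix are in scope:

func addManyLabels(node Node) Node {
	labels := map[string]string{}
	for i := 0; i < MaxTableRows+5; i++ {
		labels[fmt.Sprintf("label-%d", i)] = "value"
	}
	// At most MaxTableRows entries end up on the node (which ones is
	// arbitrary, since map iteration order is random); the count of dropped
	// entries (5 here) is recorded under TruncationCountPrefix+prefix.
	return node.AddTable("example-prefix-", labels)
}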
Example #3
func (c *container) GetNode() report.Node {
	c.RLock()
	defer c.RUnlock()
	latest := map[string]string{
		ContainerState:      c.StateString(),
		ContainerStateHuman: c.State(),
	}
	controls := []string{}

	if c.container.State.Paused {
		controls = append(controls, UnpauseContainer)
	} else if c.container.State.Running {
		uptime := (mtime.Now().Sub(c.container.State.StartedAt) / time.Second) * time.Second
		networkMode := ""
		if c.container.HostConfig != nil {
			networkMode = c.container.HostConfig.NetworkMode
		}
		latest[ContainerName] = strings.TrimPrefix(c.container.Name, "/")
		latest[ContainerUptime] = uptime.String()
		latest[ContainerRestartCount] = strconv.Itoa(c.container.RestartCount)
		latest[ContainerNetworkMode] = networkMode
		controls = append(controls, RestartContainer, StopContainer, PauseContainer, AttachContainer, ExecContainer)
	} else {
		controls = append(controls, StartContainer, RemoveContainer)
	}

	result := c.baseNode.WithLatests(latest)
	result = result.WithControls(controls...)
	result = result.WithMetrics(c.metrics())
	return result
}
Example #4
// Ping adds a periodic ping to a websocket connection.
func Ping(c *websocket.Conn) Websocket {
	p := &pingingWebsocket{conn: c}
	p.conn.SetPongHandler(p.pong)
	p.conn.SetReadDeadline(mtime.Now().Add(pongWait))
	p.pinger = time.AfterFunc(pingPeriod, p.ping)
	return p
}
Example #5
// ColorConnected colors nodes with the IsConnected key if
// they have edges to or from them. Edges from a node to itself
// are not counted (see #656).
func ColorConnected(r Renderer) Renderer {
	return CustomRenderer{
		Renderer: r,
		RenderFunc: func(input report.Nodes) report.Nodes {
			connected := map[string]struct{}{}
			void := struct{}{}

			for id, node := range input {
				if len(node.Adjacency) == 0 {
					continue
				}

				for _, adj := range node.Adjacency {
					if adj != id {
						connected[id] = void
						connected[adj] = void
					}
				}
			}

			output := input.Copy()
			for id := range connected {
				output[id] = output[id].WithLatest(IsConnected, mtime.Now(), "true")
			}
			return output
		},
	}
}
Example #6
// WriteMessage sets the write deadline and writes a message to the connection,
// holding the write lock so concurrent writers cannot interleave frames.
func (p *pingingWebsocket) WriteMessage(messageType int, data []byte) error {
	p.writeLock.Lock()
	defer p.writeLock.Unlock()
	if err := p.conn.SetWriteDeadline(mtime.Now().Add(writeWait)); err != nil {
		return err
	}
	return p.conn.WriteMessage(messageType, data)
}
Example #7
// WithLatests returns a fresh copy of n, with the key-value pairs in m
// merged into its Latest entries.
func (n Node) WithLatests(m map[string]string) Node {
	result := n.Copy()
	ts := mtime.Now()
	for k, v := range m {
		result.Latest = result.Latest.Set(k, ts, v)
	}
	return result
}
Example #8
func (pr *localPipeRouter) garbageCollect() {
	pr.Lock()
	defer pr.Unlock()
	now := mtime.Now()
	for pipeID, pipe := range pr.pipes {
		if pipe.Closed() && now.Sub(pipe.tombstoneTime) >= gcTimeout {
			delete(pr.pipes, pipeID)
		}
	}
}
Example #9
func (p *pingingWebsocket) ping() {
	p.writeLock.Lock()
	defer p.writeLock.Unlock()
	if err := p.conn.WriteControl(websocket.PingMessage, nil, mtime.Now().Add(writeWait)); err != nil {
		log.Errorf("websocket ping error: %v", err)
		p.conn.Close()
		return
	}
	p.pinger.Reset(pingPeriod)
}
Example #10
func (pr *localPipeRouter) Delete(_ context.Context, id string) error {
	pr.Lock()
	defer pr.Unlock()
	p, ok := pr.pipes[id]
	if !ok {
		return nil
	}
	p.Close()
	p.tombstoneTime = mtime.Now()
	return nil
}
Example #11
func (pr *localPipeRouter) Get(_ context.Context, id string, e End) (xfer.Pipe, io.ReadWriter, error) {
	pr.Lock()
	defer pr.Unlock()
	p, ok := pr.pipes[id]
	if !ok {
		log.Infof("Creating pipe id %s", id)
		p = &pipe{
			ui:    end{lastUsedTime: mtime.Now()},
			probe: end{lastUsedTime: mtime.Now()},
			Pipe:  xfer.NewPipe(),
		}
		pr.pipes[id] = p
	}
	if p.Closed() {
		return nil, nil, fmt.Errorf("Pipe %s closed", id)
	}
	end, endIO := p.end(e)
	end.refCount++
	return p, endIO, nil
}
Example #12
// Add adds a report to the collector's internal state. It implements Adder.
func (c *collector) Add(_ context.Context, rpt report.Report) error {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.reports = append(c.reports, rpt)
	c.timestamps = append(c.timestamps, mtime.Now())

	c.clean()
	c.cached = nil
	if rpt.Shortcut {
		c.Broadcast()
	}
	return nil
}
Example #13
func (c *collector) clean() {
	var (
		cleanedReports    = make([]report.Report, 0, len(c.reports))
		cleanedTimestamps = make([]time.Time, 0, len(c.timestamps))
		oldest            = mtime.Now().Add(-c.window)
	)
	for i, r := range c.reports {
		if c.timestamps[i].After(oldest) {
			cleanedReports = append(cleanedReports, r)
			cleanedTimestamps = append(cleanedTimestamps, c.timestamps[i])
		}
	}
	c.reports = cleanedReports
	c.timestamps = cleanedTimestamps
}
Example #14
// Report returns a merged report over all added reports. It implements
// Reporter.
func (c *collector) Report(_ context.Context) (report.Report, error) {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	// If the oldest report is still within range,
	// and there is a cached report, return that.
	if c.cached != nil && len(c.reports) > 0 {
		oldest := mtime.Now().Add(-c.window)
		if c.timestamps[0].After(oldest) {
			return *c.cached, nil
		}
	}

	c.clean()
	return c.merger.Merge(c.reports), nil
}
Example #15
// WriteJSON writes the JSON encoding of v to the connection.
func (p *pingingWebsocket) WriteJSON(v interface{}) error {
	p.writeLock.Lock()
	defer p.writeLock.Unlock()
	// Set the write deadline before acquiring the writer so an error here
	// never leaves an unclosed writer behind.
	if err := p.conn.SetWriteDeadline(mtime.Now().Add(writeWait)); err != nil {
		return err
	}
	w, err := p.conn.NextWriter(websocket.TextMessage)
	if err != nil {
		return err
	}
	err1 := codec.NewEncoder(w, &codec.JsonHandle{}).Encode(v)
	err2 := w.Close()
	if err1 != nil {
		return err1
	}
	return err2
}
Example #16
func (pr *consulPipeRouter) Delete(ctx context.Context, id string) error {
	userID, err := pr.userIDer(ctx)
	if err != nil {
		return err
	}
	key := fmt.Sprintf("%s%s-%s", pr.prefix, userID, id)
	log.Infof("Delete %s", key)

	return pr.client.CAS(key, &consulPipe{}, func(in interface{}) (interface{}, bool, error) {
		if in == nil {
			return nil, false, fmt.Errorf("Pipe %s not found", id)
		}
		p := in.(*consulPipe)
		p.DeletedAt = mtime.Now()
		return p, false, nil
	})
}
Example #17
func (pr *localPipeRouter) timeout() {
	pr.Lock()
	defer pr.Unlock()
	now := mtime.Now()
	for id, pipe := range pr.pipes {
		if pipe.Closed() || (pipe.ui.refCount > 0 && pipe.probe.refCount > 0) {
			continue
		}

		if (pipe.ui.refCount == 0 && now.Sub(pipe.ui.lastUsedTime) >= pipeTimeout) ||
			(pipe.probe.refCount == 0 && now.Sub(pipe.probe.lastUsedTime) >= pipeTimeout) {
			log.Infof("Timing out pipe %s", id)
			pipe.Close()
			pipe.tombstoneTime = now
		}
	}
}
Example #18
// Tag adds pod parents to container nodes.
func (r *Reporter) Tag(rpt report.Report) (report.Report, error) {
	for id, n := range rpt.Container.Nodes {
		uid, ok := n.Latest.Lookup(docker.LabelPrefix + "io.kubernetes.pod.uid")
		if !ok {
			continue
		}

		// Tag the pause containers with "does-not-make-connections"
		if isPauseContainer(n, rpt) {
			n = n.WithLatest(report.DoesNotMakeConnections, mtime.Now(), "")
		}

		rpt.Container.Nodes[id] = n.WithParents(report.EmptySets.Add(
			report.Pod,
			report.EmptyStringSet.Add(report.MakePodNodeID(uid)),
		))
	}
	return rpt, nil
}
Example #19
func (r *Reporter) processTopology() (report.Topology, error) {
	t := report.MakeTopology().
		WithMetadataTemplates(MetadataTemplates).
		WithMetricTemplates(MetricTemplates)
	now := mtime.Now()
	deltaTotal, maxCPU, err := r.jiffies()
	if err != nil {
		return t, err
	}

	err = r.walker.Walk(func(p, prev Process) {
		pidstr := strconv.Itoa(p.PID)
		nodeID := report.MakeProcessNodeID(r.scope, pidstr)
		node := report.MakeNode(nodeID)
		for _, tuple := range []struct{ key, value string }{
			{PID, pidstr},
			{Name, p.Name},
			{Cmdline, p.Cmdline},
			{Threads, strconv.Itoa(p.Threads)},
		} {
			if tuple.value != "" {
				node = node.WithLatests(map[string]string{tuple.key: tuple.value})
			}
		}

		if p.PPID > 0 {
			node = node.WithLatests(map[string]string{PPID: strconv.Itoa(p.PPID)})
		}

		if deltaTotal > 0 {
			cpuUsage := float64(p.Jiffies-prev.Jiffies) / float64(deltaTotal) * 100.
			node = node.WithMetric(CPUUsage, report.MakeMetric().Add(now, cpuUsage).WithMax(maxCPU))
		}

		node = node.WithMetric(MemoryUsage, report.MakeMetric().Add(now, float64(p.RSSBytes)).WithMax(float64(p.RSSBytesLimit)))
		node = node.WithMetric(OpenFilesCount, report.MakeMetric().Add(now, float64(p.OpenFilesCount)).WithMax(float64(p.OpenFilesLimit)))

		t.AddNode(node)
	})

	return t, err
}
Example #20
func (pr *consulPipeRouter) Get(ctx context.Context, id string, e app.End) (xfer.Pipe, io.ReadWriter, error) {
	userID, err := pr.userIDer(ctx)
	if err != nil {
		return nil, nil, err
	}
	key := fmt.Sprintf("%s%s-%s", pr.prefix, userID, id)
	log.Infof("Get %s:%s", key, e)

	// Try to ensure the given end of the given pipe
	// is 'owned' by this pipe service replica in consul.
	err = pr.client.CAS(key, &consulPipe{}, func(in interface{}) (interface{}, bool, error) {
		var pipe *consulPipe
		if in == nil {
			pipe = &consulPipe{
				CreatedAt: mtime.Now(),
			}
		} else {
			pipe = in.(*consulPipe)
		}
		if !pipe.DeletedAt.IsZero() {
			return nil, false, fmt.Errorf("Pipe %s has been deleted", key)
		}
		end := pipe.addrFor(e)
		if end != "" && end != pr.advertise {
			return nil, true, fmt.Errorf("Error: Pipe %s has existing connection to %s", key, end)
		}
		pipe.setAddrFor(e, pr.advertise)
		pipe.acquire(e)
		return pipe, false, nil
	})
	if err != nil {
		return nil, nil, err
	}

	pipe := pr.waitForPipe(key)
	myEnd, _ := pipe.Ends()
	if e == app.ProbeEnd {
		_, myEnd = pipe.Ends()
	}
	return pipe, myEnd, nil
}
Example #21
func (pr *localPipeRouter) Release(_ context.Context, id string, e End) error {
	pr.Lock()
	defer pr.Unlock()

	p, ok := pr.pipes[id]
	if !ok {
		return fmt.Errorf("Pipe %s not found", id)
	}

	end, _ := p.end(e)
	end.refCount--
	if end.refCount > 0 {
		return nil
	}

	if !p.Closed() {
		end.lastUsedTime = mtime.Now()
	}

	return nil
}
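Taken together with Get (Example #11), a hypothetical caller would pair the two calls so that the end's refCount is decremented once it is done with the pipe. Only Get, Release, UIEnd and log.Errorf come from the snippets; the helper name and error handling are assumptions:

func withUIEnd(ctx context.Context, pr *localPipeRouter, id string, fn func(io.ReadWriter) error) error {
	_, rw, err := pr.Get(ctx, id, UIEnd)
	if err != nil {
		return err
	}
	defer func() {
		if err := pr.Release(ctx, id, UIEnd); err != nil {
			log.Errorf("release pipe %s: %v", id, err)
		}
	}()
	return fn(rw)
}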
Example #22
// Add the new control IDs to this NodeControls, producing a fresh NodeControls.
func (nc NodeControls) Add(ids ...string) NodeControls {
	return NodeControls{
		Timestamp: mtime.Now(),
		Controls:  nc.Controls.Add(ids...),
	}
}
Example #23
func (p *pingingWebsocket) pong(string) error {
	return p.conn.SetReadDeadline(mtime.Now().Add(pongWait))
}
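For the pong handler above to fire at all, something must be reading from the connection: gorilla/websocket only invokes ping/pong handlers while a read (ReadMessage or NextReader) is in progress. A sketch of such a read loop; the function name is an assumption, ReadMessage is the standard gorilla/websocket method:

func readLoop(conn *websocket.Conn) error {
	for {
		// ReadMessage blocks until a data message arrives; pongs received in
		// the meantime are handed to the registered handler, which is what
		// pushes the read deadline forward in pong() above.
		if _, _, err := conn.ReadMessage(); err != nil {
			return err // e.g. read deadline exceeded after missed pongs
		}
	}
}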
Example #24
func TestSummaries(t *testing.T) {
	{
		// Just a convenient source of some rendered nodes
		have := detailed.Summaries(fixture.Report, render.ProcessRenderer.Render(fixture.Report, render.FilterNoop))
		// The ids of the processes rendered above
		expectedIDs := []string{
			fixture.ClientProcess1NodeID,
			fixture.ClientProcess2NodeID,
			fixture.ServerProcessNodeID,
			fixture.NonContainerProcessNodeID,
			render.IncomingInternetID,
			render.OutgoingInternetID,
		}
		sort.Strings(expectedIDs)

		// It should summarize each node
		ids := []string{}
		for id := range have {
			ids = append(ids, id)
		}
		sort.Strings(ids)
		if !reflect.DeepEqual(expectedIDs, ids) {
			t.Fatalf("Expected Summaries to have summarized every node in the process renderer: %v, but got %v", expectedIDs, ids)
		}
	}

	// It should summarize nodes' metrics
	{
		t1, t2 := mtime.Now().Add(-1*time.Minute), mtime.Now()
		metric := report.MakeMetric().Add(t1, 1).Add(t2, 2)
		input := fixture.Report.Copy()

		input.Process.Nodes[fixture.ClientProcess1NodeID] = input.Process.Nodes[fixture.ClientProcess1NodeID].WithMetrics(report.Metrics{process.CPUUsage: metric})
		have := detailed.Summaries(input, render.ProcessRenderer.Render(input, render.FilterNoop))

		node, ok := have[fixture.ClientProcess1NodeID]
		if !ok {
			t.Fatalf("Expected output to have the node we added the metric to")
		}

		var row report.MetricRow
		ok = false
		for _, metric := range node.Metrics {
			if metric.ID == process.CPUUsage {
				row = metric
				ok = true
				break
			}
		}
		if !ok {
			t.Fatalf("Expected node to have the metric we added")
		}

		// Our summarized MetricRow
		want := report.MetricRow{
			ID:       process.CPUUsage,
			Label:    "CPU",
			Format:   "percent",
			Value:    2,
			Priority: 1,
			Metric: &report.Metric{
				Samples: nil,
				Min:     metric.Min,
				Max:     metric.Max,
				First:   metric.First,
				Last:    metric.Last,
			},
		}
		if !reflect.DeepEqual(want, row) {
			t.Fatalf("Expected to have summarized the node's metrics: %s", test.Diff(want, row))
		}
	}
}
Example #25
func TestNat(t *testing.T) {
	mtime.NowForce(mtime.Now())
	defer mtime.NowReset()

	// test that two containers, on the docker network, get their connections mapped
	// correctly.
	// the setup is this:
	//
	// container2 (10.0.47.2:222222), host2 (2.3.4.5:22223) ->
	//     host1 (1.2.3.4:80), container1 (10.0.47.1:80)

	// from the PoV of host1
	{
		f := makeFlow(updateType)
		addIndependant(&f, 1, "")
		f.Original = addMeta(&f, "original", "2.3.4.5", "1.2.3.4", 222222, 80)
		f.Reply = addMeta(&f, "reply", "10.0.47.1", "2.3.4.5", 80, 222222)
		ct := &mockFlowWalker{
			flows: []flow{f},
		}

		have := report.MakeReport()
		originalID := report.MakeEndpointNodeID("host1", "10.0.47.1", "80")
		have.Endpoint.AddNode(report.MakeNodeWith(originalID, map[string]string{
			Addr:      "10.0.47.1",
			Port:      "80",
			"foo":     "bar",
			Procspied: "true",
		}))

		want := have.Copy()
		wantID := report.MakeEndpointNodeID("host1", "1.2.3.4", "80")
		want.Endpoint.AddNode(report.MakeNodeWith(wantID, map[string]string{
			Addr:      "1.2.3.4",
			Port:      "80",
			"copy_of": originalID,
			"foo":     "bar",
			Procspied: "true",
		}))

		makeNATMapper(ct).applyNAT(have, "host1")
		if !reflect.DeepEqual(want, have) {
			t.Fatal(test.Diff(want, have))
		}
	}

	// from the PoV of host2
	{
		f := makeFlow(updateType)
		addIndependant(&f, 2, "")
		f.Original = addMeta(&f, "original", "10.0.47.2", "1.2.3.4", 22222, 80)
		f.Reply = addMeta(&f, "reply", "1.2.3.4", "2.3.4.5", 80, 22223)
		ct := &mockFlowWalker{
			flows: []flow{f},
		}

		have := report.MakeReport()
		originalID := report.MakeEndpointNodeID("host2", "10.0.47.2", "22222")
		have.Endpoint.AddNode(report.MakeNodeWith(originalID, map[string]string{
			Addr:      "10.0.47.2",
			Port:      "22222",
			"foo":     "baz",
			Procspied: "true",
		}))

		want := have.Copy()
		want.Endpoint.AddNode(report.MakeNodeWith(report.MakeEndpointNodeID("host2", "2.3.4.5", "22223"), map[string]string{
			Addr:      "2.3.4.5",
			Port:      "22223",
			"copy_of": originalID,
			"foo":     "baz",
			Procspied: "true",
		}))

		makeNATMapper(ct).applyNAT(have, "host1")
		if !reflect.DeepEqual(want, have) {
			t.Fatal(test.Diff(want, have))
		}
	}
}
Example #26
					docker.ContainerName: container2Name,
					report.HostNodeID:    serverHostNodeID,
				}).
					WithSets(report.EmptySets.
						Add(docker.ContainerIPs, report.MakeStringSet(container2IP)).
						Add(docker.ContainerIPsWithScopes, report.MakeStringSet(report.MakeAddressNodeID("", container2IP))),
					).WithTopology(report.Container),
				pauseContainerNodeID: report.MakeNodeWith(pauseContainerNodeID, map[string]string{
					docker.ContainerID:   pauseContainerID,
					docker.ContainerName: pauseContainerName,
					report.HostNodeID:    serverHostNodeID,
				}).
					WithSets(report.EmptySets.
						Add(docker.ContainerIPs, report.MakeStringSet(pauseContainerIP)).
						Add(docker.ContainerIPsWithScopes, report.MakeStringSet(report.MakeAddressNodeID("", pauseContainerIP))),
					).WithTopology(report.Container).WithLatest(report.DoesNotMakeConnections, mtime.Now(), ""),
			},
		},
		Host: report.Topology{
			Nodes: report.Nodes{
				serverHostNodeID: report.MakeNodeWith(serverHostNodeID, map[string]string{
					report.HostNodeID: serverHostNodeID,
				}).
					WithSets(report.EmptySets.
						Add(host.LocalNetworks, report.MakeStringSet("192.168.0.0/16")),
					).WithTopology(report.Host),
			},
		},
	}
)