func TestContainer(t *testing.T) {
	oldDialStub, oldNewClientConnStub := docker.DialStub, docker.NewClientConnStub
	defer func() { docker.DialStub, docker.NewClientConnStub = oldDialStub, oldNewClientConnStub }()

	docker.DialStub = func(network, address string) (net.Conn, error) {
		return nil, nil
	}

	reader, writer := io.Pipe()
	connection := &mockConnection{reader}

	docker.NewClientConnStub = func(c net.Conn, r *bufio.Reader) docker.ClientConn {
		return connection
	}

	c := docker.NewContainer(container1)
	err := c.StartGatheringStats()
	if err != nil {
		t.Errorf("%v", err)
	}
	defer c.StopGatheringStats()

	runtime.Gosched() // wait for StartGatheringStats goroutine to call connection.Do

	// Send some stats to the docker container
	stats := &client.Stats{}
	stats.MemoryStats.Usage = 12345
	if err = json.NewEncoder(writer).Encode(&stats); err != nil {
		t.Error(err)
	}

	// Now see if we got them
	test.Poll(t, 10*time.Millisecond, "12345", func() interface{} {
		return c.GetNodeMetadata().Metadata[docker.MemoryUsage]
	})
}
func TestPipes(t *testing.T) {
	oldNewPipe := controls.NewPipe
	defer func() { controls.NewPipe = oldNewPipe }()
	controls.NewPipe = func(_ controls.PipeClient, _ string) (string, xfer.Pipe, error) {
		return "pipeid", mockPipe{}, nil
	}

	mdc := newMockClient()
	setupStubs(mdc, func() {
		registry, _ := docker.NewRegistry(10*time.Second, nil)
		defer registry.Stop()

		test.Poll(t, 100*time.Millisecond, true, func() interface{} {
			_, ok := registry.GetContainer("ping")
			return ok
		})

		for _, tc := range []string{
			docker.AttachContainer,
			docker.ExecContainer,
		} {
			result := controls.HandleControlRequest(xfer.Request{
				Control: tc,
				NodeID:  report.MakeContainerNodeID("ping"),
			})
			want := xfer.Response{
				Pipe:   "pipeid",
				RawTTY: true,
			}
			if !reflect.DeepEqual(result, want) {
				t.Errorf("diff: %s", test.Diff(want, result))
			}
		}
	})
}
func TestReverseResolver(t *testing.T) {
	tests := map[string][]string{
		"1.2.3.4": {"test.domain.name"},
		"4.3.2.1": {"im.a.little.tea.pot"},
	}

	revRes := newReverseResolver()
	defer revRes.stop()

	// Use a mocked resolver function.
	revRes.Resolver = func(addr string) (names []string, err error) {
		if names, ok := tests[addr]; ok {
			return names, nil
		}
		return []string{}, errors.New("invalid IP")
	}

	// Up the rate limit so the test runs faster.
	revRes.Throttle = time.Tick(time.Millisecond)

	for ip, names := range tests {
		test.Poll(t, 100*time.Millisecond, names, func() interface{} {
			ns, _ := revRes.get(ip)
			return ns
		})
	}
}
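// All of the tests in this listing synchronize with background goroutines via
// test.Poll. The helper itself is not shown here; the sketch below is an
// assumed, minimal implementation of what such a helper could look like (the
// real test.Poll may differ): it repeatedly evaluates `have` until the result
// deep-equals `want` or the timeout expires.
func Poll(t *testing.T, timeout time.Duration, want interface{}, have func() interface{}) {
	deadline := time.Now().Add(timeout)
	for {
		got := have()
		if reflect.DeepEqual(got, want) {
			return // condition satisfied
		}
		if time.Now().After(deadline) {
			t.Fatalf("timed out waiting for %v; last value was %v", want, got)
			return
		}
		time.Sleep(time.Millisecond) // back off briefly between polls
	}
}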
func TestRegistryEvents(t *testing.T) {
	mdc := newMockClient()
	setupStubs(mdc, func() {
		registry, _ := docker.NewRegistry(10 * time.Second)
		defer registry.Stop()
		runtime.Gosched()

		check := func(want []docker.Container) {
			test.Poll(t, 100*time.Millisecond, want, func() interface{} {
				return allContainers(registry)
			})
		}

		{
			mdc.Lock()
			mdc.apiContainers = []client.APIContainers{apiContainer1, apiContainer2}
			mdc.containers["wiff"] = container2
			mdc.Unlock()
			mdc.send(&client.APIEvents{Status: docker.StartEvent, ID: "wiff"})
			runtime.Gosched()

			want := []docker.Container{&mockContainer{container1}, &mockContainer{container2}}
			check(want)
		}

		{
			mdc.Lock()
			mdc.apiContainers = []client.APIContainers{apiContainer1}
			delete(mdc.containers, "wiff")
			mdc.Unlock()
			mdc.send(&client.APIEvents{Status: docker.DieEvent, ID: "wiff"})
			runtime.Gosched()

			want := []docker.Container{&mockContainer{container1}}
			check(want)
		}

		{
			mdc.Lock()
			mdc.apiContainers = []client.APIContainers{}
			delete(mdc.containers, "ping")
			mdc.Unlock()
			mdc.send(&client.APIEvents{Status: docker.DieEvent, ID: "ping"})
			runtime.Gosched()

			want := []docker.Container{}
			check(want)
		}

		{
			mdc.send(&client.APIEvents{Status: docker.DieEvent, ID: "doesntexist"})
			runtime.Gosched()

			want := []docker.Container{}
			check(want)
		}
	})
}
func TestCollector(t *testing.T) {
	log.SetOutput(ioutil.Discard)

	// Swap out ticker
	publish := make(chan time.Time)
	oldTick := tick
	tick = func(time.Duration) <-chan time.Time { return publish }
	defer func() { tick = oldTick }()

	// Build a collector
	collector := NewCollector(time.Second, "id")
	defer collector.Stop()
	concreteCollector, ok := collector.(*realCollector)
	if !ok {
		t.Fatal("type assertion failure")
	}

	// Build a test publisher
	reports := make(chan interface{})
	ln := testPublisher(t, reports)
	defer ln.Close()

	// Connect the collector to the test publisher
	addr := ln.Addr().String()
	collector.Add(addr)
	collector.Add(addr) // test duplicate case
	runtime.Gosched()   // make sure it connects

	// Push a report through everything
	r := report.Report{
		Address: report.Topology{
			NodeMetadatas: report.NodeMetadatas{
				report.MakeAddressNodeID("a", "b"): report.NodeMetadata{},
			},
		},
	}
	reports <- r

	test.Poll(t, 100*time.Millisecond, 1, func() interface{} {
		return len(concreteCollector.peek().Address.NodeMetadatas)
	})

	go func() { publish <- time.Now() }()
	collected := <-collector.Reports()
	if !reflect.DeepEqual(r, collected) {
		t.Errorf(test.Diff(r, collected))
	}

	collector.Remove(addr)
	collector.Remove(addr) // test duplicate case
}
func TestRegistry(t *testing.T) {
	mdc := newMockClient()
	setupStubs(mdc, func() {
		registry, _ := docker.NewRegistry(10 * time.Second)
		defer registry.Stop()
		runtime.Gosched()

		{
			want := []docker.Container{&mockContainer{container1}}
			test.Poll(t, 100*time.Millisecond, want, func() interface{} {
				return allContainers(registry)
			})
		}

		{
			want := []*client.APIImages{&apiImage1}
			test.Poll(t, 100*time.Millisecond, want, func() interface{} {
				return allImages(registry)
			})
		}
	})
}
func TestProbe(t *testing.T) {
	want := report.MakeReport()
	node := report.MakeNodeWith(map[string]string{"b": "c"})
	want.Endpoint.AddNode("a", node)
	pub := mockPublisher{make(chan report.Report)}

	p := New(10*time.Millisecond, 100*time.Millisecond, pub)
	p.AddReporter(mockReporter{want})
	p.Start()
	defer p.Stop()

	test.Poll(t, 300*time.Millisecond, want, func() interface{} {
		return <-pub.have
	})
}
func TestLookupByPID(t *testing.T) {
	mdc := newMockClient()
	setupStubs(mdc, func() {
		registry, _ := docker.NewRegistry(10 * time.Second)
		defer registry.Stop()

		want := docker.Container(&mockContainer{container1})
		test.Poll(t, 100*time.Millisecond, want, func() interface{} {
			var have docker.Container
			registry.LockedPIDLookup(func(lookup func(int) docker.Container) {
				have = lookup(1)
			})
			return have
		})
	})
}
func TestBackgroundPublisher(t *testing.T) {
	mp := mockPublisher{}
	backgroundPublisher := xfer.NewBackgroundPublisher(&mp)
	defer backgroundPublisher.Stop()
	runtime.Gosched()

	for i := 1; i <= 10; i++ {
		err := backgroundPublisher.Publish(&bytes.Buffer{})
		if err != nil {
			t.Fatalf("%v", err)
		}

		test.Poll(t, 100*time.Millisecond, i, func() interface{} {
			return mp.count
		})
	}
}
func TestWeave(t *testing.T) {
	weaveClient := &mockWeaveClient{
		published: map[string]entry{},
	}
	dockerClient := mockDockerClient{}
	interfaces := func() ([]app.Interface, error) {
		return []app.Interface{
			{
				Name: "eth0",
				Addrs: []net.Addr{
					&net.IPAddr{
						IP: ip,
					},
				},
			},
			{
				Name: "docker0",
				Addrs: []net.Addr{
					&net.IPAddr{
						IP: net.ParseIP("4.3.2.1"),
					},
				},
			},
		}, nil
	}
	publisher := app.NewWeavePublisher(
		weaveClient, dockerClient, interfaces,
		hostname, containerName)
	defer publisher.Stop()

	want := map[string]entry{
		hostname: {containerID, ip},
	}
	test.Poll(t, 100*time.Millisecond, want, func() interface{} {
		weaveClient.Lock()
		defer weaveClient.Unlock()
		result := map[string]entry{}
		for k, v := range weaveClient.published {
			result[k] = v
		}
		return result
	})
}
func TestRegistry(t *testing.T) {
	mdc := mockClient // take a copy
	setupStubs(&mdc, func() {
		registry, _ := docker.NewRegistry(10 * time.Second)
		defer registry.Stop()
		runtime.Gosched()

		{
			want := []docker.Container{&mockContainer{container1}}
			test.Poll(t, 10*time.Millisecond, want, func() interface{} {
				return allContainers(registry)
			})
		}

		{
			have := allImages(registry)
			want := []*client.APIImages{&apiImage1}
			if !reflect.DeepEqual(want, have) {
				t.Errorf("%s", test.Diff(want, have))
			}
		}
	})
}
func TestConntracker(t *testing.T) {
	oldExecCmd, oldConntrackPresent := exec.Command, ConntrackModulePresent
	defer func() { exec.Command, ConntrackModulePresent = oldExecCmd, oldConntrackPresent }()

	ConntrackModulePresent = func() bool { return true }

	reader, writer := io.Pipe()
	exec.Command = func(name string, args ...string) exec.Cmd {
		return testExec.NewMockCmd(reader)
	}

	conntracker, err := NewConntracker(false)
	if err != nil {
		t.Fatal(err)
	}

	bw := bufio.NewWriter(writer)
	if _, err := bw.WriteString(XMLHeader); err != nil {
		t.Fatal(err)
	}
	if _, err := bw.WriteString(ConntrackOpenTag); err != nil {
		t.Fatal(err)
	}
	if err := bw.Flush(); err != nil {
		t.Fatal(err)
	}

	have := func() interface{} {
		result := []Flow{}
		conntracker.WalkFlows(func(f Flow) {
			f.Original = nil
			f.Reply = nil
			f.Independent = nil
			result = append(result, f)
		})
		return result
	}
	ts := 100 * time.Millisecond

	// First, assert we have no flows
	test.Poll(t, ts, []Flow{}, have)

	// Now add some flows
	xmlEncoder := xml.NewEncoder(bw)
	writeFlow := func(f Flow) {
		if err := xmlEncoder.Encode(f); err != nil {
			t.Fatal(err)
		}
		if _, err := bw.WriteString("\n"); err != nil {
			t.Fatal(err)
		}
		if err := bw.Flush(); err != nil {
			t.Fatal(err)
		}
	}

	flow1 := makeFlow(New)
	addMeta(&flow1, "original", "1.2.3.4", "2.3.4.5", 2, 3)
	addIndependant(&flow1, 1, "")
	writeFlow(flow1)
	test.Poll(t, ts, []Flow{flow1}, have)

	// Now check when we remove the flow, we still get it in the next Walk
	flow1.Type = Destroy
	writeFlow(flow1)
	test.Poll(t, ts, []Flow{flow1}, have)
	test.Poll(t, ts, []Flow{}, have)

	// This time we're not going to remove it, but put it in state TIME_WAIT
	flow1.Type = New
	writeFlow(flow1)
	test.Poll(t, ts, []Flow{flow1}, have)

	flow1.Metas[1].State = TimeWait
	writeFlow(flow1)
	test.Poll(t, ts, []Flow{flow1}, have)
	test.Poll(t, ts, []Flow{}, have)
}
func TestPipeClose(t *testing.T) {
	router := mux.NewRouter()
	pr := RegisterPipeRoutes(router)
	defer pr.Stop()

	server := httptest.NewServer(router)
	defer server.Close()

	ip, port, err := net.SplitHostPort(strings.TrimPrefix(server.URL, "http://"))
	if err != nil {
		t.Fatal(err)
	}

	probeConfig := appclient.ProbeConfig{
		ProbeID: "foo",
	}
	client, err := appclient.NewAppClient(probeConfig, ip+":"+port, ip+":"+port, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Stop()

	// this is the probe end of the pipe
	pipeID, pipe, err := controls.NewPipe(adapter{client}, "appid")
	if err != nil {
		t.Fatal(err)
	}

	// this is a client to the app
	pipeURL := fmt.Sprintf("ws://%s:%s/api/pipe/%s", ip, port, pipeID)
	conn, _, err := websocket.DefaultDialer.Dial(pipeURL, http.Header{})
	if err != nil {
		t.Fatal(err)
	}

	// Send something from pipe -> app -> conn
	local, _ := pipe.Ends()
	msg := []byte("hello world")
	if _, err := local.Write(msg); err != nil {
		t.Fatal(err)
	}
	if _, buf, err := conn.ReadMessage(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(buf, msg) {
		t.Fatalf("%v != %v", buf, msg)
	}

	// Send something from conn -> app -> probe
	msg = []byte("goodbye, cruel world")
	if err := conn.WriteMessage(websocket.BinaryMessage, msg); err != nil {
		t.Fatal(err)
	}
	buf := make([]byte, 1024)
	if n, err := local.Read(buf); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(msg, buf[:n]) {
		t.Fatalf("%v != %v", buf, msg)
	}

	// Now delete the pipe
	if err := pipe.Close(); err != nil {
		t.Fatal(err)
	}

	// the client backs off for 1 second before trying to reconnect the pipe,
	// so we need to wait for longer.
	test.Poll(t, 2*time.Second, true, func() interface{} {
		return pipe.Closed()
	})
}
func TestConntracker(t *testing.T) {
	oldExecCmd, oldConntrackPresent := exec.Command, ConntrackModulePresent
	defer func() { exec.Command, ConntrackModulePresent = oldExecCmd, oldConntrackPresent }()

	ConntrackModulePresent = func() bool { return true }

	first := true
	existingConnectionsReader, existingConnectionsWriter := io.Pipe()
	reader, writer := io.Pipe()
	exec.Command = func(name string, args ...string) exec.Cmd {
		if first {
			first = false
			return testexec.NewMockCmd(existingConnectionsReader)
		}
		return testexec.NewMockCmd(reader)
	}

	flowWalker := newConntrackFlowWalker(true)
	defer flowWalker.stop()

	// First write out some empty xml for the existing connections
	ecbw := bufio.NewWriter(existingConnectionsWriter)
	if _, err := ecbw.WriteString(xmlHeader); err != nil {
		t.Fatal(err)
	}
	if _, err := ecbw.WriteString(conntrackOpenTag); err != nil {
		t.Fatal(err)
	}
	if _, err := ecbw.WriteString(conntrackCloseTag); err != nil {
		t.Fatal(err)
	}
	if err := ecbw.Flush(); err != nil {
		t.Fatal(err)
	}

	// Then write out events
	bw := bufio.NewWriter(writer)
	if _, err := bw.WriteString(xmlHeader); err != nil {
		t.Fatal(err)
	}
	if _, err := bw.WriteString(conntrackOpenTag); err != nil {
		t.Fatal(err)
	}
	if err := bw.Flush(); err != nil {
		t.Fatal(err)
	}

	have := func() interface{} {
		result := []flow{}
		flowWalker.walkFlows(func(f flow) {
			f.Original = nil
			f.Reply = nil
			f.Independent = nil
			result = append(result, f)
		})
		return result
	}
	ts := 100 * time.Millisecond

	// First, assert we have no flows
	test.Poll(t, ts, []flow{}, have)

	// Now add some flows
	xmlEncoder := xml.NewEncoder(bw)
	writeFlow := func(f flow) {
		if err := xmlEncoder.Encode(f); err != nil {
			t.Fatal(err)
		}
		if _, err := bw.WriteString("\n"); err != nil {
			t.Fatal(err)
		}
		if err := bw.Flush(); err != nil {
			t.Fatal(err)
		}
	}

	flow1 := makeFlow(newType)
	addMeta(&flow1, "original", "1.2.3.4", "2.3.4.5", 2, 3)
	addIndependant(&flow1, 1, "")
	writeFlow(flow1)
	test.Poll(t, ts, []flow{flow1}, have)

	// Now check when we remove the flow, we still get it in the next Walk
	flow1.Type = destroyType
	writeFlow(flow1)
	test.Poll(t, ts, []flow{flow1}, have)
	test.Poll(t, ts, []flow{}, have)

	// This time we're not going to remove it, but put it in state TIME_WAIT
	flow1.Type = newType
	writeFlow(flow1)
	test.Poll(t, ts, []flow{flow1}, have)

	flow1.Metas[1].State = timeWait
	writeFlow(flow1)
	test.Poll(t, ts, []flow{flow1}, have)
	test.Poll(t, ts, []flow{}, have)
}
func TestContainer(t *testing.T) { log.SetOutput(ioutil.Discard) oldDialStub, oldNewClientConnStub := docker.DialStub, docker.NewClientConnStub defer func() { docker.DialStub, docker.NewClientConnStub = oldDialStub, oldNewClientConnStub }() docker.DialStub = func(network, address string) (net.Conn, error) { return nil, nil } reader, writer := io.Pipe() connection := &mockConnection{reader} docker.NewClientConnStub = func(c net.Conn, r *bufio.Reader) docker.ClientConn { return connection } c := docker.NewContainer(container1) err := c.StartGatheringStats() if err != nil { t.Errorf("%v", err) } defer c.StopGatheringStats() now := time.Unix(12345, 67890).UTC() mtime.NowForce(now) defer mtime.NowReset() // Send some stats to the docker container stats := &client.Stats{} stats.Read = now stats.MemoryStats.Usage = 12345 if err = json.NewEncoder(writer).Encode(&stats); err != nil { t.Error(err) } // Now see if we go them uptime := (now.Sub(startTime) / time.Second) * time.Second want := report.MakeNode().WithLatests(map[string]string{ "docker_container_command": " ", "docker_container_created": "01 Jan 01 00:00 UTC", "docker_container_id": "ping", "docker_container_name": "pong", "docker_image_id": "baz", "docker_label_foo1": "bar1", "docker_label_foo2": "bar2", "docker_container_state": "running", "docker_container_uptime": uptime.String(), }).WithSets(report.EmptySets. Add("docker_container_ports", report.MakeStringSet("1.2.3.4:80->80/tcp", "81/tcp")). Add("docker_container_ips", report.MakeStringSet("1.2.3.4")). Add("docker_container_ips_with_scopes", report.MakeStringSet("scope;1.2.3.4")), ).WithControls( docker.RestartContainer, docker.StopContainer, docker.PauseContainer, docker.AttachContainer, docker.ExecContainer, ).WithMetrics(report.Metrics{ "docker_cpu_total_usage": report.MakeMetric(), "docker_memory_usage": report.MakeMetric().Add(now, 12345), }).WithParents(report.EmptySets. Add(report.ContainerImage, report.MakeStringSet(report.MakeContainerImageNodeID("baz"))), ) test.Poll(t, 100*time.Millisecond, want, func() interface{} { node := c.GetNode("scope", []net.IP{}) node.Latest.ForEach(func(k, v string) { if v == "0" || v == "" { node.Latest = node.Latest.Delete(k) } }) return node }) if c.Image() != "baz" { t.Errorf("%s != baz", c.Image()) } if c.PID() != 2 { t.Errorf("%d != 2", c.PID()) } if have := docker.ExtractContainerIPs(c.GetNode("", []net.IP{})); !reflect.DeepEqual(have, []string{"1.2.3.4"}) { t.Errorf("%v != %v", have, []string{"1.2.3.4"}) } }
func TestContainer(t *testing.T) { log.SetOutput(ioutil.Discard) oldDialStub, oldNewClientConnStub := docker.DialStub, docker.NewClientConnStub defer func() { docker.DialStub, docker.NewClientConnStub = oldDialStub, oldNewClientConnStub }() docker.DialStub = func(network, address string) (net.Conn, error) { return nil, nil } reader, writer := io.Pipe() connection := &mockConnection{reader} docker.NewClientConnStub = func(c net.Conn, r *bufio.Reader) docker.ClientConn { return connection } c := docker.NewContainer(container1) err := c.StartGatheringStats() if err != nil { t.Errorf("%v", err) } defer c.StopGatheringStats() // Send some stats to the docker container stats := &client.Stats{} stats.MemoryStats.Usage = 12345 if err = json.NewEncoder(writer).Encode(&stats); err != nil { t.Error(err) } // Now see if we go them want := report.MakeNode().WithMetadata(map[string]string{ "docker_container_command": " ", "docker_container_created": "01 Jan 01 00:00 UTC", "docker_container_id": "ping", "docker_container_name": "pong", "docker_image_id": "baz", "docker_label_foo1": "bar1", "docker_label_foo2": "bar2", "memory_usage": "12345", "docker_container_state": "running", }).WithSets(report.Sets{ "docker_container_ports": report.MakeStringSet("1.2.3.4:80->80/tcp", "81/tcp"), "docker_container_ips": report.MakeStringSet("1.2.3.4"), "docker_container_ips_with_scopes": report.MakeStringSet("scope;1.2.3.4"), }).WithControls(docker.RestartContainer, docker.StopContainer, docker.PauseContainer) test.Poll(t, 100*time.Millisecond, want, func() interface{} { node := c.GetNode("scope", []net.IP{}) for k, v := range node.Metadata { if v == "0" || v == "" { delete(node.Metadata, k) } } return node }) if c.Image() != "baz" { t.Errorf("%s != baz", c.Image()) } if c.PID() != 1 { t.Errorf("%d != 1", c.PID()) } if have := docker.ExtractContainerIPs(c.GetNode("", []net.IP{})); !reflect.DeepEqual(have, []string{"1.2.3.4"}) { t.Errorf("%v != %v", have, []string{"1.2.3.4"}) } }
func TestWeaveTaggerOverlayTopology(t *testing.T) {
	w := overlay.NewWeave(mockHostID, weave.MockClient{})
	defer w.Stop()

	// Wait until the reporter reports some nodes
	test.Poll(t, 300*time.Millisecond, 1, func() interface{} {
		have, _ := w.Report()
		return len(have.Overlay.Nodes)
	})

	{
		// Overlay node should include peer name and nickname
		have, err := w.Report()
		if err != nil {
			t.Fatal(err)
		}

		nodeID := report.MakeOverlayNodeID(weave.MockWeavePeerName)
		node, ok := have.Overlay.Nodes[nodeID]
		if !ok {
			t.Errorf("Expected overlay node %q, but not found", nodeID)
		}
		if peerName, ok := node.Latest.Lookup(overlay.WeavePeerName); !ok || peerName != weave.MockWeavePeerName {
			t.Errorf("Expected weave peer name %q, got %q", weave.MockWeavePeerName, peerName)
		}
		if peerNick, ok := node.Latest.Lookup(overlay.WeavePeerNickName); !ok || peerNick != weave.MockWeavePeerNickName {
			t.Errorf("Expected weave peer nickname %q, got %q", weave.MockWeavePeerNickName, peerNick)
		}
		if localNetworks, ok := node.Sets.Lookup(host.LocalNetworks); !ok || !reflect.DeepEqual(localNetworks, report.MakeStringSet(weave.MockWeaveDefaultSubnet)) {
			t.Errorf("Expected weave node local_networks %q, got %q", report.MakeStringSet(weave.MockWeaveDefaultSubnet), localNetworks)
		}
	}

	{
		// Container nodes should be tagged with their overlay info
		nodeID := report.MakeContainerNodeID(weave.MockContainerID)
		have, err := w.Tag(report.Report{
			Container: report.MakeTopology().AddNode(report.MakeNodeWith(nodeID, map[string]string{
				docker.ContainerID: weave.MockContainerID,
			})),
		})
		if err != nil {
			t.Fatal(err)
		}

		node, ok := have.Container.Nodes[nodeID]
		if !ok {
			t.Errorf("Expected container node %q, but not found", nodeID)
		}

		// Should have Weave DNS Hostname
		if have, ok := node.Latest.Lookup(overlay.WeaveDNSHostname); !ok || have != weave.MockHostname {
			t.Errorf("Expected weave dns hostname %q, got %q", weave.MockHostname, have)
		}
		// Should have Weave MAC Address
		if have, ok := node.Latest.Lookup(overlay.WeaveMACAddress); !ok || have != weave.MockContainerMAC {
			t.Errorf("Expected weave mac address %q, got %q", weave.MockContainerMAC, have)
		}
		// Should have Weave container ip
		if have, ok := node.Sets.Lookup(docker.ContainerIPs); !ok || !have.Contains(weave.MockContainerIP) {
			t.Errorf("Expected container ips to include the weave IP %q, got %q", weave.MockContainerIP, have)
		}
		// Should have Weave container ip (with scope)
		if have, ok := node.Sets.Lookup(docker.ContainerIPsWithScopes); !ok || !have.Contains(mockContainerIPWithScope) {
			t.Errorf("Expected container ips to include the weave IP (with scope) %q, got %q", mockContainerIPWithScope, have)
		}
	}
}