func (c *Context) NewScope(scopeType, name string, subnet *net.IPNet, gateway net.IP, dns []net.IP, pools []string) (*Scope, error) { // sanity checks if name == "" { return nil, fmt.Errorf("scope name must not be empty") } if gateway == nil { gateway = net.IPv4(0, 0, 0, 0) } c.Lock() defer c.Unlock() if _, ok := c.scopes[name]; ok { return nil, DuplicateResourceError{resID: name} } switch scopeType { case BridgeScopeType: return c.newBridgeScope(uid.New(), name, subnet, gateway, dns, &IPAM{pools: pools}) case ExternalScopeType: return c.newExternalScope(uid.New(), name, subnet, gateway, dns, &IPAM{pools: pools}) default: return nil, fmt.Errorf("scope type not supported") } }
func (c *Context) newScope(scopeType, name string, subnet *net.IPNet, gateway net.IP, dns []net.IP, pools []string) (*Scope, error) { // sanity checks if name == "" { return nil, fmt.Errorf("scope name must not be empty") } if gateway == nil { gateway = net.IPv4(0, 0, 0, 0) } if _, ok := c.scopes[name]; ok { return nil, DuplicateResourceError{resID: name} } var s *Scope var err error switch scopeType { case constants.BridgeScopeType: s, err = c.newBridgeScope(uid.New(), name, subnet, gateway, dns, pools) case constants.ExternalScopeType: s, err = c.newExternalScope(uid.New(), name, subnet, gateway, dns, pools) default: return nil, fmt.Errorf("scope type not supported") } if err != nil { return nil, err } return s, nil }
func TestIsContainerID(t *testing.T) { validID := uid.New().String() invalidID := "ABC-XZ_@" assert.True(t, isContainerID(validID)) assert.False(t, isContainerID(invalidID)) }
func TestContainerCache(t *testing.T) { NewContainerCache() containerID := uid.New().String() // create a new container container := newTestContainer(containerID) // put it in the cache Containers.Put(container) // still shouldn't have a container because there's no vm assert.Equal(t, len(Containers.cache), 0) // add a test vm addTestVM(container) // put in cache Containers.Put(container) // get all containers -- should have 1 assert.Equal(t, len(Containers.Containers(nil)), 1) // Get specific container cachedContainer := Containers.Container(containerID) // did we find it? assert.NotNil(t, cachedContainer) // do we have this one in the cache? assert.Equal(t, cachedContainer.ExecConfig.ID, containerID) // remove the container Containers.Remove(containerID) assert.Equal(t, len(Containers.cache), 0) // remove non-existent container Containers.Remove("blahblah") }
func TestRepo(t *testing.T) { repoSetup() ref, _ := reference.ParseNamed("busybox:1.25.1") imageID := uid.New() layerID := uid.New() // add busybox:1.25.1 err := RepositoryCache().AddReference(ref, imageID.String(), false, layerID.String(), false) assert.NoError(t, err) // Get will return the imageID for the named object n, err := RepositoryCache().Get(ref) assert.NoError(t, err) assert.Equal(t, imageID.String(), n) // get image id via layer id ig := RepositoryCache().GetImageID(layerID.String()) assert.Equal(t, imageID.String(), ig) // remove busybox from the cache r, err := RepositoryCache().Remove(ref.String(), false) assert.NoError(t, err) assert.Equal(t, ref.String(), r) // busybox is removed, so this should fail x, err := RepositoryCache().Remove(ref.String(), false) assert.Error(t, err) assert.Equal(t, "", x) // add reference by digest ng, _ := reference.ParseNamed("nginx@sha256:7281cf7c854b0dfc7c68a6a4de9a785a973a14f1481bc028e2022bcd6a8d9f64") err = RepositoryCache().AddReference(ng, imageID.String(), false, layerID.String(), false) assert.NoError(t, err) dd := RepositoryCache().Digests(imageID.String()) assert.Equal(t, 1, len(dd)) // remove the digest ngx, err := RepositoryCache().Remove(ng.String(), false) assert.NoError(t, err) assert.Equal(t, ng.String(), ngx) // nada nada := RepositoryCache().Digests(imageID.String()) assert.Equal(t, 0, len(nada)) }
func newEndpoint(container *Container, scope *Scope, ip *net.IP, subnet net.IPNet, gateway net.IP, pciSlot *int32) *Endpoint { e := &Endpoint{ id: uid.New(), container: container, scope: scope, gateway: gateway, subnet: subnet, ip: net.IPv4(0, 0, 0, 0), static: false, ports: make(map[Port]interface{}), } if ip != nil { e.ip = *ip } if !e.ip.IsUnspecified() { e.static = true } return e }
// CreateHandler creates a new container from the supplied create config.
// It generates a fresh container id, a tether RSA keypair, and an
// executor.ExecutorConfig with a single session (same id as the container),
// then delegates to exec.Create. On success it responds with the new
// container id and the handle string.
func (handler *ContainersHandlersImpl) CreateHandler(params containers.CreateParams) middleware.Responder {
	defer trace.End(trace.Begin(""))

	var err error

	session := handler.handlerCtx.Session
	ctx := context.Background()

	log.Debugf("Path: %#v", params.CreateConfig.Path)
	log.Debugf("Args: %#v", params.CreateConfig.Args)
	log.Debugf("Env: %#v", params.CreateConfig.Env)
	log.Debugf("WorkingDir: %#v", params.CreateConfig.WorkingDir)

	// one id is shared by the executor config and its only session
	id := uid.New().String()

	// Init key for tether
	// NOTE(review): 512-bit RSA is far below modern minimums — presumably a
	// deliberately small throwaway key for the tether channel; confirm.
	// NOTE(review): keygen failure is reported as CreateNotFound; verify that
	// is the intended response code for an internal error.
	privateKey, err := rsa.GenerateKey(rand.Reader, 512)
	if err != nil {
		return containers.NewCreateNotFound().WithPayload(&models.Error{Message: err.Error()})
	}
	privateKeyBlock := pem.Block{
		Type:    "RSA PRIVATE KEY",
		Headers: nil,
		Bytes:   x509.MarshalPKCS1PrivateKey(privateKey),
	}

	// assemble the executor metadata describing the container process;
	// Args prepends Path so Args[0] is the executable, exec-style
	m := &executor.ExecutorConfig{
		Common: executor.Common{
			ID:   id,
			Name: *params.CreateConfig.Name,
		},
		CreateTime: time.Now().UTC().Unix(),
		Version:    version.GetBuild(),
		Sessions: map[string]*executor.SessionConfig{
			id: &executor.SessionConfig{
				Common: executor.Common{
					ID:   id,
					Name: *params.CreateConfig.Name,
				},
				Tty:    *params.CreateConfig.Tty,
				Attach: *params.CreateConfig.Attach,
				Cmd: executor.Cmd{
					Env:  params.CreateConfig.Env,
					Dir:  *params.CreateConfig.WorkingDir,
					Path: *params.CreateConfig.Path,
					Args: append([]string{*params.CreateConfig.Path}, params.CreateConfig.Args...),
				},
				StopSignal: *params.CreateConfig.StopSignal,
			},
		},
		Key:      pem.EncodeToMemory(&privateKeyBlock),
		LayerID:  *params.CreateConfig.Image,
		RepoName: *params.CreateConfig.RepoName,
	}

	// copy caller-supplied annotations into the metadata, if any
	if params.CreateConfig.Annotations != nil && len(params.CreateConfig.Annotations) > 0 {
		m.Annotations = make(map[string]string)
		for k, v := range params.CreateConfig.Annotations {
			m.Annotations[k] = v
		}
	}

	log.Infof("CreateHandler Metadata: %#v", m)

	// Create the executor.ExecutorCreateConfig
	c := &exec.ContainerCreateConfig{
		Metadata: m,

		ParentImageID:  *params.CreateConfig.Image,
		ImageStoreName: params.CreateConfig.ImageStore.Name,
		Resources: exec.Resources{
			NumCPUs:  *params.CreateConfig.NumCpus,
			MemoryMB: *params.CreateConfig.MemoryMB,
		},
	}

	h, err := exec.Create(ctx, session, c)
	if err != nil {
		log.Errorf("ContainerCreate error: %s", err.Error())
		return containers.NewCreateNotFound().WithPayload(&models.Error{Message: err.Error()})
	}

	// send the container id back to the caller
	return containers.NewCreateOK().WithPayload(&models.ContainerCreatedInfo{ID: id, Handle: h.String()})
}
// TestScopeAddRemoveContainer table-tests Scope.AddContainer (nil container,
// fresh container, duplicate container, container with an explicit ip) and
// then Scope.RemoveContainer (unknown container, known container), verifying
// endpoint bookkeeping on the scope after each operation.
func TestScopeAddRemoveContainer(t *testing.T) {
	var err error
	ctx, err := NewContext(testConfig(), nil)
	if err != nil {
		t.Errorf("NewContext() => (nil, %s), want (ctx, nil)", err)
		return
	}

	s := ctx.defaultScope

	idFoo := uid.New()
	idBar := uid.New()

	var tests1 = []struct {
		c   *Container
		ip  *net.IP
		out *Endpoint
		err error
	}{
		// no container
		{nil, nil, nil, fmt.Errorf("")},
		// add a new container to scope
		{&Container{id: idFoo}, nil, &Endpoint{ip: net.IPv4(172, 16, 0, 2), scope: s}, nil},
		// container already part of scope
		{&Container{id: idFoo}, nil, nil, DuplicateResourceError{}},
		// container with ip
		{&Container{id: idBar}, makeIP(172, 16, 0, 3), &Endpoint{ip: net.IPv4(172, 16, 0, 3), scope: s, static: true}, nil},
	}

	for _, te := range tests1 {
		// NOTE(review): newEndpoint is called with 4 arguments here while a
		// 6-parameter newEndpoint appears elsewhere in this file — looks like
		// signature drift between revisions; confirm which version applies.
		e := newEndpoint(te.c, s, te.ip, nil)
		err = s.AddContainer(te.c, e)
		if te.err != nil {
			// expected an error from AddContainer
			if err == nil {
				t.Errorf("s.AddContainer() => (_, nil), want (_, err)")
				continue
			}

			// error type must match the expected one exactly
			if reflect.TypeOf(err) != reflect.TypeOf(te.err) {
				t.Errorf("s.AddContainer() => (_, %v), want (_, %v)", reflect.TypeOf(err), reflect.TypeOf(te.err))
				continue
			}

			if te.c == nil {
				continue
			}

			// for any other error other than DuplicateResourceError
			// verify that the container was not added
			if _, ok := err.(DuplicateResourceError); !ok {
				c := s.Container(te.c.ID())
				if c != nil {
					t.Errorf("s.Container(%s) => (%v, %v), want (nil, err)", te.c.ID(), c, err)
				}
			}

			continue
		}

		// success path: endpoint fields must match the expected endpoint
		if !e.IP().Equal(te.out.IP()) {
			t.Errorf("s.AddContainer() => e.IP() == %v, want e.IP() == %v", e.IP(), te.out.IP())
			continue
		}

		if !e.Gateway().Equal(te.out.Gateway()) {
			t.Errorf("s.AddContainer() => e.Gateway() == %v, want e.Gateway() == %v", e.Gateway(), te.out.Gateway())
			continue
		}

		if e.Subnet().String() != s.Subnet().String() {
			t.Errorf("s.AddContainer() => e.Subnet() == %s, want e.Subnet() == %s", e.Subnet(), s.Subnet())
			continue
		}

		if e.static != te.out.static {
			t.Errorf("s.AddContainer() => e.static == %#v, want e.static == %#v", e.static, te.out.static)
		}

		if e.container.ID() != te.c.ID() {
			t.Errorf("s.AddContainer() => e.container == %s, want e.container == %s", e.container.ID(), te.c.ID())
			continue
		}

		// the new endpoint must be present in the scope's endpoint list
		found := false
		for _, e1 := range s.Endpoints() {
			if e1 == e {
				found = true
				break
			}
		}

		if !found {
			t.Errorf("s.endpoints does not contain %v", e)
		}

		// the container must be resolvable from the scope and point back at e
		c := s.Container(te.c.id)
		if c == nil {
			t.Errorf("s.Container(%s) => nil, want %v", te.c.ID(), te.c)
			continue
		}

		if c.Endpoint(s) != e {
			t.Errorf("container %s does not contain %v", te.c.ID(), e)
		}
	}

	// add and bind an extra container so the scope has bound state
	options := &AddContainerOptions{
		Scope: ctx.defaultScope.Name(),
	}
	bound := exec.TestHandle("bound")
	ctx.AddContainer(bound, options)
	ctx.BindContainer(bound)

	// test RemoveContainer
	var tests2 = []struct {
		c   *Container
		err error
	}{
		// container not found
		{&Container{id: "c1"}, ResourceNotFoundError{}},
		// remove a container
		{s.Container(idFoo), nil},
	}

	for _, te := range tests2 {
		err = s.RemoveContainer(te.c)
		if te.err != nil {
			if err == nil {
				t.Errorf("s.RemoveContainer() => nil, want %v", te.err)
			}

			continue
		}

		// container was removed, verify
		if err != nil {
			t.Errorf("s.RemoveContainer() => %s, want nil", err)
			continue
		}

		c := s.Container(te.c.ID())
		if c != nil {
			t.Errorf("s.RemoveContainer() did not remove container %s", te.c.ID())
			continue
		}

		// the container's endpoint must be gone from the scope as well
		for _, e := range s.endpoints {
			if e.container.ID() == te.c.ID() {
				t.Errorf("s.RemoveContainer() did not remove endpoint for container %s", te.c.ID())
				break
			}
		}
	}
}
// TestContextRemoveContainer table-tests Context.RemoveContainer: nil handle,
// unknown scope, a bound container (all errors), and a container added to two
// bridge scopes (removed from each in turn). After each successful removal it
// verifies the scope no longer knows the container, that a NIC remove spec is
// present only when no other endpoint still uses the device, and that the
// endpoint metadata is gone from the handle.
func TestContextRemoveContainer(t *testing.T) {
	hFoo := exec.NewContainer(uid.New())
	ctx, err := NewContext(net.IPNet{IP: net.IPv4(172, 16, 0, 0), Mask: net.CIDRMask(12, 32)}, net.CIDRMask(16, 32))
	if err != nil {
		t.Fatalf("NewContext() => (nil, %s), want (ctx, nil)", err)
	}

	scope, err := ctx.NewScope(BridgeScopeType, "scope", nil, nil, nil, nil)
	if err != nil {
		t.Fatalf("ctx.NewScope() => (nil, %s), want (scope, nil)", err)
	}

	// hFoo is added and bound — removal from a bound container must fail
	options := &AddContainerOptions{
		Scope: scope.Name(),
	}
	ctx.AddContainer(hFoo, options)
	ctx.BindContainer(hFoo)

	// container that is added to multiple bridge scopes
	hBar := exec.NewContainer(uid.New())
	options.Scope = "default"
	ctx.AddContainer(hBar, options)
	options.Scope = scope.Name()
	ctx.AddContainer(hBar, options)

	var tests = []struct {
		h     *exec.Handle
		scope string
		err   error
	}{
		{nil, "", fmt.Errorf("")},                                 // nil handle
		{hBar, "bar", fmt.Errorf("")},                             // scope not found
		{hFoo, scope.Name(), fmt.Errorf("")},                      // bound container
		{exec.NewContainer(uid.New()), "default", fmt.Errorf("")}, // container not part of scope
		{hBar, "default", nil},
		{hBar, scope.Name(), nil},
	}

	for i, te := range tests {
		// remember the network endpoint before removal, for post-checks
		var ne *executor.NetworkEndpoint
		if te.h != nil && te.h.ExecConfig.Networks != nil {
			ne = te.h.ExecConfig.Networks[te.scope]
		}

		err = ctx.RemoveContainer(te.h, te.scope)
		if te.err != nil {
			// expect error
			if err == nil {
				t.Fatalf("%d: ctx.RemoveContainer(%#v, %s) => nil want err", i, te.h, te.scope)
			}

			continue
		}

		s, err := ctx.resolveScope(te.scope)
		if err != nil {
			t.Fatalf(err.Error())
		}

		// the scope must no longer contain the container
		if s.Container(uid.Parse(te.h.Container.ExecConfig.ID)) != nil {
			t.Fatalf("container %s is part of scope %s", te.h, s.Name())
		}

		// should have a remove spec for NIC, if container was only part of one bridge scope
		dcs, err := te.h.Spec.FindNICs(context.TODO(), s.Network())
		if err != nil {
			t.Fatalf(err.Error())
		}

		// scan the device changes for a NIC remove operation
		found := false
		var d types.BaseVirtualDevice
		for _, dc := range dcs {
			if dc.GetVirtualDeviceConfigSpec().Operation != types.VirtualDeviceConfigSpecOperationRemove {
				continue
			}

			d = dc.GetVirtualDeviceConfigSpec().Device
			found = true
			break
		}

		// if a remove spec for the NIC was found, check if any other
		// network endpoints are still using it
		if found {
			for _, ne := range te.h.ExecConfig.Networks {
				if atoiOrZero(ne.ID) == spec.VirtualDeviceSlotNumber(d) {
					t.Fatalf("%d: NIC with pci slot %d is still in use by a network endpoint %#v", i, spec.VirtualDeviceSlotNumber(d), ne)
				}
			}
		} else if ne != nil {
			// check if remove spec for NIC should have been there
			for _, ne2 := range te.h.ExecConfig.Networks {
				if ne.ID == ne2.ID {
					t.Fatalf("%d: NIC with pci slot %s should have been removed", i, ne.ID)
				}
			}
		}

		// metadata should be gone
		if _, ok := te.h.ExecConfig.Networks[te.scope]; ok {
			t.Fatalf("%d: endpoint metadata for container still present in handle %#v", i, te.h.ExecConfig)
		}
	}
}
// TestContextBindUnbindContainer exercises Context.BindContainer and
// Context.UnbindContainer across several prepared containers: never-added,
// added to one or two scopes, added with a static IP, added with an IP that
// collides with the default gateway (bind must fail), and added with valid or
// invalid alias specs. After each bind it checks the returned endpoints and
// the handle's network metadata; after each unbind it checks the container is
// gone from the context/scopes while the (ip-less, or static) endpoint
// metadata remains on the handle.
func TestContextBindUnbindContainer(t *testing.T) {
	ctx, err := NewContext(net.IPNet{IP: net.IPv4(172, 16, 0, 0), Mask: net.CIDRMask(12, 32)}, net.CIDRMask(16, 32))
	if err != nil {
		t.Fatalf("NewContext() => (nil, %s), want (ctx, nil)", err)
	}

	scope, err := ctx.NewScope(BridgeScopeType, "scope", nil, nil, nil, nil)
	if err != nil {
		t.Fatalf("ctx.NewScope(%s, %s, nil, nil, nil) => (nil, %s)", BridgeScopeType, "scope", err)
	}

	// the cast of containers used by the bind/unbind tables below
	foo := exec.NewContainer(uid.New())
	added := exec.NewContainer(uid.New())
	staticIP := exec.NewContainer(uid.New())
	ipErr := exec.NewContainer(uid.New())
	alias := exec.NewContainer(uid.New())
	aliasErr := exec.NewContainer(uid.New())

	options := &AddContainerOptions{
		Scope: ctx.DefaultScope().Name(),
	}
	// add a container to the default scope
	if err = ctx.AddContainer(added, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", added, ctx.DefaultScope().Name(), err)
	}

	// add a container with a static IP
	ip := net.IPv4(172, 16, 0, 10)
	options = &AddContainerOptions{
		Scope: ctx.DefaultScope().Name(),
		IP:    &ip,
	}
	if err = ctx.AddContainer(staticIP, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", staticIP, ctx.DefaultScope().Name(), err)
	}

	// "added" joins the second scope as well
	options = &AddContainerOptions{
		Scope: scope.Name(),
	}
	if err = ctx.AddContainer(added, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", added, scope.Name(), err)
	}

	// add a container with an ip that is already taken,
	// causing Scope.BindContainer call to fail
	gw := ctx.DefaultScope().Gateway()
	options = &AddContainerOptions{
		Scope: scope.Name(),
	}
	ctx.AddContainer(ipErr, options)

	options = &AddContainerOptions{
		Scope: ctx.DefaultScope().Name(),
		IP:    &gw,
	}
	ctx.AddContainer(ipErr, options)

	// add a container with correct aliases
	options = &AddContainerOptions{
		Scope:   ctx.DefaultScope().Name(),
		Aliases: []string{"added:foo", ":bar"},
	}
	if err = ctx.AddContainer(alias, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", alias, ctx.DefaultScope().Name(), err)
	}

	// add a container with incorrect aliases
	options = &AddContainerOptions{
		Scope:   ctx.DefaultScope().Name(),
		Aliases: []string{"cloud:foo", "bar"},
	}
	if err = ctx.AddContainer(aliasErr, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", aliasErr, ctx.DefaultScope().Name(), err)
	}

	var tests = []struct {
		i      int
		h      *exec.Handle
		scopes []string
		ips    []net.IP
		static bool
		err    error
	}{
		// container not added to scope
		{0, foo, []string{}, []net.IP{}, false, fmt.Errorf("")},
		// container has bad ip address
		{1, ipErr, []string{}, nil, false, fmt.Errorf("")},
		// successful container bind
		{2, added, []string{ctx.DefaultScope().Name(), scope.Name()}, []net.IP{net.IPv4(172, 16, 0, 2), net.IPv4(172, 17, 0, 2)}, false, nil},
		{3, staticIP, []string{ctx.DefaultScope().Name()}, []net.IP{net.IPv4(172, 16, 0, 10)}, true, nil},
		{4, alias, []string{ctx.DefaultScope().Name()}, []net.IP{net.IPv4(172, 16, 0, 3)}, false, nil},
		{5, aliasErr, []string{ctx.DefaultScope().Name()}, []net.IP{}, false, fmt.Errorf("")},
	}

	for _, te := range tests {
		eps, err := ctx.BindContainer(te.h)
		if te.err != nil {
			// expect an error
			if err == nil || eps != nil {
				t.Fatalf("%d: ctx.BindContainer(%s) => (%#v, %#v), want (%#v, %#v)", te.i, te.h, eps, err, nil, te.err)
			}

			// a failed bind must not leave the container in the context
			con := ctx.Container(uid.Parse(te.h.Container.ExecConfig.ID))
			if con != nil {
				t.Fatalf("%d: ctx.BindContainer(%s) added container %#v", te.i, te.h, con)
			}
			continue
		}

		// check if the correct endpoints were added
		con := ctx.Container(uid.Parse(te.h.Container.ExecConfig.ID))
		if con == nil {
			t.Fatalf("%d: ctx.Container(%s) => nil, want %s", te.i, te.h.Container.ExecConfig.ID, te.h.Container.ExecConfig.ID)
		}

		if len(con.Scopes()) != len(te.scopes) {
			t.Fatalf("%d: len(con.Scopes()) %#v != len(te.scopes) %#v", te.i, con.Scopes(), te.scopes)
		}

		// check endpoints
		for i, s := range te.scopes {
			found := false
			for _, e := range eps {
				if e.Scope().Name() != s {
					continue
				}

				found = true

				// endpoint must inherit gateway/subnet from its scope and
				// carry the expected IP; the handle metadata must agree
				if !e.Gateway().Equal(e.Scope().Gateway()) {
					t.Fatalf("%d: ctx.BindContainer(%s) => endpoint gateway %s, want %s", te.i, te.h, e.Gateway(), e.Scope().Gateway())
				}

				if !e.IP().Equal(te.ips[i]) {
					t.Fatalf("%d: ctx.BindContainer(%s) => endpoint IP %s, want %s", te.i, te.h, e.IP(), te.ips[i])
				}

				if e.Subnet().String() != e.Scope().Subnet().String() {
					t.Fatalf("%d: ctx.BindContainer(%s) => endpoint subnet %s, want %s", te.i, te.h, e.Subnet(), e.Scope().Subnet())
				}

				ne := te.h.ExecConfig.Networks[s]
				if !ne.Static.IP.Equal(te.ips[i]) {
					t.Fatalf("%d: ctx.BindContainer(%s) => metadata endpoint IP %s, want %s", te.i, te.h, ne.Static.IP, te.ips[i])
				}

				if ne.Static.Mask.String() != e.Scope().Subnet().Mask.String() {
					t.Fatalf("%d: ctx.BindContainer(%s) => metadata endpoint IP mask %s, want %s", te.i, te.h, ne.Static.Mask.String(), e.Scope().Subnet().Mask.String())
				}

				if !ne.Network.Gateway.IP.Equal(e.Scope().Gateway()) {
					t.Fatalf("%d: ctx.BindContainer(%s) => metadata endpoint gateway %s, want %s", te.i, te.h, ne.Network.Gateway.IP, e.Scope().Gateway())
				}

				if ne.Network.Gateway.Mask.String() != e.Scope().Subnet().Mask.String() {
					t.Fatalf("%d: ctx.BindContainer(%s) => metadata endpoint gateway mask %s, want %s", te.i, te.h, ne.Network.Gateway.Mask.String(), e.Scope().Subnet().Mask.String())
				}

				break
			}

			if !found {
				t.Fatalf("%d: ctx.BindContainer(%s) => endpoint for scope %s not added", te.i, te.h, s)
			}
		}
	}

	tests = []struct {
		i      int
		h      *exec.Handle
		scopes []string
		ips    []net.IP
		static bool
		err    error
	}{
		// container not found
		{0, foo, []string{}, nil, false, fmt.Errorf("")},
		// container has bad ip address
		{1, ipErr, []string{ctx.DefaultScope().Name(), scope.Name()}, nil, false, fmt.Errorf("")},
		// successful container unbind
		{2, added, []string{ctx.DefaultScope().Name(), scope.Name()}, nil, false, nil},
		{3, staticIP, []string{ctx.DefaultScope().Name()}, nil, true, nil},
		{4, alias, []string{ctx.DefaultScope().Name()}, nil, false, nil},
		{5, aliasErr, []string{ctx.DefaultScope().Name()}, nil, false, fmt.Errorf("")},
	}

	// test UnbindContainer
	for _, te := range tests {
		eps, err := ctx.UnbindContainer(te.h)
		if te.err != nil {
			if err == nil {
				t.Fatalf("%d: ctx.UnbindContainer(%s) => nil, want err", te.i, te.h)
			}

			continue
		}

		// container should not be there
		con := ctx.Container(uid.Parse(te.h.Container.ExecConfig.ID))
		if con != nil {
			t.Fatalf("%d: ctx.Container(%s) => %#v, want nil", te.i, te.h, con)
		}

		for _, s := range te.scopes {
			// unbind must return an endpoint for each expected scope
			found := false
			for _, e := range eps {
				if e.Scope().Name() == s {
					found = true
				}
			}

			if !found {
				t.Fatalf("%d: ctx.UnbindContainer(%s) did not return endpoint for scope %s. Endpoints: %+v", te.i, te.h, s, eps)
			}

			// container should not be part of scope
			scopes, err := ctx.Scopes(&s)
			if err != nil || len(scopes) != 1 {
				t.Fatalf("%d: ctx.Scopes(%s) => (%#v, %#v)", te.i, s, scopes, err)
			}

			if scopes[0].Container(uid.Parse(te.h.Container.ExecConfig.ID)) != nil {
				t.Fatalf("%d: container %s is still part of scope %s", te.i, te.h.Container.ExecConfig.ID, s)
			}

			// check if endpoint is still there, but without the ip
			ne, ok := te.h.ExecConfig.Networks[s]
			if !ok {
				t.Fatalf("%d: container endpoint not present in %v", te.i, te.h.ExecConfig)
			}

			if !te.static && ne.Static != nil {
				t.Fatalf("%d: endpoint IP should be nil in %v", te.i, ne)
			}

			if te.static && (ne.Static == nil || ne.Static.IP.Equal(net.IPv4zero)) {
				t.Fatalf("%d: endpoint IP should not be zero in %v", te.i, ne)
			}
		}
	}
}
// TestContextAddContainer table-tests Context.AddContainer: nil handle,
// unknown scope, a failing addEthernetCard hook, plain adds, a duplicate
// add, an add onto a spec that already carries an ethernet card, and an add
// to a second scope. The package-level addEthernetCard variable is swapped
// for a failing stub per-case and restored afterwards. Successful adds are
// checked for exactly one NIC device change with a valid pci slot and for
// matching endpoint metadata on the handle.
func TestContextAddContainer(t *testing.T) {
	ctx, err := NewContext(net.IPNet{IP: net.IPv4(172, 16, 0, 0), Mask: net.CIDRMask(12, 32)}, net.CIDRMask(16, 32))
	if err != nil {
		t.Fatalf("NewContext() => (nil, %s), want (ctx, nil)", err)
		return
	}

	h := exec.NewContainer("foo")

	// build a config spec that already contains an ethernet card with a
	// fixed pci slot, for the pre-populated-spec case
	var devices object.VirtualDeviceList
	backing, _ := ctx.DefaultScope().Network().EthernetCardBackingInfo(context.TODO())

	specWithEthCard := &spec.VirtualMachineConfigSpec{
		VirtualMachineConfigSpec: &types.VirtualMachineConfigSpec{},
	}

	var d types.BaseVirtualDevice
	if d, err = devices.CreateEthernetCard("vmxnet3", backing); err == nil {
		d.GetVirtualDevice().SlotInfo = &types.VirtualDevicePciBusSlotInfo{
			PciSlotNumber: 1111,
		}

		devices = append(devices, d)
		var cs []types.BaseVirtualDeviceConfigSpec
		if cs, err = devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd); err == nil {
			specWithEthCard.DeviceChange = cs
		}
	}

	if err != nil {
		t.Fatalf(err.Error())
	}

	// stub that makes addEthernetCard fail
	aecErr := func(_ *exec.Handle, _ *Scope) (types.BaseVirtualDevice, error) {
		return nil, fmt.Errorf("error")
	}

	otherScope, err := ctx.NewScope(BridgeScopeType, "other", nil, net.IPv4(0, 0, 0, 0), nil, nil)
	if err != nil {
		t.Fatalf("failed to add scope")
	}

	hBar := exec.NewContainer(uid.New())

	var tests = []struct {
		aec   func(h *exec.Handle, s *Scope) (types.BaseVirtualDevice, error)
		h     *exec.Handle
		s     *spec.VirtualMachineConfigSpec
		scope string
		ip    *net.IP
		err   error
	}{
		// nil handle
		{nil, nil, nil, "", nil, fmt.Errorf("")},
		// scope not found
		{nil, h, nil, "foo", nil, ResourceNotFoundError{}},
		// addEthernetCard returns error
		{aecErr, h, nil, "default", nil, fmt.Errorf("")},
		// add a container
		{nil, h, nil, "default", nil, nil},
		// container already added
		{nil, h, nil, "default", nil, nil},
		{nil, hBar, specWithEthCard, "default", nil, nil},
		{nil, hBar, nil, otherScope.Name(), nil, nil},
	}

	// swap the package-level hook per case; restore it when the test ends
	origAEC := addEthernetCard
	defer func() { addEthernetCard = origAEC }()

	for i, te := range tests {
		// setup
		addEthernetCard = origAEC
		// snapshot the handle's spec so device-change counts can be compared
		scopy := &spec.VirtualMachineConfigSpec{}
		if te.h != nil {
			te.h.SetSpec(te.s)
			if te.h.Spec != nil {
				*scopy = *te.h.Spec
			}
		}

		if te.aec != nil {
			addEthernetCard = te.aec
		}

		options := &AddContainerOptions{
			Scope: te.scope,
			IP:    te.ip,
		}
		err := ctx.AddContainer(te.h, options)
		if te.err != nil {
			// expect an error
			if err == nil {
				t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) => nil want err", i, te.h, te.scope, te.ip)
			}

			if reflect.TypeOf(err) != reflect.TypeOf(te.err) {
				t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) => (%v, %v) want (%v, %v)", i, te.h, te.scope, te.ip, err, te.err, err, te.err)
			}

			if _, ok := te.err.(DuplicateResourceError); ok {
				continue
			}

			// verify no device changes in the spec
			if te.s != nil {
				if len(scopy.DeviceChange) != len(h.Spec.DeviceChange) {
					t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) added device", i, te.h, te.scope, te.ip)
				}
			}

			continue
		}

		if err != nil {
			t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) => %s want nil", i, te.h, te.scope, te.ip, err)
		}

		// verify the container was not added to the scope
		s, _ := ctx.resolveScope(te.scope)
		if s != nil && te.h != nil {
			c := s.Container(uid.Parse(te.h.Container.ExecConfig.ID))
			if c != nil {
				t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) added container", i, te.h, te.scope, te.ip)
			}
		}

		// spec should have a nic attached to the scope's network
		// NOTE(review): the err from FindNICs is shadowed and not checked here
		var dev types.BaseVirtualDevice
		dcs, err := te.h.Spec.FindNICs(context.TODO(), s.Network())
		if len(dcs) != 1 {
			t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) more than one NIC added for scope %s", i, te.h, te.scope, te.ip, s.Network())
		}

		dev = dcs[0].GetVirtualDeviceConfigSpec().Device
		if spec.VirtualDeviceSlotNumber(dev) == spec.NilSlot {
			t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) NIC added has nil pci slot", i, te.h, te.scope, te.ip)
		}

		// spec metadata should be updated with endpoint info
		ne, ok := te.h.ExecConfig.Networks[s.Name()]
		if !ok {
			t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) no network endpoint info added", i, te.h, te.scope, te.ip)
		}

		if spec.VirtualDeviceSlotNumber(dev) != atoiOrZero(ne.ID) {
			t.Fatalf("case %d; ctx.AddContainer(%v, %s, %s) => ne.ID == %d, want %d", i, te.h, te.scope, te.ip, atoiOrZero(ne.ID), spec.VirtualDeviceSlotNumber(dev))
		}

		if ne.Network.Name != s.Name() {
			t.Fatalf("case %d; ctx.AddContainer(%v, %s, %s) => ne.NetworkName == %s, want %s", i, te.h, te.scope, te.ip, ne.Network.Name, s.Name())
		}

		if te.ip != nil && !te.ip.Equal(ne.Static.IP) {
			t.Fatalf("case %d; ctx.AddContainer(%v, %s, %s) => ne.Static.IP == %s, want %s", i, te.h, te.scope, te.ip, ne.Static.IP, te.ip)
		}

		if te.ip == nil && ne.Static != nil {
			t.Fatalf("case %d; ctx.AddContainer(%v, %s, %s) => ne.Static.IP == %s, want %s", i, te.h, te.scope, te.ip, ne.Static.IP, net.IPv4zero)
		}
	}
}
func newContainer(name string) *exec.Handle { h := exec.TestHandle(uid.New().String()) h.ExecConfig.Common.Name = name return h }
// TestLoadScopesFromKV verifies that NewContext restores scopes persisted in
// a kv store. It marshals sample scopes into mock kv data, first checks that
// an empty/erroring kv.List leaves only the config-defined networks, then
// checks that each stored scope round-trips (name, id, type, subnet, gateway,
// dns, pools) — except entries with no data or with a port group missing from
// the config, which must fail to resolve.
func TestLoadScopesFromKV(t *testing.T) {
	// sample kv store data
	var tests = []struct {
		pg string
		sn string
		s  *Scope
	}{
		{
			pg: "bridge",
			sn: "foo",
			s: &Scope{
				id:         uid.New(),
				name:       "foo",
				scopeType:  constants.BridgeScopeType,
				subnet:     &net.IPNet{IP: net.ParseIP("10.10.10.0"), Mask: net.CIDRMask(16, 32)},
				gateway:    net.ParseIP("10.10.10.1"),
				containers: map[uid.UID]*Container{},
				spaces:     []*AddressSpace{NewAddressSpaceFromNetwork(&net.IPNet{IP: net.ParseIP("10.10.10.0"), Mask: net.CIDRMask(16, 32)})},
			},
		},
		{
			pg: "bridge",
			sn: "bar",
			s: &Scope{
				id:         uid.New(),
				name:       "bar",
				scopeType:  constants.BridgeScopeType,
				subnet:     &net.IPNet{IP: net.ParseIP("10.11.0.0"), Mask: net.CIDRMask(16, 32)},
				gateway:    net.ParseIP("10.11.0.1"),
				containers: map[uid.UID]*Container{},
				dns:        []net.IP{net.ParseIP("8.8.8.8")},
				spaces:     []*AddressSpace{NewAddressSpaceFromNetwork(&net.IPNet{IP: net.ParseIP("10.11.0.0"), Mask: net.CIDRMask(16, 32)})},
			},
		},
		{
			// external scope backed by the "ext" port group
			pg: "ext",
			sn: "ext",
			s: &Scope{
				id:         uid.New(),
				name:       "ext",
				scopeType:  constants.ExternalScopeType,
				subnet:     &net.IPNet{IP: net.ParseIP("10.12.0.0"), Mask: net.CIDRMask(16, 32)},
				gateway:    net.ParseIP("10.12.0.1"),
				containers: map[uid.UID]*Container{},
				dns:        []net.IP{net.ParseIP("8.8.8.8")},
				spaces:     []*AddressSpace{NewAddressSpaceFromNetwork(&net.IPNet{IP: net.ParseIP("10.12.0.0"), Mask: net.CIDRMask(16, 32)})},
			},
		},
		{
			// entry with no scope data; stored as a nil value in the kv map
			sn: "bad",
		},
	}

	// load the kv store data
	kvdata := map[string][]byte{}
	for _, te := range tests {
		var d []byte
		if te.s != nil {
			var err error
			d, err = te.s.MarshalJSON()
			assert.NoError(t, err)
		}

		kvdata[scopeKey(te.sn)] = d
	}

	// cases where there is no data in the kv store,
	// or kv.List returns error
	for _, e := range []error{nil, kvstore.ErrKeyNotFound, assert.AnError} {
		kv := &kvstore.MockKeyValueStore{}
		kv.On("List", `context\.scopes\..+`).Return(nil, e)
		ctx, err := NewContext(testConfig(), kv)
		assert.NoError(t, err)
		assert.NotNil(t, ctx)

		// check to see if the only networks
		// are the ones in the config
		scs, err := ctx.Scopes(context.TODO(), nil)
		assert.NoError(t, err)
		assert.Len(t, scs, len(testConfig().ContainerNetworks))
	}

	// kv.List returns kvdata
	kv := &kvstore.MockKeyValueStore{}
	kv.On("List", `context\.scopes\..+`).Return(kvdata, nil)
	ctx, err := NewContext(testConfig(), kv)
	assert.NoError(t, err)
	assert.NotNil(t, ctx)

	for _, te := range tests {
		scs, err := ctx.Scopes(context.TODO(), &te.sn)
		// entries without data or whose port group is absent from the
		// config must not resolve to a scope
		if te.s == nil || ctx.config.PortGroups[te.pg] == nil {
			assert.Error(t, err)
			assert.Len(t, scs, 0)
			continue
		}

		// exactly one scope, field-for-field equal to the persisted one
		assert.NoError(t, err)
		assert.Len(t, scs, 1)
		assert.Equal(t, scs[0].Name(), te.s.Name())
		assert.Equal(t, scs[0].ID(), te.s.ID())
		assert.Equal(t, scs[0].Type(), te.s.Type())
		assert.True(t, scs[0].Subnet().IP.Equal(te.s.Subnet().IP))
		assert.Equal(t, scs[0].Subnet().Mask, te.s.Subnet().Mask)
		assert.True(t, scs[0].Gateway().Equal(te.s.Gateway()))
		assert.EqualValues(t, scs[0].DNS(), te.s.DNS())
		assert.Len(t, te.s.Pools(), len(scs[0].Pools()))
		// pools may come back in any order; match each one individually
		for _, p := range te.s.Pools() {
			found := false
			for _, p2 := range scs[0].Pools() {
				if p2.Equal(p) {
					found = true
					break
				}
			}

			assert.True(t, found)
		}
	}
}