Example #1
func (c *Context) UpdateContainer(h *exec.Handle) error {
	c.Lock()
	defer c.Unlock()

	con := c.containers[uid.Parse(h.ExecConfig.ID)]
	if con == nil {
		return ResourceNotFoundError{}
	}

	for _, s := range con.Scopes() {
		if !s.isDynamic() {
			continue
		}

		ne := h.ExecConfig.Networks[s.Name()]
		if ne == nil {
			return fmt.Errorf("container config does not have info for network scope %s", s.Name())
		}

		e := con.Endpoint(s)
		e.ip = ne.Assigned.IP
		gw, snet, err := net.ParseCIDR(ne.Network.Gateway.String())
		if err != nil {
			return err
		}

		e.gateway = gw
		e.subnet = *snet

		s.gateway = gw
		s.subnet = *snet
	}

	return nil
}
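The loop above recovers both the gateway address and the subnet from a single CIDR string. A minimal sketch, assuming only the standard library, of the net.ParseCIDR behavior it relies on (the address value is illustrative):

package main

import (
	"fmt"
	"net"
)

func main() {
	// ne.Network.Gateway.String() above yields a CIDR form such as "172.16.0.1/16"
	gw, snet, err := net.ParseCIDR("172.16.0.1/16")
	if err != nil {
		panic(err)
	}
	fmt.Println(gw)   // 172.16.0.1    -> used as the endpoint/scope gateway
	fmt.Println(snet) // 172.16.0.0/16 -> used as the endpoint/scope subnet
}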
Example #2
func (handler *ContainersHandlersImpl) RemoveContainerHandler(params containers.ContainerRemoveParams) middleware.Responder {
	defer trace.End(trace.Begin(params.ID))

	// get the indicated container for removal
	cID := uid.Parse(params.ID)
	h := exec.GetContainer(context.Background(), cID)
	if h == nil || h.ExecConfig == nil {
		return containers.NewContainerRemoveNotFound()
	}

	container := exec.Containers.Container(h.ExecConfig.ID)
	if container == nil {
		return containers.NewGetStateNotFound()
	}

	// NOTE: this should allow batching of operations, as with Create, Start, Stop, et al
	err := container.Remove(context.Background(), handler.handlerCtx.Session)
	if err != nil {
		switch err := err.(type) {
		case exec.NotFoundError:
			return containers.NewContainerRemoveNotFound()
		case exec.RemovePowerError:
			return containers.NewContainerRemoveConflict().WithPayload(&models.Error{Message: err.Error()})
		default:
			return containers.NewContainerRemoveInternalServerError()
		}
	}

	return containers.NewContainerRemoveOK()
}
Example #3
func (handler *ContainersHandlersImpl) ContainerWaitHandler(params containers.ContainerWaitParams) middleware.Responder {
	defer trace.End(trace.Begin(fmt.Sprintf("%s:%d", params.ID, params.Timeout)))

	// default context timeout in seconds
	defaultTimeout := int64(containerWaitTimeout.Seconds())

	// if we have a positive timeout specified then use it
	if params.Timeout > 0 {
		defaultTimeout = params.Timeout
	}

	timeout := time.Duration(defaultTimeout) * time.Second

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	c := exec.Containers.Container(uid.Parse(params.ID).String())
	if c == nil {
		return containers.NewContainerWaitNotFound().WithPayload(&models.Error{
			Message: fmt.Sprintf("container %s not found", params.ID),
		})
	}

	select {
	case <-c.WaitForState(exec.StateStopped):
		c.Refresh(context.Background())
		containerInfo := convertContainerToContainerInfo(c.Info())

		return containers.NewContainerWaitOK().WithPayload(containerInfo)
	case <-ctx.Done():
		return containers.NewContainerWaitInternalServerError().WithPayload(&models.Error{
			Message: fmt.Sprintf("ContainerWaitHandler(%s) Error: %s", params.ID, ctx.Err()),
		})
	}
}
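ContainerWaitHandler above races a state-change channel against a context deadline. A self-contained sketch of that select pattern, assuming only the standard library (the channel and durations are stand-ins for c.WaitForState and the request timeout):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// stand-in for c.WaitForState(exec.StateStopped)
	stopped := make(chan struct{})
	go func() {
		time.Sleep(time.Second)
		close(stopped)
	}()

	select {
	case <-stopped:
		fmt.Println("container stopped")
	case <-ctx.Done():
		fmt.Println("wait timed out:", ctx.Err())
	}
}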
Example #4
func TestContainerCache(t *testing.T) {
	NewContainerCache()
	containerID := "1234"
	id := uid.Parse(containerID)

	// create a new container
	NewContainer(id)
	// shouldn't have a container as it's not committed
	assert.Equal(t, len(containers.cache), 0)

	// create a new container
	container := &Container{ExecConfig: &executor.ExecutorConfig{}}
	container.ExecConfig.ID = containerID
	// put it in the cache
	containers.Put(container)
	// Get it
	cachedContainer := containers.Container(containerID)
	// did we find it?
	assert.NotNil(t, cachedContainer)
	// do we have this one in the cache?
	assert.Equal(t, cachedContainer.ExecConfig.ID, containerID)
	// remove the container
	containers.Remove(containerID)
	assert.Equal(t, len(containers.cache), 0)
	// remove non-existent container
	containers.Remove("blahblah")
}
Example #5
func (handler *ContainersHandlersImpl) GetHandler(params containers.GetParams) middleware.Responder {
	defer trace.End(trace.Begin(params.ID))

	h := exec.GetContainer(context.Background(), uid.Parse(params.ID))
	if h == nil {
		return containers.NewGetNotFound().WithPayload(&models.Error{Message: fmt.Sprintf("container %s not found", params.ID)})
	}

	return containers.NewGetOK().WithPayload(h.String())
}
Example #6
func (c *Context) container(h *exec.Handle) (*Container, error) {
	defer trace.End(trace.Begin(""))
	id := uid.Parse(h.ExecConfig.ID)
	if id == uid.NilUID {
		return nil, fmt.Errorf("invalid container id %s", h.ExecConfig.ID)
	}

	if con, ok := c.containers[id.String()]; ok {
		return con, nil
	}

	return nil, ResourceNotFoundError{error: fmt.Errorf("container %s not found", id.String())}
}
Example #7
func (handler *ScopesHandlersImpl) ScopesGetContainerEndpoints(params scopes.GetContainerEndpointsParams) middleware.Responder {
	defer trace.End(trace.Begin("ScopesGetContainerEndpoint"))

	cid := uid.Parse(params.HandleOrID)
	// lookup by handle
	h := exec.GetHandle(cid.String())
	if h != nil {
		cid = uid.Parse(h.ExecConfig.ID)
	}

	c := handler.netCtx.Container(cid)
	if c == nil {
		return scopes.NewGetContainerEndpointsNotFound().WithPayload(errorPayload(fmt.Errorf("container not found")))
	}

	eps := c.Endpoints()
	ecs := make([]*models.EndpointConfig, len(eps))
	for i, e := range eps {
		ecs[i] = toEndpointConfig(e)
	}

	return scopes.NewGetContainerEndpointsOK().WithPayload(ecs)
}
Example #8
// handleEvent processes container events; on power-off it unbinds the container from the network context
func handleEvent(netctx *Context, ie events.Event) {
	switch ie.String() {
	case events.ContainerPoweredOff:
		handle := exec.GetContainer(context.Background(), uid.Parse(ie.Reference()))
		if handle == nil {
			log.Errorf("Container %s not found - unable to UnbindContainer", ie.Reference())
			return
		}
		defer handle.Close()
		if _, err := netctx.UnbindContainer(handle); err != nil {
			log.Warnf("Failed to unbind container %s: %s", ie.Reference(), err)
			return
		}

		if err := handle.Commit(context.Background(), nil, nil); err != nil {
			log.Warnf("Failed to commit handle after network unbind for container %s: %s", ie.Reference(), err)
		}

	}
	return
}
Example #9
// convert the infra container VMs to Container objects
func convertInfraContainers(ctx context.Context, sess *session.Session, vms []mo.VirtualMachine) []*Container {
	defer trace.End(trace.Begin(fmt.Sprintf("converting %d containers", len(vms))))
	var cons []*Container

	for _, v := range vms {
		vm := vm.NewVirtualMachine(ctx, sess, v.Reference())
		base := newBase(vm, v.Config, &v.Runtime)
		c := newContainer(base)

		id := uid.Parse(c.ExecConfig.ID)
		if id == uid.NilUID {
			log.Warnf("skipping converting container VM %s: could not parse id", v.Reference())
			continue
		}

		if v.Summary.Storage != nil {
			c.VMUnsharedDisk = v.Summary.Storage.Unshared
		}

		cons = append(cons, c)
	}

	return cons
}
Example #10
func (c *Context) UnbindContainer(h *exec.Handle) ([]*Endpoint, error) {
	c.Lock()
	defer c.Unlock()

	con, ok := c.containers[uid.Parse(h.ExecConfig.ID)]
	if !ok {
		return nil, ResourceNotFoundError{error: fmt.Errorf("container %s not found", h.ExecConfig.ID)}
	}

	// local list of container keys to remove from the map
	var containers []uid.UID

	// Removing long id, short id and common name from the map
	containers = append(containers, uid.Parse(h.ExecConfig.ID))

	tid := con.id.Truncate()
	cname := h.ExecConfig.Common.Name

	var key string
	var endpoints []*Endpoint
	var err error
	for _, ne := range h.ExecConfig.Networks {
		var s *Scope
		s, ok := c.scopes[ne.Network.Name]
		if !ok {
			return nil, &ResourceNotFoundError{}
		}

		defer func() {
			if err == nil {
				return
			}

			var ip *net.IP
			if ne.Static != nil {
				ip = &ne.Static.IP
			}
			s.addContainer(con, ip)
		}()

		// save the endpoint info
		e := con.Endpoint(s).copy()

		if err = s.removeContainer(con); err != nil {
			return nil, err
		}

		if !e.static {
			ne.Static = nil
		}

		// scope name
		sname := e.Scope().Name()

		// delete scope:short id
		key = fmt.Sprintf("%s:%s", sname, tid)
		log.Debugf("Removing %s from the containers", key)
		containers = append(containers, uid.Parse(key))

		// delete scope:name
		key = fmt.Sprintf("%s:%s", sname, cname)
		log.Debugf("Removing %s from the containers", key)
		containers = append(containers, uid.Parse(key))

		// delete aliases
		for i := range ne.Network.Aliases {
			l := strings.Split(ne.Network.Aliases[i], ":")
			if len(l) != 2 {
				err := fmt.Errorf("Parsing %s failed", l)
				log.Errorf(err.Error())
				return nil, err
			}

			_, what := l[0], l[1]

			// delete scope:what
			key = fmt.Sprintf("%s:%s", sname, what)
			log.Debugf("Removing %s from the containers", key)
			containers = append(containers, uid.Parse(key))
		}

		endpoints = append(endpoints, e)
	}

	// delete from real map now that we are err free
	for i := range containers {
		delete(c.containers, containers[i])
	}

	return endpoints, nil
}
Example #11
func (c *Context) bindContainer(h *exec.Handle) ([]*Endpoint, error) {
	con, err := c.container(h)
	if con != nil {
		return con.Endpoints(), nil // already bound
	}

	if _, ok := err.(ResourceNotFoundError); !ok {
		return nil, err
	}

	con = &Container{
		id:   uid.Parse(h.ExecConfig.ID),
		name: h.ExecConfig.Name,
	}

	defaultMarked := false
	aliases := make(map[string]*Container)
	var endpoints []*Endpoint
	for _, ne := range h.ExecConfig.Networks {
		var s *Scope
		s, ok := c.scopes[ne.Network.Name]
		if !ok {
			return nil, &ResourceNotFoundError{}
		}

		defer func() {
			if err == nil {
				return
			}

			s.RemoveContainer(con)
		}()

		var eip *net.IP
		if ne.Static {
			eip = &ne.IP.IP
		} else if !ip.IsUnspecifiedIP(ne.Assigned.IP) {
			// for VCH restart, we need to reserve
			// the IP of the running container
			//
			// this may be a DHCP assigned IP, however, the
			// addContainer call below will ignore reserving
			// an IP if the scope is "dynamic"
			eip = &ne.Assigned.IP
		}

		e := newEndpoint(con, s, eip, nil)
		e.static = ne.Static
		if err = s.AddContainer(con, e); err != nil {
			return nil, err
		}

		ports, _, err := nat.ParsePortSpecs(ne.Ports)
		if err != nil {
			return nil, err
		}
		for p := range ports {
			var port Port
			if port, err = ParsePort(string(p)); err != nil {
				return nil, err
			}

			if err = e.addPort(port); err != nil {
				return nil, err
			}
		}

		if !ip.IsUnspecifiedIP(e.IP()) {
			ne.IP = &net.IPNet{
				IP:   e.IP(),
				Mask: e.Scope().Subnet().Mask,
			}
		}
		ne.Network.Gateway = net.IPNet{IP: e.Gateway(), Mask: e.Subnet().Mask}
		ne.Network.Nameservers = make([]net.IP, len(s.dns))
		copy(ne.Network.Nameservers, s.dns)

		// mark the external network as default
		if !defaultMarked && e.Scope().Type() == constants.ExternalScopeType {
			defaultMarked = true
			ne.Network.Default = true
		}

		// dns lookup aliases
		aliases[fmt.Sprintf("%s:%s", s.Name(), con.name)] = con
		aliases[fmt.Sprintf("%s:%s", s.Name(), con.id.Truncate())] = con

		// container specific aliases
		for _, a := range ne.Network.Aliases {
			log.Debugf("adding alias %s", a)
			l := strings.Split(a, ":")
			if len(l) != 2 {
				err = fmt.Errorf("Parsing network alias %s failed", a)
				return nil, err
			}

			who, what := l[0], l[1]
			if who == "" {
				who = con.name
			}
			if a, exists := e.addAlias(who, what); a != badAlias && !exists {
				whoc := con
				// if the alias is not for this container, then
				// find it in the container collection
				if who != con.name {
					whoc = c.containers[who]
				}

				// whoc may be nil here, which means that the aliased
				// container is not bound yet; this is OK, and will be
				// fixed up when "who" is bound
				if whoc != nil {
					aliases[a.scopedName()] = whoc
				}
			}
		}

		// fix up the aliases to this container
		// from other containers
		for _, e := range s.Endpoints() {
			if e.Container() == con {
				continue
			}

			for _, a := range e.getAliases(con.name) {
				aliases[a.scopedName()] = con
			}
		}

		endpoints = append(endpoints, e)
	}

	// verify all the aliases to be added do not conflict with
	// existing container keys
	for a := range aliases {
		if _, ok := c.containers[a]; ok {
			return nil, fmt.Errorf("duplicate alias %s for container %s", a, con.ID())
		}
	}

	// FIXME: if there was no external network to mark as default,
	// then just pick the first network to mark as default
	if !defaultMarked {
		defaultMarked = true
		for _, ne := range h.ExecConfig.Networks {
			ne.Network.Default = true
			break
		}
	}

	// long id
	c.containers[con.id.String()] = con
	// short id
	c.containers[con.id.Truncate().String()] = con
	// name
	c.containers[con.name] = con
	// aliases
	for k, v := range aliases {
		log.Debugf("adding alias %s -> %s", k, v.Name())
		c.containers[k] = v
	}

	return endpoints, nil
}
Example #12
func TestContextBindUnbindContainer(t *testing.T) {
	ctx, err := NewContext(testConfig(), nil)
	if err != nil {
		t.Fatalf("NewContext() => (nil, %s), want (ctx, nil)", err)
	}

	scope, err := ctx.NewScope(context.TODO(), constants.BridgeScopeType, "scope", nil, nil, nil, nil)
	if err != nil {
		t.Fatalf("ctx.NewScope(%s, %s, nil, nil, nil) => (nil, %s)", constants.BridgeScopeType, "scope", err)
	}

	foo := newContainer("foo")
	added := newContainer("added")
	staticIP := newContainer("staticIP")
	ipErr := newContainer("ipErr")

	options := &AddContainerOptions{
		Scope: ctx.DefaultScope().Name(),
	}
	// add a container to the default scope
	if err = ctx.AddContainer(added, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", added, ctx.DefaultScope().Name(), err)
	}

	// add a container with a static IP
	ip := net.IPv4(172, 16, 0, 10)
	options = &AddContainerOptions{
		Scope: ctx.DefaultScope().Name(),
		IP:    &ip,
	}
	if err = ctx.AddContainer(staticIP, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", staticIP, ctx.DefaultScope().Name(), err)
	}

	// add the "added" container to the "scope" scope
	options = &AddContainerOptions{
		Scope: scope.Name(),
	}
	if err = ctx.AddContainer(added, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", added, scope.Name(), err)
	}

	// add a container with an ip that is already taken,
	// causing Scope.BindContainer call to fail
	gw := ctx.DefaultScope().Gateway()
	options = &AddContainerOptions{
		Scope: scope.Name(),
	}
	ctx.AddContainer(ipErr, options)

	options = &AddContainerOptions{
		Scope: ctx.DefaultScope().Name(),
		IP:    &gw,
	}
	ctx.AddContainer(ipErr, options)

	var tests = []struct {
		h      *exec.Handle
		scopes []string
		ips    []net.IP
		static bool
		err    error
	}{
		// no scopes to bind to
		{foo, []string{}, []net.IP{}, false, nil},
		// container has bad ip address
		{ipErr, []string{}, nil, false, fmt.Errorf("")},
		// successful container bind
		{added, []string{ctx.DefaultScope().Name(), scope.Name()}, []net.IP{net.IPv4(172, 16, 0, 2), net.IPv4(172, 17, 0, 2)}, false, nil},
		{staticIP, []string{ctx.DefaultScope().Name()}, []net.IP{net.IPv4(172, 16, 0, 10)}, true, nil},
	}

	for i, te := range tests {
		eps, err := ctx.BindContainer(te.h)
		if te.err != nil {
			// expect an error
			if err == nil || eps != nil {
				t.Fatalf("%d: ctx.BindContainer(%s) => (%+v, %+v), want (%+v, %+v)", i, te.h, eps, err, nil, te.err)
			}

			con := ctx.Container(te.h.ExecConfig.ID)
			if con != nil {
				t.Fatalf("%d: ctx.BindContainer(%s) added container %#v", i, te.h, con)
			}

			continue
		}

		if len(te.h.ExecConfig.Networks) == 0 {
			continue
		}

		// check if the correct endpoints were added
		con := ctx.Container(te.h.ExecConfig.ID)
		if con == nil {
			t.Fatalf("%d: ctx.Container(%s) => nil, want %s", i, te.h.ExecConfig.ID, te.h.ExecConfig.ID)
		}

		if len(con.Scopes()) != len(te.scopes) {
			t.Fatalf("%d: len(con.Scopes()) %#v != len(te.scopes) %#v", i, con.Scopes(), te.scopes)
		}

		// check endpoints
		for i, s := range te.scopes {
			found := false
			for _, e := range eps {
				if e.Scope().Name() != s {
					continue
				}

				found = true
				if !e.Gateway().Equal(e.Scope().Gateway()) {
					t.Fatalf("%d: ctx.BindContainer(%s) => endpoint gateway %s, want %s", i, te.h, e.Gateway(), e.Scope().Gateway())
				}
				if !e.IP().Equal(te.ips[i]) {
					t.Fatalf("%d: ctx.BindContainer(%s) => endpoint IP %s, want %s", i, te.h, e.IP(), te.ips[i])
				}
				if e.Subnet().String() != e.Scope().Subnet().String() {
					t.Fatalf("%d: ctx.BindContainer(%s) => endpoint subnet %s, want %s", i, te.h, e.Subnet(), e.Scope().Subnet())
				}

				ne := te.h.ExecConfig.Networks[s]
				if !ne.IP.IP.Equal(te.ips[i]) {
					t.Fatalf("%d: ctx.BindContainer(%s) => metadata endpoint IP %s, want %s", i, te.h, ne.IP.IP, te.ips[i])
				}
				if ne.IP.Mask.String() != e.Scope().Subnet().Mask.String() {
					t.Fatalf("%d: ctx.BindContainer(%s) => metadata endpoint IP mask %s, want %s", i, te.h, ne.IP.Mask.String(), e.Scope().Subnet().Mask.String())
				}
				if !ne.Network.Gateway.IP.Equal(e.Scope().Gateway()) {
					t.Fatalf("%d: ctx.BindContainer(%s) => metadata endpoint gateway %s, want %s", i, te.h, ne.Network.Gateway.IP, e.Scope().Gateway())
				}
				if ne.Network.Gateway.Mask.String() != e.Scope().Subnet().Mask.String() {
					t.Fatalf("%d: ctx.BindContainer(%s) => metadata endpoint gateway mask %s, want %s", i, te.h, ne.Network.Gateway.Mask.String(), e.Scope().Subnet().Mask.String())
				}

				break
			}

			if !found {
				t.Fatalf("%d: ctx.BindContainer(%s) => endpoint for scope %s not added", i, te.h, s)
			}
		}
	}

	tests = []struct {
		h      *exec.Handle
		scopes []string
		ips    []net.IP
		static bool
		err    error
	}{
		// container not bound
		{foo, []string{}, nil, false, nil},
		// successful container unbind
		{added, []string{ctx.DefaultScope().Name(), scope.Name()}, nil, false, nil},
		{staticIP, []string{ctx.DefaultScope().Name()}, nil, true, nil},
	}

	// test UnbindContainer
	for i, te := range tests {
		eps, err := ctx.UnbindContainer(te.h)
		if te.err != nil {
			if err == nil {
				t.Fatalf("%d: ctx.UnbindContainer(%s) => nil, want err", i, te.h)
			}

			continue
		}

		// container should not be there
		con := ctx.Container(te.h.ExecConfig.ID)
		if con != nil {
			t.Fatalf("%d: ctx.Container(%s) => %#v, want nil", i, te.h, con)
		}

		for _, s := range te.scopes {
			found := false
			for _, e := range eps {
				if e.Scope().Name() == s {
					found = true
				}
			}

			if !found {
				t.Fatalf("%d: ctx.UnbindContainer(%s) did not return endpoint for scope %s. Endpoints: %+v", i, te.h, s, eps)
			}

			// container should not be part of scope
			scopes, err := ctx.findScopes(&s)
			if err != nil || len(scopes) != 1 {
				t.Fatalf("%d: ctx.Scopes(%s) => (%#v, %#v)", i, s, scopes, err)
			}
			if scopes[0].Container(uid.Parse(te.h.ExecConfig.ID)) != nil {
				t.Fatalf("%d: container %s is still part of scope %s", i, te.h.ExecConfig.ID, s)
			}

			// check if endpoint is still there, but without the ip
			ne, ok := te.h.ExecConfig.Networks[s]
			if !ok {
				t.Fatalf("%d: container endpoint not present in %v", i, te.h.ExecConfig)
			}

			if te.static != ne.Static {
				t.Fatalf("%d: ne.Static=%v, want %v", i, ne.Static, te.static)
			}
		}
	}
}
Example #13
func TestContextAddContainer(t *testing.T) {
	ctx, err := NewContext(net.IPNet{IP: net.IPv4(172, 16, 0, 0), Mask: net.CIDRMask(12, 32)}, net.CIDRMask(16, 32))
	if err != nil {
		t.Fatalf("NewContext() => (nil, %s), want (ctx, nil)", err)
		return
	}

	h := exec.NewContainer("foo")

	var devices object.VirtualDeviceList
	backing, _ := ctx.DefaultScope().Network().EthernetCardBackingInfo(context.TODO())

	specWithEthCard := &spec.VirtualMachineConfigSpec{
		VirtualMachineConfigSpec: &types.VirtualMachineConfigSpec{},
	}

	var d types.BaseVirtualDevice
	if d, err = devices.CreateEthernetCard("vmxnet3", backing); err == nil {
		d.GetVirtualDevice().SlotInfo = &types.VirtualDevicePciBusSlotInfo{
			PciSlotNumber: 1111,
		}
		devices = append(devices, d)
		var cs []types.BaseVirtualDeviceConfigSpec
		if cs, err = devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd); err == nil {
			specWithEthCard.DeviceChange = cs
		}
	}

	if err != nil {
		t.Fatalf(err.Error())
	}

	aecErr := func(_ *exec.Handle, _ *Scope) (types.BaseVirtualDevice, error) {
		return nil, fmt.Errorf("error")
	}

	otherScope, err := ctx.NewScope(BridgeScopeType, "other", nil, net.IPv4(0, 0, 0, 0), nil, nil)
	if err != nil {
		t.Fatalf("failed to add scope")
	}

	hBar := exec.NewContainer(uid.New())

	var tests = []struct {
		aec   func(h *exec.Handle, s *Scope) (types.BaseVirtualDevice, error)
		h     *exec.Handle
		s     *spec.VirtualMachineConfigSpec
		scope string
		ip    *net.IP
		err   error
	}{
		// nil handle
		{nil, nil, nil, "", nil, fmt.Errorf("")},
		// scope not found
		{nil, h, nil, "foo", nil, ResourceNotFoundError{}},
		// addEthernetCard returns error
		{aecErr, h, nil, "default", nil, fmt.Errorf("")},
		// add a container
		{nil, h, nil, "default", nil, nil},
		// container already added
		{nil, h, nil, "default", nil, nil},
		{nil, hBar, specWithEthCard, "default", nil, nil},
		{nil, hBar, nil, otherScope.Name(), nil, nil},
	}

	origAEC := addEthernetCard
	defer func() { addEthernetCard = origAEC }()

	for i, te := range tests {
		// setup
		addEthernetCard = origAEC
		scopy := &spec.VirtualMachineConfigSpec{}
		if te.h != nil {
			te.h.SetSpec(te.s)
			if te.h.Spec != nil {
				*scopy = *te.h.Spec
			}
		}

		if te.aec != nil {
			addEthernetCard = te.aec
		}

		options := &AddContainerOptions{
			Scope: te.scope,
			IP:    te.ip,
		}
		err := ctx.AddContainer(te.h, options)
		if te.err != nil {
			// expect an error
			if err == nil {
				t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) => nil want err", i, te.h, te.scope, te.ip)
			}

			if reflect.TypeOf(err) != reflect.TypeOf(te.err) {
				t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) => (%v, %v) want (%v, %v)", i, te.h, te.scope, te.ip, err, te.err, err, te.err)
			}

			if _, ok := te.err.(DuplicateResourceError); ok {
				continue
			}

			// verify no device changes in the spec
			if te.s != nil {
				if len(scopy.DeviceChange) != len(h.Spec.DeviceChange) {
					t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) added device", i, te.h, te.scope, te.ip)
				}
			}

			continue
		}

		if err != nil {
			t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) => %s want nil", i, te.h, te.scope, te.ip, err)
		}

		// verify the container was not added to the scope
		s, _ := ctx.resolveScope(te.scope)
		if s != nil && te.h != nil {
			c := s.Container(uid.Parse(te.h.Container.ExecConfig.ID))
			if c != nil {
				t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) added container", i, te.h, te.scope, te.ip)
			}
		}

		// spec should have a nic attached to the scope's network
		var dev types.BaseVirtualDevice
		dcs, err := te.h.Spec.FindNICs(context.TODO(), s.Network())
		if len(dcs) != 1 {
			t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) more than one NIC added for scope %s", i, te.h, te.scope, te.ip, s.Network())
		}
		dev = dcs[0].GetVirtualDeviceConfigSpec().Device
		if spec.VirtualDeviceSlotNumber(dev) == spec.NilSlot {
			t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) NIC added has nil pci slot", i, te.h, te.scope, te.ip)
		}

		// spec metadata should be updated with endpoint info
		ne, ok := te.h.ExecConfig.Networks[s.Name()]
		if !ok {
			t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) no network endpoint info added", i, te.h, te.scope, te.ip)
		}

		if spec.VirtualDeviceSlotNumber(dev) != atoiOrZero(ne.ID) {
			t.Fatalf("case %d; ctx.AddContainer(%v, %s, %s) => ne.ID == %d, want %d", i, te.h, te.scope, te.ip, atoiOrZero(ne.ID), spec.VirtualDeviceSlotNumber(dev))
		}

		if ne.Network.Name != s.Name() {
			t.Fatalf("case %d; ctx.AddContainer(%v, %s, %s) => ne.NetworkName == %s, want %s", i, te.h, te.scope, te.ip, ne.Network.Name, s.Name())
		}

		if te.ip != nil && !te.ip.Equal(ne.Static.IP) {
			t.Fatalf("case %d; ctx.AddContainer(%v, %s, %s) => ne.Static.IP == %s, want %s", i, te.h, te.scope, te.ip, ne.Static.IP, te.ip)
		}

		if te.ip == nil && ne.Static != nil {
			t.Fatalf("case %d; ctx.AddContainer(%v, %s, %s) => ne.Static.IP == %s, want %s", i, te.h, te.scope, te.ip, ne.Static.IP, net.IPv4zero)
		}
	}
}
Example #14
func TestContextBindUnbindContainer(t *testing.T) {
	ctx, err := NewContext(net.IPNet{IP: net.IPv4(172, 16, 0, 0), Mask: net.CIDRMask(12, 32)}, net.CIDRMask(16, 32))
	if err != nil {
		t.Fatalf("NewContext() => (nil, %s), want (ctx, nil)", err)
	}

	scope, err := ctx.NewScope(BridgeScopeType, "scope", nil, nil, nil, nil)
	if err != nil {
		t.Fatalf("ctx.NewScope(%s, %s, nil, nil, nil) => (nil, %s)", BridgeScopeType, "scope", err)
	}

	foo := exec.NewContainer(uid.New())
	added := exec.NewContainer(uid.New())
	staticIP := exec.NewContainer(uid.New())
	ipErr := exec.NewContainer(uid.New())
	alias := exec.NewContainer(uid.New())
	aliasErr := exec.NewContainer(uid.New())

	options := &AddContainerOptions{
		Scope: ctx.DefaultScope().Name(),
	}
	// add a container to the default scope
	if err = ctx.AddContainer(added, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", added, ctx.DefaultScope().Name(), err)
	}

	// add a container with a static IP
	ip := net.IPv4(172, 16, 0, 10)
	options = &AddContainerOptions{
		Scope: ctx.DefaultScope().Name(),
		IP:    &ip,
	}
	if err = ctx.AddContainer(staticIP, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", staticIP, ctx.DefaultScope().Name(), err)
	}

	options = &AddContainerOptions{
		Scope: scope.Name(),
	}
	if err = ctx.AddContainer(added, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", added, scope.Name(), err)
	}

	// add a container with an ip that is already taken,
	// causing Scope.BindContainer call to fail
	gw := ctx.DefaultScope().Gateway()
	options = &AddContainerOptions{
		Scope: scope.Name(),
	}
	ctx.AddContainer(ipErr, options)

	options = &AddContainerOptions{
		Scope: ctx.DefaultScope().Name(),
		IP:    &gw,
	}
	ctx.AddContainer(ipErr, options)

	// add a container with correct aliases
	options = &AddContainerOptions{
		Scope:   ctx.DefaultScope().Name(),
		Aliases: []string{"added:foo", ":bar"},
	}
	if err = ctx.AddContainer(alias, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", alias, ctx.DefaultScope().Name(), err)
	}

	// add a container with incorrect aliases
	options = &AddContainerOptions{
		Scope:   ctx.DefaultScope().Name(),
		Aliases: []string{"cloud:foo", "bar"},
	}
	if err = ctx.AddContainer(aliasErr, options); err != nil {
		t.Fatalf("ctx.AddContainer(%s, %s, nil) => %s", aliasErr, ctx.DefaultScope().Name(), err)
	}

	var tests = []struct {
		i      int
		h      *exec.Handle
		scopes []string
		ips    []net.IP
		static bool
		err    error
	}{
		// container not added to scope
		{0, foo, []string{}, []net.IP{}, false, fmt.Errorf("")},
		// container has bad ip address
		{1, ipErr, []string{}, nil, false, fmt.Errorf("")},
		// successful container bind
		{2, added, []string{ctx.DefaultScope().Name(), scope.Name()}, []net.IP{net.IPv4(172, 16, 0, 2), net.IPv4(172, 17, 0, 2)}, false, nil},
		{3, staticIP, []string{ctx.DefaultScope().Name()}, []net.IP{net.IPv4(172, 16, 0, 10)}, true, nil},
		{4, alias, []string{ctx.DefaultScope().Name()}, []net.IP{net.IPv4(172, 16, 0, 3)}, false, nil},
		{5, aliasErr, []string{ctx.DefaultScope().Name()}, []net.IP{}, false, fmt.Errorf("")},
	}

	for _, te := range tests {
		eps, err := ctx.BindContainer(te.h)
		if te.err != nil {
			// expect an error
			if err == nil || eps != nil {
				t.Fatalf("%d: ctx.BindContainer(%s) => (%#v, %#v), want (%#v, %#v)", te.i, te.h, eps, err, nil, te.err)
			}

			con := ctx.Container(uid.Parse(te.h.Container.ExecConfig.ID))
			if con != nil {
				t.Fatalf("%d: ctx.BindContainer(%s) added container %#v", te.i, te.h, con)
			}

			continue
		}

		// check if the correct endpoints were added
		con := ctx.Container(uid.Parse(te.h.Container.ExecConfig.ID))
		if con == nil {
			t.Fatalf("%d: ctx.Container(%s) => nil, want %s", te.i, te.h.Container.ExecConfig.ID, te.h.Container.ExecConfig.ID)
		}

		if len(con.Scopes()) != len(te.scopes) {
			t.Fatalf("%d: len(con.Scopes()) %#v != len(te.scopes) %#v", te.i, con.Scopes(), te.scopes)
		}

		// check endpoints
		for i, s := range te.scopes {
			found := false
			for _, e := range eps {
				if e.Scope().Name() != s {
					continue
				}

				found = true
				if !e.Gateway().Equal(e.Scope().Gateway()) {
					t.Fatalf("%d: ctx.BindContainer(%s) => endpoint gateway %s, want %s", te.i, te.h, e.Gateway(), e.Scope().Gateway())
				}
				if !e.IP().Equal(te.ips[i]) {
					t.Fatalf("%d: ctx.BindContainer(%s) => endpoint IP %s, want %s", te.i, te.h, e.IP(), te.ips[i])
				}
				if e.Subnet().String() != e.Scope().Subnet().String() {
					t.Fatalf("%d: ctx.BindContainer(%s) => endpoint subnet %s, want %s", te.i, te.h, e.Subnet(), e.Scope().Subnet())
				}

				ne := te.h.ExecConfig.Networks[s]
				if !ne.Static.IP.Equal(te.ips[i]) {
					t.Fatalf("%d: ctx.BindContainer(%s) => metadata endpoint IP %s, want %s", te.i, te.h, ne.Static.IP, te.ips[i])
				}
				if ne.Static.Mask.String() != e.Scope().Subnet().Mask.String() {
					t.Fatalf("%d: ctx.BindContainer(%s) => metadata endpoint IP mask %s, want %s", te.i, te.h, ne.Static.Mask.String(), e.Scope().Subnet().Mask.String())
				}
				if !ne.Network.Gateway.IP.Equal(e.Scope().Gateway()) {
					t.Fatalf("%d: ctx.BindContainer(%s) => metadata endpoint gateway %s, want %s", te.i, te.h, ne.Network.Gateway.IP, e.Scope().Gateway())
				}
				if ne.Network.Gateway.Mask.String() != e.Scope().Subnet().Mask.String() {
					t.Fatalf("%d: ctx.BindContainer(%s) => metadata endpoint gateway mask %s, want %s", te.i, te.h, ne.Network.Gateway.Mask.String(), e.Scope().Subnet().Mask.String())
				}

				break
			}

			if !found {
				t.Fatalf("%d: ctx.BindContainer(%s) => endpoint for scope %s not added", te.i, te.h, s)
			}
		}
	}

	tests = []struct {
		i      int
		h      *exec.Handle
		scopes []string
		ips    []net.IP
		static bool
		err    error
	}{
		// container not found
		{0, foo, []string{}, nil, false, fmt.Errorf("")},
		// container has bad ip address
		{1, ipErr, []string{ctx.DefaultScope().Name(), scope.Name()}, nil, false, fmt.Errorf("")},
		// successful container unbind
		{2, added, []string{ctx.DefaultScope().Name(), scope.Name()}, nil, false, nil},
		{3, staticIP, []string{ctx.DefaultScope().Name()}, nil, true, nil},
		{4, alias, []string{ctx.DefaultScope().Name()}, nil, false, nil},
		{5, aliasErr, []string{ctx.DefaultScope().Name()}, nil, false, fmt.Errorf("")},
	}

	// test UnbindContainer
	for _, te := range tests {
		eps, err := ctx.UnbindContainer(te.h)
		if te.err != nil {
			if err == nil {
				t.Fatalf("%d: ctx.UnbindContainer(%s) => nil, want err", te.i, te.h)
			}

			continue
		}

		// container should not be there
		con := ctx.Container(uid.Parse(te.h.Container.ExecConfig.ID))
		if con != nil {
			t.Fatalf("%d: ctx.Container(%s) => %#v, want nil", te.i, te.h, con)
		}

		for _, s := range te.scopes {
			found := false
			for _, e := range eps {
				if e.Scope().Name() == s {
					found = true
				}
			}

			if !found {
				t.Fatalf("%d: ctx.UnbindContainer(%s) did not return endpoint for scope %s. Endpoints: %+v", te.i, te.h, s, eps)
			}

			// container should not be part of scope
			scopes, err := ctx.Scopes(&s)
			if err != nil || len(scopes) != 1 {
				t.Fatalf("%d: ctx.Scopes(%s) => (%#v, %#v)", te.i, s, scopes, err)
			}
			if scopes[0].Container(uid.Parse(te.h.Container.ExecConfig.ID)) != nil {
				t.Fatalf("%d: container %s is still part of scope %s", te.i, te.h.Container.ExecConfig.ID, s)
			}

			// check if endpoint is still there, but without the ip
			ne, ok := te.h.ExecConfig.Networks[s]
			if !ok {
				t.Fatalf("%d: container endpoint not present in %v", te.i, te.h.ExecConfig)
			}

			if !te.static && ne.Static != nil {
				t.Fatalf("%d: endpoint IP should be nil in %v", te.i, ne)
			}

			if te.static && (ne.Static == nil || ne.Static.IP.Equal(net.IPv4zero)) {
				t.Fatalf("%d: endpoint IP should not be zero in %v", te.i, ne)
			}
		}
	}
}
Example #15
func isContainerID(id string) bool {
	return uid.Parse(id) != uid.NilUID
}
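Most of these examples validate and normalize IDs the same way isContainerID does. A minimal usage sketch, assuming the pkg/uid API exactly as it appears above (Parse, New, NilUID, Truncate, String); the import path is an assumption:

package main

import (
	"fmt"

	"github.com/vmware/vic/pkg/uid"
)

func main() {
	id := uid.New()

	// validate before using the ID as a cache key
	if uid.Parse(id.String()) == uid.NilUID {
		fmt.Println("not a valid container id")
		return
	}

	// long and short forms; both appear as container map keys in the examples above
	fmt.Println(id.String())
	fmt.Println(id.Truncate().String())
}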
Example #16
File: image.go Project: vmware/vic
// TODO fix the errors so the client doesn't print the generic POST or DELETE message
func (i *Image) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) {
	defer trace.End(trace.Begin(imageRef))

	var deleted []types.ImageDelete
	var userRefIsID bool
	var imageRemoved bool

	// Use the image cache to go from the reference to the ID we use in the image store
	img, err := cache.ImageCache().Get(imageRef)
	if err != nil {
		return nil, err
	}

	// Get the tags from the repo cache for this image
	// TODO: remove this -- we have it in the image above
	tags := cache.RepositoryCache().Tags(img.ImageID)

	// did the user pass an id or partial id
	userRefIsID = cache.ImageCache().IsImageID(imageRef)
	// do we have any reference conflicts
	if len(tags) > 1 && userRefIsID && !force {
		t := uid.Parse(img.ImageID).Truncate()
		return nil,
			fmt.Errorf("conflict: unable to delete %s (must be forced) - image is referenced in one or more repositories", t)
	}

	// if we have an ID or only 1 tag, let's delete the vmdk(s) via the PL
	if userRefIsID || len(tags) == 1 {
		log.Infof("Deleting image via PL %s (%s)", img.ImageID, img.ID)

		// needed for image store
		host, err := sys.UUID()
		if err != nil {
			return nil, err
		}

		params := storage.NewDeleteImageParamsWithContext(ctx).WithStoreName(host).WithID(img.ID)
		// TODO: This will fail if any containerVMs are referencing the vmdk - vanilla docker
		// allows the removal of an image (via force flag) even if a container is referencing it
		// should vic?
		_, err = PortLayerClient().Storage.DeleteImage(params)
		if err != nil {
			switch err := err.(type) {
			case *storage.DeleteImageLocked:
				return nil, fmt.Errorf("Failed to remove image %q: %s", imageRef, err.Payload.Message)
			default:
				return nil, err
			}
		}

		// we've deleted the image so remove from cache
		cache.ImageCache().RemoveImageByConfig(img)
		imagec.LayerCache().Remove(img.ID)
		imageRemoved = true

	} else {

		// only untag the ref supplied
		n, err := reference.ParseNamed(imageRef)
		if err != nil {
			return nil, fmt.Errorf("unable to parse reference(%s): %s", imageRef, err.Error())
		}
		tag := reference.WithDefaultTag(n)
		tags = []string{tag.String()}
	}
	// loop through and remove from repoCache
	for i := range tags {
		// remove from cache, but don't save -- we'll do that after all
		// updates
		refNamed, _ := cache.RepositoryCache().Remove(tags[i], false)
		dd := types.ImageDelete{Untagged: refNamed}
		deleted = append(deleted, dd)
	}

	// save repo now -- this will limit the number of PL
	// calls to one per rmi call
	err = cache.RepositoryCache().Save()
	if err != nil {
		return nil, fmt.Errorf("Untag error: %s", err.Error())
	}

	if imageRemoved {
		imageDeleted := types.ImageDelete{Deleted: img.ImageID}
		deleted = append(deleted, imageDeleted)
	}

	return deleted, err
}
Example #17
func TestContextRemoveContainer(t *testing.T) {

	hFoo := exec.NewContainer(uid.New())

	ctx, err := NewContext(net.IPNet{IP: net.IPv4(172, 16, 0, 0), Mask: net.CIDRMask(12, 32)}, net.CIDRMask(16, 32))
	if err != nil {
		t.Fatalf("NewContext() => (nil, %s), want (ctx, nil)", err)
	}

	scope, err := ctx.NewScope(BridgeScopeType, "scope", nil, nil, nil, nil)
	if err != nil {
		t.Fatalf("ctx.NewScope() => (nil, %s), want (scope, nil)", err)
	}

	options := &AddContainerOptions{
		Scope: scope.Name(),
	}
	ctx.AddContainer(hFoo, options)
	ctx.BindContainer(hFoo)

	// container that is added to multiple bridge scopes
	hBar := exec.NewContainer(uid.New())
	options.Scope = "default"
	ctx.AddContainer(hBar, options)
	options.Scope = scope.Name()
	ctx.AddContainer(hBar, options)

	var tests = []struct {
		h     *exec.Handle
		scope string
		err   error
	}{
		{nil, "", fmt.Errorf("")},                                 // nil handle
		{hBar, "bar", fmt.Errorf("")},                             // scope not found
		{hFoo, scope.Name(), fmt.Errorf("")},                      // bound container
		{exec.NewContainer(uid.New()), "default", fmt.Errorf("")}, // container not part of scope
		{hBar, "default", nil},
		{hBar, scope.Name(), nil},
	}

	for i, te := range tests {
		var ne *executor.NetworkEndpoint
		if te.h != nil && te.h.ExecConfig.Networks != nil {
			ne = te.h.ExecConfig.Networks[te.scope]
		}

		err = ctx.RemoveContainer(te.h, te.scope)
		if te.err != nil {
			// expect error
			if err == nil {
				t.Fatalf("%d: ctx.RemoveContainer(%#v, %s) => nil want err", i, te.h, te.scope)
			}

			continue
		}

		s, err := ctx.resolveScope(te.scope)
		if err != nil {
			t.Fatalf(err.Error())
		}

		if s.Container(uid.Parse(te.h.Container.ExecConfig.ID)) != nil {
			t.Fatalf("container %s is part of scope %s", te.h, s.Name())
		}

		// should have a remove spec for NIC, if container was only part of one bridge scope
		dcs, err := te.h.Spec.FindNICs(context.TODO(), s.Network())
		if err != nil {
			t.Fatalf(err.Error())
		}

		found := false
		var d types.BaseVirtualDevice
		for _, dc := range dcs {
			if dc.GetVirtualDeviceConfigSpec().Operation != types.VirtualDeviceConfigSpecOperationRemove {
				continue
			}

			d = dc.GetVirtualDeviceConfigSpec().Device
			found = true
			break
		}

		// if a remove spec for the NIC was found, check if any other
		// network endpoints are still using it
		if found {
			for _, ne := range te.h.ExecConfig.Networks {
				if atoiOrZero(ne.ID) == spec.VirtualDeviceSlotNumber(d) {
					t.Fatalf("%d: NIC with pci slot %d is still in use by a network endpoint %#v", i, spec.VirtualDeviceSlotNumber(d), ne)
				}
			}
		} else if ne != nil {
			// check if remove spec for NIC should have been there
			for _, ne2 := range te.h.ExecConfig.Networks {
				if ne.ID == ne2.ID {
					t.Fatalf("%d: NIC with pci slot %s should have been removed", i, ne.ID)
				}
			}
		}

		// metadata should be gone
		if _, ok := te.h.ExecConfig.Networks[te.scope]; ok {
			t.Fatalf("%d: endpoint metadata for container still present in handle %#v", i, te.h.ExecConfig)
		}
	}
}
Example #18
func (c *Context) RemoveContainer(h *exec.Handle, scope string) error {
	c.Lock()
	defer c.Unlock()

	if h == nil {
		return fmt.Errorf("handle is required")
	}

	if _, ok := c.containers[uid.Parse(h.ExecConfig.ID)]; ok {
		return fmt.Errorf("container is bound")
	}

	var err error
	s, err := c.resolveScope(scope)
	if err != nil {
		return err
	}

	var ne *executor.NetworkEndpoint
	ne, ok := h.ExecConfig.Networks[s.Name()]
	if !ok {
		return fmt.Errorf("container %s not part of network %s", h.ExecConfig.ID, s.Name())
	}

	// figure out if any other networks are using the NIC
	removeNIC := true
	for _, ne2 := range h.ExecConfig.Networks {
		if ne2 == ne {
			continue
		}
		if ne2.ID == ne.ID {
			removeNIC = false
			break
		}
	}

	if removeNIC {
		// ensure spec is not nil
		h.SetSpec(nil)

		var devices object.VirtualDeviceList
		backing, err := s.network.EthernetCardBackingInfo(context.Background())
		if err != nil {
			return err
		}

		d, err := devices.CreateEthernetCard("vmxnet3", backing)
		if err != nil {
			return err
		}

		devices = append(devices, d)
		spec, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationRemove)
		if err != nil {
			return err
		}
		h.Spec.DeviceChange = append(h.Spec.DeviceChange, spec...)
	}

	delete(h.ExecConfig.Networks, s.Name())

	return nil
}
Example #19
func TestAliases(t *testing.T) {
	ctx, err := NewContext(testConfig(), nil)
	assert.NoError(t, err)
	assert.NotNil(t, ctx)

	scope := ctx.DefaultScope()

	var tests = []struct {
		con     string
		aliases []string
		err     error
	}{
		// bad alias
		{"bad1", []string{"bad1"}, assert.AnError},
		{"bad2", []string{"foo:bar:baz"}, assert.AnError},
		// ok
		{"c1", []string{"c2:other", ":c1", ":c1"}, nil},
		{"c2", []string{"c1:other"}, nil},
		{"c3", []string{"c2:c2", "c1:c1"}, nil},
	}

	containers := make(map[string]*exec.Handle)
	for _, te := range tests {
		t.Logf("%+v", te)
		c := newContainer(te.con)

		opts := &AddContainerOptions{
			Scope:   scope.Name(),
			Aliases: te.aliases,
		}

		err = ctx.AddContainer(c, opts)
		assert.NoError(t, err)
		assert.EqualValues(t, opts.Aliases, c.ExecConfig.Networks[scope.Name()].Network.Aliases)

		eps, err := ctx.BindContainer(c)
		if te.err != nil {
			assert.Error(t, err)
			assert.Empty(t, eps)
			continue
		}

		assert.NoError(t, err)
		assert.Len(t, eps, 1)

		// verify aliases are present
		assert.NotNil(t, ctx.Container(c.ExecConfig.ID))
		assert.NotNil(t, ctx.Container(uid.Parse(c.ExecConfig.ID).Truncate().String()))
		assert.NotNil(t, ctx.Container(c.ExecConfig.Name))
		assert.NotNil(t, ctx.Container(fmt.Sprintf("%s:%s", scope.Name(), c.ExecConfig.Name)))
		assert.NotNil(t, ctx.Container(fmt.Sprintf("%s:%s", scope.Name(), uid.Parse(c.ExecConfig.ID).Truncate())))

		aliases := c.ExecConfig.Networks[scope.Name()].Network.Aliases
		for _, a := range aliases {
			l := strings.Split(a, ":")
			con, al := l[0], l[1]
			found := false
			var ea alias
			for _, a := range eps[0].getAliases(con) {
				if al == a.Name {
					found = true
					ea = a
					break
				}
			}
			assert.True(t, found, "alias %s not found for container %s", al, con)

			// if the aliased container is bound we should be able to look it up with
			// the scoped alias name
			if c := ctx.Container(ea.Container); c != nil {
				assert.NotNil(t, ctx.Container(ea.scopedName()))
			} else {
				assert.Nil(t, ctx.Container(ea.scopedName()), "scoped name=%s", ea.scopedName())
			}
		}

		// now that the container is bound, there
		// should be additional aliases scoped to
		// other containers
		for _, e := range scope.Endpoints() {
			for _, a := range e.getAliases(c.ExecConfig.Name) {
				t.Logf("alias: %s", a.scopedName())
				assert.NotNil(t, ctx.Container(a.scopedName()))
			}
		}

		containers[te.con] = c
	}

	t.Logf("containers: %#v", ctx.containers)

	c := containers["c2"]
	_, err = ctx.UnbindContainer(c)
	assert.NoError(t, err)
	// verify aliases are gone
	assert.Nil(t, ctx.Container(c.ExecConfig.ID))
	assert.Nil(t, ctx.Container(uid.Parse(c.ExecConfig.ID).Truncate().String()))
	assert.Nil(t, ctx.Container(c.ExecConfig.Name))
	assert.Nil(t, ctx.Container(fmt.Sprintf("%s:%s", scope.Name(), c.ExecConfig.Name)))
	assert.Nil(t, ctx.Container(fmt.Sprintf("%s:%s", scope.Name(), uid.Parse(c.ExecConfig.ID).Truncate())))

	// aliases from c1 and c3 to c2 should not resolve anymore
	assert.Nil(t, ctx.Container(fmt.Sprintf("%s:c1:other", scope.Name())))
	assert.Nil(t, ctx.Container(fmt.Sprintf("%s:c3:c2", scope.Name())))
}
Example #20
func (c *Context) BindContainer(h *exec.Handle) ([]*Endpoint, error) {
	c.Lock()
	defer c.Unlock()

	var con *Container
	var err error

	if len(h.ExecConfig.Networks) == 0 {
		return nil, fmt.Errorf("nothing to bind")
	}

	con, ok := c.containers[uid.Parse(h.ExecConfig.ID)]
	if ok {
		return nil, fmt.Errorf("container %s already bound", h.ExecConfig.ID)
	}

	con = &Container{
		id:   uid.Parse(h.ExecConfig.ID),
		name: h.ExecConfig.Name,
	}
	defaultMarked := false
	var endpoints []*Endpoint
	for _, ne := range h.ExecConfig.Networks {
		var s *Scope
		s, ok := c.scopes[ne.Network.Name]
		if !ok {
			return nil, &ResourceNotFoundError{}
		}

		defer func() {
			if err == nil {
				return
			}

			s.removeContainer(con)
		}()

		var ip *net.IP
		if ne.Static != nil {
			ip = &ne.Static.IP
		}

		var e *Endpoint
		if e, err = s.addContainer(con, ip); err != nil {
			return nil, err
		}

		for _, p := range ne.Ports {
			var port Port
			if port, err = ParsePort(p); err != nil {
				return nil, err
			}

			if err = e.addPort(port); err != nil {
				return nil, err
			}
		}

		eip := e.IP()
		if eip != nil && !eip.IsUnspecified() {
			ne.Static = &net.IPNet{
				IP:   eip,
				Mask: e.Scope().Subnet().Mask,
			}
		}
		ne.Network.Gateway = net.IPNet{IP: e.gateway, Mask: e.subnet.Mask}
		ne.Network.Nameservers = make([]net.IP, len(s.dns))
		copy(ne.Network.Nameservers, s.dns)

		// mark the external network as default
		if !defaultMarked && e.Scope().Type() == ExternalScopeType {
			defaultMarked = true
			ne.Network.Default = true
		}

		endpoints = append(endpoints, e)
	}

	// FIXME: if there was no external network to mark as default,
	// then just pick the first network to mark as default
	if !defaultMarked {
		defaultMarked = true
		for _, ne := range h.ExecConfig.Networks {
			ne.Network.Default = true
			break
		}
	}

	// local map to hold the container mapping
	containers := make(map[uid.UID]*Container)

	// Add the long id, short id and common name to the map, all pointing to the same container.
	// The last two are needed by the DNS subsystem.
	containers[con.id] = con

	tid := con.id.Truncate()
	cname := h.ExecConfig.Common.Name

	var key string
	// network scoped entries
	for i := range endpoints {
		e := endpoints[i]
		// scope name
		sname := e.Scope().Name()

		// SCOPE:SHORT ID
		key = fmt.Sprintf("%s:%s", sname, tid)
		log.Debugf("Adding %s to the containers", key)
		containers[uid.Parse(key)] = con

		// SCOPE:NAME
		key = fmt.Sprintf("%s:%s", sname, cname)
		log.Debugf("Adding %s to the containers", key)
		containers[uid.Parse(key)] = con

		ne, ok := h.ExecConfig.Networks[sname]
		if !ok {
			err := fmt.Errorf("Failed to find Network %s", sname)
			log.Errorf(err.Error())
			return nil, err
		}

		// Aliases/Links
		for i := range ne.Network.Aliases {
			l := strings.Split(ne.Network.Aliases[i], ":")
			if len(l) != 2 {
				err := fmt.Errorf("Parsing %s failed", l)
				log.Errorf(err.Error())
				return nil, err
			}
			who, what := l[0], l[1]
			// if who is an empty string, the alias
			// points to the container itself
			if who == "" {
				who = cname
			}
			// Find the scope:who container
			key = fmt.Sprintf("%s:%s", sname, who)
			// search global map
			con, ok := c.containers[uid.Parse(key)]
			if !ok {
				// search local map
				con, ok = containers[uid.Parse(key)]
				if !ok {
					err := fmt.Errorf("Failed to find container %s", key)
					log.Errorf(err.Error())
					return nil, err
				}
			}
			log.Debugf("Found container %s", key)

			// Set scope:what to scope:who
			key = fmt.Sprintf("%s:%s", sname, what)
			log.Debugf("Adding %s to the containers", key)
			containers[uid.Parse(key)] = con
		}
	}

	// set the real map now that we are err free
	for k, v := range containers {
		c.containers[k] = v
	}

	return endpoints, nil
}
Example #21
// Containers returns the list of containers to show given the user's filtering.
func (c *Container) Containers(config *types.ContainerListOptions) ([]*types.Container, error) {

	// Get an API client to the portlayer
	client := c.containerProxy.Client()

	containme, err := client.Containers.GetContainerList(containers.NewGetContainerListParamsWithContext(ctx).WithAll(&config.All))
	if err != nil {
		switch err := err.(type) {

		case *containers.GetContainerListInternalServerError:
			return nil, fmt.Errorf("Error invoking GetContainerList: %s", err.Payload.Message)

		default:
			return nil, fmt.Errorf("Error invoking GetContainerList: %s", err.Error())
		}
	}
	// TODO: move to conversion function
	containers := make([]*types.Container, 0, len(containme.Payload))

	for _, t := range containme.Payload {
		cmd := strings.Join(t.ProcessConfig.ExecArgs, " ")
		// the docker client expects the friendly name to be prefixed
		// with a forward slash -- create a new slice and add here
		names := make([]string, 0, len(t.ContainerConfig.Names))
		for i := range t.ContainerConfig.Names {
			names = append(names, clientFriendlyContainerName(t.ContainerConfig.Names[i]))
		}
		var started time.Time
		var stopped time.Time
		if t.ProcessConfig.StartTime != nil && *t.ProcessConfig.StartTime > 0 {
			started = time.Unix(*t.ProcessConfig.StartTime, 0)
		}
		if t.ProcessConfig.StopTime != nil && *t.ProcessConfig.StopTime > 0 {
			stopped = time.Unix(*t.ProcessConfig.StopTime, 0)
		}
		// get the docker friendly status
		_, status := dockerStatus(int(*t.ProcessConfig.ExitCode), *t.ProcessConfig.Status, *t.ContainerConfig.State, started, stopped)

		ips, err := externalIPv4Addrs()
		var ports []types.Port
		if err != nil {
			log.Errorf("Could not get IP information for reporting port bindings.")
		} else {
			ports = portInformation(t, ips)
		}

		// verify that the repo:tag exists for the container -- if it doesn't, present the
		// truncated imageID -- if we fail to determine this, show the data we have
		repo := *t.ContainerConfig.RepoName
		ref, _ := reference.ParseNamed(*t.ContainerConfig.RepoName)
		if ref != nil {
			imageID, err := cache.RepositoryCache().Get(ref)
			if err != nil && err == cache.ErrDoesNotExist {
				// the tag has been removed, so we need to show the truncated imageID
				imageID = cache.RepositoryCache().GetImageID(*t.ContainerConfig.LayerID)
				if imageID != "" {
					id := uid.Parse(imageID)
					repo = id.Truncate().String()
				}
			}
		}

		c := &types.Container{
			ID:      *t.ContainerConfig.ContainerID,
			Image:   repo,
			Created: *t.ContainerConfig.CreateTime,
			Status:  status,
			Names:   names,
			Command: cmd,
			SizeRw:  *t.ContainerConfig.StorageSize,
			Ports:   ports,
		}
		containers = append(containers, c)
	}
	// sort on creation time
	sort.Sort(sort.Reverse(containerByCreated(containers)))
	return containers, nil
}