Example #1
func (n *network) initSandbox() error {
	n.Lock()
	n.initEpoch++
	n.Unlock()

	sbox, err := osl.NewSandbox(
		osl.GenerateKey(fmt.Sprintf("%d-", n.initEpoch)+n.id), true)
	if err != nil {
		return fmt.Errorf("could not create network sandbox: %v", err)
	}

	n.setSandbox(sbox)

	n.driver.peerDbUpdateSandbox(n.id)

	var nlSock *nl.NetlinkSocket
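	// Subscribe to neighbor-table (RTNLGRP_NEIGH) netlink events from inside the
	// sandbox's namespace; watchMiss reacts to neighbor misses reported there.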
	sbox.InvokeFunc(func() {
		nlSock, err = nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_NEIGH)
		if err != nil {
			err = fmt.Errorf("failed to subscribe to neighbor group netlink messages")
		}
	})

	if nlSock != nil {
		go n.watchMiss(nlSock)
	}
	return nil
}
Example #2
func (n *network) initSandbox() error {
	n.Lock()
	n.initEpoch++
	n.Unlock()

	sbox, err := osl.NewSandbox(
		osl.GenerateKey(fmt.Sprintf("%d-", n.initEpoch)+n.id), true)
	if err != nil {
		return fmt.Errorf("could not create network sandbox: %v", err)
	}

	// Add a bridge inside the namespace
	if err := sbox.AddInterface("bridge1", "br",
		sbox.InterfaceOptions().Address(bridgeIP),
		sbox.InterfaceOptions().Bridge(true)); err != nil {
		return fmt.Errorf("could not create bridge inside the network sandbox: %v", err)
	}

	n.setSandbox(sbox)

	var nlSock *nl.NetlinkSocket
	sbox.InvokeFunc(func() {
		nlSock, err = nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_NEIGH)
		if err != nil {
			err = fmt.Errorf("failed to subscribe to neighbor group netlink messages")
		}
	})

	if nlSock != nil {
		go n.watchMiss(nlSock)
	}
	return n.initVxlan()
}
Example #3
func (n *network) initSandbox(restore bool) error {
	n.Lock()
	n.initEpoch++
	n.Unlock()

	networkOnce.Do(networkOnceInit)

	if !restore {
		if hostMode {
			if err := addNetworkChain(n.id[:12]); err != nil {
				return err
			}
		}

		// If there are any stale sandboxes related to this network
		// from a previous daemon life, clean them up here
		n.cleanupStaleSandboxes()
	}

	// In the restore case the network sandbox already exists, but we don't know
	// which epoch number it was created with. It has to be retrieved by
	// searching the net namespaces.
	key := ""
	if restore {
		key = osl.GenerateKey("-" + n.id)
	} else {
		key = osl.GenerateKey(fmt.Sprintf("%d-", n.initEpoch) + n.id)
	}

	sbox, err := osl.NewSandbox(key, !hostMode, restore)
	if err != nil {
		return fmt.Errorf("could not get network sandbox (oper %t): %v", restore, err)
	}

	n.setSandbox(sbox)

	if !restore {
		n.driver.peerDbUpdateSandbox(n.id)
	}

	var nlSock *nl.NetlinkSocket
	sbox.InvokeFunc(func() {
		nlSock, err = nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_NEIGH)
		if err != nil {
			err = fmt.Errorf("failed to subscribe to neighbor group netlink messages")
		}
	})

	if nlSock != nil {
		go n.watchMiss(nlSock)
	}

	return nil
}
Example #4
// NewSandbox creates a new sandbox for the passed container id
func (c *controller) NewSandbox(containerID string, options ...SandboxOption) (Sandbox, error) {
	var err error

	if containerID == "" {
		return nil, types.BadRequestErrorf("invalid container ID")
	}

	var existing Sandbox
	look := SandboxContainerWalker(&existing, containerID)
	c.WalkSandboxes(look)
	if existing != nil {
		return nil, types.BadRequestErrorf("container %s is already present: %v", containerID, existing)
	}

	// Create sandbox and process options first. Key generation depends on an option
	sb := &sandbox{
		id:          stringid.GenerateRandomID(),
		containerID: containerID,
		endpoints:   epHeap{},
		epPriority:  map[string]int{},
		config:      containerConfig{},
		controller:  c,
	}
	// This sandbox may be using an existing osl sandbox, sharing it with another sandbox
	var peerSb Sandbox
	c.WalkSandboxes(SandboxKeyWalker(&peerSb, sb.Key()))
	if peerSb != nil {
		sb.osSbox = peerSb.(*sandbox).osSbox
	}

	heap.Init(&sb.endpoints)

	sb.processOptions(options...)

	if err = sb.setupResolutionFiles(); err != nil {
		return nil, err
	}

	if sb.osSbox == nil && !sb.config.useExternalKey {
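		// No OS sandbox was inherited from a peer and no external key is expected,
		// so create a network namespace keyed to this sandbox.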
		if sb.osSbox, err = osl.NewSandbox(sb.Key(), !sb.config.useDefaultSandBox); err != nil {
			return nil, fmt.Errorf("failed to create new osl sandbox: %v", err)
		}
	}

	c.Lock()
	c.sandboxes[sb.id] = sb
	c.Unlock()

	return sb, nil
}
Example #5
func (n *network) initSandbox() error {
	n.Lock()
	n.initEpoch++
	n.Unlock()

	networkOnce.Do(networkOnceInit)

	if hostMode {
		if err := addNetworkChain(n.id[:12]); err != nil {
			return err
		}
	}

	// If there are any stale sandboxes related to this network
	// from a previous daemon life, clean them up here
	n.cleanupStaleSandboxes()

	sbox, err := osl.NewSandbox(
		osl.GenerateKey(fmt.Sprintf("%d-", n.initEpoch)+n.id), !hostMode)
	if err != nil {
		return fmt.Errorf("could not create network sandbox: %v", err)
	}

	n.setSandbox(sbox)

	n.driver.peerDbUpdateSandbox(n.id)

	var nlSock *nl.NetlinkSocket
	sbox.InvokeFunc(func() {
		nlSock, err = nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_NEIGH)
		if err != nil {
			err = fmt.Errorf("failed to subscribe to neighbor group netlink messages")
		}
	})

	if nlSock != nil {
		go n.watchMiss(nlSock)
	}

	return nil
}
Example #6
func (n *network) initSandbox(restore bool) error {
	n.Lock()
	n.initEpoch++
	n.Unlock()

	networkOnce.Do(networkOnceInit)

	if !restore {
		// If there are any stale sandboxes related to this network
		// from a previous daemon life, clean them up here
		n.cleanupStaleSandboxes()
	}

	// In the restore case the network sandbox already exists, but we don't know
	// which epoch number it was created with. It has to be retrieved by
	// searching the net namespaces.
	key := ""
	if restore {
		key = osl.GenerateKey("-" + n.id)
	} else {
		key = osl.GenerateKey(fmt.Sprintf("%d-", n.initEpoch) + n.id)
	}

	sbox, err := osl.NewSandbox(key, !hostMode, restore)
	if err != nil {
		return fmt.Errorf("could not get network sandbox (oper %t): %v", restore, err)
	}

	n.setSandbox(sbox)

	if !restore {
		n.driver.peerDbUpdateSandbox(n.id)
	}

	return nil
}
Example #7
// NewSandbox creates a new sandbox for the passed container id
func (c *controller) NewSandbox(containerID string, options ...SandboxOption) (sBox Sandbox, err error) {
	if containerID == "" {
		return nil, types.BadRequestErrorf("invalid container ID")
	}

	var sb *sandbox
	c.Lock()
	for _, s := range c.sandboxes {
		if s.containerID == containerID {
			// If not a stub, then we already have a complete sandbox.
			if !s.isStub {
				c.Unlock()
				return nil, types.ForbiddenErrorf("container %s is already present: %v", containerID, s)
			}

			// We already have a stub sandbox from the
			// store. Make use of it so that we don't lose
			// the endpoints from store but reset the
			// isStub flag.
			sb = s
			sb.isStub = false
			break
		}
	}
	c.Unlock()

	// Create sandbox and process options first. Key generation depends on an option
	if sb == nil {
		sb = &sandbox{
			id:                 stringid.GenerateRandomID(),
			containerID:        containerID,
			endpoints:          epHeap{},
			epPriority:         map[string]int{},
			populatedEndpoints: map[string]struct{}{},
			config:             containerConfig{},
			controller:         c,
		}
	}
	sBox = sb

	heap.Init(&sb.endpoints)

	sb.processOptions(options...)

	c.Lock()
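	// Only one ingress sandbox may exist per controller; register it while the lock is held.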
	if sb.ingress && c.ingressSandbox != nil {
		c.Unlock()
		return nil, types.ForbiddenErrorf("ingress sandbox already present")
	}

	if sb.ingress {
		c.ingressSandbox = sb
	}
	c.Unlock()
	defer func() {
		if err != nil {
			c.Lock()
			if sb.ingress {
				c.ingressSandbox = nil
			}
			c.Unlock()
		}
	}()

	if err = sb.setupResolutionFiles(); err != nil {
		return nil, err
	}

	if sb.config.useDefaultSandBox {
		c.sboxOnce.Do(func() {
			c.defOsSbox, err = osl.NewSandbox(sb.Key(), false, false)
		})

		if err != nil {
			c.sboxOnce = sync.Once{}
			return nil, fmt.Errorf("failed to create default sandbox: %v", err)
		}

		sb.osSbox = c.defOsSbox
	}

	if sb.osSbox == nil && !sb.config.useExternalKey {
		if sb.osSbox, err = osl.NewSandbox(sb.Key(), !sb.config.useDefaultSandBox, false); err != nil {
			return nil, fmt.Errorf("failed to create new osl sandbox: %v", err)
		}
	}

	c.Lock()
	c.sandboxes[sb.id] = sb
	c.Unlock()
	defer func() {
		if err != nil {
			c.Lock()
			delete(c.sandboxes, sb.id)
			c.Unlock()
		}
	}()

	err = sb.storeUpdate()
	if err != nil {
		return nil, fmt.Errorf("updating the store state of sandbox failed: %v", err)
	}

	return sb, nil
}
Example #8
func (c *controller) sandboxCleanup() {
	store := c.getStore(datastore.LocalScope)
	if store == nil {
		logrus.Errorf("Could not find local scope store while trying to cleanup sandboxes")
		return
	}

	kvol, err := store.List(datastore.Key(sandboxPrefix), &sbState{c: c})
	if err != nil && err != datastore.ErrKeyNotFound {
		logrus.Errorf("failed to get sandboxes for scope %s: %v", store.Scope(), err)
		return
	}

	// It's normal for no sandboxes to be found. Just bail out.
	if err == datastore.ErrKeyNotFound {
		return
	}

	for _, kvo := range kvol {
		sbs := kvo.(*sbState)

		sb := &sandbox{
			id:          sbs.ID,
			controller:  sbs.c,
			containerID: sbs.Cid,
			endpoints:   epHeap{},
			epPriority:  map[string]int{},
			dbIndex:     sbs.dbIndex,
			isStub:      true,
			dbExists:    true,
		}

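		// Recreate the OS sandbox for this stale entry so that sb.delete below
		// can tear down the namespace together with its resources.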
		sb.osSbox, err = osl.NewSandbox(sb.Key(), true)
		if err != nil {
			logrus.Errorf("failed to create new osl sandbox while trying to build sandbox for cleanup: %v", err)
			continue
		}

		c.Lock()
		c.sandboxes[sb.id] = sb
		c.Unlock()

		for _, eps := range sbs.Eps {
			n, err := c.getNetworkFromStore(eps.Nid)
			var ep *endpoint
			if err != nil {
				logrus.Errorf("getNetworkFromStore for nid %s failed while trying to build sandbox for cleanup: %v", eps.Nid, err)
				n = &network{id: eps.Nid, ctrlr: c, drvOnce: &sync.Once{}, persist: true}
				ep = &endpoint{id: eps.Eid, network: n, sandboxID: sbs.ID}
			} else {
				ep, err = n.getEndpointFromStore(eps.Eid)
				if err != nil {
					logrus.Errorf("getEndpointFromStore for eid %s failed while trying to build sandbox for cleanup: %v", eps.Eid, err)
					ep = &endpoint{id: eps.Eid, network: n, sandboxID: sbs.ID}
				}
			}

			heap.Push(&sb.endpoints, ep)
		}

		logrus.Infof("Removing stale sandbox %s (%s)", sb.id, sb.containerID)
		if err := sb.delete(true); err != nil {
			logrus.Errorf("failed to delete sandbox %s while trying to cleanup: %v", sb.id, err)
		}
	}
}
Example #9
func externalKeyTest(t *testing.T, reexec bool) {
	if !testutils.IsRunningInContainer() {
		defer testutils.SetupTestOSContext(t)()
	}

	n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{
		netlabel.GenericData: options.Generic{
			"BridgeName": "testnetwork",
		},
	}, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := n.Delete(); err != nil {
			t.Fatal(err)
		}
	}()

	ep, err := n.CreateEndpoint("ep1")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err = ep.Delete()
		if err != nil {
			t.Fatal(err)
		}
	}()

	ep2, err := n.CreateEndpoint("ep2")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err = ep2.Delete()
		if err != nil {
			t.Fatal(err)
		}
	}()

	cnt, err := controller.NewSandbox(containerID,
		libnetwork.OptionHostname("test"),
		libnetwork.OptionDomainname("docker.io"),
		libnetwork.OptionUseExternalKey(),
		libnetwork.OptionExtraHost("web", "192.168.0.1"))
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := cnt.Delete(); err != nil {
			t.Fatal(err)
		}
		osl.GC()
	}()

	// Join endpoint to sandbox before SetKey
	err = ep.Join(cnt)
	runtime.LockOSThread()
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err = ep.Leave(cnt)
		runtime.LockOSThread()
		if err != nil {
			t.Fatal(err)
		}
	}()

	sbox := ep.Info().Sandbox()
	if sbox == nil {
		t.Fatalf("Expected to have a valid Sandbox")
	}

	if reexec {
		err := reexecSetKey("this-must-fail", containerID, controller.ID())
		if err == nil {
			t.Fatalf("SetExternalKey must fail if the corresponding namespace is not created")
		}
	} else {
		// Setting a non-existing key (namespace) must fail
		if err := sbox.SetKey("this-must-fail"); err == nil {
			t.Fatalf("Setkey must fail if the corresponding namespace is not created")
		}
	}

	// Create a new OS sandbox using the osl API before using it in SetKey
	if extOsBox, err := osl.NewSandbox("ValidKey", true); err != nil {
		t.Fatalf("Failed to create new osl sandbox")
	} else {
		defer func() {
			if err := extOsBox.Destroy(); err != nil {
				log.Warnf("Failed to remove os sandbox: %v", err)
			}
		}()
	}

	if reexec {
		err := reexecSetKey("ValidKey", containerID, controller.ID())
		if err != nil {
			t.Fatalf("SetExternalKey failed with %v", err)
		}
	} else {
		if err := sbox.SetKey("ValidKey"); err != nil {
			t.Fatalf("Setkey failed with %v", err)
		}
	}

	// Join endpoint to sandbox after SetKey
	err = ep2.Join(sbox)
	if err != nil {
		t.Fatal(err)
	}
	runtime.LockOSThread()
	defer func() {
		err = ep2.Leave(sbox)
		runtime.LockOSThread()
		if err != nil {
			t.Fatal(err)
		}
	}()

	if ep.Info().Sandbox().Key() != ep2.Info().Sandbox().Key() {
		t.Fatalf("ep1 and ep2 returned different container sandbox key")
	}

	checkSandbox(t, ep.Info())
}
Example #10
func (c *controller) sandboxCleanup(activeSandboxes map[string]interface{}) {
	store := c.getStore(datastore.LocalScope)
	if store == nil {
		logrus.Errorf("Could not find local scope store while trying to cleanup sandboxes")
		return
	}

	kvol, err := store.List(datastore.Key(sandboxPrefix), &sbState{c: c})
	if err != nil && err != datastore.ErrKeyNotFound {
		logrus.Errorf("failed to get sandboxes for scope %s: %v", store.Scope(), err)
		return
	}

	// It's normal for no sandboxes to be found. Just bail out.
	if err == datastore.ErrKeyNotFound {
		return
	}

	for _, kvo := range kvol {
		sbs := kvo.(*sbState)

		sb := &sandbox{
			id:                 sbs.ID,
			controller:         sbs.c,
			containerID:        sbs.Cid,
			endpoints:          epHeap{},
			populatedEndpoints: map[string]struct{}{},
			dbIndex:            sbs.dbIndex,
			isStub:             true,
			dbExists:           true,
			extDNS:             sbs.ExtDNS,
		}

		msg := " for cleanup"
		create := true
		isRestore := false
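		// Sandboxes reported as still active by the daemon are restored in place;
		// anything else is treated as stale and deleted below.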
		if val, ok := activeSandboxes[sb.ID()]; ok {
			msg = ""
			sb.isStub = false
			isRestore = true
			opts := val.([]SandboxOption)
			sb.processOptions(opts...)
			sb.restorePath()
			create = !sb.config.useDefaultSandBox
			heap.Init(&sb.endpoints)
		}
		sb.osSbox, err = osl.NewSandbox(sb.Key(), create, isRestore)
		if err != nil {
			logrus.Errorf("failed to create osl sandbox while trying to restore sandbox %s%s: %v", sb.ID()[0:7], msg, err)
			continue
		}

		c.Lock()
		c.sandboxes[sb.id] = sb
		c.Unlock()

		for _, eps := range sbs.Eps {
			n, err := c.getNetworkFromStore(eps.Nid)
			var ep *endpoint
			if err != nil {
				logrus.Errorf("getNetworkFromStore for nid %s failed while trying to build sandbox for cleanup: %v", eps.Nid, err)
				n = &network{id: eps.Nid, ctrlr: c, drvOnce: &sync.Once{}, persist: true}
				ep = &endpoint{id: eps.Eid, network: n, sandboxID: sbs.ID}
			} else {
				ep, err = n.getEndpointFromStore(eps.Eid)
				if err != nil {
					logrus.Errorf("getEndpointFromStore for eid %s failed while trying to build sandbox for cleanup: %v", eps.Eid, err)
					ep = &endpoint{id: eps.Eid, network: n, sandboxID: sbs.ID}
				}
			}
			if _, ok := activeSandboxes[sb.ID()]; ok && err != nil {
				logrus.Errorf("failed to restore endpoint %s in %s for container %s due to %v", eps.Eid, eps.Nid, sb.ContainerID(), err)
				continue
			}
			heap.Push(&sb.endpoints, ep)
		}

		if _, ok := activeSandboxes[sb.ID()]; !ok {
			logrus.Infof("Removing stale sandbox %s (%s)", sb.id, sb.containerID)
			if err := sb.delete(true); err != nil {
				logrus.Errorf("Failed to delete sandbox %s while trying to cleanup: %v", sb.id, err)
			}
			continue
		}

		// reconstruct osl sandbox field
		if !sb.config.useDefaultSandBox {
			if err := sb.restoreOslSandbox(); err != nil {
				logrus.Errorf("failed to populate fields for osl sandbox %s", sb.ID())
				continue
			}
		} else {
			c.sboxOnce.Do(func() {
				c.defOsSbox = sb.osSbox
			})
		}

		for _, ep := range sb.endpoints {
			// Watch for service records
			if !c.isAgent() {
				c.watchSvcRecord(ep)
			}
		}
	}
}
Example #11
func (c *controller) sandboxCleanup() {
	store := c.getStore(datastore.LocalScope)
	if store == nil {
		logrus.Errorf("Could not find local scope store while trying to cleanup sandboxes")
		return
	}

	kvol, err := store.List(datastore.Key(sandboxPrefix), &sbState{c: c})
	if err != nil && err != datastore.ErrKeyNotFound {
		logrus.Errorf("failed to get sandboxes for scope %s: %v", store.Scope(), err)
		return
	}

	// It's normal for no sandboxes to be found. Just bail out.
	if err == datastore.ErrKeyNotFound {
		return
	}

	for _, kvo := range kvol {
		sbs := kvo.(*sbState)

		logrus.Printf("sandboxcleanup sbs = %+v", sbs)
		sb := &sandbox{
			id:          sbs.ID,
			controller:  sbs.c,
			containerID: sbs.Cid,
			endpoints:   epHeap{},
			epPriority:  map[string]int{},
			dbIndex:     sbs.dbIndex,
			dbExists:    true,
		}

		sb.osSbox, err = osl.NewSandbox(sb.Key(), true)
		if err != nil {
			logrus.Errorf("failed to create new osl sandbox while trying to build sandbox for cleanup: %v", err)
			continue
		}

		for _, eps := range sbs.Eps {
			n, err := c.getNetworkFromStore(eps.Nid)
			if err != nil {
				logrus.Errorf("getNetworkFromStore for nid %s failed while trying to build sandbox for cleanup: %v", eps.Nid, err)
				continue
			}

			ep, err := n.getEndpointFromStore(eps.Eid)
			if err != nil {
				logrus.Errorf("getEndpointFromStore for eid %s failed while trying to build sandbox for cleanup: %v", eps.Eid, err)
				continue
			}

			heap.Push(&sb.endpoints, ep)
		}

		c.Lock()
		c.sandboxes[sb.id] = sb
		c.Unlock()

		if err := sb.Delete(); err != nil {
			logrus.Errorf("failed to delete sandbox %s while trying to cleanup: %v", sb.id, err)
		}
	}
}
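The snippets above come from different revisions of libnetwork, which is why osl.NewSandbox is sometimes called with two arguments and sometimes with a third restore flag. As a reference, here is a minimal, self-contained sketch of the basic call pattern; it assumes the github.com/docker/libnetwork/osl import path and the older two-argument signature, and creating a network namespace this way requires root privileges.

package main

import (
	"log"

	"github.com/docker/libnetwork/osl"
)

func main() {
	// Derive a namespace key from an arbitrary identifier, as the examples
	// above do with the network or sandbox ID.
	key := osl.GenerateKey("example-sandbox")

	// The boolean asks osl to create a separate network namespace; the overlay
	// examples above pass !hostMode so that no new namespace is created when
	// running in host mode.
	sbox, err := osl.NewSandbox(key, true)
	if err != nil {
		log.Fatalf("could not create network sandbox: %v", err)
	}
	defer func() {
		if err := sbox.Destroy(); err != nil {
			log.Printf("failed to destroy sandbox: %v", err)
		}
	}()

	log.Printf("created sandbox with key %s", sbox.Key())
}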