// DriverBenchDiffApplyN benchmarks calls to diff and apply together
func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, driveroptions ...string) {
	driver := GetDriver(b, drivername, driveroptions...)
	defer PutDriver(b)
	base := stringid.GenerateRandomID()
	upper := stringid.GenerateRandomID()
	if err := driver.Create(base, "", nil); err != nil {
		b.Fatal(err)
	}
	if err := addManyFiles(driver, base, fileCount, 3); err != nil {
		b.Fatal(err)
	}
	if err := driver.Create(upper, base, nil); err != nil {
		b.Fatal(err)
	}
	if err := addManyFiles(driver, upper, fileCount, 6); err != nil {
		b.Fatal(err)
	}
	diffSize, err := driver.DiffSize(upper, "")
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	b.StopTimer()
	for i := 0; i < b.N; i++ {
		diff := stringid.GenerateRandomID()
		if err := driver.Create(diff, base, nil); err != nil {
			b.Fatal(err)
		}
		if err := checkManyFiles(driver, diff, fileCount, 3); err != nil {
			b.Fatal(err)
		}

		b.StartTimer()
		arch, err := driver.Diff(upper, "")
		if err != nil {
			b.Fatal(err)
		}
		applyDiffSize, err := driver.ApplyDiff(diff, "", arch)
		if err != nil {
			b.Fatal(err)
		}
		b.StopTimer()
		arch.Close()

		if applyDiffSize != diffSize {
			// TODO: enforce this
			//b.Fatalf("Apply diff size different, got %d, expected %d", applyDiffSize, diffSize)
		}
		if err := checkManyFiles(driver, diff, fileCount, 6); err != nil {
			b.Fatal(err)
		}
	}
}
// DriverBenchDiffN benchmarks calls to diff on two layers with
// a provided number of files on the lower and upper layers.
func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driveroptions ...string) {
	driver := GetDriver(b, drivername, driveroptions...)
	defer PutDriver(b)
	base := stringid.GenerateRandomID()
	upper := stringid.GenerateRandomID()
	if err := driver.Create(base, "", "", nil); err != nil {
		b.Fatal(err)
	}
	if err := addManyFiles(driver, base, bottom, 3); err != nil {
		b.Fatal(err)
	}
	if err := driver.Create(upper, base, "", nil); err != nil {
		b.Fatal(err)
	}
	if err := addManyFiles(driver, upper, top, 6); err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		arch, err := driver.Diff(upper, "")
		if err != nil {
			b.Fatal(err)
		}
		_, err = io.Copy(ioutil.Discard, arch)
		if err != nil {
			b.Fatalf("Error copying archive: %s", err)
		}
		arch.Close()
	}
}
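// For context, a minimal sketch of how the DriverBench* helpers above are
// typically invoked from a driver-specific _test.go file. The "vfs" driver
// name is an illustrative assumption; substitute the graphdriver under test.
func BenchmarkVfsDiff10(b *testing.B) {
	DriverBenchDiffN(b, 10, 10, "vfs")
}

func BenchmarkVfsDiffApply10(b *testing.B) {
	DriverBenchDiffApplyN(b, 10, "vfs")
}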
func TestByParent(t *testing.T) {
	archive1, _ := fakeTar()
	archive2, _ := fakeTar()
	archive3, _ := fakeTar()
	graph, _ := tempGraph(t)
	defer nukeGraph(graph)
	parentImage := &image.Image{
		ID:      stringid.GenerateRandomID(),
		Comment: "parent",
		Created: time.Now(),
		Parent:  "",
	}
	childImage1 := &image.Image{
		ID:      stringid.GenerateRandomID(),
		Comment: "child1",
		Created: time.Now(),
		Parent:  parentImage.ID,
	}
	childImage2 := &image.Image{
		ID:      stringid.GenerateRandomID(),
		Comment: "child2",
		Created: time.Now(),
		Parent:  parentImage.ID,
	}
	_ = graph.Register(parentImage, archive1)
	_ = graph.Register(childImage1, archive2)
	_ = graph.Register(childImage2, archive3)

	byParent := graph.ByParent()
	numChildren := len(byParent[parentImage.ID])
	if numChildren != 2 {
		t.Fatalf("Expected 2 children, found %d", numChildren)
	}
}
// DriverTestDiffApply tests diffing and applying produces the same layer
func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverOptions ...string) {
	driver := GetDriver(t, drivername, driverOptions...)
	defer PutDriver(t)
	base := stringid.GenerateRandomID()
	upper := stringid.GenerateRandomID()
	if err := driver.Create(base, "", "", nil); err != nil {
		t.Fatal(err)
	}
	if err := addManyFiles(driver, base, fileCount, 3); err != nil {
		t.Fatal(err)
	}
	if err := driver.Create(upper, base, "", nil); err != nil {
		t.Fatal(err)
	}
	if err := addManyFiles(driver, upper, fileCount, 6); err != nil {
		t.Fatal(err)
	}
	diffSize, err := driver.DiffSize(upper, "")
	if err != nil {
		t.Fatal(err)
	}

	diff := stringid.GenerateRandomID()
	if err := driver.Create(diff, base, "", nil); err != nil {
		t.Fatal(err)
	}
	if err := checkManyFiles(driver, diff, fileCount, 3); err != nil {
		t.Fatal(err)
	}

	arch, err := driver.Diff(upper, base)
	if err != nil {
		t.Fatal(err)
	}

	buf := bytes.NewBuffer(nil)
	if _, err := buf.ReadFrom(arch); err != nil {
		t.Fatal(err)
	}
	if err := arch.Close(); err != nil {
		t.Fatal(err)
	}

	applyDiffSize, err := driver.ApplyDiff(diff, base, bytes.NewReader(buf.Bytes()))
	if err != nil {
		t.Fatal(err)
	}
	if applyDiffSize != diffSize {
		t.Fatalf("Apply diff size different, got %d, expected %d", applyDiffSize, diffSize)
	}
	if err := checkManyFiles(driver, diff, fileCount, 6); err != nil {
		t.Fatal(err)
	}
}
// DriverTestChanges tests that computed changes on a layer match the changes made
func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string) {
	driver := GetDriver(t, drivername, driverOptions...)
	defer PutDriver(t)
	base := stringid.GenerateRandomID()
	upper := stringid.GenerateRandomID()
	if err := driver.Create(base, "", "", nil); err != nil {
		t.Fatal(err)
	}
	if err := addManyFiles(driver, base, 20, 3); err != nil {
		t.Fatal(err)
	}
	if err := driver.Create(upper, base, "", nil); err != nil {
		t.Fatal(err)
	}

	expectedChanges, err := changeManyFiles(driver, upper, 20, 6)
	if err != nil {
		t.Fatal(err)
	}

	changes, err := driver.Changes(upper, base)
	if err != nil {
		t.Fatal(err)
	}

	if err = checkChanges(expectedChanges, changes); err != nil {
		t.Fatal(err)
	}
}
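// Likewise, a minimal sketch of how the DriverTest* helpers are wired into a
// driver-specific test file; as with the benchmarks, "vfs" is an illustrative
// assumption.
func TestVfsDiffApply(t *testing.T) {
	DriverTestDiffApply(t, 10, "vfs")
}

func TestVfsChanges(t *testing.T) {
	DriverTestChanges(t, "vfs")
}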
// New creates a new instance of network controller.
func New(cfgOptions ...config.Option) (NetworkController, error) {
	var cfg *config.Config
	if len(cfgOptions) > 0 {
		cfg = &config.Config{}
		cfg.ProcessOptions(cfgOptions...)
	}
	c := &controller{
		id:        stringid.GenerateRandomID(),
		cfg:       cfg,
		networks:  networkTable{},
		sandboxes: sandboxTable{},
		drivers:   driverTable{}}
	if err := initDrivers(c); err != nil {
		return nil, err
	}

	if cfg != nil {
		if err := c.initDataStore(); err != nil {
			// Failing to initialize the datastore is a bad situation to be in.
			// But it cannot fail creating the Controller
			log.Debugf("Failed to Initialize Datastore due to %v. Operating in non-clustered mode", err)
		}
		if err := c.initDiscovery(); err != nil {
			// Failing to initialize discovery is a bad situation to be in.
			// But it cannot fail creating the Controller
			log.Debugf("Failed to Initialize Discovery : %v", err)
		}
	}

	if err := c.startExternalKeyListener(); err != nil {
		return nil, err
	}

	return c, nil
}
func (s *DockerSuite) TestRenameCheckNames(c *check.C) {
	runCmd := exec.Command(dockerBinary, "run", "--name", "first_name", "-d", "busybox", "sh")
	out, _, err := runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("%s: %v", out, err)
	}

	newName := "new_name" + stringid.GenerateRandomID()
	runCmd = exec.Command(dockerBinary, "rename", "first_name", newName)
	out, _, err = runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("%s: %v", out, err)
	}

	name, err := inspectField(newName, "Name")
	if err != nil {
		c.Fatal(err)
	}
	if name != "/"+newName {
		c.Fatal("Failed to rename container")
	}

	// The old name should no longer resolve; only fail on an unexpected error.
	_, err = inspectField("first_name", "Name")
	if err != nil && !strings.Contains(err.Error(), "No such image or container: first_name") {
		c.Fatal(err)
	}
}
func runScramble(cmd *cobra.Command, args []string) {
	globalFlags(cmd)
	t1 := time.Now()
	dir, err := ioutil.ReadDir(filepath.Join(graphdir, "graph"))
	if err != nil {
		logrus.Fatalf("Error reading graph dir: %s", err)
	}
	var ids = []string{}
	for _, v := range dir {
		id := v.Name()
		if len(id) != 64 {
			logrus.Debugf("Skipping: %s", v.Name())
			continue
		}
		cacheDir, err := getCacheDir(id)
		if err != nil {
			if err == ErrNeedMigration {
				logrus.Debugf("%s not migrated", id)
			}
			logrus.Fatalf("Error getting image IDs: %s", err)
		}
		if _, err := os.Stat(cacheDir); err != nil {
			if os.IsNotExist(err) {
				logrus.Debugf("Skipping, missing cache dir: %s", id)
				continue
			}
			logrus.Fatalf("Error checking cache dir %s: %s", cacheDir, err)
		}
		ids = append(ids, id)
	}

	updates := map[string]string{}
	fileUpdates := []string{
		filepath.Join(graphdir, fmt.Sprintf("repositories-%s", driver)),
	}
	for _, id := range ids {
		fmt.Fprintf(cmd.Out(), "Scrambling %s\n", id)
		newID := stringid.GenerateRandomID()
		oldPath := filepath.Join(graphdir, "graph", id)
		newPath := filepath.Join(graphdir, "graph", newID)
		if err := os.Rename(oldPath, newPath); err != nil {
			logrus.Errorf("Error renaming %s to %s: %s", oldPath, newPath, err)
			continue
		}
		// Record the mapping only after the rename succeeds.
		updates[id] = newID
		fileUpdates = append(fileUpdates, filepath.Join(graphdir, "graph", newID, "json"))
	}
	updateReferences(updates, fileUpdates)
	logrus.Debugf("Ran scramble in %s", time.Since(t1).String())
}
// New creates a new instance of network controller.
func New(cfgOptions ...config.Option) (NetworkController, error) {
	c := &controller{
		id:              stringid.GenerateRandomID(),
		cfg:             config.ParseConfigOptions(cfgOptions...),
		sandboxes:       sandboxTable{},
		svcRecords:      make(map[string]svcInfo),
		serviceBindings: make(map[serviceKey]*service),
		agentInitDone:   make(chan struct{}),
	}

	if err := c.initStores(); err != nil {
		return nil, err
	}

	drvRegistry, err := drvregistry.New(c.getStore(datastore.LocalScope), c.getStore(datastore.GlobalScope), c.RegisterDriver, nil)
	if err != nil {
		return nil, err
	}

	for _, i := range getInitializers() {
		var dcfg map[string]interface{}

		// External plugins don't need config passed through daemon. They can
		// bootstrap themselves
		if i.ntype != "remote" {
			dcfg = c.makeDriverConfig(i.ntype)
		}

		if err := drvRegistry.AddDriver(i.ntype, i.fn, dcfg); err != nil {
			return nil, err
		}
	}
	c.drvRegistry = drvRegistry

	if c.cfg != nil && c.cfg.Cluster.Watcher != nil {
		if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
			// Failing to initialize discovery is a bad situation to be in.
			// But it cannot fail creating the Controller
			log.Errorf("Failed to Initialize Discovery : %v", err)
		}
	}

	c.WalkNetworks(populateSpecial)

	// Reserve pools first before doing cleanup. Otherwise the
	// cleanups of endpoint/network and sandbox below will
	// generate many unnecessary warnings
	c.reservePools()

	// Cleanup resources
	c.sandboxCleanup(c.cfg.ActiveSandboxes)
	c.cleanupLocalEndpoints()
	c.networkCleanup()

	if err := c.startExternalKeyListener(); err != nil {
		return nil, err
	}

	return c, nil
}
// CreateVolume creates a volume in the cluster
func (c *Cluster) CreateVolume(request *dockerclient.VolumeCreateRequest) (*cluster.Volume, error) {
	var (
		wg     sync.WaitGroup
		mu     sync.Mutex
		volume *cluster.Volume
		err    error
	)

	if request.Name == "" {
		request.Name = stringid.GenerateRandomID()
	}

	c.RLock()
	for _, e := range c.engines {
		wg.Add(1)
		go func(engine *cluster.Engine) {
			defer wg.Done()
			v, er := engine.CreateVolume(request)
			// Guard the shared result variables; they are written from one
			// goroutine per engine.
			mu.Lock()
			defer mu.Unlock()
			if v != nil {
				volume = v
				err = nil
			}
			if er != nil && volume == nil {
				err = er
			}
		}(e)
	}
	c.RUnlock()
	wg.Wait()

	return volume, err
}
// NewSession creates a new session
// TODO(tiborvass): remove authConfig param once registry client v2 is vendored
func NewSession(client *http.Client, authConfig *cliconfig.AuthConfig, endpoint *Endpoint) (r *Session, err error) {
	r = &Session{
		authConfig:    authConfig,
		client:        client,
		indexEndpoint: endpoint,
		id:            stringid.GenerateRandomID(),
	}

	var alwaysSetBasicAuth bool

	// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
	// alongside all our requests.
	if endpoint.VersionString(1) != IndexServer && endpoint.URL.Scheme == "https" {
		info, err := endpoint.Ping()
		if err != nil {
			return nil, err
		}
		if info.Standalone && authConfig != nil {
			logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String())
			alwaysSetBasicAuth = true
		}
	}

	// Annotate the transport unconditionally so that v2 can
	// properly fallback on v1 when an image is not found.
	client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth)

	jar, err := cookiejar.New(nil)
	if err != nil {
		return nil, errors.New("cookiejar.New is not supposed to return an error")
	}
	client.Jar = jar

	return r, nil
}
// DriverTestDeepLayerRead reads a file from a lower layer under a given number of layers
func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, driverOptions ...string) {
	driver := GetDriver(t, drivername, driverOptions...)
	defer PutDriver(t)
	base := stringid.GenerateRandomID()
	if err := driver.Create(base, "", "", nil); err != nil {
		t.Fatal(err)
	}

	content := []byte("test content")
	if err := addFile(driver, base, "testfile.txt", content); err != nil {
		t.Fatal(err)
	}

	topLayer, err := addManyLayers(driver, base, layerCount)
	if err != nil {
		t.Fatal(err)
	}

	err = checkManyLayers(driver, topLayer, layerCount)
	if err != nil {
		t.Fatal(err)
	}

	if err := checkFile(driver, topLayer, "testfile.txt", content); err != nil {
		t.Fatal(err)
	}
}
// NewTask creates a task
func NewTask(config *cluster.ContainerConfig, name string, timeout time.Duration) (*Task, error) {
	id := stringid.TruncateID(stringid.GenerateRandomID())

	if name != "" {
		id = name + "." + id
	}

	// save the name in labels as the mesos containerizer will override it
	config.Labels[cluster.SwarmLabelNamespace+".mesos.name"] = name
	// FIXME: once the Mesos changes are merged, there is no need to save the
	// task ID to know which container we launched
	config.Labels[cluster.SwarmLabelNamespace+".mesos.task"] = id

	task := &Task{
		config:    config,
		container: make(chan *cluster.Container),
		Error:     make(chan error),
		updates:   make(chan *mesosproto.TaskStatus),
	}

	task.Name = &name
	task.TaskId = &mesosproto.TaskID{Value: &id}
	task.Labels = &mesosproto.Labels{Labels: []*mesosproto.Label{{Key: proto.String("SWARM_CONTAINER_NAME"), Value: &name}}}

	go task.suicide(timeout)

	return task, nil
}
// DriverBenchDeepLayerDiff benchmarks calls to diff on top of a given number of layers.
func DriverBenchDeepLayerDiff(b *testing.B, layerCount int, drivername string, driveroptions ...string) {
	driver := GetDriver(b, drivername, driveroptions...)
	defer PutDriver(b)
	base := stringid.GenerateRandomID()
	if err := driver.Create(base, "", "", nil); err != nil {
		b.Fatal(err)
	}
	if err := addFiles(driver, base, 50); err != nil {
		b.Fatal(err)
	}

	topLayer, err := addManyLayers(driver, base, layerCount)
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		arch, err := driver.Diff(topLayer, "")
		if err != nil {
			b.Fatal(err)
		}
		_, err = io.Copy(ioutil.Discard, arch)
		if err != nil {
			b.Fatalf("Error copying archive: %s", err)
		}
		arch.Close()
	}
}
func TestRegister(t *testing.T) {
	graph, _ := tempGraph(t)
	defer nukeGraph(graph)
	archive, err := fakeTar()
	if err != nil {
		t.Fatal(err)
	}
	image := &image.Image{
		ID:      stringid.GenerateRandomID(),
		Comment: "testing",
		Created: time.Now(),
	}
	err = graph.Register(v1ImageDescriptor{image}, archive)
	if err != nil {
		t.Fatal(err)
	}
	images := graph.Map()
	if l := len(images); l != 1 {
		t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
	}
	if resultImg, err := graph.Get(image.ID); err != nil {
		t.Fatal(err)
	} else {
		if resultImg.ID != image.ID {
			t.Fatalf("Wrong image ID. Should be '%s', not '%s'", image.ID, resultImg.ID)
		}
		if resultImg.Comment != image.Comment {
			t.Fatalf("Wrong image comment. Should be '%s', not '%s'", image.Comment, resultImg.Comment)
		}
	}
}
func (s *DockerSuite) TestRenameStoppedContainer(c *check.C) {
	runCmd := exec.Command(dockerBinary, "run", "--name", "first_name", "-d", "busybox", "sh")
	out, _, err := runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("%s: %v", out, err)
	}

	cleanedContainerID := strings.TrimSpace(out)

	runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID)
	out, _, err = runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("%s: %v", out, err)
	}

	name, err := inspectField(cleanedContainerID, "Name")
	if err != nil {
		c.Fatal(err)
	}

	newName := "new_name" + stringid.GenerateRandomID()
	runCmd = exec.Command(dockerBinary, "rename", "first_name", newName)
	out, _, err = runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("%s: %v", out, err)
	}

	name, err = inspectField(cleanedContainerID, "Name")
	if err != nil {
		c.Fatal(err)
	}
	if name != "/"+newName {
		c.Fatal("Failed to rename container ", name)
	}
}
// Mktemp creates a temporary sub-directory inside the graph's filesystem.
func (graph *Graph) Mktemp(id string) (string, error) {
	dir := path.Join(graph.Root, "_tmp", stringid.GenerateRandomID())
	if err := os.MkdirAll(dir, 0700); err != nil {
		return "", err
	}
	return dir, nil
}
// NewDaemon returns a Daemon instance to be used for testing.
// This will create a directory such as d123456789 in the folder specified by $DEST.
// The daemon will not automatically start.
func NewDaemon(c *check.C) *Daemon {
	dest := os.Getenv("DEST")
	c.Assert(dest, check.Not(check.Equals), "", check.Commentf("Please set the DEST environment variable"))

	err := os.MkdirAll(daemonSockRoot, 0700)
	c.Assert(err, checker.IsNil, check.Commentf("could not create daemon socket root"))

	id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID()))
	dir := filepath.Join(dest, id)
	daemonFolder, err := filepath.Abs(dir)
	c.Assert(err, check.IsNil, check.Commentf("Could not make %q an absolute path", dir))
	daemonRoot := filepath.Join(daemonFolder, "root")

	c.Assert(os.MkdirAll(daemonRoot, 0755), check.IsNil, check.Commentf("Could not create daemon root %q", dir))

	userlandProxy := true
	if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
		// Only honor the value if it parses as a bool.
		if val, err := strconv.ParseBool(env); err == nil {
			userlandProxy = val
		}
	}

	return &Daemon{
		id:            id,
		c:             c,
		folder:        daemonFolder,
		root:          daemonRoot,
		storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
		userlandProxy: userlandProxy,
		execRoot:      filepath.Join(os.TempDir(), "docker-execroot", id),
	}
}
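// A minimal usage sketch for NewDaemon, following the usual integration-cli
// pattern; the DockerDaemonSuite receiver and the Start/Stop methods are
// assumptions based on that suite and are not defined in this section.
func (s *DockerDaemonSuite) TestDaemonExample(c *check.C) {
	d := NewDaemon(c)
	c.Assert(d.Start(), check.IsNil)
	defer d.Stop()
	// ... exercise the daemon through its socket here ...
}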
func TestNetworkContext(t *testing.T) {
	networkID := stringid.GenerateRandomID()

	var ctx networkContext
	cases := []struct {
		networkCtx networkContext
		expValue   string
		expHeader  string
		call       func() string
	}{
		{networkContext{
			n:     types.NetworkResource{ID: networkID},
			trunc: false,
		}, networkID, networkIDHeader, ctx.ID},
		{networkContext{
			n:     types.NetworkResource{ID: networkID},
			trunc: true,
		}, stringid.TruncateID(networkID), networkIDHeader, ctx.ID},
		{networkContext{
			n: types.NetworkResource{Name: "network_name"},
		}, "network_name", nameHeader, ctx.Name},
		{networkContext{
			n: types.NetworkResource{Driver: "driver_name"},
		}, "driver_name", driverHeader, ctx.Driver},
		{networkContext{
			n: types.NetworkResource{EnableIPv6: true},
		}, "true", ipv6Header, ctx.IPv6},
		{networkContext{
			n: types.NetworkResource{EnableIPv6: false},
		}, "false", ipv6Header, ctx.IPv6},
		{networkContext{
			n: types.NetworkResource{Internal: true},
		}, "true", internalHeader, ctx.Internal},
		{networkContext{
			n: types.NetworkResource{Internal: false},
		}, "false", internalHeader, ctx.Internal},
		{networkContext{
			n: types.NetworkResource{},
		}, "", labelsHeader, ctx.Labels},
		{networkContext{
			n: types.NetworkResource{Labels: map[string]string{"label1": "value1", "label2": "value2"}},
		}, "label1=value1,label2=value2", labelsHeader, ctx.Labels},
	}

	for _, c := range cases {
		ctx = c.networkCtx
		v := c.call()
		if strings.Contains(v, ",") {
			compareMultipleValues(t, v, c.expValue)
		} else if v != c.expValue {
			t.Fatalf("Expected %s, was %s\n", c.expValue, v)
		}

		h := ctx.fullHeader()
		if h != c.expHeader {
			t.Fatalf("Expected %s, was %s\n", c.expHeader, h)
		}
	}
}
// New creates a new instance of network controller.
func New(cfgOptions ...config.Option) (NetworkController, error) {
	c := &controller{
		id:              stringid.GenerateRandomID(),
		cfg:             config.ParseConfigOptions(cfgOptions...),
		sandboxes:       sandboxTable{},
		svcRecords:      make(map[string]svcInfo),
		serviceBindings: make(map[string]*service),
	}

	if err := c.agentInit(c.cfg.Daemon.Bind); err != nil {
		return nil, err
	}

	if err := c.agentJoin(c.cfg.Daemon.Neighbors); err != nil {
		return nil, err
	}

	if err := c.initStores(); err != nil {
		return nil, err
	}

	drvRegistry, err := drvregistry.New(c.getStore(datastore.LocalScope), c.getStore(datastore.GlobalScope), c.RegisterDriver, nil)
	if err != nil {
		return nil, err
	}

	for _, i := range getInitializers() {
		var dcfg map[string]interface{}

		// External plugins don't need config passed through daemon. They can
		// bootstrap themselves
		if i.ntype != "remote" {
			dcfg = c.makeDriverConfig(i.ntype)
		}

		if err := drvRegistry.AddDriver(i.ntype, i.fn, dcfg); err != nil {
			return nil, err
		}
	}
	c.drvRegistry = drvRegistry

	if c.cfg != nil && c.cfg.Cluster.Watcher != nil {
		if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
			// Failing to initialize discovery is a bad situation to be in.
			// But it cannot fail creating the Controller
			log.Errorf("Failed to Initialize Discovery : %v", err)
		}
	}

	c.sandboxCleanup()
	c.cleanupLocalEndpoints()
	c.networkCleanup()

	if err := c.startExternalKeyListener(); err != nil {
		return nil, err
	}

	return c, nil
}
// NewNetwork creates a new network of the specified network type. The options
// are network specific and modeled in a generic way.
func (c *controller) NewNetwork(networkType, name string, options ...NetworkOption) (Network, error) {
	if !config.IsValidName(name) {
		return nil, ErrInvalidName(name)
	}

	// Construct the network object
	network := &network{
		name:        name,
		networkType: networkType,
		ipamType:    ipamapi.DefaultIPAM,
		id:          stringid.GenerateRandomID(),
		ctrlr:       c,
		persist:     true,
		drvOnce:     &sync.Once{},
	}

	network.processOptions(options...)

	// Make sure we have a driver available for this network type
	// before we allocate anything.
	if _, err := network.driver(); err != nil {
		return nil, err
	}

	cnfs, err := network.ipamAllocate()
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			for _, cn := range cnfs {
				cn()
			}
		}
	}()

	// addNetwork can be called lazily for a local-scope network when an
	// endpoint is created after a restart and the network was created in a
	// previous life. Wrap the driver notification of network creation in a
	// sync.Once so the driver is invoked only once, even if both the network
	// and endpoint creation happen in the same lifetime.
	network.drvOnce.Do(func() {
		err = c.addNetwork(network)
	})
	if err != nil {
		return nil, err
	}

	if err = c.updateToStore(network); err != nil {
		log.Warnf("couldn't create network %s: %v", network.name, err)
		if e := network.Delete(); e != nil {
			log.Warnf("couldn't cleanup network %s on network create failure (%v): %v", network.name, err, e)
		}
		return nil, err
	}

	return network, nil
}
// Generate a globally (across the cluster) unique ID.
func (c *Cluster) generateUniqueID() string {
	for {
		id := stringid.GenerateRandomID()
		if c.Container(id) == nil {
			return id
		}
	}
}
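// For reference, a small sketch of the stringid helpers used throughout these
// snippets: GenerateRandomID returns a 64-character hex ID (which is why
// runScramble above skips directory names whose length is not 64), and
// TruncateID shortens an ID to the familiar 12-character prefix shown by
// docker ps.
func exampleIDs() {
	full := stringid.GenerateRandomID() // 64 hex characters
	short := stringid.TruncateID(full)  // 12-character prefix
	fmt.Println(full, short)
}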
func newTestControllerWithMount(m api.Mount) (*controller, error) {
	return newController(&daemon.Daemon{}, &api.Task{
		ID:        stringid.GenerateRandomID(),
		ServiceID: stringid.GenerateRandomID(),
		Spec: api.TaskSpec{
			Runtime: &api.TaskSpec_Container{
				Container: &api.ContainerSpec{
					Image: "image_name",
					Labels: map[string]string{
						"com.docker.swarm.task.id": "id",
					},
					Mounts: []api.Mount{m},
				},
			},
		},
	}, nil)
}
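// A sketch of how newTestControllerWithMount might be used to exercise mount
// validation; the mount fields and the expectation here are assumptions for
// illustration, not the actual test suite.
func TestControllerValidateMountBind(t *testing.T) {
	if _, err := newTestControllerWithMount(api.Mount{
		Type:   api.MountTypeBind,
		Source: "rel/path", // bind mounts are expected to require absolute sources
		Target: "/some/target",
	}); err == nil {
		t.Fatalf("expected an error for a relative bind mount source")
	}
}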
func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session {
	return &Session{
		authConfig:    authConfig,
		client:        client,
		indexEndpoint: endpoint,
		id:            stringid.GenerateRandomID(),
	}
}
func (c *controller) agentInit(listenAddr, bindAddrOrInterface, advertiseAddr string) error {
	if !c.isAgent() {
		return nil
	}

	bindAddr, err := resolveAddr(bindAddrOrInterface)
	if err != nil {
		return err
	}

	keys, tags := c.getKeys(subsysGossip)
	hostname, _ := os.Hostname()
	nodeName := hostname + "-" + stringid.TruncateID(stringid.GenerateRandomID())
	logrus.Info("Gossip cluster hostname ", nodeName)

	nDB, err := networkdb.New(&networkdb.Config{
		BindAddr:      listenAddr,
		AdvertiseAddr: advertiseAddr,
		NodeName:      nodeName,
		Keys:          keys,
	})
	if err != nil {
		return err
	}

	ch, cancel := nDB.Watch("endpoint_table", "", "")

	c.Lock()
	c.agent = &agent{
		networkDB:         nDB,
		bindAddr:          bindAddr,
		advertiseAddr:     advertiseAddr,
		epTblCancel:       cancel,
		driverCancelFuncs: make(map[string][]func()),
	}
	c.Unlock()

	go c.handleTableEvents(ch, c.handleEpTableEvent)

	drvEnc := discoverapi.DriverEncryptionConfig{}
	keys, tags = c.getKeys(subsysIPSec)
	drvEnc.Keys = keys
	drvEnc.Tags = tags

	c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
		err := driver.DiscoverNew(discoverapi.EncryptionKeysConfig, drvEnc)
		if err != nil {
			logrus.Warnf("Failed to set datapath keys in driver %s: %v", name, err)
		}
		return false
	})

	c.WalkNetworks(joinCluster)

	return nil
}
// NewNetwork creates a new network of the specified network type. The options
// are network specific and modeled in a generic way.
func (c *controller) NewNetwork(networkType, name string, options ...NetworkOption) (Network, error) {
	if !config.IsValidName(name) {
		return nil, ErrInvalidName(name)
	}

	// Check if a network already exists with the specified network name
	c.Lock()
	for _, n := range c.networks {
		if n.name == name {
			c.Unlock()
			return nil, NetworkNameError(name)
		}
	}
	c.Unlock()

	// Construct the network object
	network := &network{
		name:        name,
		networkType: networkType,
		ipamType:    ipamapi.DefaultIPAM,
		id:          stringid.GenerateRandomID(),
		ctrlr:       c,
		endpoints:   endpointTable{},
		persist:     true,
	}

	network.processOptions(options...)

	if _, err := c.loadNetworkDriver(network); err != nil {
		return nil, err
	}

	cnfs, err := network.ipamAllocate()
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			for _, cn := range cnfs {
				cn()
			}
		}
	}()

	if err = c.addNetwork(network); err != nil {
		return nil, err
	}

	if err = c.updateToStore(network); err != nil {
		log.Warnf("couldn't create network %s: %v", network.name, err)
		if e := network.Delete(); e != nil {
			log.Warnf("couldn't cleanup network %s on network create failure (%v): %v", network.name, err, e)
		}
		return nil, err
	}

	return network, nil
}
func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoint, error) {
	var err error
	if !config.IsValidName(name) {
		return nil, ErrInvalidName(name)
	}

	if _, err = n.EndpointByName(name); err == nil {
		return nil, types.ForbiddenErrorf("service endpoint with name %s already exists", name)
	}

	ep := &endpoint{name: name, generic: make(map[string]interface{}), iface: &endpointInterface{}}
	ep.id = stringid.GenerateRandomID()
	ep.network = n
	ep.processOptions(options...)

	if err = ep.assignAddress(); err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			ep.releaseAddress()
		}
	}()

	ctrlr := n.getController()

	n.IncEndpointCnt()
	if err = ctrlr.updateToStore(n); err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			n.DecEndpointCnt()
			if err = ctrlr.updateToStore(n); err != nil {
				log.Warnf("endpoint count cleanup failed when updating network for %s : %v", name, err)
			}
		}
	}()

	if err = n.addEndpoint(ep); err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			if e := ep.Delete(); e != nil {
				log.Warnf("cleaning up endpoint failed %s : %v", name, e)
			}
		}
	}()

	if !ep.isLocalScoped() {
		if err = ctrlr.updateToStore(ep); err != nil {
			return nil, err
		}
	}

	return ep, nil
}
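// A minimal end-to-end usage sketch of the controller API defined above,
// assuming a "bridge" driver is registered; the network and endpoint names
// are illustrative.
func exampleCreateNetworkAndEndpoint() error {
	ctrl, err := New()
	if err != nil {
		return err
	}
	network, err := ctrl.NewNetwork("bridge", "network1")
	if err != nil {
		return err
	}
	_, err = network.CreateEndpoint("endpoint1")
	return err
}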
// New creates a new instance of network controller.
func New(cfgOptions ...config.Option) (NetworkController, error) {
	var cfg *config.Config
	if len(cfgOptions) > 0 {
		cfg = &config.Config{
			Daemon: config.DaemonCfg{
				DriverCfg: make(map[string]interface{}),
			},
		}
		cfg.ProcessOptions(cfgOptions...)
	}

	c := &controller{
		id:          stringid.GenerateRandomID(),
		cfg:         cfg,
		networks:    networkTable{},
		sandboxes:   sandboxTable{},
		drivers:     driverTable{},
		ipamDrivers: ipamTable{}}

	if err := initDrivers(c); err != nil {
		return nil, err
	}

	if cfg != nil {
		if err := c.initGlobalStore(); err != nil {
			// Failing to initialize the datastore is a bad situation to be in.
			// But it cannot fail creating the Controller
			log.Debugf("Failed to Initialize Datastore due to %v. Operating in non-clustered mode", err)
		}
		if err := c.initLocalStore(); err != nil {
			log.Debugf("Failed to Initialize LocalDatastore due to %v.", err)
		}
	}

	if err := initIpams(c, c.localStore, c.globalStore); err != nil {
		return nil, err
	}

	if cfg != nil {
		if err := c.restoreFromGlobalStore(); err != nil {
			log.Debugf("Failed to restore from global Datastore due to %v", err)
		}
		if err := c.initDiscovery(cfg.Cluster.Watcher); err != nil {
			// Failing to initialize discovery is a bad situation to be in.
			// But it cannot fail creating the Controller
			log.Debugf("Failed to Initialize Discovery : %v", err)
		}
		if err := c.restoreFromLocalStore(); err != nil {
			log.Debugf("Failed to restore from local Datastore due to %v", err)
		}
	}

	if err := c.startExternalKeyListener(); err != nil {
		return nil, err
	}

	return c, nil
}
// New creates a new instance of network controller.
func New(cfgOptions ...config.Option) (NetworkController, error) {
	// The network controller object holds: the controller ID, the config,
	// the sandboxes (namespaces) it owns, the network drivers, the IPAM
	// drivers, and the datastores.
	c := &controller{
		id:          stringid.GenerateRandomID(),
		cfg:         config.ParseConfigOptions(cfgOptions...),
		sandboxes:   sandboxTable{},
		drivers:     driverTable{},
		ipamDrivers: ipamTable{},
		svcDb:       make(map[string]svcInfo),
	}

	// Initialize the datastores.
	if err := c.initStores(); err != nil {
		return nil, err
	}

	// Configure discovery for cluster mode; this step should only be needed
	// when running swarm with the overlay driver.
	if c.cfg != nil && c.cfg.Cluster.Watcher != nil {
		if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
			// Failing to initialize discovery is a bad situation to be in.
			// But it cannot fail creating the Controller
			log.Errorf("Failed to Initialize Discovery : %v", err)
		}
	}

	// Does this step create the corresponding networks? It shouldn't; it only
	// initializes some configuration. The qualifying drivers are registered
	// here: registration fills in the concrete driver in the controller's
	// drivers field, a driverTable holding each driver's name, handle, and
	// capabilities.
	if err := initDrivers(c); err != nil {
		return nil, err
	}

	// Initialize the IPAM drivers. Likewise, this registers the IPAM drivers
	// in the controller's ipamDrivers field; ipamTable maps a name to an
	// ipamData struct holding the handle of a concrete IPAM driver, its
	// capabilities, and related information.
	if err := initIpams(c, c.getStore(datastore.LocalScope), c.getStore(datastore.GlobalScope)); err != nil {
		return nil, err
	}

	// Clean up stale state.
	c.sandboxCleanup()
	c.cleanupLocalEndpoints()
	c.networkCleanup()

	// This step presumably accepts connections from remote plugins.
	if err := c.startExternalKeyListener(); err != nil {
		return nil, err
	}

	return c, nil
}
// CreateVolume creates a volume in the cluster
func (c *Cluster) CreateVolume(request *dockerclient.VolumeCreateRequest) (*cluster.Volume, error) {
	var (
		wg     sync.WaitGroup
		mu     sync.Mutex
		volume *cluster.Volume
		err    error
		parts  = strings.SplitN(request.Name, "/", 2)
		node   = ""
	)

	if request.Name == "" {
		request.Name = stringid.GenerateRandomID()
	} else if len(parts) == 2 {
		node = parts[0]
		request.Name = parts[1]
	}

	if node == "" {
		c.RLock()
		for _, e := range c.engines {
			wg.Add(1)
			go func(engine *cluster.Engine) {
				defer wg.Done()
				v, er := engine.CreateVolume(request)
				// Guard the shared result variables; they are written from
				// one goroutine per engine.
				mu.Lock()
				defer mu.Unlock()
				if v != nil {
					volume = v
					err = nil
				}
				if er != nil && volume == nil {
					err = er
				}
			}(e)
		}
		c.RUnlock()
		wg.Wait()
	} else {
		config := cluster.BuildContainerConfig(dockerclient.ContainerConfig{Env: []string{"constraint:node==" + parts[0]}})
		// Use a distinct variable for the scheduler error so the outer err
		// is not shadowed and silently lost on return.
		nodes, selectErr := c.scheduler.SelectNodesForContainer(c.listNodes(), config)
		if selectErr != nil {
			return nil, selectErr
		}
		if nodes != nil {
			v, er := c.engines[nodes[0].ID].CreateVolume(request)
			if v != nil {
				volume = v
				err = nil
			}
			if er != nil && volume == nil {
				err = er
			}
		}
	}

	return volume, err
}