// ResourceOffers is the mesos scheduler callback invoked when the master
// sends new resource offers. For each offer it resolves the Docker engine
// port (per-slave dockerPortAttribute if present, otherwise the cluster
// default), lazily connects an engine for slaves seen for the first time,
// and records the offer. Finally it retriggers pending-task processing so
// queued tasks can be placed against the new offers.
func (c *Cluster) ResourceOffers(_ mesosscheduler.SchedulerDriver, offers []*mesosproto.Offer) {
	log.WithFields(log.Fields{"name": "mesos", "offers": len(offers)}).Debug("Offers received")

	for _, offer := range offers {
		slaveID := offer.SlaveId.GetValue()
		// Start from the cluster-wide default port; a slave attribute below
		// may override it.
		dockerPort := c.dockerEnginePort
		for _, attribute := range offer.GetAttributes() {
			if attribute.GetName() == dockerPortAttribute {
				// The attribute can be declared either as a scalar (numeric)
				// or as text; accept both forms.
				switch attribute.GetType() {
				case mesosproto.Value_SCALAR:
					dockerPort = fmt.Sprintf("%d", int(attribute.GetScalar().GetValue()))
				case mesosproto.Value_TEXT:
					dockerPort = attribute.GetText().GetValue()
				}
			}
		}
		s, ok := c.slaves[slaveID]
		if !ok {
			// First offer from this slave: connect to its Docker engine and
			// register it. On connection failure only the error is logged;
			// note the offer is still added below regardless.
			engine := cluster.NewEngine(*offer.Hostname+":"+dockerPort, 0)
			if err := engine.Connect(c.TLSConfig); err != nil {
				log.Error(err)
			} else {
				s = newSlave(slaveID, engine)
				c.slaves[slaveID] = s
			}
		}
		c.addOffer(offer)
	}
	// Kick the queue asynchronously so queued tasks can use the new offers.
	go c.pendingTasks.Process()
}
// ResourceOffers is the mesos scheduler callback invoked when the master
// sends new resource offers. For each offer it resolves the Docker engine
// port (per-agent dockerPortAttribute if present, otherwise the cluster
// default), lazily connects and registers an engine for agents seen for the
// first time, and records the offer. Finally it retriggers pending-task
// processing so queued tasks can be placed against the new offers.
func (c *Cluster) ResourceOffers(_ mesosscheduler.SchedulerDriver, offers []*mesosproto.Offer) {
	log.WithFields(log.Fields{"name": "mesos", "offers": len(offers)}).Debug("Offers received")

	for _, offer := range offers {
		agentID := offer.SlaveId.GetValue()
		// Start from the cluster-wide default port; an agent attribute below
		// may override it.
		dockerPort := c.dockerEnginePort
		for _, attribute := range offer.GetAttributes() {
			if attribute.GetName() == dockerPortAttribute {
				// The attribute can be declared either as a scalar (numeric)
				// or as text; accept both forms.
				switch attribute.GetType() {
				case mesosproto.Value_SCALAR:
					dockerPort = fmt.Sprintf("%d", int(attribute.GetScalar().GetValue()))
				case mesosproto.Value_TEXT:
					dockerPort = attribute.GetText().GetValue()
				}
			}
		}
		s, ok := c.agents[agentID]
		if !ok {
			// First offer from this agent: connect to its Docker engine and
			// register it. On connection failure only the error is logged;
			// note the offer is still added below regardless.
			engine := cluster.NewEngine(*offer.Hostname+":"+dockerPort, 0, c.engineOpts)
			if err := engine.Connect(c.TLSConfig); err != nil {
				log.Error(err)
			} else {
				// Set engine state to healthy and start refresh loop
				engine.ValidationComplete()
				s = newAgent(agentID, engine)
				c.agents[agentID] = s
				// Subscribe the cluster to this engine's events; a handler
				// registration failure is logged but does not unregister the
				// agent.
				if err := s.engine.RegisterEventHandler(c); err != nil {
					log.Error(err)
				}
			}
		}
		c.addOffer(offer)
	}
	// Kick the queue asynchronously so queued tasks can use the new offers.
	go c.pendingTasks.Process()
}
// FIXMEENGINEAPI : Need to write more unit tests for creating/inspecting containers with engine-api func createEngine(t *testing.T, ID string, containers ...*cluster.Container) *cluster.Engine { engine := cluster.NewEngine(ID, 0, engOpts) engine.Name = ID engine.ID = ID for _, container := range containers { container.Engine = engine engine.AddContainer(container) } return engine }
func createSlave(t *testing.T, ID string, containers ...*cluster.Container) *slave { engine := cluster.NewEngine(ID, 0) engine.Name = ID engine.ID = ID for _, container := range containers { container.Engine = engine engine.AddContainer(container) } return newSlave("slave-"+ID, engine) }
// TestImportImage exercises Cluster.Import against a mocked engine-api
// client: the status callback must observe a nil error when ImageImport
// succeeds and a non-nil error when it fails.
func TestImportImage(t *testing.T) {
	// create cluster with an empty engine map
	c := &Cluster{
		engines: make(map[string]*cluster.Engine),
	}

	// create engine
	id := "test-engine"
	engine := cluster.NewEngine(id, 0, engOpts)
	engine.Name = id
	engine.ID = id

	// create mock clients and stub everything the engine calls while
	// connecting/refreshing (info, version, network/volume/image/container
	// listings, event stream).
	client := mockclient.NewMockClient()
	apiClient := engineapimock.NewMockClient()
	apiClient.On("Info", mock.Anything).Return(mockInfo, nil)
	apiClient.On("ServerVersion", mock.Anything).Return(mockVersion, nil)
	apiClient.On("NetworkList", mock.Anything,
		mock.AnythingOfType("NetworkListOptions"),
	).Return([]types.NetworkResource{}, nil)
	apiClient.On("VolumeList", mock.Anything, mock.Anything).Return(types.VolumesListResponse{}, nil)
	apiClient.On("Events", mock.Anything, mock.AnythingOfType("EventsOptions")).Return(&nopCloser{bytes.NewBufferString("")}, nil)
	apiClient.On("ImageList", mock.Anything, mock.AnythingOfType("ImageListOptions")).Return([]types.Image{}, nil)
	apiClient.On("ContainerList", mock.Anything, types.ContainerListOptions{All: true, Size: false}).Return([]types.Container{}, nil).Once()

	// connect client
	engine.ConnectWithClient(client, apiClient)

	// add engine to cluster
	c.engines[engine.ID] = engine

	// import success: ImageImport returns a reader and no error, so the
	// callback must see err == nil. NOTE: the two ImageImport expectations
	// in this test use identical matchers; .Once() makes testify consume
	// them in registration order, so their order matters.
	readCloser := nopCloser{bytes.NewBufferString("ok")}
	apiClient.On("ImageImport", mock.Anything, mock.AnythingOfType("types.ImageImportSource"), mock.Anything, mock.AnythingOfType("types.ImageImportOptions")).Return(readCloser, nil).Once()
	callback := func(what, status string, err error) {
		// import success
		assert.Nil(t, err)
	}
	c.Import("-", "testImageOK", "latest", bytes.NewReader(nil), callback)

	// import error: ImageImport fails, so the callback must see a non-nil
	// error.
	readCloser = nopCloser{bytes.NewBufferString("error")}
	err := fmt.Errorf("Import error")
	apiClient.On("ImageImport", mock.Anything, mock.AnythingOfType("types.ImageImportSource"), mock.Anything, mock.AnythingOfType("types.ImageImportOptions")).Return(readCloser, err).Once()
	callback = func(what, status string, err error) {
		// import error
		assert.NotNil(t, err)
	}
	c.Import("-", "testImageError", "latest", bytes.NewReader(nil), callback)
}
// TestLoadImage exercises Cluster.Load against a mocked engine-api client:
// the status callback must observe a nil error when ImageLoad succeeds and a
// non-nil error when it fails.
func TestLoadImage(t *testing.T) {
	// create cluster with an empty engine map
	c := &Cluster{
		engines: make(map[string]*cluster.Engine),
	}

	// create engine
	id := "test-engine"
	engine := cluster.NewEngine(id, 0, engOpts)
	engine.Name = id
	engine.ID = id

	// create mock clients and stub everything the engine calls while
	// connecting/refreshing (info, version, network/volume/image/container
	// listings, event stream).
	client := mockclient.NewMockClient()
	apiClient := engineapimock.NewMockClient()
	apiClient.On("Info", mock.Anything).Return(mockInfo, nil)
	apiClient.On("ServerVersion", mock.Anything).Return(mockVersion, nil)
	apiClient.On("NetworkList", mock.Anything,
		mock.AnythingOfType("NetworkListOptions"),
	).Return([]types.NetworkResource{}, nil)
	apiClient.On("VolumeList", mock.Anything, mock.Anything).Return(types.VolumesListResponse{}, nil)
	apiClient.On("Events", mock.Anything, mock.AnythingOfType("EventsOptions")).Return(&nopCloser{bytes.NewBufferString("")}, nil)
	apiClient.On("ImageList", mock.Anything, mock.AnythingOfType("ImageListOptions")).Return([]types.Image{}, nil)
	apiClient.On("ContainerList", mock.Anything, types.ContainerListOptions{All: true, Size: false}).Return([]types.Container{}, nil).Once()

	// connect client
	engine.ConnectWithClient(client, apiClient)

	// add engine to cluster
	c.engines[engine.ID] = engine

	// load success: ImageLoad returns a response body and no error. NOTE:
	// the two ImageLoad expectations in this test use identical matchers;
	// .Once() makes testify consume them in registration order.
	readCloser := nopCloser{bytes.NewBufferString("")}
	apiClient.On("ImageLoad", mock.Anything, mock.AnythingOfType("*io.PipeReader"), false).Return(types.ImageLoadResponse{Body: readCloser}, nil).Once()
	callback := func(what, status string, err error) {
		//if load OK, err will be nil
		assert.Nil(t, err)
	}
	c.Load(bytes.NewReader(nil), callback)

	// load error: ImageLoad fails, so the callback must see a non-nil error.
	err := fmt.Errorf("Load error")
	apiClient.On("ImageLoad", mock.Anything, mock.AnythingOfType("*io.PipeReader"), false).Return(types.ImageLoadResponse{}, err).Once()
	callback = func(what, status string, err error) {
		// load error, err is not nil
		assert.NotNil(t, err)
	}
	c.Load(bytes.NewReader(nil), callback)
}
// TestImportImage exercises Cluster.Import against a mocked dockerclient:
// the status callback must observe a nil error when ImportImage succeeds and
// a non-nil error when it fails.
func TestImportImage(t *testing.T) {
	// create cluster with an empty engine map
	c := &Cluster{
		engines: make(map[string]*cluster.Engine),
	}

	// create engine
	id := "test-engine"
	engine := cluster.NewEngine(id, 0)
	engine.Name = id
	engine.ID = id

	// create mock client and stub everything the engine calls while
	// connecting/refreshing (info, version, event monitor, container/image/
	// volume/network listings).
	client := mockclient.NewMockClient()
	client.On("Info").Return(mockInfo, nil)
	client.On("Version").Return(mockVersion, nil)
	client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
	client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
	client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
	client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
	client.On("ListNetworks", mock.Anything).Return([]*dockerclient.NetworkResource{}, nil)

	// connect client
	engine.ConnectWithClient(client)

	// add engine to cluster
	c.engines[engine.ID] = engine

	// import success: ImportImage returns a reader and no error, so the
	// callback must see err == nil. NOTE: the two ImportImage expectations
	// in this test use identical matchers; .Once() makes testify consume
	// them in registration order, so their order matters.
	readCloser := nopCloser{bytes.NewBufferString("ok")}
	client.On("ImportImage", mock.Anything, mock.Anything, mock.Anything, mock.AnythingOfType("*io.PipeReader")).Return(readCloser, nil).Once()
	callback := func(what, status string, err error) {
		// import success
		assert.Nil(t, err)
	}
	c.Import("-", "testImageOK", "latest", bytes.NewReader(nil), callback)

	// import error: ImportImage fails, so the callback must see a non-nil
	// error.
	readCloser = nopCloser{bytes.NewBufferString("error")}
	err := fmt.Errorf("Import error")
	client.On("ImportImage", mock.Anything, mock.Anything, mock.Anything, mock.AnythingOfType("*io.PipeReader")).Return(readCloser, err).Once()
	callback = func(what, status string, err error) {
		// import error
		assert.NotNil(t, err)
	}
	c.Import("-", "testImageError", "latest", bytes.NewReader(nil), callback)
}
func (c *Cluster) addEngine(addr string) bool { // Check the engine is already registered by address. if c.hasEngineByAddr(addr) { return false } // Attempt a connection to the engine. Since this is slow, don't get a hold // of the lock yet. engine := cluster.NewEngine(addr, c.overcommitRatio) var er error for retry := 1; retry <= 3; retry++ { <-time.After(15 * time.Second) if er = engine.Connect(c.TLSConfig); er == nil { break } log.Debugf("%v", er) } if er != nil { log.Error(er) return false } // The following is critical and fast. Grab a lock. c.Lock() defer c.Unlock() // Make sure the engine ID is unique. if old, exists := c.engines[engine.ID]; exists { if old.Addr != engine.Addr { log.Errorf("ID duplicated. %s shared by %s and %s", engine.ID, old.Addr, engine.Addr) } else { log.Debugf("node %q (name: %q) with address %q is already registered", engine.ID, engine.Name, engine.Addr) } engine.Disconnect() return false } // Finally register the engine. c.engines[engine.ID] = engine if err := engine.RegisterEventHandler(c); err != nil { log.Error(err) } log.Infof("Registered Engine %s at %s", engine.Name, addr) return true }
func createAgent(t *testing.T, ID string, containers ...*cluster.Container) *agent { engOpts := &cluster.EngineOpts{ RefreshMinInterval: time.Duration(30) * time.Second, RefreshMaxInterval: time.Duration(60) * time.Second, RefreshRetry: 3, } engine := cluster.NewEngine(ID, 0, engOpts) engine.Name = ID engine.ID = ID for _, container := range containers { container.Engine = engine engine.AddContainer(container) } return newAgent("agent-"+ID, engine) }
// TestLoadImage exercises Cluster.Load against a mocked dockerclient: the
// status callback must observe a nil error when LoadImage succeeds and a
// non-nil error when it fails.
func TestLoadImage(t *testing.T) {
	// create cluster with an empty engine map
	c := &Cluster{
		engines: make(map[string]*cluster.Engine),
	}

	// create engine
	id := "test-engine"
	engine := cluster.NewEngine(id, 0)
	engine.Name = id
	engine.ID = id

	// create mock client and stub everything the engine calls while
	// connecting/refreshing (info, version, event monitor, container/image/
	// volume/network listings).
	client := mockclient.NewMockClient()
	client.On("Info").Return(mockInfo, nil)
	client.On("Version").Return(mockVersion, nil)
	client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
	client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
	client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
	client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
	client.On("ListNetworks", mock.Anything).Return([]*dockerclient.NetworkResource{}, nil)

	// connect client
	engine.ConnectWithClient(client)

	// add engine to cluster
	c.engines[engine.ID] = engine

	// load success: LoadImage succeeds, so the callback must see err == nil.
	// NOTE: the two LoadImage expectations in this test use identical
	// matchers; .Once() makes testify consume them in registration order.
	client.On("LoadImage", mock.AnythingOfType("*io.PipeReader")).Return(nil).Once()
	callback := func(what, status string, err error) {
		//if load OK, err will be nil
		assert.Nil(t, err)
	}
	c.Load(bytes.NewReader(nil), callback)

	// load error: LoadImage fails, so the callback must see a non-nil error.
	err := fmt.Errorf("Load error")
	client.On("LoadImage", mock.AnythingOfType("*io.PipeReader")).Return(err).Once()
	callback = func(what, status string, err error) {
		// load error, err is not nil
		assert.NotNil(t, err)
	}
	c.Load(bytes.NewReader(nil), callback)
}
// TestTagImage exercises Cluster.TagImage against a mocked engine-api
// client: tagging an image known to the engine ("busybox") must succeed,
// while tagging an unknown image must return an error.
func TestTagImage(t *testing.T) {
	// create cluster with an empty engine map
	c := &Cluster{
		engines: make(map[string]*cluster.Engine),
	}

	// the engine's image list will contain exactly one image, busybox:latest
	images := []types.Image{}
	image1 := types.Image{
		ID:       "1234567890",
		RepoTags: []string{"busybox:latest"},
	}
	images = append(images, image1)

	// create engine
	id := "test-engine"
	engine := cluster.NewEngine(id, 0, engOpts)
	engine.Name = id
	engine.ID = id

	// create mock clients and stub everything the engine calls while
	// connecting/refreshing; ImageList returns the one-image list above so
	// the busybox lookup can succeed.
	client := mockclient.NewMockClient()
	apiClient := engineapimock.NewMockClient()
	apiClient.On("Info", mock.Anything).Return(mockInfo, nil)
	apiClient.On("ServerVersion", mock.Anything).Return(mockVersion, nil)
	apiClient.On("NetworkList", mock.Anything,
		mock.AnythingOfType("NetworkListOptions"),
	).Return([]types.NetworkResource{}, nil)
	apiClient.On("VolumeList", mock.Anything, mock.Anything).Return(types.VolumesListResponse{}, nil)
	apiClient.On("Events", mock.Anything, mock.AnythingOfType("EventsOptions")).Return(&nopCloser{bytes.NewBufferString("")}, nil)
	apiClient.On("ImageList", mock.Anything, mock.AnythingOfType("ImageListOptions")).Return(images, nil)
	apiClient.On("ContainerList", mock.Anything, types.ContainerListOptions{All: true, Size: false}).Return([]types.Container{}, nil).Once()

	// connect client
	engine.ConnectWithClient(client, apiClient)

	// add engine to cluster
	c.engines[engine.ID] = engine

	// tag image: the single ImageTag expectation covers the success case;
	// the unknown-image case must fail before reaching the client.
	apiClient.On("ImageTag", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
	assert.Nil(t, c.TagImage("busybox", "test_busybox:latest", false))
	assert.NotNil(t, c.TagImage("busybox_not_exists", "test_busybox:latest", false))
}
// ResourceOffers is the mesos scheduler callback invoked when the master
// sends new resource offers. For each offer it lazily connects an engine
// (on the cluster-wide Docker port) for slaves seen for the first time and
// records the offer. Finally it retriggers pending-task processing so
// queued tasks can be placed against the new offers.
func (c *Cluster) ResourceOffers(_ mesosscheduler.SchedulerDriver, offers []*mesosproto.Offer) {
	log.WithFields(log.Fields{"name": "mesos", "offers": len(offers)}).Debug("Offers received")

	for _, offer := range offers {
		slaveID := offer.SlaveId.GetValue()
		s, ok := c.slaves[slaveID]
		if !ok {
			// First offer from this slave: connect to its Docker engine and
			// register it. On connection failure only the error is logged;
			// note the offer is still added below regardless.
			engine := cluster.NewEngine(*offer.Hostname+":"+c.dockerEnginePort, 0)
			if err := engine.Connect(c.TLSConfig); err != nil {
				log.Error(err)
			} else {
				s = newSlave(slaveID, engine)
				c.slaves[slaveID] = s
			}
		}
		c.addOffer(offer)
	}
	// Kick the queue asynchronously so queued tasks can use the new offers.
	go c.pendingTasks.Process()
}
// TestTagImage exercises Cluster.TagImage against a mocked dockerclient:
// tagging an image known to the engine ("busybox") must succeed, while
// tagging an unknown image must return an error.
func TestTagImage(t *testing.T) {
	// create cluster with an empty engine map
	c := &Cluster{
		engines: make(map[string]*cluster.Engine),
	}

	// the engine's image list will contain exactly one image, busybox:latest
	images := []*dockerclient.Image{}
	image1 := &dockerclient.Image{
		Id:       "1234567890",
		RepoTags: []string{"busybox:latest"},
	}
	images = append(images, image1)

	// create engine
	id := "test-engine"
	engine := cluster.NewEngine(id, 0)
	engine.Name = id
	engine.ID = id

	// create mock client and stub everything the engine calls while
	// connecting/refreshing; ListImages returns the one-image list above so
	// the busybox lookup can succeed.
	client := mockclient.NewMockClient()
	client.On("Info").Return(mockInfo, nil)
	client.On("Version").Return(mockVersion, nil)
	client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
	client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
	client.On("ListImages", mock.Anything).Return(images, nil)
	client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
	client.On("ListNetworks", mock.Anything).Return([]*dockerclient.NetworkResource{}, nil)

	// connect client
	engine.ConnectWithClient(client)

	// add engine to cluster
	c.engines[engine.ID] = engine

	// tag image: the single TagImage expectation covers the success case;
	// the unknown-image case must fail before reaching the client.
	client.On("TagImage", mock.Anything, mock.Anything, mock.Anything, false).Return(nil).Once()
	assert.Nil(t, c.TagImage("busybox", "test_busybox", "latest", false))
	assert.NotNil(t, c.TagImage("busybox_not_exists", "test_busybox", "latest", false))
}
func (c *Cluster) addEngine(addr string) bool { // Check the engine is already registered by address. if c.hasEngineByAddr(addr) { return false } engine := cluster.NewEngine(addr, c.overcommitRatio, c.engineOpts) if err := engine.RegisterEventHandler(c); err != nil { log.Error(err) } // Add it to pending engine map, indexed by address. This will prevent // duplicates from entering c.Lock() c.pendingEngines[addr] = engine c.Unlock() // validatePendingEngine will start a thread to validate the engine. // If the engine is reachable and valid, it'll be monitored and updated in a loop. // If engine is not reachable, pending engines will be examined once in a while go c.validatePendingEngine(engine) return true }