// proxyAsync forwards the request to the given engine, streams the response
// back to the client, and invokes the optional callback with the engine's
// response before the body is closed.
func proxyAsync(engine *cluster.Engine, w http.ResponseWriter, r *http.Request, callback func(*http.Response)) error {
	// RequestURI may not be sent to client
	r.RequestURI = ""

	client, scheme := engine.HTTPClientAndScheme()

	r.URL.Scheme = scheme
	r.URL.Host = engine.Addr

	log.WithFields(log.Fields{"method": r.Method, "url": r.URL}).Debug("Proxy request")
	resp, err := client.Do(r)
	if err != nil {
		return err
	}

	copyHeader(w.Header(), resp.Header)
	w.WriteHeader(resp.StatusCode)
	io.Copy(NewWriteFlusher(w), resp.Body)

	if callback != nil {
		callback(resp)
	}

	// cleanup
	resp.Body.Close()

	return nil
}
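// Illustrative sketch, not part of the original source: a typical caller
// wires proxyAsync to a refresh callback so the engine's cached state is
// updated after the proxied call, as proxyNetworkDisconnect does below. The
// handler name and route variable are hypothetical; engine.RefreshContainers
// is assumed to exist alongside the RefreshNetworks call used below.
func proxyContainerExample(c *context, w http.ResponseWriter, r *http.Request) {
	container := c.cluster.Container(mux.Vars(r)["name"])
	if container == nil {
		httpError(w, "No such container", http.StatusNotFound)
		return
	}
	engine := container.Engine

	// Refresh the engine's container list once the response has been streamed.
	cb := func(resp *http.Response) {
		engine.RefreshContainers(false)
	}

	err := proxyAsync(engine, w, r, cb)
	engine.CheckConnectionErr(err)
	if err != nil {
		httpError(w, err.Error(), http.StatusInternalServerError)
	}
}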
func (c *Cluster) createContainer(config *cluster.ContainerConfig, name string, withSoftImageAffinity bool) (*cluster.Container, error) {
	c.scheduler.Lock()
	defer c.scheduler.Unlock()

	// Ensure the name is available
	if cID := c.getIDFromName(name); cID != "" {
		return nil, fmt.Errorf("Conflict: The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", name, cID, name)
	}

	// Associate a Swarm ID to the container we are creating.
	config.SetSwarmID(c.generateUniqueID())

	// Apply the soft image affinity only for the scheduling pass, then remove
	// it so it does not leak into the container's stored config. (Copying the
	// pointer, as before, mutated the caller's config.)
	if withSoftImageAffinity {
		config.AddAffinity("image==~" + config.Image)
	}

	n, err := c.scheduler.SelectNodeForContainer(c.listNodes(), config)

	if withSoftImageAffinity {
		config.RemoveAffinity("image==~" + config.Image)
	}

	if err != nil {
		switch err {
		case strategy.ErrNoResourcesAvailable:
			// No engine can host the container: ask the master engine to
			// provision a new EC2 machine via docker-machine, wait for it to
			// finish, then surface the error so the caller can retry.
			var masterEngine *cluster.Engine
			for _, engine := range c.engines {
				if engine.Labels["swarmmaster"] == "true" {
					masterEngine = engine
					break
				}
			}
			if masterEngine == nil {
				return nil, err
			}

			containerConfig := &cluster.ContainerConfig{
				dockerclient.ContainerConfig{
					Image: "ankushagarwal11/machine",
					Cmd: []string{"-D", "create", "--driver=amazonec2",
						"--amazonec2-instance-type", "t2.micro",
						"--amazonec2-secret-key", "INSERT_SECRET_KEY",
						"--amazonec2-access-key", "INSERT_ACCESS_KEY",
						"--amazonec2-vpc-id", "INSERT_VPC_ID",
						"randommachine1"},
					HostConfig: dockerclient.HostConfig{
						Binds: []string{"/root/.docker:/root/.docker"},
					},
				},
			}
			container, cerr := masterEngine.Create(containerConfig, "random123", false)
			if cerr != nil {
				log.Errorf("Failed to create machine-provisioning container: %s", cerr)
				return nil, err
			}
			log.Info("Created container")
			if serr := masterEngine.Client.StartContainer(container.Id, &dockerclient.HostConfig{}); serr != nil {
				log.Errorf("Failed to start machine-provisioning container: %s", serr)
				return nil, err
			}
			log.Info("Started container and now waiting")
			<-masterEngine.Client.Wait(container.Id)
			log.Info("Done waiting")
			return nil, err
		default:
			return nil, err
		}
	}

	if nn, ok := c.engines[n.ID]; ok {
		container, err := nn.Create(config, name, true)
		return container, err
	}

	return nil, nil
}
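// Illustrative sketch, not part of the original source: how a caller might
// use the withSoftImageAffinity flag. Upstream Swarm retries scheduling with
// a soft image affinity when the strict pass cannot place the container; the
// wrapper name is hypothetical, and upstream additionally gates the retry on
// an image-not-found error, which is elided here.
func (c *Cluster) createContainerWithRetry(config *cluster.ContainerConfig, name string) (*cluster.Container, error) {
	// First pass: schedule without any image affinity.
	container, err := c.createContainer(config, name, false)
	if err == nil {
		return container, nil
	}

	// Second pass: if the image already exists somewhere in the cluster,
	// retry with a soft affinity ("image==~<image>") so the scheduler
	// prefers, but does not require, nodes that already hold the image.
	if image := c.Image(config.Image); image != nil {
		return c.createContainer(config, name, true)
	}
	return nil, err
}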
// POST /networks/{networkid:.*}/disconnect
func proxyNetworkDisconnect(c *context, w http.ResponseWriter, r *http.Request) {
	var networkid = mux.Vars(r)["networkid"]
	network := c.cluster.Networks().Uniq().Get(networkid)
	if network == nil {
		httpError(w, fmt.Sprintf("No such network: %s", networkid), http.StatusNotFound)
		return
	}

	// Set the network ID in the proxied URL path.
	r.URL.Path = strings.Replace(r.URL.Path, networkid, network.ID, 1)

	// Make a copy of r.Body so it can be decoded here and still be proxied.
	buf, err := ioutil.ReadAll(r.Body)
	if err != nil {
		httpError(w, err.Error(), http.StatusInternalServerError)
		return
	}
	bodyCopy := ioutil.NopCloser(bytes.NewBuffer(buf))
	defer bodyCopy.Close()

	// Restore the r.Body stream as it will be read again by the proxy.
	r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))

	// Extract the container info from the body copy.
	var disconnect apitypes.NetworkDisconnect
	if err := json.NewDecoder(bodyCopy).Decode(&disconnect); err != nil {
		httpError(w, "Container is not specified", http.StatusNotFound)
		return
	}

	var engine *cluster.Engine
	if disconnect.Force && network.Scope == "global" {
		// A forced disconnect on a global network can be served by any engine.
		randomEngine, err := c.cluster.RANDOMENGINE()
		if err != nil {
			httpError(w, err.Error(), http.StatusInternalServerError)
			return
		}
		engine = randomEngine
	} else {
		container := c.cluster.Container(disconnect.Container)
		if container == nil {
			httpError(w, fmt.Sprintf("No such container: %s", disconnect.Container), http.StatusNotFound)
			return
		}
		engine = container.Engine
	}

	cb := func(resp *http.Response) {
		// force fresh networks on this engine
		engine.RefreshNetworks()
	}

	// The request is forwarded to the selected engine's address.
	err = proxyAsync(engine, w, r, cb)
	engine.CheckConnectionErr(err)
	if err != nil {
		httpError(w, err.Error(), http.StatusNotFound)
	}
}
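// Illustrative sketch, not part of the original source: the read-and-restore
// dance above can be factored into a helper. rewindBody is a hypothetical
// name; it returns the raw bytes for local decoding and leaves r.Body
// readable again for the proxy.
func rewindBody(r *http.Request) ([]byte, error) {
	buf, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, err
	}
	r.Body.Close()
	r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
	return buf, nil
}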
// validatePendingEngine connects to the engine and, if the connection
// succeeds and the engine ID is unique, moves it from the pendingEngines
// table to the engines table.
func (c *Cluster) validatePendingEngine(engine *cluster.Engine) bool {
	// Attempt a connection to the engine. Since this is slow, don't get a hold
	// of the lock yet.
	if err := engine.Connect(c.TLSConfig); err != nil {
		log.WithFields(log.Fields{"Addr": engine.Addr}).Debugf("Failed to validate pending node: %s", err)
		return false
	}

	// The following is critical and fast. Grab a lock.
	c.Lock()
	defer c.Unlock()

	// Only validate engines from the pendingEngines list.
	if _, exists := c.pendingEngines[engine.Addr]; !exists {
		return false
	}

	// Make sure the engine ID is unique.
	if old, exists := c.engines[engine.ID]; exists {
		if old.Addr != engine.Addr {
			log.Errorf("ID duplicated. %s shared by %s and %s", engine.ID, old.Addr, engine.Addr)
			// Keep this engine in the pendingEngines table and surface the
			// error. If the duplicate ID comes from a cloned VM, the user sees
			// this message and can fix it. If the engine was rebooted and got
			// a new IP from DHCP, the previous address will eventually be
			// removed from discovery. In both cases, retrying may fix the
			// problem.
			engine.HandleIDConflict(old.Addr)
		} else {
			log.Debugf("node %q (name: %q) with address %q is already registered", engine.ID, engine.Name, engine.Addr)
			engine.Disconnect()
			// Remove it from the pendingEngines table.
			delete(c.pendingEngines, engine.Addr)
		}
		return false
	}

	// Engine validated: move it from the pendingEngines table to the engines
	// table, set its state to healthy, and start the refresh loop.
	delete(c.pendingEngines, engine.Addr)
	engine.ValidationComplete()
	c.engines[engine.ID] = engine

	log.Infof("Registered Engine %s at %s", engine.Name, engine.Addr)
	return true
}
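// Illustrative sketch, not part of the original source: validatePendingEngine
// is written to be retried, since both failure modes above (ID conflict,
// transient connection error) can resolve on their own. A hypothetical
// periodic revalidation pass might look like this:
func (c *Cluster) revalidatePendingEngines() {
	// Snapshot the pending engines under the lock; Connect is slow and must
	// not be called while holding it.
	c.Lock()
	pending := make([]*cluster.Engine, 0, len(c.pendingEngines))
	for _, engine := range c.pendingEngines {
		pending = append(pending, engine)
	}
	c.Unlock()

	for _, engine := range pending {
		go c.validatePendingEngine(engine)
	}
}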
// NewNode creates a node from an engine.
func NewNode(e *cluster.Engine) *Node {
	return &Node{
		ID:          e.ID,
		IP:          e.IP,
		Addr:        e.Addr,
		Name:        e.Name,
		Labels:      e.Labels,
		Containers:  e.Containers(),
		Images:      e.Images(),
		UsedMemory:  e.UsedMemory(),
		UsedCpus:    e.UsedCpus(),
		TotalMemory: e.TotalMemory(),
		TotalCpus:   e.TotalCpus(),
		IsHealthy:   e.IsHealthy(),
	}
}
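// Illustrative sketch, not part of the original source: how a scheduler-facing
// node list can be derived from the cluster's engines, in the spirit of the
// c.listNodes() call consumed by SelectNodeForContainer above. The function
// name and the healthy-only filter are assumptions.
func nodesFromEngines(engines map[string]*cluster.Engine) []*Node {
	nodes := make([]*Node, 0, len(engines))
	for _, e := range engines {
		// Skip engines that are currently unreachable.
		if e.IsHealthy() {
			nodes = append(nodes, NewNode(e))
		}
	}
	return nodes
}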