// Commission provisions a new ship through the configured driver and starts an
// SSH tunnel to it.
func Commission(job *engine.Job) engine.Status {
	var name string

	if len(job.Args) == 1 {
		name = job.Args[0]
	} else if len(job.Args) > 1 {
		return job.Errorf("Usage: %s", job.Name)
	}

	fqdn := job.Getenv("Fqdn")
	configuration := job.Eng.Hack_GetGlobalVar("configuration").(types.KraneConfiguration)

	parameters := url.Values{}
	parameters.Set("name", name)
	parameters.Set("fqdn", fqdn)
	parameters.Set("plan", job.Getenv("Plan"))
	parameters.Set("ssh_profile", configuration.Production.SshProfile)

	id, err := configuration.Driver.Create(parameters)
	if err != nil {
		return job.Errorf("unable to commission ship %s", fqdn)
	}

	job.Stdout.Write([]byte(id))

	ship := configuration.Driver.FindShip(name)

	newjob := job.Eng.Job("ssh_tunnel", ship.Fqdn, "true")
	go newjob.Run()

	return engine.StatusOK
}
// serveFd creates an http.Server and sets it up to serve given a socket activated
// argument.
func serveFd(addr string, job *engine.Job) error {
	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))

	ls, e := systemd.ListenFD(addr)
	if e != nil {
		return e
	}

	chErrors := make(chan error, len(ls))

	// We don't want to start serving on these sockets until the
	// daemon is initialized and installed. Otherwise required handlers
	// won't be ready.
	<-activationLock

	// Since ListenFD will return one or more sockets we have
	// to create a go func to spawn off multiple serves
	for i := range ls {
		listener := ls[i]
		go func() {
			httpSrv := http.Server{Handler: r}
			chErrors <- httpSrv.Serve(listener)
		}()
	}

	for i := 0; i < len(ls); i++ {
		err := <-chErrors
		if err != nil {
			return err
		}
	}

	return nil
}
// NewServer sets up the required Server and does protocol specific checking.
func NewServer(proto, addr string, job *engine.Job) (Server, error) {
	var (
		err error
		l   net.Listener
		r   = createRouter(
			job.Eng,
			job.GetenvBool("Logging"),
			job.GetenvBool("EnableCors"),
			job.Getenv("CorsHeaders"),
			job.Getenv("Version"),
		)
	)

	switch proto {
	case "tcp":
		if !job.GetenvBool("TlsVerify") {
			logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
		}
		if l, err = NewTcpSocket(addr, tlsConfigFromJob(job)); err != nil {
			return nil, err
		}
		if err := allocateDaemonPort(addr); err != nil {
			return nil, err
		}
	default:
		return nil, errors.New("Invalid protocol format. Windows only supports tcp.")
	}

	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
}
// setupUnixHttp creates an HTTP server bound to a unix socket, applying the
// requested socket group and permissions.
func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) {
	r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
	if err != nil {
		return nil, err
	}

	if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	mask := syscall.Umask(0777)
	defer syscall.Umask(mask)

	l, err := newListener("unix", addr, job.GetenvBool("BufferRequests"))
	if err != nil {
		return nil, err
	}

	if err := setSocketGroup(addr, job.Getenv("SocketGroup")); err != nil {
		return nil, err
	}

	if err := os.Chmod(addr, 0660); err != nil {
		return nil, err
	}

	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
}
// ContainerHostConfigFromJob reconstructs a HostConfig from the job's environment.
func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
	hostConfig := &HostConfig{
		ContainerIDFile: job.Getenv("ContainerIDFile"),
		Privileged:      job.GetenvBool("Privileged"),
		PublishAllPorts: job.GetenvBool("PublishAllPorts"),
		NetworkMode:     NetworkMode(job.Getenv("NetworkMode")),
	}

	job.GetenvJson("LxcConf", &hostConfig.LxcConf)
	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
	job.GetenvJson("Devices", &hostConfig.Devices)

	if Binds := job.GetenvList("Binds"); Binds != nil {
		hostConfig.Binds = Binds
	}
	if Links := job.GetenvList("Links"); Links != nil {
		hostConfig.Links = Links
	}
	if Dns := job.GetenvList("Dns"); Dns != nil {
		hostConfig.Dns = Dns
	}
	if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
		hostConfig.DnsSearch = DnsSearch
	}
	if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
		hostConfig.VolumesFrom = VolumesFrom
	}
	if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
		hostConfig.CapAdd = CapAdd
	}
	if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
		hostConfig.CapDrop = CapDrop
	}

	return hostConfig
}
// FIXME: Allow to interrupt current push when new push of same image is done.
func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
	if err != nil {
		return job.Error(err)
	}

	tag := job.Getenv("tag")
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil {
		return job.Error(err)
	}
	defer s.poolRemove("push", repoInfo.LocalName)

	endpoint, err := repoInfo.GetEndpoint()
	if err != nil {
		return job.Error(err)
	}

	img, err := s.graph.Get(repoInfo.LocalName)
	r, err2 := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err2 != nil {
		return job.Error(err2)
	}

	if err != nil {
		reposLen := 1
		if tag == "" {
			reposLen = len(s.Repositories[repoInfo.LocalName])
		}
		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen))
		// If it fails, try to get the repository
		if localRepo, exists := s.Repositories[repoInfo.LocalName]; exists {
			if err := s.pushRepository(r, job.Stdout, repoInfo, localRepo, tag, sf); err != nil {
				return job.Error(err)
			}
			return engine.StatusOK
		}
		return job.Error(err)
	}

	var token []string
	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", repoInfo.CanonicalName))
	if _, err := s.pushImage(r, job.Stdout, img.ID, endpoint.String(), token, sf); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// Allocate a network interface
func Allocate(job *engine.Job) engine.Status {
	var (
		ip          *net.IP
		err         error
		id          = job.Args[0]
		requestedIP = net.ParseIP(job.Getenv("RequestedIP"))
	)

	if requestedIP != nil {
		ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP)
	} else {
		ip, err = ipallocator.RequestIP(bridgeNetwork, nil)
	}
	if err != nil {
		return job.Error(err)
	}

	out := engine.Env{}
	out.Set("IP", ip.String())
	out.Set("Mask", bridgeNetwork.Mask.String())
	out.Set("Gateway", bridgeNetwork.IP.String())
	out.Set("Bridge", bridgeIface)

	size, _ := bridgeNetwork.Mask.Size()
	out.SetInt("IPPrefixLen", size)

	currentInterfaces.Set(id, &networkInterface{
		IP: *ip,
	})

	out.WriteTo(job.Stdout)

	return engine.StatusOK
}
// CmdManifest generates the manifest for the named image (defaulting to the
// "latest" tag) and writes it to stdout.
func (s *TagStore) CmdManifest(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	tag := job.Getenv("tag")
	if tag == "" {
		tag = "latest"
	}

	// Resolve the Repository name from fqn to endpoint + name
	repoInfo, err := registry.ParseRepositoryInfo(name)
	if err != nil {
		return job.Error(err)
	}

	manifestBytes, err := s.newManifest(name, repoInfo.RemoteName, tag)
	if err != nil {
		return job.Error(err)
	}

	_, err = job.Stdout.Write(manifestBytes)
	if err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
// FIXME: Allow to interrupt current push when new push of same image is done.
func (srv *Server) ImagePush(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	tag := job.Getenv("tag")
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	if _, err := srv.poolAdd("push", localName); err != nil {
		return job.Error(err)
	}
	defer srv.poolRemove("push", localName)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}

	img, err := srv.daemon.Graph().Get(localName)
	r, err2 := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err2 != nil {
		return job.Error(err2)
	}

	if err != nil {
		reposLen := 1
		if tag == "" {
			reposLen = len(srv.daemon.Repositories().Repositories[localName])
		}
		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
		// If it fails, try to get the repository
		if localRepo, exists := srv.daemon.Repositories().Repositories[localName]; exists {
			if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
				return job.Error(err)
			}
			return engine.StatusOK
		}
		return job.Error(err)
	}

	var token []string
	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
	if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// ContainerMonitorOp is only called in attach mode by the docker client to stop
// the monitor server.
func (daemon *Daemon) ContainerMonitorOp(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}

	var (
		name = job.Args[0]
		op   = job.Getenv("op")
		err  error
	)
	log.Infof("Container %s, monitor operation %s", name, op)

	if container := daemon.Get(name); container != nil {
		if container.State.IsRunning() {
			return job.Errorf("Container %s is running, stop container before stop monitor", name)
		}
		if op == "stop" {
			r := container.monitorState.IsRunning()
			if !r {
				// monitor may be stopped by 'docker stop' API
				log.Infof("Container %s 's monitor is not running", name)
				return engine.StatusOK
			}
			// stop poll container state before kill monitor server
			container.exMonitor.StopStatePoller()
			// docker daemon has restarted, we should clean container here
			if !container.exMonitor.hasCmd {
				container.exMonitor.Stop()
			}

			log.Debugf("Kill monitor server with pid %v", container.monitorState.Pid)
			// kill monitor server
			if err := syscall.Kill(container.monitorState.Pid, syscall.SIGTERM); err != nil {
				log.Errorf("kill monitor server with pid %v error: %v", container.monitorState.Pid, err)
				return job.Error(err)
			}
			// write monitor state
			container.monitorState.SetStopped(0)
			if err = container.WriteMonitorState(); err != nil {
				log.Errorf("write monitor state error: %v", err)
				return job.Error(err)
			}
		} else {
			return job.Errorf("Monitor op: %s is not supported", op)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) {
	if !job.GetenvBool("TlsVerify") {
		log.Infof("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
	}

	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))

	l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests"))
	if err != nil {
		return nil, err
	}

	if err := allocateDaemonPort(addr); err != nil {
		return nil, err
	}

	if job.GetenvBool("Tls") || job.GetenvBool("TlsVerify") {
		var tlsCa string
		if job.GetenvBool("TlsVerify") {
			tlsCa = job.Getenv("TlsCa")
		}
		l, err = setupTls(job.Getenv("TlsCert"), job.Getenv("TlsKey"), tlsCa, l)
		if err != nil {
			return nil, err
		}
	}
	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
}
// ExecConfigFromJob reconstructs an ExecConfig from the job's environment.
func ExecConfigFromJob(job *engine.Job) *ExecConfig {
	execConfig := &ExecConfig{
		User:         job.Getenv("User"),
		Privileged:   job.GetenvBool("Privileged"),
		Tty:          job.GetenvBool("Tty"),
		Container:    job.Getenv("Container"),
		AttachStdin:  job.GetenvBool("AttachStdin"),
		AttachStderr: job.GetenvBool("AttachStderr"),
		AttachStdout: job.GetenvBool("AttachStdout"),
	}
	if cmd := job.GetenvList("Cmd"); cmd != nil {
		execConfig.Cmd = cmd
	}

	return execConfig
}
// ContainerHostConfigFromJob reconstructs a HostConfig from the job's environment,
// preferring a complete JSON-encoded HostConfig when one is present.
func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
	if job.EnvExists("HostConfig") {
		hostConfig := HostConfig{}
		job.GetenvJson("HostConfig", &hostConfig)
		return &hostConfig
	}

	hostConfig := &HostConfig{
		ContainerIDFile: job.Getenv("ContainerIDFile"),
		Privileged:      job.GetenvBool("Privileged"),
		PublishAllPorts: job.GetenvBool("PublishAllPorts"),
		NetworkMode:     NetworkMode(job.Getenv("NetworkMode")),
	}

	job.GetenvJson("LxcConf", &hostConfig.LxcConf)
	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
	job.GetenvJson("Devices", &hostConfig.Devices)
	job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy)
	hostConfig.SecurityOpt = job.GetenvList("SecurityOpt")

	if Binds := job.GetenvList("Binds"); Binds != nil {
		hostConfig.Binds = Binds
	}
	if Links := job.GetenvList("Links"); Links != nil {
		hostConfig.Links = Links
	}
	if Dns := job.GetenvList("Dns"); Dns != nil {
		hostConfig.Dns = Dns
	}
	if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
		hostConfig.DnsSearch = DnsSearch
	}
	if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil {
		hostConfig.ExtraHosts = ExtraHosts
	}
	if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
		hostConfig.VolumesFrom = VolumesFrom
	}
	if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
		hostConfig.CapAdd = CapAdd
	}
	if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
		hostConfig.CapDrop = CapDrop
	}

	return hostConfig
}
// CmdCheckKey reports whether the given public key is verified for the named
// namespace at the requested permission level.
func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s NAMESPACE", job.Name)
	}
	var (
		namespace = job.Args[0]
		keyBytes  = job.Getenv("PublicKey")
	)

	if keyBytes == "" {
		return job.Errorf("Missing PublicKey")
	}
	pk, err := libtrust.UnmarshalPublicKeyJWK([]byte(keyBytes))
	if err != nil {
		return job.Errorf("Error unmarshalling public key: %s", err)
	}

	permission := uint16(job.GetenvInt("Permission"))
	if permission == 0 {
		permission = 0x03
	}

	t.RLock()
	defer t.RUnlock()
	if t.graph == nil {
		job.Stdout.Write([]byte("no graph"))
		return engine.StatusOK
	}

	// Check if any expired grants
	verified, err := t.graph.Verify(pk, namespace, permission)
	if err != nil {
		return job.Errorf("Error verifying key to namespace: %s", namespace)
	}
	if !verified {
		log.Debugf("Verification failed for %s using key %s", namespace, pk.KeyID())
		job.Stdout.Write([]byte("not verified"))
	} else if t.expiration.Before(time.Now()) {
		job.Stdout.Write([]byte("expired"))
	} else {
		job.Stdout.Write([]byte("verified"))
	}

	return engine.StatusOK
}
// ExecConfigFromJob reconstructs an ExecConfig from the job's environment and
// fails if no exec command was specified.
func ExecConfigFromJob(job *engine.Job) (*ExecConfig, error) {
	execConfig := &ExecConfig{
		User:         job.Getenv("User"),
		Privileged:   job.GetenvBool("Privileged"),
		Tty:          job.GetenvBool("Tty"),
		AttachStdin:  job.GetenvBool("AttachStdin"),
		AttachStderr: job.GetenvBool("AttachStderr"),
		AttachStdout: job.GetenvBool("AttachStdout"),
	}
	cmd := job.GetenvList("Cmd")
	if len(cmd) == 0 {
		return nil, fmt.Errorf("No exec command specified")
	}
	execConfig.Cmd = cmd

	return execConfig, nil
}
// Get streams events to the job's stdout, first replaying any events recorded
// in the [since, until] interval.
func (e *Events) Get(job *engine.Job) error {
	var (
		since   = job.GetenvInt64("since")
		until   = job.GetenvInt64("until")
		timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
	)

	eventFilters, err := filters.FromParam(job.Getenv("filters"))
	if err != nil {
		return err
	}

	// If no until, disable timeout
	if until == 0 {
		timeout.Stop()
	}

	listener := make(chan *jsonmessage.JSONMessage)
	e.subscribe(listener)
	defer e.unsubscribe(listener)

	job.Stdout.Write(nil)

	// Resend every event in the [since, until] time interval.
	if since != 0 {
		if err := e.writeCurrent(job, since, until, eventFilters); err != nil {
			return err
		}
	}

	for {
		select {
		case event, ok := <-listener:
			if !ok {
				return nil
			}
			if err := writeEvent(job, event, eventFilters); err != nil {
				return err
			}
		case <-timeout.C:
			return nil
		}
	}
}
// LinkContainers adds or removes the iptables FORWARD rules that let a parent
// and child container reach each other on the linked ports.
func LinkContainers(job *engine.Job) engine.Status {
	var (
		action       = job.Args[0]
		childIP      = job.Getenv("ChildIP")
		parentIP     = job.Getenv("ParentIP")
		ignoreErrors = job.GetenvBool("IgnoreErrors")
		ports        = job.GetenvList("Ports")
		useIpv6      = job.GetenvBool("UseIpv6")
	)
	split := func(p string) (string, string) {
		parts := strings.Split(p, "/")
		return parts[0], parts[1]
	}

	for _, p := range ports {
		port, proto := split(p)
		if output, err := iptables.Raw(useIpv6, action, "FORWARD",
			"-i", bridgeIface, "-o", bridgeIface,
			"-p", proto,
			"-s", parentIP,
			"--dport", port,
			"-d", childIP,
			"-j", "ACCEPT"); !ignoreErrors && err != nil {
			return job.Error(err)
		} else if len(output) != 0 {
			return job.Errorf("Error toggle iptables forward: %s", output)
		}

		if output, err := iptables.Raw(useIpv6, action, "FORWARD",
			"-i", bridgeIface, "-o", bridgeIface,
			"-p", proto,
			"-s", childIP,
			"--sport", port,
			"-d", parentIP,
			"-j", "ACCEPT"); !ignoreErrors && err != nil {
			return job.Error(err)
		} else if len(output) != 0 {
			return job.Errorf("Error toggle iptables forward: %s", output)
		}
	}
	return engine.StatusOK
}
// Allocate a network interface
func Allocate(job *engine.Job) engine.Status {
	var (
		ip          net.IP
		mac         net.HardwareAddr
		err         error
		id          = job.Args[0]
		requestedIP = net.ParseIP(job.Getenv("RequestedIP"))
	)

	if requestedIP != nil {
		ip, err = ipallocator.RequestIP(bridgeNetwork, requestedIP)
	} else {
		ip, err = ipallocator.RequestIP(bridgeNetwork, nil)
	}
	if err != nil {
		return job.Error(err)
	}

	// If no explicit mac address was given, generate a random one.
	if mac, err = net.ParseMAC(job.Getenv("RequestedMac")); err != nil {
		mac = generateMacAddr(ip)
	}

	out := engine.Env{}
	out.Set("IP", ip.String())
	out.Set("Mask", bridgeNetwork.Mask.String())
	out.Set("Gateway", bridgeNetwork.IP.String())
	out.Set("MacAddress", mac.String())
	out.Set("Bridge", bridgeIface)

	size, _ := bridgeNetwork.Mask.Size()
	out.SetInt("IPPrefixLen", size)

	currentInterfaces.Set(id, &networkInterface{
		IP: ip,
	})

	out.WriteTo(job.Stdout)

	return engine.StatusOK
}
// ContainerCommit creates a new image from a container's changes.
func (daemon *Daemon) ContainerCommit(job *engine.Job) error {
	if len(job.Args) != 1 {
		return fmt.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
	}
	name := job.Args[0]

	container, err := daemon.Get(name)
	if err != nil {
		return err
	}

	var (
		config       = container.Config
		stdoutBuffer = bytes.NewBuffer(nil)
		newConfig    runconfig.Config
	)

	buildConfigJob := daemon.eng.Job("build_config")
	buildConfigJob.Stdout.Add(stdoutBuffer)
	buildConfigJob.Setenv("changes", job.Getenv("changes"))
	// FIXME: this should be removed when we remove the deprecated config param
	buildConfigJob.Setenv("config", job.Getenv("config"))

	if err := buildConfigJob.Run(); err != nil {
		return err
	}
	if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil {
		return err
	}

	if err := runconfig.Merge(&newConfig, config); err != nil {
		return err
	}

	img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
	if err != nil {
		return err
	}
	job.Printf("%s\n", img.ID)
	return nil
}
func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) {
	if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
		log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
	}

	r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
	if err != nil {
		return nil, err
	}

	l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests"))
	if err != nil {
		return nil, err
	}

	if job.GetenvBool("Tls") || job.GetenvBool("TlsVerify") {
		var tlsCa string
		if job.GetenvBool("TlsVerify") {
			tlsCa = job.Getenv("TlsCa")
		}
		l, err = setupTls(job.Getenv("TlsCert"), job.Getenv("TlsKey"), tlsCa, l)
		if err != nil {
			return nil, err
		}
	}
	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
}
// ContainerCommit creates a new image from a container's changes.
func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
	}
	name := job.Args[0]

	container, err := daemon.Get(name)
	if err != nil {
		return job.Error(err)
	}

	var (
		config    = container.Config
		newConfig runconfig.Config
	)

	if err := job.GetenvJson("config", &newConfig); err != nil {
		return job.Error(err)
	}

	if err := runconfig.Merge(&newConfig, config); err != nil {
		return job.Error(err)
	}

	img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
	if err != nil {
		return job.Error(err)
	}
	job.Printf("%s\n", img.ID)
	return engine.StatusOK
}
// LinkContainers adds or removes the rules in the DOCKER iptables chain that
// link a parent and child container on the given ports.
func LinkContainers(job *engine.Job) error {
	var (
		action       = job.Args[0]
		nfAction     iptables.Action
		childIP      = job.Getenv("ChildIP")
		parentIP     = job.Getenv("ParentIP")
		ignoreErrors = job.GetenvBool("IgnoreErrors")
		ports        = job.GetenvList("Ports")
	)

	switch action {
	case "-A":
		nfAction = iptables.Append
	case "-I":
		nfAction = iptables.Insert
	case "-D":
		nfAction = iptables.Delete
	default:
		return fmt.Errorf("Invalid action '%s' specified", action)
	}

	ip1 := net.ParseIP(parentIP)
	if ip1 == nil {
		return fmt.Errorf("Parent IP '%s' is invalid", parentIP)
	}
	ip2 := net.ParseIP(childIP)
	if ip2 == nil {
		return fmt.Errorf("Child IP '%s' is invalid", childIP)
	}

	chain := iptables.Chain{Name: "DOCKER", Bridge: bridgeIface}
	for _, p := range ports {
		port := nat.Port(p)
		if err := chain.Link(nfAction, ip1, ip2, port.Int(), port.Proto()); !ignoreErrors && err != nil {
			return err
		}
	}
	return nil
}
// CmdSet stores a new image in the graph.
// Images are stored in the graph using 4 elements:
//	- A user-defined ID
//	- A collection of metadata describing the image
//	- A directory tree stored as a tar archive (also called the "layer")
//	- A reference to a "parent" ID on top of which the layer should be applied
//
// NOTE: even though the parent ID is only useful in relation to the layer and how
// to apply it (ie. you could represent the full directory tree as 'parent_layer + layer'),
// it is treated as a top-level property of the image. This is an artifact of early
// design and should probably be cleaned up in the future to simplify the design.
//
// Syntax: image_set ID
// Input:
//	- Layer content must be streamed in tar format on stdin. An empty input is
//	  valid and represents a nil layer.
//	- Image metadata must be passed in the command environment.
//	  'json': a json-encoded object with all image metadata.
//	  It will be stored as-is, without any encoding/decoding artifacts.
//	  That is a requirement of the current registry client implementation,
//	  because a re-encoded json might invalidate the image checksum at
//	  the next upload, even with functionally identical content.
func (s *TagStore) CmdSet(job *engine.Job) error {
	if len(job.Args) != 1 {
		return fmt.Errorf("usage: %s NAME", job.Name)
	}
	var (
		imgJSON = []byte(job.Getenv("json"))
		layer   = job.Stdin
	)
	if len(imgJSON) == 0 {
		return fmt.Errorf("mandatory key 'json' is not set")
	}
	// We have to pass an *image.Image object, even though it will be completely
	// ignored in favor of the redundant json data.
	// FIXME: the current prototype of Graph.Register is redundant.
	img, err := image.NewImgJSON(imgJSON)
	if err != nil {
		return err
	}
	if err := s.graph.Register(img, layer); err != nil {
		return err
	}
	return nil
}
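// The doc comment above describes the job-level contract of image_set. As a
// minimal, hypothetical sketch of a caller (assuming an initialized
// *engine.Engine named eng, an image ID imgID, its raw JSON metadata imgJSON,
// and an io.Reader layerTar for the tar-format layer; these names are
// illustrative, not from the source):
//
//	job := eng.Job("image_set", imgID)
//	job.Setenv("json", string(imgJSON)) // stored as-is, per the note above
//	job.Stdin.Add(layerTar)             // empty input represents a nil layer
//	if err := job.Run(); err != nil {
//		// registration failed
//	}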
// Decomission destroys a ship through the configured driver and writes the
// resulting id to stdout.
func Decomission(job *engine.Job) engine.Status {
	var idOrName string

	if len(job.Args) == 1 {
		idOrName = job.Args[0]
	} else if len(job.Args) > 1 {
		return job.Errorf("Usage: %s", job.Name)
	}

	parameters := url.Values{}
	parameters.Set("idOrName", idOrName)
	parameters.Set("time", job.Getenv("time"))

	configuration := job.Eng.Hack_GetGlobalVar("configuration").(types.KraneConfiguration)

	id, err := configuration.Driver.Destroy(parameters)
	if err != nil {
		return job.Errorf("unable to decommission ship %s", idOrName)
	}

	job.Stdout.Write([]byte(id))

	return engine.StatusOK
}
func LinkContainers(job *engine.Job) engine.Status {
	var (
		action       = job.Args[0]
		childIP      = job.Getenv("ChildIP")
		parentIP     = job.Getenv("ParentIP")
		ignoreErrors = job.GetenvBool("IgnoreErrors")
		ports        = job.GetenvList("Ports")
	)

	for _, value := range ports {
		port := nat.Port(value)
		if output, err := iptables.Raw(action, "FORWARD",
			"-i", bridgeIface, "-o", bridgeIface,
			"-p", port.Proto(),
			"-s", parentIP,
			"--dport", strconv.Itoa(port.Int()),
			"-d", childIP,
			"-j", "ACCEPT"); !ignoreErrors && err != nil {
			return job.Error(err)
		} else if len(output) != 0 {
			return job.Errorf("Error toggle iptables forward: %s", output)
		}

		if output, err := iptables.Raw(action, "FORWARD",
			"-i", bridgeIface, "-o", bridgeIface,
			"-p", port.Proto(),
			"-s", childIP,
			"--sport", strconv.Itoa(port.Int()),
			"-d", parentIP,
			"-j", "ACCEPT"); !ignoreErrors && err != nil {
			return job.Error(err)
		} else if len(output) != 0 {
			return job.Errorf("Error toggle iptables forward: %s", output)
		}
	}
	return engine.StatusOK
}
// tlsConfigFromJob extracts the TLS settings from the job's environment,
// returning nil when TLS is disabled.
func tlsConfigFromJob(job *engine.Job) *tlsConfig {
	verify := job.GetenvBool("TlsVerify")
	if !job.GetenvBool("Tls") && !verify {
		return nil
	}
	return &tlsConfig{
		Verify:      verify,
		Certificate: job.Getenv("TlsCert"),
		Key:         job.Getenv("TlsKey"),
		CA:          job.Getenv("TlsCa"),
	}
}
func (s *TagStore) CmdImages(job *engine.Job) engine.Status {
	var (
		allImages   map[string]*image.Image
		err         error
		filt_tagged = true
	)

	imageFilters, err := filters.FromParam(job.Getenv("filters"))
	if err != nil {
		return job.Error(err)
	}
	for name := range imageFilters {
		if _, ok := acceptedImageFilterTags[name]; !ok {
			return job.Errorf("Invalid filter '%s'", name)
		}
	}

	if i, ok := imageFilters["dangling"]; ok {
		for _, value := range i {
			if strings.ToLower(value) == "true" {
				filt_tagged = false
			}
		}
	}

	if job.GetenvBool("all") && filt_tagged {
		allImages, err = s.graph.Map()
	} else {
		allImages, err = s.graph.Heads()
	}
	if err != nil {
		return job.Error(err)
	}

	lookup := make(map[string]*engine.Env)
	s.Lock()
	for name, repository := range s.Repositories {
		if job.Getenv("filter") != "" {
			if match, _ := path.Match(job.Getenv("filter"), name); !match {
				continue
			}
		}
		for tag, id := range repository {
			image, err := s.graph.Get(id)
			if err != nil {
				log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
				continue
			}

			if out, exists := lookup[id]; exists {
				if filt_tagged {
					out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag)))
				}
			} else {
				// get the boolean list for if only the untagged images are requested
				delete(allImages, id)
				if filt_tagged {
					out := &engine.Env{}
					out.SetJson("ParentId", image.Parent)
					out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)})
					out.SetJson("Id", image.ID)
					out.SetInt64("Created", image.Created.Unix())
					out.SetInt64("Size", image.Size)
					out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
					lookup[id] = out
				}
			}
		}
	}
	s.Unlock()

	outs := engine.NewTable("Created", len(lookup))
	for _, value := range lookup {
		outs.Add(value)
	}

	// Display images which aren't part of a repository/tag
	if job.Getenv("filter") == "" {
		for _, image := range allImages {
			out := &engine.Env{}
			out.SetJson("ParentId", image.Parent)
			out.SetList("RepoTags", []string{"<none>:<none>"})
			out.SetJson("Id", image.ID)
			out.SetInt64("Created", image.Created.Unix())
			out.SetInt64("Size", image.Size)
			out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
			outs.Add(out)
		}
	}

	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
	var (
		foundBefore bool
		displayed   int
		all         = job.GetenvBool("all")
		since       = job.Getenv("since")
		before      = job.Getenv("before")
		n           = job.GetenvInt("limit")
		size        = job.GetenvBool("size")
		psFilters   filters.Args
		filt_exited []int
	)
	outs := engine.NewTable("Created", 0)

	psFilters, err := filters.FromParam(job.Getenv("filters"))
	if err != nil {
		return job.Error(err)
	}
	if i, ok := psFilters["exited"]; ok {
		for _, value := range i {
			code, err := strconv.Atoi(value)
			if err != nil {
				return job.Error(err)
			}
			filt_exited = append(filt_exited, code)
		}
	}

	if i, ok := psFilters["status"]; ok {
		for _, value := range i {
			if value == "exited" {
				all = true
			}
		}
	}

	names := map[string][]string{}
	daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
		names[e.ID()] = append(names[e.ID()], p)
		return nil
	}, 1)

	var beforeCont, sinceCont *Container
	if before != "" {
		beforeCont, err = daemon.Get(before)
		if err != nil {
			return job.Error(err)
		}
	}

	if since != "" {
		sinceCont, err = daemon.Get(since)
		if err != nil {
			return job.Error(err)
		}
	}

	errLast := errors.New("last container")
	writeCont := func(container *Container) error {
		container.Lock()
		defer container.Unlock()
		if !container.Running && !all && n <= 0 && since == "" && before == "" {
			return nil
		}
		if !psFilters.Match("name", container.Name) {
			return nil
		}
		if !psFilters.Match("id", container.ID) {
			return nil
		}
		if !psFilters.MatchKVList("label", container.Config.Labels) {
			return nil
		}
		if before != "" && !foundBefore {
			if container.ID == beforeCont.ID {
				foundBefore = true
			}
			return nil
		}
		if n > 0 && displayed == n {
			return errLast
		}
		if since != "" {
			if container.ID == sinceCont.ID {
				return errLast
			}
		}
		if len(filt_exited) > 0 {
			should_skip := true
			for _, code := range filt_exited {
				if code == container.ExitCode && !container.Running {
					should_skip = false
					break
				}
			}
			if should_skip {
				return nil
			}
		}
		if !psFilters.Match("status", container.State.StateString()) {
			return nil
		}
		displayed++
		out := &engine.Env{}
		out.SetJson("Id", container.ID)
		out.SetList("Names", names[container.ID])
		img := container.Config.Image
		_, tag := parsers.ParseRepositoryTag(container.Config.Image)
		if tag == "" {
			img = utils.ImageReference(img, graph.DEFAULTTAG)
		}
		out.SetJson("Image", img)
		if len(container.Args) > 0 {
			args := []string{}
			for _, arg := range container.Args {
				if strings.Contains(arg, " ") {
					args = append(args, fmt.Sprintf("'%s'", arg))
				} else {
					args = append(args, arg)
				}
			}
			argsAsString := strings.Join(args, " ")
			out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString))
		} else {
			out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
		}
		out.SetInt64("Created", container.Created.Unix())
		out.Set("Status", container.State.String())
		str, err := container.NetworkSettings.PortMappingAPI().ToListString()
		if err != nil {
			return err
		}
		out.Set("Ports", str)
		if size {
			sizeRw, sizeRootFs := container.GetSize()
			out.SetInt64("SizeRw", sizeRw)
			out.SetInt64("SizeRootFs", sizeRootFs)
		}
		out.SetJson("Labels", container.Config.Labels)
		outs.Add(out)
		return nil
	}

	for _, container := range daemon.List() {
		if err := writeCont(container); err != nil {
			if err != errLast {
				return job.Error(err)
			}
			break
		}
	}
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s\n", job.Name)
	}
	var (
		remoteURL      = job.Getenv("remote")
		repoName       = job.Getenv("t")
		suppressOutput = job.GetenvBool("q")
		noCache        = job.GetenvBool("nocache")
		rm             = job.GetenvBool("rm")
		forceRm        = job.GetenvBool("forcerm")
		authConfig     = &registry.AuthConfig{}
		configFile     = &registry.ConfigFile{}
		tag            string
		context        io.ReadCloser
	)

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("configFile", configFile)

	repoName, tag = parsers.ParseRepositoryTag(repoName)
	if repoName != "" {
		if _, _, err := registry.ResolveRepositoryName(repoName); err != nil {
			return job.Error(err)
		}
		if len(tag) > 0 {
			if err := graph.ValidateTagName(tag); err != nil {
				return job.Error(err)
			}
		}
	}

	if remoteURL == "" {
		context = ioutil.NopCloser(job.Stdin)
	} else if utils.IsGIT(remoteURL) {
		if !utils.ValidGitTransport(remoteURL) {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return job.Error(err)
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
			return job.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return job.Error(err)
		}
		context = c
	} else if utils.IsURL(remoteURL) {
		f, err := utils.Download(remoteURL)
		if err != nil {
			return job.Error(err)
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return job.Error(err)
		}
		c, err := archive.Generate("Dockerfile", string(dockerFile))
		if err != nil {
			return job.Error(err)
		}
		context = c
	}
	defer context.Close()

	sf := utils.NewStreamFormatter(job.GetenvBool("json"))

	builder := &Builder{
		Daemon: b.Daemon,
		Engine: b.Engine,
		OutStream: &utils.StdoutFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &utils.StderrFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !suppressOutput,
		UtilizeCache:    !noCache,
		Remove:          rm,
		ForceRemove:     forceRm,
		OutOld:          job.Stdout,
		StreamFormatter: sf,
		AuthConfig:      authConfig,
		AuthConfigFile:  configFile,
	}

	id, err := builder.Run(context)
	if err != nil {
		return job.Error(err)
	}

	if repoName != "" {
		b.Daemon.Repositories().Set(repoName, tag, id, false)
	}
	return engine.StatusOK
}
// ListenAndServe sets up the required http.Server and gets it listening for
// each addr passed in and does protocol specific checking.
func ListenAndServe(proto, addr string, job *engine.Job) error {
	var l net.Listener
	r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
	if err != nil {
		return err
	}

	if proto == "fd" {
		return ServeFd(addr, r)
	}

	if proto == "unix" {
		if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
			return err
		}
	}

	var oldmask int
	if proto == "unix" {
		oldmask = syscall.Umask(0777)
	}

	if job.GetenvBool("BufferRequests") {
		l, err = listenbuffer.NewListenBuffer(proto, addr, activationLock)
	} else {
		l, err = net.Listen(proto, addr)
	}

	if proto == "unix" {
		syscall.Umask(oldmask)
	}
	if err != nil {
		return err
	}

	if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) {
		tlsCert := job.Getenv("TlsCert")
		tlsKey := job.Getenv("TlsKey")
		cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey)
		if err != nil {
			return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", tlsCert, tlsKey, err)
		}
		tlsConfig := &tls.Config{
			NextProtos:   []string{"http/1.1"},
			Certificates: []tls.Certificate{cert},
		}
		if job.GetenvBool("TlsVerify") {
			certPool := x509.NewCertPool()
			file, err := ioutil.ReadFile(job.Getenv("TlsCa"))
			if err != nil {
				return fmt.Errorf("Couldn't read CA certificate: %s", err)
			}
			certPool.AppendCertsFromPEM(file)

			tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
			tlsConfig.ClientCAs = certPool
		}
		l = tls.NewListener(l, tlsConfig)
	}

	// Basic error and sanity checking
	switch proto {
	case "tcp":
		if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
			log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
		}
	case "unix":
		socketGroup := job.Getenv("SocketGroup")
		if socketGroup != "" {
			if err := changeGroup(addr, socketGroup); err != nil {
				if socketGroup == "docker" {
					// if the user hasn't explicitly specified the group ownership, don't fail on errors.
					utils.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error())
				} else {
					return err
				}
			}
		}
		if err := os.Chmod(addr, 0660); err != nil {
			return err
		}
	default:
		return fmt.Errorf("Invalid protocol format.")
	}

	httpSrv := http.Server{Addr: addr, Handler: r}
	return httpSrv.Serve(l)
}