Code example #1
File: init.go  Project: niuzhiheng/docker
// InitServer runs the remote API server `srv` as a daemon.
// Only one API server can run at a time - this is enforced by a pidfile.
// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
func InitServer(job *engine.Job) engine.Status {
	job.Logf("Creating server")
	cfg := daemonconfig.ConfigFromJob(job)
	srv, err := NewServer(job.Eng, cfg)
	if err != nil {
		return job.Error(err)
	}
	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)

	for name, handler := range map[string]engine.Handler{
		"build": srv.Build,
	} {
		if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
			return job.Error(err)
		}
	}
	// Install image-related commands from the image subsystem.
	// See `graph/service.go`
	if err := srv.daemon.Repositories().Install(job.Eng); err != nil {
		return job.Error(err)
	}
	// Install daemon-related commands from the daemon subsystem.
	// See `daemon/`
	if err := srv.daemon.Install(job.Eng); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
Code example #2
File: init.go  Project: JacsonPaz/docker
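// InitPidfile writes the daemon's pidfile at the path passed as the job's
// only argument, so that at most one daemon instance can run at a time.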
func InitPidfile(job *engine.Job) engine.Status {
	if len(job.Args) == 0 {
		return job.Error(fmt.Errorf("no pidfile provided to initialize"))
	}
	job.Logf("Creating pidfile")
	if err := utils.CreatePidFile(job.Args[0]); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
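A minimal sketch of how a handler such as InitPidfile is registered and invoked through the engine. engine.New(), the "init_pidfile" job name, and the pidfile path are assumptions for illustration; the real registration lives in the daemon's bootstrap code.

	eng := engine.New()
	if err := eng.Register("init_pidfile", InitPidfile); err != nil {
		log.Fatal(err)
	}
	// The handler expects the pidfile path as the job's single argument.
	if err := eng.Job("init_pidfile", "/var/run/docker.pid").Run(); err != nil {
		log.Fatal(err)
	}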
Code example #3
File: ssh.go  Project: krane-io/krane
// portMapping forwards connections from 127.0.0.1:localPort to remotePort on
// remoteHost through an ssh tunnel.
func portMapping(job *engine.Job, remoteHost string, localPort int, remotePort int) {
	// Registered up front so it can actually catch panics; in the original
	// placement (after the endless accept loop) the defer was unreachable.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("Recovered in portMapping:", r)
		}
	}()

	localListener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", localPort))
	if err != nil {
		job.Errorf("\nnet.Listen failed: %v", err)
		return
	}
	defer localListener.Close()

	config := &ssh.ClientConfig{
		User: "******",
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(getPrivateKeys(job)),
		},
	}

	// Dial the remote ssh server.
	conn, err := ssh.Dial("tcp", fmt.Sprintf("%s:22", remoteHost), config)
	if err != nil {
		job.Errorf("\nUnable to connect to %s: %s", remoteHost, err)
		return
	}
	defer conn.Close()
	job.Logf("\nEstablished ssh tunnel with %s:22 %d:%d", remoteHost, localPort, remotePort)

	for {
		// Accept a local connection (type net.Conn).
		localConnection, err := localListener.Accept()
		if err != nil {
			job.Errorf("\nlocalListener.Accept failed: %v", err)
			return
		}

		// Open the matching connection on the far side of the tunnel.
		remoteConnection, err := conn.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", remotePort))
		if err != nil {
			job.Errorf("\nUnable to register tcp forward: %v", err)
			localConnection.Close()
			continue
		}

		// ioProxy is expected to close both connections when it is done;
		// deferring Close here would only pile up defers that never run
		// inside an endless loop.
		go ioProxy(localConnection, remoteConnection)
	}
}
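ioProxy is referenced above but not shown. A minimal sketch, assuming it only copies bytes in both directions (imports io and net) and tears the pair down when either side finishes:

	func ioProxy(local, remote net.Conn) {
		defer local.Close()
		defer remote.Close()
		done := make(chan struct{}, 2)
		go func() { io.Copy(remote, local); done <- struct{}{} }()
		go func() { io.Copy(local, remote); done <- struct{}{} }()
		// One direction finishing is enough to shut the pair down.
		<-done
	}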
Code example #4
File: list.go  Project: krane-io/krane
// listContainer queries every available ship for its containers and streams
// the results back on the returned channel (nil for ships that failed).
func listContainer(job *engine.Job, configuration types.KraneConfiguration) <-chan *types.Ship {
	v := url.Values{}
	if all := job.GetenvBool("all"); all {
		v.Set("all", strconv.FormatBool(all))
	}

	// One buffered slot per queried ship, so the sender goroutines never block.
	ships := configuration.Production.Fleet.Available()
	ch := make(chan *types.Ship, len(ships))
	for _, ship := range ships {
		go func(ship types.Ship) {
			cli := client.NewKraneClientApi(ship, false, job)
			body, statusCode, err := readBody(cli.Call("GET", "/containers/json?"+v.Encode(), nil, false))
			if err != nil {
				job.Logf("Error: %s", err.Error())
				ch <- nil
				return
			}
			job.Logf("(%d) %s\n", statusCode, body)

			if (statusCode >= 200) && (statusCode < 300) {
				var containerList []types.Containers
				if err := json.Unmarshal(body, &containerList); err != nil {
					job.Logf("Error decoding container list: %s", err.Error())
					ch <- nil
					return
				}
				resultShip := types.Ship{
					Name:       ship.Name,
					Fqdn:       ship.Fqdn,
					Port:       ship.Port,
					State:      "operational",
					Containers: containerList,
				}
				fmt.Printf("%#v", resultShip) // debug dump of the assembled ship
				ch <- &resultShip
			} else {
				ch <- nil
			}
		}(ship)
	}
	return ch
}
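A hedged sketch of consuming the channel: exactly one result (possibly nil) arrives per available ship, so the caller drains that many entries.

	ships := configuration.Production.Fleet.Available()
	ch := listContainer(job, configuration)
	for i := 0; i < len(ships); i++ {
		if ship := <-ch; ship != nil {
			fmt.Printf("%s: %d containers\n", ship.Name, len(ship.Containers))
		}
	}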
Code example #5
File: init.go  Project: JacsonPaz/docker
// InitServer runs the remote API server `srv` as a daemon.
// Only one API server can run at a time - this is enforced by a pidfile.
// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
func InitServer(job *engine.Job) engine.Status {
	job.Logf("Creating server")
	srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job))
	if err != nil {
		return job.Error(err)
	}
	job.Logf("Setting up signal traps")
	c := make(chan os.Signal, 1)
	signals := []os.Signal{os.Interrupt, syscall.SIGTERM}
	if os.Getenv("DEBUG") == "" {
		signals = append(signals, syscall.SIGQUIT)
	}
	gosignal.Notify(c, signals...)
	go func() {
		interruptCount := uint32(0)
		for sig := range c {
			go func(sig os.Signal) {
				log.Printf("Received signal '%v', starting shutdown of docker...\n", sig)
				switch sig {
				case os.Interrupt, syscall.SIGTERM:
					// If the user really wants to interrupt, let him do so.
					if atomic.LoadUint32(&interruptCount) < 3 {
						atomic.AddUint32(&interruptCount, 1)
						// Initiate the cleanup only once
						if atomic.LoadUint32(&interruptCount) == 1 {
							utils.RemovePidFile(srv.daemon.Config().Pidfile)
							srv.Close()
						} else {
							return
						}
					} else {
						log.Printf("Force shutdown of docker, interrupting cleanup\n")
					}
				case syscall.SIGQUIT:
				}
				os.Exit(128 + int(sig.(syscall.Signal)))
			}(sig)
		}
	}()
	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)

	for name, handler := range map[string]engine.Handler{
		"build": srv.Build,
		"pull":  srv.ImagePull,
		"push":  srv.ImagePush,
	} {
		if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
			return job.Error(err)
		}
	}
	// Install image-related commands from the image subsystem.
	// See `graph/service.go`
	if err := srv.daemon.Repositories().Install(job.Eng); err != nil {
		return job.Error(err)
	}
	// Install daemon-related commands from the daemon subsystem.
	// See `daemon/`
	if err := srv.daemon.Install(job.Eng); err != nil {
		return job.Error(err)
	}
	srv.SetRunning(true)
	return engine.StatusOK
}
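For context, a sketch of the bootstrap that would run this handler. The "initserver" job name matches what Docker's builtins registered in this era; the surrounding code is illustrative only.

	eng := engine.New()
	if err := eng.Register("initserver", InitServer); err != nil {
		log.Fatal(err)
	}
	if err := eng.Job("initserver").Run(); err != nil {
		log.Fatal(err)
	}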
Code example #6
File: driver.go  Project: baa-archieve/docker
func InitDriver(job *engine.Job) engine.Status {
	var (
		network        *net.IPNet
		enableIPTables = job.GetenvBool("EnableIptables")
		icc            = job.GetenvBool("InterContainerCommunication")
		ipMasq         = job.GetenvBool("EnableIpMasq")
		ipForward      = job.GetenvBool("EnableIpForward")
		bridgeIP       = job.Getenv("BridgeIP")
		fixedCIDR      = job.Getenv("FixedCIDR")
	)

	if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" {
		defaultBindingIP = net.ParseIP(defaultIP)
	}

	bridgeIface = job.Getenv("BridgeIface")
	usingDefaultBridge := false
	if bridgeIface == "" {
		usingDefaultBridge = true
		bridgeIface = DefaultNetworkBridge
	}

	addr, err := networkdriver.GetIfaceAddr(bridgeIface)
	if err != nil {
		// If we're not using the default bridge, fail without trying to create it
		if !usingDefaultBridge {
			return job.Error(err)
		}
		// If the bridge interface is not found (or has no address), try to create it and/or add an address
		if err := configureBridge(bridgeIP); err != nil {
			return job.Error(err)
		}

		addr, err = networkdriver.GetIfaceAddr(bridgeIface)
		if err != nil {
			return job.Error(err)
		}
		network = addr.(*net.IPNet)
	} else {
		network = addr.(*net.IPNet)
		// validate that the bridge ip matches the ip specified by BridgeIP
		if bridgeIP != "" {
			bip, _, err := net.ParseCIDR(bridgeIP)
			if err != nil {
				return job.Error(err)
			}
			if !network.IP.Equal(bip) {
				return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bip)
			}
		}
	}

	// Configure iptables for link support
	if enableIPTables {
		if err := setupIPTables(addr, icc, ipMasq); err != nil {
			return job.Error(err)
		}
	}

	if ipForward {
		// Enable IPv4 forwarding
		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
			job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
		}
	}

	// We can always try removing the iptables
	if err := iptables.RemoveExistingChain("DOCKER"); err != nil {
		return job.Error(err)
	}

	if enableIPTables {
		chain, err := iptables.NewChain("DOCKER", bridgeIface)
		if err != nil {
			return job.Error(err)
		}
		portmapper.SetIptablesChain(chain)
	}

	bridgeNetwork = network
	if fixedCIDR != "" {
		_, subnet, err := net.ParseCIDR(fixedCIDR)
		if err != nil {
			return job.Error(err)
		}
		log.Debugf("Subnet: %v", subnet)
		if err := ipallocator.RegisterSubnet(bridgeNetwork, subnet); err != nil {
			return job.Error(err)
		}
	}

	// https://github.com/docker/docker/issues/2768
	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP)

	for name, f := range map[string]engine.Handler{
		"allocate_interface": Allocate,
		"release_interface":  Release,
		"allocate_port":      AllocatePort,
		"link":               LinkContainers,
	} {
		if err := job.Eng.Register(name, f); err != nil {
			return job.Error(err)
		}
	}
	return engine.StatusOK
}
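A sketch of how a daemon might drive this handler through the job's environment; the "init_networkdriver" job name matches the handler's registration in Docker builtins of this era, while eng and the concrete values are assumptions for illustration.

	job := eng.Job("init_networkdriver")
	job.SetenvBool("EnableIptables", true)
	job.SetenvBool("InterContainerCommunication", true)
	job.SetenvBool("EnableIpMasq", true)
	job.SetenvBool("EnableIpForward", true)
	job.Setenv("BridgeIface", "docker0")
	job.Setenv("BridgeIP", "172.17.42.1/16") // must be CIDR notation, see net.ParseCIDR above
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}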
Code example #7
File: driver.go  Project: baa-archieve/docker
// Allocate an external port and map it to the interface
func AllocatePort(job *engine.Job) engine.Status {
	var (
		err error

		ip            = defaultBindingIP
		id            = job.Args[0]
		hostIP        = job.Getenv("HostIP")
		hostPort      = job.GetenvInt("HostPort")
		containerPort = job.GetenvInt("ContainerPort")
		proto         = job.Getenv("Proto")
		network       = currentInterfaces.Get(id)
	)

	if hostIP != "" {
		ip = net.ParseIP(hostIP)
		if ip == nil {
			return job.Errorf("Bad parameter: invalid host ip %s", hostIP)
		}
	}

	// host ip, proto, and host port
	var container net.Addr
	switch proto {
	case "tcp":
		container = &net.TCPAddr{IP: network.IP, Port: containerPort}
	case "udp":
		container = &net.UDPAddr{IP: network.IP, Port: containerPort}
	default:
		return job.Errorf("unsupported address type %s", proto)
	}

	//
	// Try up to 10 times to get a port that's not already allocated.
	//
	// In the event of failure to bind, return the error that portmapper.Map
	// yields.
	//

	var host net.Addr
	for i := 0; i < MaxAllocatedPortAttempts; i++ {
		if host, err = portmapper.Map(container, ip, hostPort); err == nil {
			break
		}

		if allocerr, ok := err.(portallocator.ErrPortAlreadyAllocated); ok {
			// There is no point in immediately retrying to map an explicitly
			// chosen port.
			if hostPort != 0 {
				job.Logf("Failed to bind %s for container address %s: %s", allocerr.IPPort(), container.String(), allocerr.Error())
				break
			}

			// Automatically chosen 'free' port failed to bind: move on the next.
			job.Logf("Failed to bind %s for container address %s. Trying another port.", allocerr.IPPort(), container.String())
		} else {
			// some other error during mapping
			job.Logf("Received an unexpected error during port allocation: %s", err.Error())
			break
		}
	}

	if err != nil {
		return job.Error(err)
	}

	network.PortMappings = append(network.PortMappings, host)

	out := engine.Env{}
	switch netAddr := host.(type) {
	case *net.TCPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	case *net.UDPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	}
	if _, err := out.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
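A sketch of a caller for "allocate_port". The job name and env keys come from the handler above; eng, containerID, and Output.AddEnv as the way to capture the reply are assumptions modeled on how Docker code of this era consumed such jobs.

	job := eng.Job("allocate_port", containerID)
	job.Setenv("Proto", "tcp")
	job.SetenvInt("ContainerPort", 80)
	job.SetenvInt("HostPort", 0) // 0 lets portmapper pick any free port
	result, err := job.Stdout.AddEnv() // collects the HostIP/HostPort env written to Stdout
	if err != nil {
		log.Fatal(err)
	}
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("mapped to %s:%d\n", result.Get("HostIP"), result.GetInt("HostPort"))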
Code example #8
File: driver.go  Project: MjAbuz/docker
func InitDriver(job *engine.Job) engine.Status {
	var (
		networkv4      *net.IPNet
		networkv6      *net.IPNet
		addrv4         net.Addr
		addrsv6        []net.Addr
		enableIPTables = job.GetenvBool("EnableIptables")
		enableIPv6     = job.GetenvBool("EnableIPv6")
		icc            = job.GetenvBool("InterContainerCommunication")
		ipMasq         = job.GetenvBool("EnableIpMasq")
		ipForward      = job.GetenvBool("EnableIpForward")
		bridgeIP       = job.Getenv("BridgeIP")
		bridgeIPv6     = "fe80::1/64"
		fixedCIDR      = job.Getenv("FixedCIDR")
		fixedCIDRv6    = job.Getenv("FixedCIDRv6")
	)

	if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" {
		defaultBindingIP = net.ParseIP(defaultIP)
	}

	bridgeIface = job.Getenv("BridgeIface")
	usingDefaultBridge := false
	if bridgeIface == "" {
		usingDefaultBridge = true
		bridgeIface = DefaultNetworkBridge
	}

	addrv4, addrsv6, err := networkdriver.GetIfaceAddr(bridgeIface)

	if err != nil {
		// No bridge exists yet; create one.
		// If we're not using the default bridge, fail without trying to create it
		if !usingDefaultBridge {
			return job.Error(err)
		}

		// If the iface is not found, try to create it
		if err := configureBridge(bridgeIP, bridgeIPv6, enableIPv6); err != nil {
			return job.Error(err)
		}

		addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
		if err != nil {
			return job.Error(err)
		}

		if fixedCIDRv6 != "" {
			// Setting route to global IPv6 subnet
			log.Infof("Adding route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface)
			if err := netlink.AddRoute(fixedCIDRv6, "", "", bridgeIface); err != nil {
				log.Fatalf("Could not add route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface)
			}
		}
	} else {
		// Bridge exists already. Getting info...
		// validate that the bridge ip matches the ip specified by BridgeIP
		if bridgeIP != "" {
			networkv4 = addrv4.(*net.IPNet)
			bip, _, err := net.ParseCIDR(bridgeIP)
			if err != nil {
				return job.Error(err)
			}
			if !networkv4.IP.Equal(bip) {
				return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", networkv4.IP, bip)
			}
		}

		// A bridge might exist but not have any IPv6 address associated with
		// it yet (for example, an existing Docker installation that has only
		// been used with IPv4 and already has docker0 set up). In that case we
		// can perform the bridge init for IPv6 here; otherwise we error out
		// below if --ipv6=true.
		if len(addrsv6) == 0 && enableIPv6 {
			if err := setupIPv6Bridge(bridgeIPv6); err != nil {
				return job.Error(err)
			}
			// recheck addresses now that IPv6 is setup on the bridge
			addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
			if err != nil {
				return job.Error(err)
			}
		}

		// TODO: Check if route to fixedCIDRv6 is set
	}

	if enableIPv6 {
		bip6, _, err := net.ParseCIDR(bridgeIPv6)
		if err != nil {
			return job.Error(err)
		}
		found := false
		for _, addrv6 := range addrsv6 {
			networkv6 = addrv6.(*net.IPNet)
			if networkv6.IP.Equal(bip6) {
				found = true
				break
			}
		}
		if !found {
			return job.Errorf("bridge IPv6 does not match existing bridge configuration %s", bip6)
		}
	}

	networkv4 = addrv4.(*net.IPNet)

	log.Infof("enableIPv6 = %t", enableIPv6)
	if enableIPv6 {
		if len(addrsv6) == 0 {
			return job.Error(errors.New("IPv6 enabled but no IPv6 detected"))
		}
		bridgeIPv6Addr = networkv6.IP
	}

	// Configure iptables for link support
	if enableIPTables {
		if err := setupIPTables(addrv4, icc, ipMasq); err != nil {
			return job.Error(err)
		}
	}

	if ipForward {
		// Enable IPv4 forwarding
		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
			job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
		}

		if fixedCIDRv6 != "" {
			// Enable IPv6 forwarding
			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/default/forwarding", []byte{'1', '\n'}, 0644); err != nil {
				job.Logf("WARNING: unable to enable IPv6 default forwarding: %s\n", err)
			}
			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/all/forwarding", []byte{'1', '\n'}, 0644); err != nil {
				job.Logf("WARNING: unable to enable IPv6 all forwarding: %s\n", err)
			}
		}
	}

	// We can always try removing the iptables
	if err := iptables.RemoveExistingChain("DOCKER", iptables.Nat); err != nil {
		return job.Error(err)
	}

	if enableIPTables {
		_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Nat)
		if err != nil {
			return job.Error(err)
		}
		chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
		if err != nil {
			return job.Error(err)
		}
		portmapper.SetIptablesChain(chain)
	}

	bridgeIPv4Network = networkv4
	if fixedCIDR != "" {
		_, subnet, err := net.ParseCIDR(fixedCIDR)
		if err != nil {
			return job.Error(err)
		}
		log.Debugf("Subnet: %v", subnet)
		if err := ipallocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil {
			return job.Error(err)
		}
	}

	if fixedCIDRv6 != "" {
		_, subnet, err := net.ParseCIDR(fixedCIDRv6)
		if err != nil {
			return job.Error(err)
		}
		log.Debugf("Subnet: %v", subnet)
		if err := ipallocator.RegisterSubnet(subnet, subnet); err != nil {
			return job.Error(err)
		}
		globalIPv6Network = subnet
	}

	// Block BridgeIP in IP allocator
	ipallocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP)

	// https://github.com/docker/docker/issues/2768
	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeIPv4Network.IP)

	for name, f := range map[string]engine.Handler{
		"allocate_interface": Allocate,
		"release_interface":  Release,
		"allocate_port":      AllocatePort,
		"link":               LinkContainers,
	} {
		if err := job.Eng.Register(name, f); err != nil {
			return job.Error(err)
		}
	}
	return engine.StatusOK
}
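The IPv6-aware variant consumes a few extra env keys on top of those in example #6; a sketch, with eng and the documentation-range prefix assumed for illustration:

	job := eng.Job("init_networkdriver")
	job.SetenvBool("EnableIPv6", true)
	job.Setenv("FixedCIDRv6", "2001:db8:1::/64") // containers get global IPv6 addresses from this subnet
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}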
Code example #9
File: init.go  Project: JianfuLi/docker
// InitServer runs the remote API server `srv` as a daemon.
// Only one API server can run at a time - this is enforced by a pidfile.
// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
func InitServer(job *engine.Job) engine.Status {
	job.Logf("Creating server")
	srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job))
	if err != nil {
		return job.Error(err)
	}
	job.Logf("Setting up signal traps")
	c := make(chan os.Signal, 1)
	signals := []os.Signal{os.Interrupt, syscall.SIGTERM}
	if os.Getenv("DEBUG") == "" {
		signals = append(signals, syscall.SIGQUIT)
	}
	gosignal.Notify(c, signals...)
	go func() {
		interruptCount := uint32(0)
		for sig := range c {
			go func(sig os.Signal) {
				log.Printf("Received signal '%v', starting shutdown of docker...\n", sig)
				switch sig {
				case os.Interrupt, syscall.SIGTERM:
					// If the user really wants to interrupt, let him do so.
					if atomic.LoadUint32(&interruptCount) < 3 {
						atomic.AddUint32(&interruptCount, 1)
						// Initiate the cleanup only once
						if atomic.LoadUint32(&interruptCount) == 1 {
							utils.RemovePidFile(srv.daemon.Config().Pidfile)
							srv.Close()
						} else {
							return
						}
					} else {
						log.Printf("Force shutdown of docker, interrupting cleanup\n")
					}
				case syscall.SIGQUIT:
				}
				os.Exit(128 + int(sig.(syscall.Signal)))
			}(sig)
		}
	}()
	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)

	for name, handler := range map[string]engine.Handler{
		"export":           srv.ContainerExport,
		"create":           srv.ContainerCreate,
		"stop":             srv.ContainerStop,
		"restart":          srv.ContainerRestart,
		"start":            srv.ContainerStart,
		"kill":             srv.ContainerKill,
		"pause":            srv.ContainerPause,
		"unpause":          srv.ContainerUnpause,
		"wait":             srv.ContainerWait,
		"tag":              srv.ImageTag, // FIXME merge with "image_tag"
		"resize":           srv.ContainerResize,
		"commit":           srv.ContainerCommit,
		"info":             srv.DockerInfo,
		"container_delete": srv.ContainerDestroy,
		"image_export":     srv.ImageExport,
		"images":           srv.Images,
		"history":          srv.ImageHistory,
		"viz":              srv.ImagesViz,
		"container_copy":   srv.ContainerCopy,
		"attach":           srv.ContainerAttach,
		"logs":             srv.ContainerLogs,
		"changes":          srv.ContainerChanges,
		"top":              srv.ContainerTop,
		"load":             srv.ImageLoad,
		"build":            srv.Build,
		"pull":             srv.ImagePull,
		"import":           srv.ImageImport,
		"image_delete":     srv.ImageDelete,
		"events":           srv.Events,
		"push":             srv.ImagePush,
		"containers":       srv.Containers,
	} {
		if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
			return job.Error(err)
		}
	}
	// Install image-related commands from the image subsystem.
	// See `graph/service.go`
	if err := srv.daemon.Repositories().Install(job.Eng); err != nil {
		return job.Error(err)
	}
	// Install daemon-related commands from the daemon subsystem.
	// See `daemon/`
	if err := srv.daemon.Install(job.Eng); err != nil {
		return job.Error(err)
	}
	srv.SetRunning(true)
	return engine.StatusOK
}
Code example #10
File: ssh.go  Project: krane-io/krane
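// Tunnel opens (or queues the opening of) an ssh tunnel to the ship named by
// the job's first argument; a second argument of "true" marks a delayed retry.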
func Tunnel(job *engine.Job) engine.Status {
	var fqdn string
	var delayed string

	if len(job.Args) < 1 || len(job.Args) > 2 {
		return job.Errorf("Usage: %s FQDN [DELAYED]", job.Name)
	}
	fqdn = job.Args[0]
	if len(job.Args) == 2 {
		delayed = job.Args[1]
	}

	configuration := job.Eng.Hack_GetGlobalVar("configuration").(types.KraneConfiguration)

	if delayed == "true" {
		job.Logf("\nWe are going to waiting 30 seconds to create ssh tunnel with %s", fqdn)
		time.Sleep(30 * time.Second)
		fleet, err := configuration.Driver.List(nil)
		production := configuration.Production.Fleet.Find(fqdn)
		if fleet.Find(fqdn).Id == "" {
			job.Logf("Ship %s does not exist in cloud provider", fqdn)
			return engine.StatusOK
		} else if production.Id != "" && production.LocalPort > 0 {
			job.Logf("Tunnel with Ship %s already exist", fqdn)
			return engine.StatusOK
		}

		if err != nil {
			job.Logf("\nUnable to get list of ships from %s", configuration.Driver.Name())
		}
		configuration.Production.Fleet.Append(fleet.Ships())
		job.Eng.Hack_SetGlobalVar("configuration", configuration)
		fmt.Printf("%#v", configuration.Production.Fleet)
	}

	if configuration.Production.HighPort == 0 {
		configuration.Production.HighPort = 8000
		job.Eng.Hack_SetGlobalVar("configuration", configuration)
	}

	ship := configuration.Production.Fleet.Find(fqdn)

	if (ship.State == "operational") && (ship.LocalPort == 0) {
		job.Logf("\nCreating ssh tunnel for %s\n", fqdn)
		configuration.Production.HighPort = configuration.Production.HighPort + 1
		ship.LocalPort = configuration.Production.HighPort
		configuration.Production.Fleet.AppendShip(ship)
		job.Eng.Hack_SetGlobalVar("configuration", configuration)
		go portMapping(job, ship.Fqdn, ship.LocalPort, ship.Port)
		return engine.StatusOK
	}

	job.Logf("\nGoing to queue job to create tunnel for %s\n", fqdn)
	newjob := job.Eng.Job("ssh_tunnel", fqdn, "true")
	if delayed == "true" {
		newjob.Run()
	} else {
		go newjob.Run()
	}
	return engine.StatusOK
}
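A hedged sketch of kicking off a tunnel. The "ssh_tunnel" job name comes from the re-queueing call inside Tunnel itself; eng and the ship's FQDN are assumptions for illustration.

	if err := eng.Register("ssh_tunnel", Tunnel); err != nil {
		log.Fatal(err)
	}
	// "false" asks for an immediate attempt; the handler re-queues itself with
	// "true" (the delayed path) when the ship is not yet operational.
	if err := eng.Job("ssh_tunnel", "ship-01.example.com", "false").Run(); err != nil {
		log.Fatal(err)
	}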