Code example #1
File: dev.go Project: rubysolo/nanobox
// dev
func dev(ccmd *cobra.Command, args []string) {

	// PreRun: boot

	// unless the nobuild flag is set, check whether a deploy is needed
	if !nobuild {

	// if the VM has not been created or deployed, the rebuild flag was passed,
	// or the VM has recently been reloaded, do a deploy
		if Vagrant.Status() == "not created" || !config.VMfile.HasDeployed() || rebuild || config.VMfile.HasReloaded() {

			fmt.Printf(stylish.Bullet("Deploying codebase..."))

			// remount the engine file at ~/.nanobox/apps/<app>/<engine> so any new scripts
			// will be used during the deploy
			if err := engineutil.RemountLocal(); err != nil {
				config.Error("[commands/dev] engineutil.RemountLocal() failed", err.Error())
			}

			// run a deploy
			if err := Server.Deploy(""); err != nil {
				Config.Fatal("[commands/dev] server.Deploy() failed - ", err.Error())
			}

			// stream log output
			go Mist.Stream([]string{"log", "deploy"}, Mist.PrintLogStream)

			// listen for status updates
			errch := make(chan error)
			go func() {
				errch <- Mist.Listen([]string{"job", "deploy"}, Mist.DeployUpdates)
			}()

			// wait for a status update (blocking)
			err := <-errch

			//
			if err != nil {
				fmt.Print(err.Error())
				return
			}
		}
	}

	//
	if err := Server.Exec("develop", ""); err != nil {
		config.Error("[commands/dev] Server.Exec failed", err.Error())
	}

	// PostRun: halt
}
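
Examples #1, #5, #7, and #8 all share the same shape: start streaming log output in one goroutine, listen for job status updates in a second, then block on an error channel until the job reports back. A minimal sketch of that pattern follows, with hypothetical stream, listen, and runJob functions standing in for the Mist.Stream, Mist.Listen, and Server.Deploy/Server.Build calls:

package main

import (
	"errors"
	"fmt"
)

// stream, listen, and runJob are hypothetical stand-ins for Mist.Stream,
// Mist.Listen, and Server.Deploy/Server.Build.
func stream()       {}                 // prints log lines as they arrive
func listen() error { return nil }     // blocks until a final status update
func runJob() error { return errors.New("job failed") }

func main() {

	// stream log output in the background
	go stream()

	// listen for status updates; the goroutine forwards the result over a
	// channel so main can block on it after starting the job
	errch := make(chan error)
	go func() {
		errch <- listen()
	}()

	// start the job
	if err := runJob(); err != nil {
		fmt.Println(err)
		return
	}

	// wait for a status update (blocking)
	if err := <-errch; err != nil {
		fmt.Println(err)
	}
}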
Code example #2
File: exec.go Project: sfermigier/nanobox
// execute
func execute(ccmd *cobra.Command, args []string) {

	// PreRun: boot

	//
	if len(args) == 0 {
		args = append(args, Print.Prompt("Please specify a command you wish to exec: "))
	}

	//
	v := url.Values{}

	// if a container is found that matches args[0] then set that as a qparam, and
	// remove it from the argument list
	if isContainer(args) {
		v.Add("container", args[0])
		args = args[1:]
	}
	v.Add("cmd", strings.Join(args, " "))

	//
	fmt.Printf(stylish.Bullet("Executing command in nanobox..."))
	if err := server.Exec(v.Encode()); err != nil {
		config.Error("[commands/exec] server.Exec failed", err.Error())
	}

	// PostRun: halt
}
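
The url.Values usage above is the standard library's query-string builder: Add collects key/value pairs and Encode produces a sorted, percent-escaped string. A quick standalone illustration (the keys mirror the ones above with hypothetical values; strconv.FormatBool, used the same way in example #5, renders boolean flags as "true"/"false"):

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	v := url.Values{}
	v.Add("container", "web1")  // hypothetical container name
	v.Add("cmd", "ls -la /app") // the command to exec
	v.Add("reset", strconv.FormatBool(true))

	// Encode sorts keys and escapes values:
	// cmd=ls+-la+%2Fapp&container=web1&reset=true
	fmt.Println(v.Encode())
}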
Code example #3
File: handlers.go Project: sfermigier/nanobox
// NotifyServer
func NotifyServer(event *fsnotify.Event) error {

	// if there is no timeout reader, create one and open a request; if there is
	// no timeout reader there won't be an open request, so checking for
	// timeoutReader is enough
	tr := timeoutReader
	if tr == nil {

		// create a new timeout reader
		tr = &TimeoutReader{
			Files:   make(chan string),
			timeout: 10 * time.Second,
		}
		timeoutReader = tr
		// launch a new request that is held open until EOF from the timeoutReader
		go func() {
			if _, err := Post("/file-changes", "text/plain", tr); err != nil {
				config.Error("file changes error", err.Error())
			}
		}()
	}

	// strip the current working directory from the filepath
	relPath := strings.Replace(event.Name, config.CWDir, "", -1)

	// for any event other than Chmod, append the filepath to the list of files to
	// be read
	if event.Op != fsnotify.Chmod {
		tr.Files <- relPath
	}

	return nil
}
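
The TimeoutReader type isn't shown in this excerpt, but the idea is an io.Reader whose Read pulls paths off the Files channel and returns io.EOF once nothing arrives within the timeout, letting the held-open POST body end naturally. A minimal sketch of how such a reader might look (an assumption about the implementation, not the project's actual code):

package main

import (
	"fmt"
	"io"
	"time"
)

// TimeoutReader emits newline-separated paths from Files until the channel
// stays quiet longer than timeout, then reports io.EOF.
type TimeoutReader struct {
	Files   chan string
	timeout time.Duration
}

func (tr *TimeoutReader) Read(p []byte) (int, error) {
	select {
	case path := <-tr.Files:
		// copy one path per Read call; a real implementation would also need
		// to handle paths longer than len(p)
		return copy(p, path+"\n"), nil
	case <-time.After(tr.timeout):
		// no file change within the window: end the request body
		return 0, io.EOF
	}
}

func main() {
	tr := &TimeoutReader{Files: make(chan string, 1), timeout: time.Second}
	tr.Files <- "/app/main.go"

	buf := make([]byte, 256)
	for {
		n, err := tr.Read(buf)
		if err == io.EOF {
			break
		}
		fmt.Print(string(buf[:n]))
	}
}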
Code example #4
File: console.go Project: rubysolo/nanobox
// console
func console(ccmd *cobra.Command, args []string) {

	// PreRun: boot

	//
	switch {

	// if no args are passed provide instruction
	case len(args) == 0:
		fmt.Printf(stylish.ErrBullet("Unable to console. Please provide a service to connect to.\n"))

	// if 1 arg is passed it's assumed to be a container to console into
	case len(args) == 1:
		if err := server.Exec("console", "container="+args[0]); err != nil {
			config.Error("[commands/console] Server.Exec failed", err.Error())
		}
	}

	// PostRun: halt
}
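
For context, every handler in these examples has the signature func(ccmd *cobra.Command, args []string) because it is wired up as the Run field of a cobra.Command. A minimal sketch of that registration (the Use/Short strings are illustrative, not nanobox's actual ones):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func console(ccmd *cobra.Command, args []string) {
	fmt.Println("console called with:", args)
}

func main() {
	consoleCmd := &cobra.Command{
		Use:   "console",
		Short: "Open an interactive console inside a service container",
		Run:   console,
	}

	rootCmd := &cobra.Command{Use: "nanobox"}
	rootCmd.AddCommand(consoleCmd)

	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}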
Code example #5
File: deploy.go Project: rubysolo/nanobox
// deploy
func deploy(ccmd *cobra.Command, args []string) {

	// PreRun: boot

	fmt.Printf(stylish.Bullet("Deploying codebase..."))

	// stream deploy output
	go Mist.Stream([]string{"log", "deploy"}, Mist.PrintLogStream)

	// listen for status updates
	errch := make(chan error)
	go func() {
		errch <- Mist.Listen([]string{"job", "deploy"}, Mist.DeployUpdates)
	}()

	v := url.Values{}
	v.Add("reset", strconv.FormatBool(config.Force))
	v.Add("run", strconv.FormatBool(install))

	// remount the engine file at ~/.nanobox/apps/<app>/<engine> so any new scripts
	// will be used during the deploy
	if err := engineutil.RemountLocal(); err != nil {
		config.Error("[commands/deploy] engineutil.RemountLocal() failed", err.Error())
	}

	// run a deploy
	if err := Server.Deploy(v.Encode()); err != nil {
		Config.Fatal("[commands/deploy] server.Deploy() failed - ", err.Error())
	}

	// wait for a status update (blocking)
	err := <-errch

	//
	if err != nil {
		fmt.Print(err.Error())
		return
	}

	// PostRun: halt
}
Code example #6
File: update.go Project: pauldevelder/nanobox
// update
func update(ccmd *cobra.Command, args []string) {

	update, err := updatable()
	if err != nil {
		config.Error("Unable to determine if updates are available", err.Error())
		return
	}

	// if the md5s don't match or it's been forced, update
	switch {
	case update, config.Force:
		if err := runUpdate(); err != nil {
			if _, ok := err.(*os.LinkError); ok {
				fmt.Println(`Nanobox was unable to update, try again with admin privilege (ex. "sudo nanobox update")`)
			} else {
				config.Fatal("[commands/update] runUpdate() failed", err.Error())
			}
		}
	default:
		fmt.Printf(stylish.SubBullet("[√] Nanobox is up-to-date"))
	}
}
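
The err.(*os.LinkError) check above is a plain Go type assertion, used to special-case the one failure mode (swapping the binary without sufficient permissions) that deserves a friendlier message. A small standalone illustration of the pattern:

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.Rename wraps its failures in *os.LinkError
	err := os.Rename("/nonexistent/a", "/nonexistent/b")

	// assert on the concrete error type to handle it specially
	if linkErr, ok := err.(*os.LinkError); ok {
		fmt.Println("link error:", linkErr.Op, linkErr.Old, linkErr.New)
		return
	}
	if err != nil {
		fmt.Println("some other error:", err)
	}
}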
Code example #7
File: build.go Project: rubysolo/nanobox
// build
func build(ccmd *cobra.Command, args []string) {

	// PreRun: boot

	fmt.Printf(stylish.Bullet("Building codebase..."))

	// stream build output
	go Mist.Stream([]string{"log", "deploy"}, Mist.PrintLogStream)

	// listen for status updates
	errch := make(chan error)
	go func() {
		errch <- Mist.Listen([]string{"job", "build"}, Mist.BuildUpdates)
	}()

	// remount the engine file at ~/.nanobox/apps/<app>/<engine> so any new scripts
	// are used during the build
	if err := engineutil.RemountLocal(); err != nil {
		config.Error("[commands/build] engineutil.RemountLocal() failed", err.Error())
	}

	// run a build
	if err := Server.Build(""); err != nil {
		Config.Fatal("[commands/build] server.Build() failed - ", err.Error())
	}

	// wait for a status update (blocking)
	err := <-errch

	//
	if err != nil {
		fmt.Print(err.Error())
		return
	}

	// PostRun: halt
}
Code example #8
File: run.go Project: rubysolo/nanobox
// run
func run(ccmd *cobra.Command, args []string) {

	// PreRun: boot

	fmt.Printf(stylish.Bullet("Deploying codebase..."))

	// stream deploy output
	go Mist.Stream([]string{"log", "deploy"}, Mist.PrintLogStream)

	// listen for status updates
	errch := make(chan error)
	go func() {
		errch <- Mist.Listen([]string{"job", "deploy"}, Mist.DeployUpdates)
	}()

	// remount the engine file at ~/.nanobox/apps/<app>/<engine> so any new scripts
	// will be used during the deploy
	if err := engineutil.RemountLocal(); err != nil {
		config.Error("[commands/run] engineutil.RemountLocal() failed", err.Error())
	}

	// run a deploy
	if err := Server.Deploy("run=true"); err != nil {
		Config.Fatal("[commands/run] server.Deploy() failed - ", err.Error())
	}

	// wait for a status update (blocking)
	err := <-errch

	//
	if err != nil {
		fmt.Print(err.Error())
		return
	}

	fmt.Printf(`
--------------------------------------------------------------------------------
[√] APP SUCCESSFULLY BUILT   ///   DEV URL : %v
--------------------------------------------------------------------------------
`, config.Nanofile.Domain)

	// if in background mode just exit w/o streaming logs or watching files
	if config.VMfile.IsBackground() {
		fmt.Println(`
To stream logs and watch files while in 'background mode' you can use
'nanobox log' and 'nanobox watch'
`)
		return
	}

	// if not in background mode begin streaming logs and watching files
	fmt.Printf(`
++> STREAMING LOGS (ctrl-c to exit) >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
`)

	// stream app output
	go Mist.Stream([]string{"log", "app"}, Mist.ProcessLogStream)

	// begin watching for file changes (blocking)
	if err := Notify.Watch(config.CWDir, Server.NotifyRebuild); err != nil {
		fmt.Print(err.Error())
	}

	// PostRun: halt
}
Code example #9
File: init.go Project: shaunstanislaus/nanobox
// Init
func Init() {

	// create Vagrantfile
	vagrantfile, err := os.Create(config.AppDir + "/Vagrantfile")
	if err != nil {
		config.Fatal("[util/vagrant/init] os.Create() failed", err.Error())
	}
	defer vagrantfile.Close()

	//
	// create synced folders

	// mount code directory (mounted as nfs by default)
	synced_folders := fmt.Sprintf(`nanobox.vm.synced_folder "%s", "/vagrant/code/%s"`, config.CWDir, config.Nanofile.Name)

	// mount code directory as NFS unless configured otherwise; if not mounted in
	// this way Vagrant will just decide what it thinks is best
	if config.Nanofile.MountNFS {
		synced_folders += `,
      type: "nfs",
      mount_options: ["nfsvers=3", "proto=tcp"]`
	}

	// "mount" the engine file locally at ~/.nanobox/apps/<app>/<engine>
	name, path, err := engineutil.MountLocal()
	if err != nil {
		config.Error("Engine failed to mount and will not work!", err.Error())
	}

	// "mount" the engine into the VM (if there is one)
	if name != "" && path != "" {
		synced_folders += fmt.Sprintf(`
    nanobox.vm.synced_folder "%s", "/vagrant/engines/%s"`, path, name)

		// mount engine directory as NFS unless configured otherwise; if not mounted in
		// this way Vagrant will just decide what it thinks is best
		if config.Nanofile.MountNFS {
			synced_folders += `,
      type: "nfs",
      mount_options: ["nfsvers=3", "proto=tcp"]`
		}
	}

	//
	// nanofile config

	// create nanobox private network and unique forward port
	network := fmt.Sprintf("nanobox.vm.network \"private_network\", ip: \"%s\"", config.Nanofile.IP)
	sshport := fmt.Sprintf("nanobox.vm.network :forwarded_port, guest: 22, host: %v, id: 'ssh'", util.StringToPort(config.Nanofile.Name))

	//
	provider := fmt.Sprintf(`# VirtualBox
    nanobox.vm.provider "virtualbox" do |p|
      p.name = "%v"

      p.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
      p.customize ["modifyvm", :id, "--cpuexecutioncap", "%v"]
      p.cpus = %v
      p.memory = %v
    end`, config.Nanofile.Name, config.Nanofile.CPUCap, config.Nanofile.CPUs, config.Nanofile.RAM)

	//
	// insert a provision script that will indicate to nanobox-server to boot into
	// 'devmode'
	var devmode string
	if config.Devmode {
		fmt.Printf(stylish.Bullet("Configuring vm to run in 'devmode'"))

		devmode = `# added because --dev was detected; boots the server into 'devmode'
    config.vm.provision "shell", inline: <<-DEVMODE
      echo "Starting VM in dev mode..."
      mkdir -p /mnt/sda/var/nanobox
      touch /mnt/sda/var/nanobox/DEV
    DEVMODE`
	}

	//
	// write to Vagrantfile
	vagrantfile.Write([]byte(fmt.Sprintf(`
################################################################################
##                                                                            ##
##                                   ***                                      ##
##                                *********                                   ##
##                           *******************                              ##
##                       ***************************                          ##
##                           *******************                              ##
##                       ...      *********      ...                          ##
##                           ...     ***     ...                              ##
##                       +++      ...   ...      +++                          ##
##                           +++     ...     +++                              ##
##                       \\\      +++   +++      ///                          ##
##                           \\\     +++     ///                              ##
##                                \\     //                                   ##
##                                   \//                                      ##
##                                                                            ##
##                    _  _ ____ _  _ ____ ___  ____ _  _                      ##
##                    |\ | |__| |\ | |  | |__) |  |  \/                       ##
##                    | \| |  | | \| |__| |__) |__| _/\_                      ##
##                                                                            ##
## This file was generated by nanobox. Any modifications to it may cause your ##
## nanobox VM to fail! To regenerate this file, delete it and run             ##
## 'nanobox init'                                                             ##
##                                                                            ##
################################################################################

# -*- mode: ruby -*-
# vi: set ft=ruby :

#
Vagrant.configure(2) do |config|

  # add the boot2docker user credentials to allow nanobox to freely ssh into the vm
  # w/o requiring a password
  config.ssh.shell = "bash"
  config.ssh.username = "******"
  config.ssh.password = "******"

  config.vm.define :'%s' do |nanobox|

    ## Set the hostname of the vm to the app domain
    nanobox.vm.provision "shell", inline: <<-SCRIPT
      sudo hostname %s
    SCRIPT

    ## Wait for nanobox-server to be ready before vagrant exits
    nanobox.vm.provision "shell", inline: <<-WAIT
      echo "Waiting for nanobox server..."
      while ! nc -z 127.0.0.1 1757; do sleep 1; done;
    WAIT

    ## box
    nanobox.vm.box     = "nanobox/boot2docker"


    ## network

    # add custom private network and ip and custom ssh port forward
    %s
    %s


    ## shared folders

    # disable default /vagrant share (overridden below)
    nanobox.vm.synced_folder ".", "/vagrant", disabled: true

    # add nanobox shared folders
    nanobox.vm.synced_folder "~/.ssh", "/mnt/ssh"
    %s


    ## provider configs
    %s

    ## wait for the dhcp service to come online
    nanobox.vm.provision "shell", inline: <<-WAIT
      attempts=0
      while [[ ! -f /var/run/udhcpc.eth1.pid && $attempts -lt 30 ]]; do
        let attempts++
        sleep 1
      done
    WAIT

    # kill the eth1 dhcp server so that it doesn't override the assigned ip when
    # the lease is up
    nanobox.vm.provision "shell", inline: <<-KILL
      if [ -f /var/run/udhcpc.eth1.pid ]; then
        echo "Killing eth1 dhcp..."
        kill -9 $(cat /var/run/udhcpc.eth1.pid)
      fi
    KILL

    %s

  end
end`, config.Nanofile.Name, config.Nanofile.Domain, network, sshport, synced_folders, provider, devmode)))
}
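
Building the whole Vagrantfile with one big fmt.Sprintf keeps the template in a single string, but the long positional argument list is easy to mis-order. As a point of comparison only (not what nanobox does), here is a trimmed-down sketch of the same generation using text/template with named fields:

package main

import (
	"os"
	"text/template"
)

// vagrantTmpl is a heavily trimmed stand-in for the real Vagrantfile template.
const vagrantTmpl = `Vagrant.configure(2) do |config|
  config.vm.define :'{{.Name}}' do |nanobox|
    nanobox.vm.network "private_network", ip: "{{.IP}}"
    nanobox.vm.provider "virtualbox" do |p|
      p.cpus   = {{.CPUs}}
      p.memory = {{.RAM}}
    end
  end
end
`

type nanofile struct {
	Name string
	IP   string
	CPUs int
	RAM  int
}

func main() {
	t := template.Must(template.New("Vagrantfile").Parse(vagrantTmpl))

	// named fields can't be silently swapped the way positional %s args can
	if err := t.Execute(os.Stdout, nanofile{"myapp", "192.168.0.55", 2, 1024}); err != nil {
		os.Exit(1)
	}
}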
Code example #10
File: notify.go Project: datachand/nanobox
// Watch
func Watch(path string, handle func(e *fsnotify.Event) error) error {

	var err error

	//
	setFileLimit()

	// get a list of directories that should not be watched; this is done because
	// there is a limit to how many files can be watched at a time, so large
	// folders like node_modules, bower_components, vendor, etc. are skipped
	getIgnoreDirs()

	// add source control files to be ignored (git, mercurial, svn)
	ignoreDirs = append(ignoreDirs, ".git", ".hg", "trunk")

	// create a new file watcher
	watcher, err = fsnotify.NewWatcher()
	if err != nil {
		if _, ok := err.(syscall.Errno); ok {
			return fmt.Errorf(`
! WARNING !
Failed to watch files, max file descriptor limit reached. Nanobox will not
be able to propagate filesystem events to the virtual machine. Consider
increasing your max file descriptor limit to re-enable this functionality.
`)
		}

		config.Fatal("[util/notify/notify] watcher.NewWatcher() failed - ", err.Error())
	}

	// return the error here because it means the path they are trying to watch
	// doesn't exist
	fi, err := os.Stat(path)
	if err != nil {
		return err
	}

	switch {

	// if the file is a directory, recursively add each subsequent directory to
	// the watch list; fsnotify will watch all files in a directory
	case fi.Mode().IsDir():
		if err = filepath.Walk(path, watchDir); err != nil {
			return err
		}

	// if the file is just a file, add only it to the watch list
	case fi.Mode().IsRegular():
		if err = watcher.Add(path); err != nil {
			return err
		}
	}

	// watch for interrupts
	exit := make(chan os.Signal, 1)
	signal.Notify(exit, os.Interrupt, os.Kill)

	// watch for file events (blocking)
	for {

		select {

		// handle any file events by calling the handler function
		case event := <-watcher.Events:

			// I use fileinfo here instead of error simply to avoid err collisions; the
			// error would be just as good at indicating if the file existed or not
			fi, _ := os.Stat(event.Name)

			switch event.Op {

			// the watcher needs to watch its own events to see if any directories are
			// created, and then add them to the list of watched directories
			case fsnotify.Create:

				// ensure that the file still exists before trying to watch it; ran into
				// a case with VIM where some tmp file (.swpx) was created and removed in
				// the same instant, causing the watch to panic
				if fi != nil && fi.Mode().IsDir() {

					// just ignore errors here since there isn't really anything that can
					// be done about it
					watchDir(event.Name, fi, err)
				}

			// the watcher also needs to see if any directories are removed, to then
			// remove them from the list of watched directories
			case fsnotify.Remove:

				// ensure that the file is still available to be removed before attempting
				// to remove it; the main reason for manually removing files is to help
				// spare the ulimit
				if fi != nil {
					if err := watcher.Remove(event.Name); err != nil {
						config.Fatal("[util/notify/notify] watcher.Remove() failed - ", err.Error())
					}
				}
			}

			// call the handler for each event fired
			if err := handle(&event); err != nil {
				config.Error("[util/notify/notify] handle error - ", err.Error())
			}

		// handle any errors by calling the handler function
		case <-watcher.Errors:
			// do something with watch errors?

			// listen for any signals and return execution back to the CLI to finish
			// whatever it might need to finish
		case <-exit:
			return nil
		}
	}
}
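
Stripped of the nanobox-specific handling, the core of Watch is the standard fsnotify loop: walk the tree adding each directory (fsnotify watches the files in a watched directory but does not recurse on its own), then select on Events/Errors, adding newly created directories as they appear. A minimal self-contained sketch (import path per current fsnotify releases; error handling trimmed):

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer watcher.Close()

	// recursively add every directory under the root to the watch list
	filepath.Walk(".", func(path string, fi os.FileInfo, err error) error {
		if err == nil && fi.IsDir() {
			return watcher.Add(path)
		}
		return nil
	})

	for {
		select {
		case event := <-watcher.Events:
			// newly created directories must be added by hand
			if event.Op&fsnotify.Create == fsnotify.Create {
				if fi, err := os.Stat(event.Name); err == nil && fi.IsDir() {
					watcher.Add(event.Name)
				}
			}
			fmt.Println("event:", event)
		case err := <-watcher.Errors:
			fmt.Println("watch error:", err)
		}
	}
}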