Example #1
		}
		defer testExec(output)()

		ok, err := vagrant.VboxIsVagrant()
		if err != nil {
			t.Errorf("%d: IsVagrant()=%s", i, err)
			continue
		}

		if ok != cas.ok {
			t.Errorf("%d: got %t, want %t", i, ok, cas.ok)
		}
	}
}

var h = vagrant.NewHandlers(&vagrant.Options{Log: logging.NewCustom("vagrant", true)})

func TestVboxLookupName(t *testing.T) {
	output := map[string]string{
		"VBoxManage list vms": vboxManageList,
	}
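	// testExec presumably swaps the package's command runner for a fake that
	// returns the canned output above and yields a restore func for the defer
	// (assumption inferred from its usage here).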
	defer testExec(output)()

	want := "urfve6cb85a2_default_1458910687825_39001"

	got, err := h.VboxLookupName("urfve6cb85a2")
	if err != nil {
		t.Fatalf("VboxLookupName()=%s", err)
	}

	if got != want {
		// The original example is cut off here; a plausible completion:
		t.Fatalf("VboxLookupName()=%q, want %q", got, want)
	}
}

Example #2
// NewKlient returns a new Klient instance
func NewKlient(conf *KlientConfig) (*Klient, error) {
	// usg is our main reference for counting and measuring metrics for the
	// klient. Only the methods listed below are counted; add or remove
	// methods here if they should reset the klient's usage timer.
	usg := usage.NewUsage(map[string]bool{
		"fs.readDirectory":     true,
		"fs.glob":              true,
		"fs.readFile":          true,
		"fs.writeFile":         true,
		"fs.uniquePath":        true,
		"fs.getInfo":           true,
		"fs.setPermissions":    true,
		"fs.remove":            true,
		"fs.rename":            true,
		"fs.createDirectory":   true,
		"fs.move":              true,
		"fs.copy":              true,
		"webterm.getSessions":  true,
		"webterm.connect":      true,
		"webterm.killSession":  true,
		"webterm.killSessions": true,
		"webterm.rename":       true,
		"exec":                 true,
		"klient.share":         true,
		"klient.unshare":       true,
		"klient.shared":        true,
		"sshkeys.List":         true,
		"sshkeys.Add":          true,
		"sshkeys.Delete":       true,
		"storage.Get":          true,
		"storage.Set":          true,
		"storage.Delete":       true,
		"log.upload":           true,
		// "docker.create":       true,
		// "docker.connect":      true,
		// "docker.stop":         true,
		// "docker.start":        true,
		// "docker.remove":       true,
		// "docker.list":         true,
	})

	// TODO(rjeczalik): Once klient installation method is reworked,
	// ensure flags are stored alongside konfig and do not
	// overwrite konfig here.
	if conf.KontrolURL != "" {
		konfig.Konfig.KontrolURL = conf.KontrolURL
	}

	// NOTE(rjeczalik): For backward-compatibility with old klient,
	// remove once not needed.
	if u, err := url.Parse(konfig.Konfig.KontrolURL); err == nil && konfig.Konfig.KontrolURL != "" {
		u.Path = ""
		konfig.Konfig.Endpoints.Koding = cfg.NewEndpointURL(u)
	}

	if conf.TunnelKiteURL != "" {
		u, err := url.Parse(conf.TunnelKiteURL)
		if err != nil {
			return nil, err
		}

		konfig.Konfig.Endpoints.Tunnel.Public.URL = u
	}

	k := newKite(conf)
	k.Config.VerifyAudienceFunc = verifyAudience

	if k.Config.KontrolURL == "" || k.Config.KontrolURL == "http://127.0.0.1:3000/kite" ||
		!konfig.Konfig.Endpoints.Kontrol().Equal(konfig.Builtin.Endpoints.Kontrol()) {
		k.Config.KontrolURL = konfig.Konfig.Endpoints.Kontrol().Public.String()
	}

	term := terminal.New(k.Log, conf.ScreenrcPath)
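	// Treat any terminal input as activity: reset the usage timer described
	// above so an actively used klient is not reported as idle.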
	term.InputHook = usg.Reset

	db, err := openBoltDB(configstore.CacheOptions("klient"))
	if err != nil {
		k.Log.Warning("Couldn't open BoltDB: %s", err)
	}
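	// A nil db is tolerated: the collaboration, storage, and vagrant handlers
	// below fall back to in-memory storage.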

	up := uploader.New(&uploader.Options{
		KeygenURL: konfig.Konfig.Endpoints.Kloud().Public.String(),
		Kite:      k,
		Bucket:    conf.logBucketName(),
		Region:    conf.logBucketRegion(),
		DB:        db,
		Log:       k.Log,
	})

	vagrantOpts := &vagrant.Options{
		Home:   conf.VagrantHome,
		DB:     db, // nil is ok; falls back to in-memory storage
		Log:    k.Log,
		Debug:  conf.Debug,
		Output: up.Output,
	}

	tunOpts := &tunnel.Options{
		DB:            db,
		Log:           k.Log,
		Kite:          k,
		NoProxy:       conf.NoProxy,
		TunnelKiteURL: konfig.Konfig.Endpoints.Tunnel.Public.String(),
	}

	t, err := tunnel.New(tunOpts)
	if err != nil {
		log.Fatal(err)
	}

	if conf.UpdateInterval < time.Minute {
		k.Log.Warning("Update interval can't be less than one minute. Setting to one minute.")
		conf.UpdateInterval = time.Minute
	}

	// TODO(rjeczalik): Enable after TMS-848.
	// mountEvents := make(chan *mount.Event)

	remoteOpts := &remote.RemoteOptions{
		Kite:    k,
		Log:     k.Log,
		Storage: storage.New(db),
		// EventSub: mountEvents,
	}

	machinesOpts := &machinegroup.GroupOpts{
		Storage:         storage.NewEncodingStorage(db, []byte("machines")),
		Builder:         mclient.NewKiteBuilder(k),
		DynAddrInterval: 2 * time.Second,
		PingInterval:    15 * time.Second,
	}

	machines, err := machinegroup.New(machinesOpts)
	if err != nil {
		k.Log.Fatal("Cannot initialize machine group: %s", err)
	}

	c := k.NewClient(konfig.Konfig.Endpoints.Kloud().Public.String())
	c.Auth = &kite.Auth{
		Type: "kiteKey",
		Key:  k.Config.KiteKey,
	}

	kloud := &apiutil.LazyKite{
		Client: c,
	}

	restClient := httputil.DefaultRestClient(konfig.Konfig.Debug)
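	// Wrap the REST transport so requests are authenticated against kloud via
	// the kite key, with credentials cached in the "klient" BoltDB bucket
	// (assumption based on the apiutil.KloudAuth wiring below).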
	restClient.Transport = &api.Transport{
		RoundTripper: restClient.Transport,
		AuthFunc: (&apiutil.KloudAuth{
			Kite: kloud,
			Storage: &apiutil.Storage{
				&cfg.Cache{
					EncodingStorage: storage.NewEncodingStorage(db, []byte("klient")),
				},
			},
		}).Auth,
		Log: k.Log.(logging.Logger),
	}

	kl := &Klient{
		kite:    k,
		collab:  collaboration.New(db), // nil is ok; falls back to in-memory storage
		storage: storage.New(db),       // nil is ok; falls back to in-memory storage
		tunnel:  t,
		vagrant: vagrant.NewHandlers(vagrantOpts),
		// docker:   docker.New("unix://var/run/docker.sock", k.Log),
		terminal: term,
		usage:    usg,
		log:      k.Log,
		config:   conf,
		remote:   remote.NewRemote(remoteOpts),
		uploader: up,
		machines: machines,
		updater: &Updater{
			Endpoint:       conf.UpdateURL,
			Interval:       conf.UpdateInterval,
			CurrentVersion: conf.Version,
			KontrolURL:     k.Config.KontrolURL,
			// MountEvents:    mountEvents,
			Log: k.Log,
		},
		logUploadDelay: 3 * time.Minute,
		presence: &presence.Client{
			Endpoint: konfig.Konfig.Endpoints.Social().Public.WithPath("presence").URL,
			Client:   restClient,
		},
		presenceEvery: onceevery.New(1 * time.Hour),
		kloud:         kloud,
	}

	kl.kite.OnRegister(kl.updateKiteKey)

	// Close all active sessions of the current user. Do not close them
	// immediately; give users some time so they can exit safely. If the user
	// reconnects, the timer is stopped, so we don't accidentally unshare
	// because of a network hiccup.
	kl.collabCloser = NewDeferTime(time.Minute, func() {
		sharedUsers, err := kl.collab.GetAll()
		if err != nil {
			kl.log.Warning("Couldn't unshare users: %s", err)
			return
		}

		if len(sharedUsers) == 0 {
			return
		}

		kl.log.Info("Unsharing users '%s'", sharedUsers)
		for user, option := range sharedUsers {
			// don't touch permanent users
			if option.Permanent {
				kl.log.Info("User is permanent, avoiding it: %q", user)
				continue
			}

			if err := kl.collab.Delete(user); err != nil {
				kl.log.Warning("Couldn't delete user from storage: %s", err)
			}
			kl.terminal.CloseSessions(user)
		}
	})

	// This is important, don't forget it: RegisterMethods exposes the klient's kite handlers.
	kl.RegisterMethods()

	return kl, nil
}
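
For context, here is a minimal sketch of how NewKlient might be invoked. The helper below is not part of the original source; it only sets configuration fields whose types are evident from the constructor above (KontrolURL, UpdateInterval), and everything else keeps its zero value.

// newLocalKlient is a hypothetical helper, not part of the original code,
// sketching a minimal NewKlient call.
func newLocalKlient() (*Klient, error) {
	conf := &KlientConfig{
		KontrolURL:     "",              // empty: fall back to the konfig default
		UpdateInterval: 5 * time.Minute, // NewKlient enforces a one-minute minimum
	}

	return NewKlient(conf)
}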