Example #1
func init() {
	defer trace.End(trace.Begin(""))
	trace.Logger.Level = log.DebugLevel
	_ = pprof.StartPprof("vicadmin", pprof.VicadminPort)

	// We don't want to run this as root.
	ud := syscall.Getuid()
	gd := syscall.Getgid()
	log.Info(fmt.Sprintf("Current UID/GID = %d/%d", ud, gd))
	// TODO: Enable this after we figure out how to NOT break the test suite with it.
	// if ud == 0 {
	// log.Errorf("Error: vicadmin must not run as root.")
	// time.Sleep(60 * time.Second)
	// os.Exit(1)
	// }

	flag.StringVar(&rootConfig.addr, "l", "client.localhost:2378", "Listen address")

	// TODO: This should all be pulled from the config
	flag.StringVar(&rootConfig.DatacenterPath, "dc", "", "Path of the datacenter")
	flag.StringVar(&rootConfig.ClusterPath, "cluster", "", "Path of the cluster")
	flag.StringVar(&rootConfig.PoolPath, "pool", "", "Path of the resource pool")

	// load the vch config
	src, err := extraconfig.GuestInfoSource()
	if err != nil {
		log.Errorf("Unable to load configuration from guestinfo")
		return
	}

	extraconfig.Decode(src, &vchConfig)

	// FIXME: pull the rest from flags
	flag.Parse()
}
Example #2
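// handleFlags parses the command-line flags, loads the VCH configuration
// from guestinfo, and assembles the CLI options for the server.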
func handleFlags() (*CliOptions, bool) {
	flag.Usage = Usage

	_ = flag.String("serveraddr", "127.0.0.1", "Server address to listen") // ignored
	serverPort := flag.Uint("port", 9000, "Port to listen")
	portLayerAddr := flag.String("port-layer-addr", "127.0.0.1", "Port layer server address")
	portLayerPort := flag.Uint("port-layer-port", 9001, "Port Layer server port")

	debug := flag.Bool("debug", false, "Enable debuglevel logging")

	flag.Parse()

	// load the vch config
	src, err := extraconfig.GuestInfoSource()
	if err != nil {
		log.Fatalf("Unable to load configuration from guestinfo: %s", err)
	}
	extraconfig.Decode(src, &vchConfig)

	if *debug || vchConfig.Diagnostics.DebugLevel > 0 {
		log.SetLevel(log.DebugLevel)
	}

	cli := &CliOptions{
		serverPort:    *serverPort,
		portLayerAddr: fmt.Sprintf("%s:%d", *portLayerAddr, *portLayerPort),
		proto:         "tcp",
	}

	return cli, true
}
Example #3
// updates acquires updates from the infrastructure without holding a lock
func (c *containerBase) updates(ctx context.Context) (*containerBase, error) {
	defer trace.End(trace.Begin(c.ExecConfig.ID))

	var o mo.VirtualMachine

	// make sure we have a vm
	if c.vm == nil {
		return nil, NotYetExistError{c.ExecConfig.ID}
	}

	if err := c.vm.Properties(ctx, c.vm.Reference(), []string{"config", "runtime"}, &o); err != nil {
		return nil, err
	}

	base := &containerBase{
		vm:         c.vm,
		Config:     o.Config,
		Runtime:    &o.Runtime,
		ExecConfig: &executor.ExecutorConfig{},
	}

	// Get the ExtraConfig
	extraconfig.Decode(vmomi.OptionValueSource(o.Config.ExtraConfig), base.ExecConfig)

	return base, nil
}
Example #4
// Create accepts a Config and returns a Session with the cached vSphere resources.
func (s *Session) Create(ctx context.Context) (*Session, error) {
	var vchExtraConfig metadata.VirtualContainerHostConfigSpec
	source, err := extraconfig.GuestInfoSource()
	if err != nil {
		return nil, err
	}

	extraconfig.Decode(source, &vchExtraConfig)

	s.ExtensionKey = vchExtraConfig.ExtensionKey
	s.ExtensionCert = vchExtraConfig.ExtensionCert
	s.ExtensionName = vchExtraConfig.ExtensionName

	_, err = s.Connect(ctx)
	if err != nil {
		return nil, err
	}

	// we're treating this as an atomic behaviour, so log out if we failed
	defer func() {
		if err != nil {
			s.Client.Logout(ctx)
		}
	}()

	_, err = s.Populate(ctx)
	if err != nil {
		return nil, err
	}

	return s, nil
}
Example #5
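// init registers the command-line flags and loads the VCH configuration
// from guestinfo.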
func init() {
	trace.Logger.Level = log.DebugLevel
	defer trace.End(trace.Begin(""))

	flag.StringVar(&config.addr, "l", ":2378", "Listen address")
	flag.StringVar(&config.dockerHost, "docker-host", "127.0.0.1:2376", "Docker host")
	flag.StringVar(&config.ExtensionCert, "cert", "", "VMOMI Client certificate file")
	flag.StringVar(&config.hostCertFile, "hostcert", "", "Host certificate file")
	flag.StringVar(&config.ExtensionKey, "key", "", "VMOMI Client private key file")
	flag.StringVar(&config.hostKeyFile, "hostkey", "", "Host private key file")
	flag.StringVar(&config.Service, "sdk", "", "The ESXi or vCenter URL")
	flag.StringVar(&config.DatacenterPath, "dc", "", "Name of the Datacenter")
	flag.StringVar(&config.DatastorePath, "ds", "", "Name of the Datastore")
	flag.StringVar(&config.ClusterPath, "cluster", "", "Path of the cluster")
	flag.StringVar(&config.PoolPath, "pool", "", "Path of the resource pool")
	flag.BoolVar(&config.Insecure, "insecure", false, "Allow connection when sdk certificate cannot be verified")
	flag.BoolVar(&config.tls, "tls", true, "Set to false to disable -hostcert and -hostkey and enable plain HTTP")

	// This is only applicable for containers hosted under the VCH VM folder
	// This will not function for vSAN
	flag.StringVar(&config.vmPath, "vm-path", "", "Docker vm path")

	// load the vch config
	src, err := extraconfig.GuestInfoSource()
	if err != nil {
		log.Errorf("Unable to load configuration from guestinfo")
	}
	extraconfig.Decode(src, &vchConfig)
}
Example #6
func main() {
	defer func() {
		if r := recover(); r != nil {
			log.Errorf("run time panic: %s : %s", r, debug.Stack())
		}

		reboot()
	}()

	src, err := extraconfig.GuestInfoSourceWithPrefix("init")
	if err != nil {
		log.Error(err)
		return
	}

	extraconfig.Decode(src, &config)

	debugLevel = config.Diagnostics.DebugLevel
	if debugLevel > 2 {
		enableShell()
	}
	setLogLevels()

	logFile, err := os.OpenFile("/dev/ttyS1", os.O_WRONLY|os.O_SYNC, 0644)
	if err != nil {
		log.Errorf("Could not pipe stderr to serial for debugging info. Some debug info may be lost! Error reported was %s", err)
	} else if err = syscall.Dup3(int(logFile.Fd()), int(os.Stderr.Fd()), 0); err != nil {
		log.Errorf("Could not pipe logfile to standard error due to error %s", err)
	}

	_, err = os.Stderr.WriteString("all stderr redirected to debug log")
	if err != nil {
		log.Errorf("Could not write to Stderr due to error %s", err)
	}

	sink, err := extraconfig.GuestInfoSinkWithPrefix("init")
	if err != nil {
		log.Error(err)
		return
	}

	// create the tether
	tthr = tether.New(src, sink, &operations{})

	// register the toolbox extension and configure for appliance
	toolbox := configureToolbox(tether.NewToolbox())
	toolbox.PrimaryIP = externalIP
	tthr.Register("Toolbox", toolbox)

	err = tthr.Start()
	if err != nil {
		log.Error(err)
		return
	}

	log.Info("Clean exit from init")
}
Example #7
func TestToExtraConfig(t *testing.T) {
	exec := metadata.ExecutorConfig{
		Common: metadata.Common{
			ID:   "deadbeef",
			Name: "configtest",
		},
		Sessions: map[string]metadata.SessionConfig{
			"deadbeef": metadata.SessionConfig{
				Cmd: metadata.Cmd{
					Path: "/bin/bash",
					Args: []string{"/bin/bash", "-c", "echo hello"},
					Dir:  "/",
					Env:  []string{"HOME=/", "PATH=/bin"},
				},
			},
			"beefed": metadata.SessionConfig{
				Cmd: metadata.Cmd{
					Path: "/bin/bash",
					Args: []string{"/bin/bash", "-c", "echo goodbye"},
					Dir:  "/",
					Env:  []string{"HOME=/", "PATH=/bin"},
				},
			},
		},
		Networks: map[string]*metadata.NetworkEndpoint{
			"eth0": &metadata.NetworkEndpoint{
				Static: &net.IPNet{IP: localhost, Mask: lmask.Mask},
				Network: metadata.ContainerNetwork{
					Common: metadata.Common{
						Name: "notsure",
					},
					Gateway:     net.IPNet{IP: gateway, Mask: gmask.Mask},
					Nameservers: []net.IP{},
				},
			},
		},
	}

	// encode metadata package's ExecutorConfig
	encoded := map[string]string{}
	extraconfig.Encode(extraconfig.MapSink(encoded), exec)

	// decode into this package's ExecutorConfig
	var decoded ExecutorConfig
	extraconfig.Decode(extraconfig.MapSource(encoded), &decoded)

	// the networks should be identical
	assert.Equal(t, exec.Networks["eth0"], decoded.Networks["eth0"])

	// the source and destination structs are different - we're doing a sparse comparison
	expected := exec.Sessions["deadbeef"]
	actual := *decoded.Sessions["deadbeef"]

	assert.Equal(t, expected.Cmd.Path, actual.Cmd.Path)
	assert.Equal(t, expected.Cmd.Args, actual.Cmd.Args)
	assert.Equal(t, expected.Cmd.Dir, actual.Cmd.Dir)
	assert.Equal(t, expected.Cmd.Env, actual.Cmd.Env)
}
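A minimal round-trip sketch distilled from the test above, assuming the extraconfig package encodes untagged exported fields with its defaults; the Demo type, main wrapper, and import path are illustrative assumptions rather than part of the original suite:

package main

import (
	"fmt"

	"github.com/vmware/vic/pkg/vsphere/extraconfig"
)

// Demo is a hypothetical struct standing in for the real executor configs.
type Demo struct {
	ID   string
	Name string
}

func main() {
	src := Demo{ID: "deadbeef", Name: "configtest"}

	// encode into a plain string map - the same in-memory sink the test uses
	encoded := map[string]string{}
	extraconfig.Encode(extraconfig.MapSink(encoded), src)

	// decode back out of the map into a fresh struct
	var dst Demo
	extraconfig.Decode(extraconfig.MapSource(encoded), &dst)

	fmt.Printf("%+v\n", dst)
}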
Example #8
func handleFlags() (*CliOptions, bool) {
	flag.Usage = Usage

	enableTLS := flag.Bool("TLS", false, "Use TLS; implied by --tlsverify")
	verifyTLS := flag.Bool("tlsverify", false, "Use TLS and verify the remote")
	cafile := flag.String("tls-ca-certificate", "", "Trust certs signed only by this CA")
	certfile := flag.String("tls-certificate", "", "Path to TLS certificate file")
	keyfile := flag.String("tls-key", "", "Path to TLS Key file")
	serverAddr := flag.String("serveraddr", "127.0.0.1", "Server address to listen")
	serverPort := flag.Uint("port", 9000, "Port to listen")
	portLayerAddr := flag.String("port-layer-addr", "127.0.0.1", "Port layer server address")
	portLayerPort := flag.Uint("port-layer-port", 9001, "Port Layer server port")

	debug := flag.Bool("debug", false, "Enable debuglevel logging")

	flag.Parse()

	if *enableTLS && (len(*certfile) == 0 || len(*keyfile) == 0) {
		fmt.Fprintf(os.Stderr, "TLS requested, but tls-certificate and tls-key were all not specified\n")
		return nil, false
	}

	if *verifyTLS {
		*enableTLS = true

		if len(*certfile) == 0 || len(*keyfile) == 0 || len(*cafile) == 0 {
			fmt.Fprintf(os.Stderr, "tlsverfiy requested, but tls-ca-certificate, tls-certificate, tls-key were all not specified\n")
			return nil, false
		}
	}

	cli := &CliOptions{
		enableTLS:     *enableTLS,
		verifyTLS:     *verifyTLS,
		cafile:        *cafile,
		certfile:      *certfile,
		keyfile:       *keyfile,
		serverAddr:    *serverAddr,
		serverPort:    *serverPort,
		fullserver:    fmt.Sprintf("%s:%d", *serverAddr, *serverPort),
		portLayerAddr: fmt.Sprintf("%s:%d", *portLayerAddr, *portLayerPort),
		proto:         "tcp",
	}

	// load the vch config
	src, err := extraconfig.GuestInfoSource()
	if err != nil {
		log.Errorf("Unable to load configuration from guestinfo")
	}
	extraconfig.Decode(src, &vchConfig)

	if *debug || vchConfig.Diagnostics.DebugLevel > 0 {
		log.SetLevel(log.DebugLevel)
	}

	return cli, true
}
Example #9
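// init loads the VCH configuration from guestinfo when the package is initialized.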
func init() {
	// load the vch config
	// TODO: Optimize this to just pull the fields we need...
	src, err := extraconfig.GuestInfoSource()
	if err != nil {
		log.Errorf("Unable to load configuration from guestinfo")
		return
	}
	extraconfig.Decode(src, &vchConfig)
}
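The fetch-then-decode pattern above recurs throughout these examples; a hedged consolidation sketch, where loadVCHConfig is a hypothetical helper rather than an existing function in the codebase:

// loadVCHConfig captures the recurring pattern: fetch the guestinfo data
// source, then decode it into the supplied configuration struct.
func loadVCHConfig(config interface{}) error {
	src, err := extraconfig.GuestInfoSource()
	if err != nil {
		return fmt.Errorf("unable to load configuration from guestinfo: %s", err)
	}
	extraconfig.Decode(src, config)
	return nil
}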
Example #10
func TestHalt(t *testing.T) {
	_, mocker := testSetup(t)
	defer testTeardown(t, mocker)

	cfg := executor.ExecutorConfig{
		Common: executor.Common{
			ID:   "abspath",
			Name: "tether_test_executor",
		},

		Sessions: map[string]*executor.SessionConfig{
			"abspath": &executor.SessionConfig{
				Common: executor.Common{
					ID:   "abspath",
					Name: "tether_test_session",
				},
				Tty: false,
				Cmd: executor.Cmd{
					// test abs path
					Path: "/bin/date",
					Args: []string{"date", "--reference=/"},
					Env:  []string{},
					Dir:  "/",
				},
			},
		},
	}

	_, src, err := RunTether(t, &cfg, mocker)
	assert.NoError(t, err, "Didn't expected error from RunTether")

	// block until tether exits
	<-mocker.Cleaned

	result := ExecutorConfig{}
	extraconfig.Decode(src, &result)

	assert.Equal(t, "true", result.Sessions["abspath"].Started, "Expected command to have been started successfully")
	assert.Equal(t, 0, result.Sessions["abspath"].ExitStatus, "Expected command to have exited cleanly")

	// read the output from the session
	log := mocker.SessionLogBuffer.Bytes()

	// run the command directly
	out, err := exec.Command("/bin/date", "--reference=/").Output()
	if err != nil {
		fmt.Printf("Failed to run date for comparison data: %s", err)
		t.Error(err)
		return
	}

	if !assert.Equal(t, out, log) {
		return
	}
}
Example #11
func (t *tether) Start() error {
	defer trace.End(trace.Begin("main tether loop"))

	// do the initial setup and start the extensions
	t.setup()
	defer t.cleanup()

	// initial entry, so seed this
	t.reload <- true
	for range t.reload {
		log.Info("Loading main configuration")

		// load the config - this modifies the structure values in place
		extraconfig.Decode(t.src, t.config)

		t.setLogLevel()

		if err := t.setHostname(); err != nil {
			log.Error(err)
			return err
		}

		// process the networks then publish any dynamic data
		if err := t.setNetworks(); err != nil {
			log.Error(err)
			return err
		}
		extraconfig.Encode(t.sink, t.config)

		// process the filesystem mounts - this is performed after networks to allow for network mounts
		if err := t.setMounts(); err != nil {
			log.Error(err)
			return err
		}

		if err := t.initializeSessions(); err != nil {
			log.Error(err)
			return err
		}

		if err := t.reloadExtensions(); err != nil {
			log.Error(err)
			return err
		}

		if err := t.processSessions(); err != nil {
			log.Error(err)
			return err
		}
	}

	log.Info("Finished processing sessions")

	return nil
}
Example #12
// applianceConfiguration updates the configuration passed in with the latest from the appliance VM.
// there's no guarantee of consistency within the configuration at this time
func (d *Dispatcher) applianceConfiguration(conf *metadata.VirtualContainerHostConfigSpec) error {
	defer trace.End(trace.Begin(""))

	extraConfig, err := d.appliance.FetchExtraConfig(d.ctx)
	if err != nil {
		return err
	}

	extraconfig.Decode(extraconfig.MapSource(extraConfig), conf)
	return nil
}
Example #13
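// newBase builds a containerBase around the given VM, decoding any
// extraConfig present into a working copy of the executor config.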
func newBase(vm *vm.VirtualMachine, c *types.VirtualMachineConfigInfo, r *types.VirtualMachineRuntimeInfo) *containerBase {
	base := &containerBase{
		ExecConfig: &executor.ExecutorConfig{},
		Config:     c,
		Runtime:    r,
		vm:         vm,
	}

	// construct a working copy of the exec config
	if c != nil && c.ExtraConfig != nil {
		src := vmomi.OptionValueSource(c.ExtraConfig)
		extraconfig.Decode(src, base.ExecConfig)
	}

	return base
}
Example #14
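// Init decodes the storage configuration from guestinfo, locates the VCH's
// datastore folder, and initializes the store, exec, and network layers.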
func Init(ctx context.Context, sess *session.Session) error {
	source, err := extraconfig.GuestInfoSource()
	if err != nil {
		return err
	}

	sink, err := extraconfig.GuestInfoSink()
	if err != nil {
		return err
	}

	// Grab the storage layer config blobs from extra config
	extraconfig.Decode(source, &storage.Config)
	log.Debugf("Decoded VCH config for storage: %#v", storage.Config)

	// create or restore a portlayer k/v store in the VCH's directory.
	vch, err := guest.GetSelf(ctx, sess)
	if err != nil {
		return err
	}

	vchvm := vm.NewVirtualMachineFromVM(ctx, sess, vch)
	vmPath, err := vchvm.VMPathName(ctx)
	if err != nil {
		return err
	}

	// vmPath is set to the vmx.  Grab the directory from that.
	vmFolder, err := datastore.ToURL(path.Dir(vmPath))
	if err != nil {
		return err
	}

	if err = store.Init(ctx, sess, vmFolder); err != nil {
		return err
	}

	if err := exec.Init(ctx, sess, source, sink); err != nil {
		return err
	}

	if err = network.Init(ctx, sess, source, sink); err != nil {
		return err
	}

	return nil
}
Example #15
// convert the infra containers to a container object
func convertInfraContainers(vms []mo.VirtualMachine, all bool) []*Container {
	var containerVMs []*Container

	for i := range vms {
		// poweredOn or all states
		if !all && vms[i].Runtime.PowerState == types.VirtualMachinePowerStatePoweredOff {
			// don't want it
			log.Debugf("Skipping poweredOff VM %s", vms[i].Config.Name)
			continue
		}

		container := &Container{ExecConfig: &executor.ExecutorConfig{}}
		source := vmomi.OptionValueSource(vms[i].Config.ExtraConfig)
		extraconfig.Decode(source, container.ExecConfig)

		// check extraConfig to see if we have a containerVM -- assumes
		// that ID will always be populated for each containerVM
		if container.ExecConfig == nil || container.ExecConfig.ID == "" {
			log.Debugf("Skipping non-container vm %s", vms[i].Config.Name)
			continue
		}

		// set state
		if vms[i].Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn {
			container.State = StateRunning
		} else {
			// look in the container cache and check state
			// if it's created we'll take that as it's been created, but
			// not started
			cached := containers.Container(container.ExecConfig.ID)
			if cached != nil && cached.State == StateCreated {
				container.State = StateCreated
			} else {
				container.State = StateStopped
			}
		}
		if vms[i].Summary.Storage != nil {
			container.VMUnsharedDisk = vms[i].Summary.Storage.Unshared
		}

		containerVMs = append(containerVMs, container)
	}

	return containerVMs
}
Example #16
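// Update refreshes the container's ExecConfig from the extraConfig of its
// backing VM, holding the container lock for the duration.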
func (c *Container) Update(ctx context.Context, sess *session.Session) (*executor.ExecutorConfig, error) {
	defer trace.End(trace.Begin("Container.Update"))
	c.Lock()
	defer c.Unlock()

	if c.vm == nil {
		return nil, fmt.Errorf("container does not have a vm")
	}

	var vm []mo.VirtualMachine

	if err := sess.Retrieve(ctx, []types.ManagedObjectReference{c.vm.Reference()}, []string{"config"}, &vm); err != nil {
		return nil, err
	}

	extraconfig.Decode(vmomi.OptionValueSource(vm[0].Config.ExtraConfig), c.ExecConfig)
	return c.ExecConfig, nil
}
Example #17
func init() {
	defer trace.End(trace.Begin(""))
	trace.Logger.Level = log.DebugLevel
	_ = pprof.StartPprof("vicadmin", pprof.VicadminPort)

	// We don't want to run this as root.
	ud := syscall.Getuid()
	gd := syscall.Getgid()
	log.Info(fmt.Sprintf("Current UID/GID = %d/%d", ud, gd))
	// TODO: Enable this after we figure out how to NOT break the test suite with it.
	// if ud == 0 {
	// log.Errorf("Error: vicadmin must not run as root.")
	// time.Sleep(60 * time.Second)
	// os.Exit(1)
	// }

	flag.StringVar(&config.addr, "l", ":2378", "Listen address")
	flag.StringVar(&config.dockerHost, "docker-host", "127.0.0.1:2376", "Docker host")
	flag.StringVar(&config.hostCertFile, "hostcert", "", "Host certificate file")
	flag.StringVar(&config.hostKeyFile, "hostkey", "", "Host private key file")
	flag.StringVar(&config.DatacenterPath, "dc", "", "Name of the Datacenter")
	flag.StringVar(&config.DatastorePath, "ds", "", "Name of the Datastore")
	flag.StringVar(&config.ClusterPath, "cluster", "", "Path of the cluster")
	flag.StringVar(&config.PoolPath, "pool", "", "Path of the resource pool")
	flag.BoolVar(&config.Insecure, "insecure", false, "Allow connection when sdk certificate cannot be verified")
	flag.BoolVar(&config.tls, "tls", true, "Set to false to disable -hostcert and -hostkey and enable plain HTTP")

	// This is only applicable for containers hosted under the VCH VM folder
	// This will not function for vSAN
	flag.StringVar(&config.vmPath, "vm-path", "", "Docker vm path")

	flag.Parse()

	// load the vch config
	src, err := extraconfig.GuestInfoSource()
	if err != nil {
		log.Errorf("Unable to load configuration from guestinfo")
		return
	}

	extraconfig.Decode(src, &vchConfig)
}
Example #18
func (d *Dispatcher) isVCH(vm *vm.VirtualMachine) (bool, error) {
	if vm == nil {
		return false, errors.New("nil parameter")
	}
	defer trace.End(trace.Begin(vm.InventoryPath))

	info, err := vm.FetchExtraConfig(d.ctx)
	if err != nil {
		err = errors.Errorf("Failed to fetch guest info of appliance vm: %s", err)
		return false, err
	}

	var remoteConf config.VirtualContainerHostConfigSpec
	extraconfig.Decode(extraconfig.MapSource(info), &remoteConf)

	// if the moref of the target matches where we expect to find it for a VCH, run with it
	if remoteConf.ExecutorConfig.ID == vm.Reference().String() {
		return true, nil
	}

	return false, nil
}
Example #19
func TestMissingRelativeBinary(t *testing.T) {
	_, mocker := testSetup(t)
	defer testTeardown(t, mocker)

	cfg := executor.ExecutorConfig{
		Common: executor.Common{
			ID:   "missing",
			Name: "tether_test_executor",
		},

		Sessions: map[string]*executor.SessionConfig{
			"missing": &executor.SessionConfig{
				Common: executor.Common{
					ID:   "missing",
					Name: "tether_test_session",
				},
				Tty: false,
				Cmd: executor.Cmd{
					// test relative path
					Path: "notthere",
					Args: []string{"notthere"},
					Env:  []string{"PATH=/not"},
					Dir:  "/",
				},
			},
		},
	}

	_, src, err := RunTether(t, &cfg, mocker)
	assert.Error(t, err, "Expected error from RunTether")

	// refresh the cfg with current data
	extraconfig.Decode(src, &cfg)

	// check the launch status was failed
	status := cfg.Sessions["missing"].Started

	assert.Equal(t, "notthere: no such executable in PATH", status, "Expected status to have a command not found error message")
}
Example #20
func (d *Dispatcher) GetVCHConfig(vm *vm.VirtualMachine) (*metadata.VirtualContainerHostConfigSpec, error) {
	defer trace.End(trace.Begin(""))

	// this is the appliance vm
	mapConfig, err := vm.FetchExtraConfig(d.ctx)
	if err != nil {
		err = errors.Errorf("Failed to get VM extra config of %s, %s", vm.Reference(), err)
		log.Errorf("%s", err)
		return nil, err
	}
	data := extraconfig.MapSource(mapConfig)
	vchConfig := &metadata.VirtualContainerHostConfigSpec{}
	result := extraconfig.Decode(data, vchConfig)
	if result == nil {
		err = errors.Errorf("Failed to decode VM configuration %s, %s", vm.Reference(), err)
		log.Errorf("%s", err)
		return nil, err
	}

	return vchConfig, nil
}
Example #21
func TestRelativePath(t *testing.T) {
	_, mocker := testSetup(t)
	defer testTeardown(t, mocker)

	cfg := executor.ExecutorConfig{
		Common: executor.Common{
			ID:   "relpath",
			Name: "tether_test_executor",
		},

		Sessions: map[string]*executor.SessionConfig{
			"relpath": &executor.SessionConfig{
				Common: executor.Common{
					ID:   "relpath",
					Name: "tether_test_session",
				},
				Tty: false,
				Cmd: executor.Cmd{
					// test relative path
					Path: "./date",
					Args: []string{"./date", "--reference=/"},
					Env:  []string{"PATH="},
					Dir:  "/bin",
				},
			},
		},
	}

	_, src, err := RunTether(t, &cfg, mocker)
	assert.NoError(t, err, "Didn't expected error from RunTether")

	result := ExecutorConfig{}
	extraconfig.Decode(src, &result)

	assert.Equal(t, "true", result.Sessions["relpath"].Started, "Expected command to have been started successfully")
	assert.Equal(t, 0, result.Sessions["relpath"].ExitStatus, "Expected command to have exited cleanly")
}
Example #22
func TestToExtraConfig(t *testing.T) {
	exec := executor.ExecutorConfig{
		Common: executor.Common{
			ID:   "deadbeef",
			Name: "configtest",
		},
		Sessions: map[string]*executor.SessionConfig{
			"deadbeef": &executor.SessionConfig{
				Cmd: executor.Cmd{
					Path: "/bin/bash",
					Args: []string{"/bin/bash", "-c", "echo hello"},
					Dir:  "/",
					Env:  []string{"HOME=/", "PATH=/bin"},
				},
			},
			"beefed": &executor.SessionConfig{
				Cmd: executor.Cmd{
					Path: "/bin/bash",
					Args: []string{"/bin/bash", "-c", "echo goodbye"},
					Dir:  "/",
					Env:  []string{"HOME=/", "PATH=/bin"},
				},
			},
		},
		Networks: map[string]*executor.NetworkEndpoint{
			"eth0": &executor.NetworkEndpoint{
				Static: true,
				IP:     &net.IPNet{IP: localhost, Mask: lmask.Mask},
				Network: executor.ContainerNetwork{
					Common: executor.Common{
						Name: "notsure",
					},
					Gateway:     net.IPNet{IP: gateway, Mask: gmask.Mask},
					Nameservers: []net.IP{},
					Pools:       []ip.Range{},
					Aliases:     []string{},
				},
			},
		},
	}

	// encode exec package's ExecutorConfig
	encoded := map[string]string{}
	extraconfig.Encode(extraconfig.MapSink(encoded), exec)

	// decode into this package's ExecutorConfig
	var decoded ExecutorConfig
	extraconfig.Decode(extraconfig.MapSource(encoded), &decoded)

	// the source and destination structs are different - we're doing a sparse comparison
	expectedNet := exec.Networks["eth0"]
	actualNet := decoded.Networks["eth0"]

	assert.Equal(t, expectedNet.Common, actualNet.Common)
	assert.Equal(t, expectedNet.Static, actualNet.Static)
	assert.Equal(t, expectedNet.Assigned, actualNet.Assigned)
	assert.Equal(t, expectedNet.Network, actualNet.Network)

	expectedSession := exec.Sessions["deadbeef"]
	actualSession := decoded.Sessions["deadbeef"]

	assert.Equal(t, expectedSession.Cmd.Path, actualSession.Cmd.Path)
	assert.Equal(t, expectedSession.Cmd.Args, actualSession.Cmd.Args)
	assert.Equal(t, expectedSession.Cmd.Dir, actualSession.Cmd.Dir)
	assert.Equal(t, expectedSession.Cmd.Env, actualSession.Cmd.Env)
}
Example #23
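// Init decodes the VCH configuration from guestinfo and resolves the compute,
// event, network, and storage resources the port layer depends on.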
func Init(ctx context.Context, sess *session.Session) error {
	source, err := extraconfig.GuestInfoSource()
	if err != nil {
		return err
	}

	f := find.NewFinder(sess.Vim25(), false)

	extraconfig.Decode(source, &exec.VCHConfig)
	log.Debugf("Decoded VCH config for execution: %#v", exec.VCHConfig)
	ccount := len(exec.VCHConfig.ComputeResources)
	if ccount != 1 {
		detail := fmt.Sprintf("expected singular compute resource element, found %d", ccount)
		log.Errorf(detail)
		return errors.New(detail)
	}

	cr := exec.VCHConfig.ComputeResources[0]
	r, err := f.ObjectReference(ctx, cr)
	if err != nil {
		detail := fmt.Sprintf("could not get resource pool or virtual app reference from %q: %s", cr.String(), err)
		log.Errorf(detail)
		return err
	}
	switch o := r.(type) {
	case *object.VirtualApp:
		exec.VCHConfig.VirtualApp = o
		exec.VCHConfig.ResourcePool = o.ResourcePool
	case *object.ResourcePool:
		exec.VCHConfig.ResourcePool = o
	default:
		detail := fmt.Sprintf("could not get resource pool or virtual app from reference %q: object type is wrong", cr.String())
		log.Errorf(detail)
		return errors.New(detail)
	}

	// we have a resource pool, so let's create the event manager for monitoring
	exec.VCHConfig.EventManager = vsphere.NewEventManager(sess)
	// configure event manager to monitor the resource pool
	exec.VCHConfig.EventManager.AddMonitoredObject(exec.VCHConfig.ResourcePool.Reference().String())

	// instantiate the container cache now
	exec.NewContainerCache()

	// need to blacklist the VCH from event listening - too many reconfigures
	vch, err := guest.GetSelf(ctx, sess)
	if err != nil {
		return fmt.Errorf("Unable to get a reference to the VCH: %s", err.Error())
	}
	exec.VCHConfig.EventManager.Blacklist(vch.Reference().String())

	// other managed objects could be added for the event stream, but for now the resource pool will do
	exec.VCHConfig.EventManager.Start()

	// FIXME: temporary injection of debug network for debug nic
	ne := exec.VCHConfig.Networks["client"]
	if ne == nil {
		detail := fmt.Sprintf("could not get client network reference for debug nic - this code can be removed once network mapping/dhcp client is present")
		log.Errorf(detail)
		return errors.New(detail)
	}
	nr := new(types.ManagedObjectReference)
	nr.FromString(ne.Network.ID)
	r, err = f.ObjectReference(ctx, *nr)
	if err != nil {
		detail := fmt.Sprintf("could not get client network reference from %s: %s", nr.String(), err)
		log.Errorf(detail)
		return err
	}
	exec.VCHConfig.DebugNetwork = r.(object.NetworkReference)

	extraconfig.Decode(source, &network.Config)
	log.Debugf("Decoded VCH config for network: %#v", network.Config)
	for nn, n := range network.Config.ContainerNetworks {
		pgref := new(types.ManagedObjectReference)
		if !pgref.FromString(n.ID) {
			log.Errorf("Could not reacquire object reference from id for network %s: %s", nn, n.ID)
		}

		r, err = f.ObjectReference(ctx, *pgref)
		if err != nil {
			log.Warnf("could not get network reference for %s network", nn)
			continue
		}

		n.PortGroup = r.(object.NetworkReference)
	}

	// Grab the storage layer config blobs from extra config
	extraconfig.Decode(source, &storage.Config)
	log.Debugf("Decoded VCH config for storage: %#v", storage.Config)

	// Grab the AboutInfo about our host environment
	about := sess.Vim25().ServiceContent.About
	exec.VCHConfig.VCHMhz = exec.NCPU(ctx)
	exec.VCHConfig.VCHMemoryLimit = exec.MemTotal(ctx)
	exec.VCHConfig.HostOS = about.OsType
	exec.VCHConfig.HostOSVersion = about.Version
	exec.VCHConfig.HostProductName = about.Name
	log.Debugf("Host - OS (%s), version (%s), name (%s)", about.OsType, about.Version, about.Name)
	log.Debugf("VCH limits - %d Mhz, %d MB", exec.VCHConfig.VCHMhz, exec.VCHConfig.VCHMemoryLimit)
	return nil
}
Example #24
func (t *tether) Start() error {
	defer trace.End(trace.Begin("main tether loop"))

	t.setup()
	defer t.cleanup()

	// initial entry, so seed this
	t.reload <- true
	for range t.reload {
		log.Info("Loading main configuration")
		// load the config - this modifies the structure values in place
		extraconfig.Decode(t.src, t.config)
		logConfig(t.config)

		if err := t.ops.SetHostname(stringid.TruncateID(t.config.ID), t.config.Name); err != nil {
			detail := fmt.Sprintf("failed to set hostname: %s", err)
			log.Error(detail)
			// we don't attempt to recover from this - it's a fundamental misconfiguration
			// so just exit
			return errors.New(detail)
		}

		// process the networks then publish any dynamic data
		for _, v := range t.config.Networks {
			if err := t.ops.Apply(v); err != nil {
				detail := fmt.Sprintf("failed to apply network endpoint config: %s", err)
				log.Error(detail)
				return errors.New(detail)
			}
		}
		extraconfig.Encode(t.sink, t.config)

		// process the sessions and launch if needed
		for id, session := range t.config.Sessions {
			log.Debugf("Processing config for session %s", session.ID)
			var proc = session.Cmd.Process

			// check if session is alive and well
			if proc != nil && proc.Signal(syscall.Signal(0)) == nil {
				log.Debugf("Process for session %s is already running (pid: %d)", session.ID, proc.Pid)
				continue
			}

			// check if session has never been started
			if proc == nil {
				log.Infof("Launching process for session %s", session.ID)
				err := t.launch(session)
				if err != nil {
					detail := fmt.Sprintf("failed to launch %s for %s: %s", session.Cmd.Path, id, err)
					log.Error(detail)

					// TODO: check if failure to launch this is fatal to everything in this containerVM
					return errors.New(detail)
				}

				// TODO: decide how to handle restart - probably needs to glue into the child reaping
			}

			// handle exited session
			// TODO
		}

		for name, ext := range t.extensions {
			log.Info("Passing config to " + name)
			err := ext.Reload(t.config)
			if err != nil {
				log.Errorf("Failed to cleanly reload config for extension %s: %s", name, err)
				return err
			}
		}
	}

	return nil
}
Example #25
func TestRestart(t *testing.T) {
	testSetup(t)
	defer testTeardown(t)

	cfg := executor.ExecutorConfig{
		Common: executor.Common{
			ID:   "pathlookup",
			Name: "tether_test_executor",
		},
		Diagnostics: executor.Diagnostics{
			DebugLevel: 2,
		},
		Sessions: map[string]executor.SessionConfig{
			"pathlookup": executor.SessionConfig{
				Common: executor.Common{
					ID:   "pathlookup",
					Name: "tether_test_session",
				},
				Tty: false,
				Cmd: executor.Cmd{
					// test relative path
					Path: "date",
					Args: []string{"date", "--reference=/"},
					Env:  []string{"PATH=/bin"},
					Dir:  "/bin",
				},
				Restart: true,
			},
		},
	}

	tthr, src := StartTether(t, &cfg)

	// wait for initialization
	<-Mocked.Started

	result := &tether.ExecutorConfig{}
	extraconfig.Decode(src, result)

	// tether will block trying to

	assert.Equal(t, "true", result.Sessions["pathlookup"].Started, "Expected command to have been started successfully")
	assert.Equal(t, 0, result.Sessions["pathlookup"].ExitStatus, "Expected command to have exited cleanly")

	assert.True(t, result.Sessions["pathlookup"].Restart, "Expected command to be configured for restart")

	// wait for the resurrection count to max out the channel
	for result.Sessions["pathlookup"].Diagnostics.ResurrectionCount < 10 {
		result = &tether.ExecutorConfig{}
		extraconfig.Decode(src, &result)
		assert.Equal(t, 0, result.Sessions["pathlookup"].ExitStatus, "Expected command to have exited cleanly")
		// proceed to the next reincarnation
		<-Mocked.SessionExit
	}

	// read the output from the session
	log := Mocked.SessionLogBuffer.Bytes()

	// the tether has to be stopped before comparison or the reaper may swallow the exec.Wait
	tthr.Stop()
	<-Mocked.Cleaned

	// run the command directly
	out, err := exec.Command("/bin/date", "--reference=/").Output()
	if err != nil {
		fmt.Printf("Failed to run date for comparison data: %s", err)
		t.Error(err)
		return
	}

	assert.True(t, strings.HasPrefix(string(log), string(out)), "Expected the data to be constant - first invocation doesn't match")
	assert.True(t, strings.HasSuffix(string(log), string(out)), "Expected the data to be constant - last invocation doesn't match")

	// prevent indefinite wait in tether - normally session exit would trigger this
	tthr.Stop()
}
Example #26
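// Decode populates the Configuration in place from its extraconfig data source.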
func (c *Configuration) Decode() {
	extraconfig.Decode(c.source, c)
}
Example #27
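// Init decodes the VCH configuration from guestinfo and resolves the compute
// and network object references used by the port layer.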
func Init(ctx context.Context, sess *session.Session) error {
	source, err := extraconfig.GuestInfoSource()
	if err != nil {
		return err
	}

	f := find.NewFinder(sess.Vim25(), false)

	extraconfig.Decode(source, &exec.Config)
	log.Debugf("Decoded VCH config for execution: %#v", exec.Config)
	ccount := len(exec.Config.ComputeResources)
	if ccount != 1 {
		detail := fmt.Sprintf("expected singular compute resource element, found %d", ccount)
		log.Errorf(detail)
		return errors.New(detail)
	}

	cr := exec.Config.ComputeResources[0]
	r, err := f.ObjectReference(ctx, cr)
	if err != nil {
		detail := fmt.Sprintf("could not get resource pool reference from %s: %s", cr.String(), err)
		log.Errorf(detail)
		return err
	}
	exec.Config.ResourcePool = r.(*object.ResourcePool)
	// FIXME: temporary injection of debug network for debug nic
	ne := exec.Config.Networks["client"]
	if ne == nil {
		detail := fmt.Sprintf("could not get client network reference for debug nic - this code can be removed once network mapping/dhcp client is present")
		log.Errorf(detail)
		return errors.New(detail)
	}
	nr := new(types.ManagedObjectReference)
	nr.FromString(ne.Network.ID)
	r, err = f.ObjectReference(ctx, *nr)
	if err != nil {
		detail := fmt.Sprintf("could not get client network reference from %s: %s", nr.String(), err)
		log.Errorf(detail)
		return err
	}
	exec.Config.DebugNetwork = r.(object.NetworkReference)

	extraconfig.Decode(source, &network.Config)
	log.Debugf("Decoded VCH config for network: %#v", network.Config)
	for nn, n := range network.Config.ContainerNetworks {
		pgref := new(types.ManagedObjectReference)
		if !pgref.FromString(n.ID) {
			log.Errorf("Could not reacquire object reference from id for network %s: %s", nn, n.ID)
		}

		r, err = f.ObjectReference(ctx, *pgref)
		if err != nil {
			log.Warnf("could not get network reference for %s network", nn)
			continue
		}

		n.PortGroup = r.(object.NetworkReference)
	}

	return nil
}
Example #28
func (t *tether) Start() error {
	defer trace.End(trace.Begin("main tether loop"))

	t.setup()
	defer t.cleanup()

	// initial entry, so seed this
	t.reload <- true
	for range t.reload {
		log.Info("Loading main configuration")
		// load the config - this modifies the structure values in place
		extraconfig.Decode(t.src, t.config)
		logConfig(t.config)

		short := t.config.ID
		if len(short) > shortLen {
			short = short[:shortLen]
		}

		if err := t.ops.SetHostname(short, t.config.Name); err != nil {
			detail := fmt.Sprintf("failed to set hostname: %s", err)
			log.Error(detail)
			// we don't attempt to recover from this - it's a fundamental misconfiguration
			// so just exit
			return errors.New(detail)
		}

		// process the networks then publish any dynamic data
		for _, v := range t.config.Networks {
			if err := t.ops.Apply(v); err != nil {
				detail := fmt.Sprintf("failed to apply network endpoint config: %s", err)
				log.Error(detail)
				return errors.New(detail)
			}
		}
		extraconfig.Encode(t.sink, t.config)

		// process the filesystem mounts - this is performed after networks to allow for network mounts
		for k, v := range t.config.Mounts {
			if v.Source.Scheme != "label" {
				detail := fmt.Sprintf("unsupported volume mount type for %s: %s", k, v.Source.Scheme)
				log.Error(detail)
				return errors.New(detail)
			}

			// this could block indefinitely while waiting for a volume to present
			t.ops.MountLabel(context.Background(), v.Source.Path, v.Path)
		}

		// process the sessions and launch if needed
		for id, session := range t.config.Sessions {
			log.Debugf("Processing config for session %s", session.ID)
			var proc = session.Cmd.Process

			// check if session is alive and well
			if proc != nil && proc.Signal(syscall.Signal(0)) == nil {
				log.Debugf("Process for session %s is already running (pid: %d)", session.ID, proc.Pid)
				continue
			}

			// check if session has never been started or is configured for restart
			if proc == nil || session.Restart {
				if proc == nil {
					log.Infof("Launching process for session %s", session.ID)
				} else {
					session.Diagnostics.ResurrectionCount++

					// FIXME: we cannot have this embedded knowledge of the extraconfig encoding pattern, but not
					// currently sure how to expose it neatly via a utility function
					extraconfig.EncodeWithPrefix(t.sink, session, fmt.Sprintf("guestinfo..sessions|%s", session.ID))
					log.Warnf("Re-launching process for session %s (count: %d)", session.ID, session.Diagnostics.ResurrectionCount)
					session.Cmd = *restartableCmd(&session.Cmd)
				}

				err := t.launch(session)
				if err != nil {
					detail := fmt.Sprintf("failed to launch %s for %s: %s", session.Cmd.Path, id, err)
					log.Error(detail)

					// TODO: check if failure to launch this is fatal to everything in this containerVM
					// 		for now failure to launch at all is terminal
					return errors.New(detail)
				}

				continue
			}

			log.Warnf("Process for session %s has exited (%d) and is not configured for restart", session.ID, session.ExitStatus)
		}

		for name, ext := range t.extensions {
			log.Info("Passing config to " + name)
			err := ext.Reload(t.config)
			if err != nil {
				log.Errorf("Failed to cleanly reload config for extension %s: %s", name, err)
				return err
			}
		}
	}

	return nil
}
Example #29
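// Init runs exactly once: it decodes the VCH configuration, resolves the
// compute resource, starts vSphere event collection, and syncs the container cache.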
func Init(ctx context.Context, sess *session.Session, source extraconfig.DataSource, _ extraconfig.DataSink) error {
	initializer.once.Do(func() {
		var err error
		defer func() {
			if err != nil {
				initializer.err = err
			}
		}()
		f := find.NewFinder(sess.Vim25(), false)

		extraconfig.Decode(source, &Config)

		log.Debugf("Decoded VCH config for execution: %#v", Config)
		ccount := len(Config.ComputeResources)
		if ccount != 1 {
			err = fmt.Errorf("expected singular compute resource element, found %d", ccount)
			log.Error(err)
			return
		}

		cr := Config.ComputeResources[0]
		var r object.Reference
		r, err = f.ObjectReference(ctx, cr)
		if err != nil {
			err = fmt.Errorf("could not get resource pool or virtual app reference from %q: %s", cr.String(), err)
			log.Error(err)
			return
		}
		switch o := r.(type) {
		case *object.VirtualApp:
			Config.VirtualApp = o
			Config.ResourcePool = o.ResourcePool
		case *object.ResourcePool:
			Config.ResourcePool = o
		default:
			err = fmt.Errorf("could not get resource pool or virtual app from reference %q: object type is wrong", cr.String())
			log.Error(err)
			return
		}

		// we want to monitor the cluster, so create a vSphere Event Collector
		// The cluster managed object will either be a proper vSphere Cluster or
		// a specific host when in standalone mode
		ec := vsphere.NewCollector(sess.Vim25(), sess.Cluster.Reference().String())

		// start the collection of vsphere events
		err = ec.Start()
		if err != nil {
			err = fmt.Errorf("%s failed to start: %s", ec.Name(), err)
			log.Error(err)
			return
		}

		// create the event manager & register the existing collector
		Config.EventManager = event.NewEventManager(ec)

		// subscribe the exec layer to the event stream for VM events
		Config.EventManager.Subscribe(events.NewEventType(vsphere.VMEvent{}).Topic(), "exec", eventCallback)
		// subscribe callback to handle vm registered event
		Config.EventManager.Subscribe(events.NewEventType(vsphere.VMEvent{}).Topic(), "registeredVMEvent", func(ie events.Event) {
			registeredVMCallback(sess, ie)
		})

		// instantiate the container cache now
		NewContainerCache()

		// Grab the AboutInfo about our host environment
		about := sess.Vim25().ServiceContent.About
		Config.VCHMhz = NCPU(ctx)
		Config.VCHMemoryLimit = MemTotal(ctx)
		Config.HostOS = about.OsType
		Config.HostOSVersion = about.Version
		Config.HostProductName = about.Name
		log.Debugf("Host - OS (%s), version (%s), name (%s)", about.OsType, about.Version, about.Name)
		log.Debugf("VCH limits - %d Mhz, %d MB", Config.VCHMhz, Config.VCHMemoryLimit)

		// sync container cache
		if err = Containers.sync(ctx, sess); err != nil {
			return
		}
	})
	return initializer.err
}