func (h *Handle) Commit(ctx context.Context, sess *session.Session, waitTime *int32) error {
    cfg := make(map[string]string)

    // Set timestamps based on target state
    switch h.TargetState() {
    case StateRunning:
        for _, sc := range h.ExecConfig.Sessions {
            sc.StartTime = time.Now().UTC().Unix()
            sc.Started = ""
            sc.ExitStatus = 0
        }

    case StateStopped:
        for _, sc := range h.ExecConfig.Sessions {
            sc.StopTime = time.Now().UTC().Unix()
        }
    }

    extraconfig.Encode(extraconfig.MapSink(cfg), h.ExecConfig)
    s := h.Spec.Spec()
    s.ExtraConfig = append(s.ExtraConfig, vmomi.OptionValueFromMap(cfg)...)

    if err := Commit(ctx, sess, h, waitTime); err != nil {
        return err
    }

    removeHandle(h.key)
    return nil
}
func StartAttachTether(t *testing.T, cfg *executor.ExecutorConfig, mocker *Mocker) (tether.Tether, extraconfig.DataSource, net.Conn) {
    store := extraconfig.New()
    sink := store.Put
    src := store.Get
    extraconfig.Encode(sink, cfg)
    log.Debugf("Test configuration: %#v", sink)

    tthr = tether.New(src, sink, mocker)
    tthr.Register("mocker", mocker)
    tthr.Register("Attach", server)

    // run the tether to service the attach
    go func() {
        err := tthr.Start()
        if err != nil {
            t.Error(err)
        }
    }()

    // create client on the mock pipe
    conn, err := mockBackChannel(context.Background())
    if err != nil && (err != io.EOF || server.(*testAttachServer).enabled) {
        // we accept the case where the error is end-of-file and the attach server is disabled because that's
        // expected when the tether is shut down.
        t.Error(err)
    }

    return tthr, src, conn
}
func (d *Dispatcher) reconfigureApplianceSpec(vm *vm.VirtualMachine, conf *metadata.VirtualContainerHostConfigSpec) (*types.VirtualMachineConfigSpec, error) {
    defer trace.End(trace.Begin(""))

    var devices object.VirtualDeviceList
    var err error

    spec := &types.VirtualMachineConfigSpec{
        Name:    conf.Name,
        GuestId: "other3xLinux64Guest",
        Files:   &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", conf.ImageStores[0].Host)},
    }

    if devices, err = d.configIso(conf, vm); err != nil {
        return nil, err
    }

    deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
    if err != nil {
        log.Errorf("Failed to create config spec for appliance: %s", err)
        return nil, err
    }

    spec.DeviceChange = deviceChange

    cfg := make(map[string]string)
    extraconfig.Encode(extraconfig.MapSink(cfg), conf)
    spec.ExtraConfig = append(spec.ExtraConfig, extraconfig.OptionValueFromMap(cfg)...)

    return spec, nil
}
func TestToExtraConfig(t *testing.T) {
    exec := metadata.ExecutorConfig{
        Common: metadata.Common{
            ID:   "deadbeef",
            Name: "configtest",
        },
        Sessions: map[string]metadata.SessionConfig{
            "deadbeef": metadata.SessionConfig{
                Cmd: metadata.Cmd{
                    Path: "/bin/bash",
                    Args: []string{"/bin/bash", "-c", "echo hello"},
                    Dir:  "/",
                    Env:  []string{"HOME=/", "PATH=/bin"},
                },
            },
            "beefed": metadata.SessionConfig{
                Cmd: metadata.Cmd{
                    Path: "/bin/bash",
                    Args: []string{"/bin/bash", "-c", "echo goodbye"},
                    Dir:  "/",
                    Env:  []string{"HOME=/", "PATH=/bin"},
                },
            },
        },
        Networks: map[string]*metadata.NetworkEndpoint{
            "eth0": &metadata.NetworkEndpoint{
                Static: &net.IPNet{IP: localhost, Mask: lmask.Mask},
                Network: metadata.ContainerNetwork{
                    Common: metadata.Common{
                        Name: "notsure",
                    },
                    Gateway:     net.IPNet{IP: gateway, Mask: gmask.Mask},
                    Nameservers: []net.IP{},
                },
            },
        },
    }

    // encode metadata package's ExecutorConfig
    encoded := map[string]string{}
    extraconfig.Encode(extraconfig.MapSink(encoded), exec)

    // decode into this package's ExecutorConfig
    var decoded ExecutorConfig
    extraconfig.Decode(extraconfig.MapSource(encoded), &decoded)

    // the networks should be identical
    assert.Equal(t, exec.Networks["eth0"], decoded.Networks["eth0"])

    // the source and destination structs are different - we're doing a sparse comparison
    expected := exec.Sessions["deadbeef"]
    actual := *decoded.Sessions["deadbeef"]

    assert.Equal(t, expected.Cmd.Path, actual.Cmd.Path)
    assert.Equal(t, expected.Cmd.Args, actual.Cmd.Args)
    assert.Equal(t, expected.Cmd.Dir, actual.Cmd.Dir)
    assert.Equal(t, expected.Cmd.Env, actual.Cmd.Env)
}
func (t *tether) Start() error { defer trace.End(trace.Begin("main tether loop")) // do the initial setup and start the extensions t.setup() defer t.cleanup() // initial entry, so seed this t.reload <- true for range t.reload { log.Info("Loading main configuration") // load the config - this modifies the structure values in place extraconfig.Decode(t.src, t.config) t.setLogLevel() if err := t.setHostname(); err != nil { log.Error(err) return err } // process the networks then publish any dynamic data if err := t.setNetworks(); err != nil { log.Error(err) return err } extraconfig.Encode(t.sink, t.config) //process the filesystem mounts - this is performed after networks to allow for network mounts if err := t.setMounts(); err != nil { log.Error(err) return err } if err := t.initializeSessions(); err != nil { log.Error(err) return err } if err := t.reloadExtensions(); err != nil { log.Error(err) return err } if err := t.processSessions(); err != nil { log.Error(err) return err } } log.Info("Finished processing sessions") return nil }
func logConfig(config *ExecutorConfig) { // just pretty print the json for now log.Info("Loaded executor config") if log.GetLevel() == log.DebugLevel && config.DebugLevel > 1 { sink := map[string]string{} extraconfig.Encode(extraconfig.MapSink(sink), config) for k, v := range sink { log.Debugf("%s: %s", k, v) } } }
func RunTether(t *testing.T, cfg *executor.ExecutorConfig) (tether.Tether, extraconfig.DataSource, error) {
    store := extraconfig.New()
    sink := store.Put
    src := store.Get
    extraconfig.Encode(sink, cfg)
    log.Debugf("Test configuration: %#v", sink)

    tthr = tether.New(src, sink, &Mocked)
    tthr.Register("Mocker", &Mocked)

    // run the tether to service the attach
    err := tthr.Start()

    return tthr, src, err
}
func RunTether(t *testing.T, cfg *metadata.ExecutorConfig) (Tether, extraconfig.DataSource, error) {
    store := map[string]string{}
    sink := extraconfig.MapSink(store)
    src := extraconfig.MapSource(store)
    extraconfig.Encode(sink, cfg)
    log.Debugf("Test configuration: %#v", sink)

    tthr := New(src, sink, &Mocked)
    tthr.Register("Mocker", &Mocked)

    // run the tether to service the attach
    err := tthr.Start()

    return tthr, src, err
}
func logConfig(config *ExecutorConfig) { // just pretty print the json for now log.Info("Loaded executor config") // TODO: investigate whether it's the govmomi types package cause the binary size // inflation - if so we need an alternative approach here or in extraconfig if log.GetLevel() == log.DebugLevel { sink := map[string]string{} extraconfig.Encode(extraconfig.MapSink(sink), config) for k, v := range sink { log.Debugf("%s: %s", k, v) } } }
func (d *Dispatcher) encodeConfig(conf *config.VirtualContainerHostConfigSpec) (map[string]string, error) {
    if d.secret == nil {
        log.Debug("generating new config secret key")

        s, err := extraconfig.NewSecretKey()
        if err != nil {
            return nil, err
        }

        d.secret = s
    }

    cfg := make(map[string]string)
    extraconfig.Encode(d.secret.Sink(extraconfig.MapSink(cfg)), conf)

    return cfg, nil
}
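// For context, a minimal sketch (not taken from the source) of how the map returned by
// encodeConfig is typically consumed: the key/value pairs are converted into guestinfo
// option values and attached to a VM config spec, following the appliance spec snippets
// elsewhere in this collection. The applyEncodedConfigSketch name is hypothetical; only
// encodeConfig, vmomi.OptionValueFromMap, and the types package usage come from the
// snippets above.
func (d *Dispatcher) applyEncodedConfigSketch(conf *config.VirtualContainerHostConfigSpec) (*types.VirtualMachineConfigSpec, error) {
    cfg, err := d.encodeConfig(conf)
    if err != nil {
        return nil, err
    }

    // attach the encoded (secret-wrapped) config as ExtraConfig option values
    spec := &types.VirtualMachineConfigSpec{}
    spec.ExtraConfig = append(spec.ExtraConfig, vmomi.OptionValueFromMap(cfg)...)

    return spec, nil
}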
func (d *Dispatcher) createApplianceSpec(conf *metadata.VirtualContainerHostConfigSpec, vConf *data.InstallerData) (*types.VirtualMachineConfigSpec, error) {
    defer trace.End(trace.Begin(""))

    var devices object.VirtualDeviceList
    var err error

    cfg := make(map[string]string)
    extraconfig.Encode(extraconfig.MapSink(cfg), conf)

    spec := &spec.VirtualMachineConfigSpec{
        VirtualMachineConfigSpec: &types.VirtualMachineConfigSpec{
            Name:     conf.Name,
            GuestId:  "other3xLinux64Guest",
            Files:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", conf.ImageStores[0].Host)},
            NumCPUs:  int32(vConf.ApplianceSize.CPU.Limit),
            MemoryMB: vConf.ApplianceSize.Memory.Limit,
            // Encode the config both here and after the VM is created so that it can be identified as a VCH appliance as soon as
            // creation is complete.
            ExtraConfig: extraconfig.OptionValueFromMap(cfg),
        },
    }

    if devices, err = d.addIDEController(devices); err != nil {
        return nil, err
    }

    if devices, err = d.addParaVirtualSCSIController(devices); err != nil {
        return nil, err
    }

    if devices, err = d.addNetworkDevices(conf, spec, devices); err != nil {
        return nil, err
    }

    deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
    if err != nil {
        return nil, err
    }

    spec.DeviceChange = deviceChange

    return spec.VirtualMachineConfigSpec, nil
}
func (h *Handle) Commit(ctx context.Context, sess *session.Session, waitTime *int32) error {
    if h.committed {
        return nil // already committed
    }

    // make sure there is a spec
    h.SetSpec(nil)

    cfg := make(map[string]string)
    extraconfig.Encode(extraconfig.MapSink(cfg), h.ExecConfig)
    s := h.Spec.Spec()
    s.ExtraConfig = append(s.ExtraConfig, vmomi.OptionValueFromMap(cfg)...)

    if err := h.Container.Commit(ctx, sess, h, waitTime); err != nil {
        return err
    }

    h.committed = true
    removeHandle(h.key)
    return nil
}
func StartTether(t *testing.T, cfg *executor.ExecutorConfig) (tether.Tether, extraconfig.DataSource) {
    store := extraconfig.New()
    sink := store.Put
    src := store.Get
    extraconfig.Encode(sink, cfg)
    log.Debugf("Test configuration: %#v", sink)

    tthr = tether.New(src, sink, &Mocked)
    tthr.Register("mocker", &Mocked)

    // run the tether to service the attach
    go func() {
        err := tthr.Start()
        if err != nil {
            t.Error(err)
        }
    }()

    return tthr, src
}
func StartTether(t *testing.T, cfg *executor.ExecutorConfig, mocker *Mocker) (Tether, extraconfig.DataSource) {
    store := extraconfig.New()
    sink := store.Put
    src := store.Get
    extraconfig.Encode(sink, cfg)
    log.Debugf("Test configuration: %#v", sink)

    Tthr = New(src, sink, mocker)
    Tthr.Register("mocker", mocker)

    // run the tether to service the attach
    go func() {
        err := Tthr.Start()
        if err != nil {
            t.Error(err)
        }
    }()

    return Tthr, src
}
func TestToExtraConfig(t *testing.T) {
    exec := executor.ExecutorConfig{
        Common: executor.Common{
            ID:   "deadbeef",
            Name: "configtest",
        },
        Sessions: map[string]*executor.SessionConfig{
            "deadbeef": &executor.SessionConfig{
                Cmd: executor.Cmd{
                    Path: "/bin/bash",
                    Args: []string{"/bin/bash", "-c", "echo hello"},
                    Dir:  "/",
                    Env:  []string{"HOME=/", "PATH=/bin"},
                },
            },
            "beefed": &executor.SessionConfig{
                Cmd: executor.Cmd{
                    Path: "/bin/bash",
                    Args: []string{"/bin/bash", "-c", "echo goodbye"},
                    Dir:  "/",
                    Env:  []string{"HOME=/", "PATH=/bin"},
                },
            },
        },
        Networks: map[string]*executor.NetworkEndpoint{
            "eth0": &executor.NetworkEndpoint{
                Static: true,
                IP:     &net.IPNet{IP: localhost, Mask: lmask.Mask},
                Network: executor.ContainerNetwork{
                    Common: executor.Common{
                        Name: "notsure",
                    },
                    Gateway:     net.IPNet{IP: gateway, Mask: gmask.Mask},
                    Nameservers: []net.IP{},
                    Pools:       []ip.Range{},
                    Aliases:     []string{},
                },
            },
        },
    }

    // encode exec package's ExecutorConfig
    encoded := map[string]string{}
    extraconfig.Encode(extraconfig.MapSink(encoded), exec)

    // decode into this package's ExecutorConfig
    var decoded ExecutorConfig
    extraconfig.Decode(extraconfig.MapSource(encoded), &decoded)

    // the source and destination structs are different - we're doing a sparse comparison
    expectedNet := exec.Networks["eth0"]
    actualNet := decoded.Networks["eth0"]

    assert.Equal(t, expectedNet.Common, actualNet.Common)
    assert.Equal(t, expectedNet.Static, actualNet.Static)
    assert.Equal(t, expectedNet.Assigned, actualNet.Assigned)
    assert.Equal(t, expectedNet.Network, actualNet.Network)

    expectedSession := exec.Sessions["deadbeef"]
    actualSession := decoded.Sessions["deadbeef"]

    assert.Equal(t, expectedSession.Cmd.Path, actualSession.Cmd.Path)
    assert.Equal(t, expectedSession.Cmd.Args, actualSession.Cmd.Args)
    assert.Equal(t, expectedSession.Cmd.Dir, actualSession.Cmd.Dir)
    assert.Equal(t, expectedSession.Cmd.Env, actualSession.Cmd.Env)
}
// NewVirtualMachineConfigSpec returns a VirtualMachineConfigSpec
func NewVirtualMachineConfigSpec(ctx context.Context, session *session.Session, config *VirtualMachineConfigSpecConfig) (*VirtualMachineConfigSpec, error) {
    defer trace.End(trace.Begin(config.ID))

    log.Debugf("Adding metadata to the configspec: %+v", config.Metadata)
    // TEMPORARY

    // set the VM name to prettyname-ID to make it a little more readable
    // if prettyname-ID is longer than the max VM name length, truncate the pretty name rather than the UUID so the name stays unique
    nameMaxLen := maxVMNameLength - len(config.ID)
    prettyName := config.Name
    if len(prettyName) > nameMaxLen-1 {
        prettyName = prettyName[:nameMaxLen-1]
    }
    fullName := fmt.Sprintf("%s-%s", prettyName, config.ID)
    config.VMFullName = fullName

    VMPathName := config.VMPathName
    if !session.IsVSAN(ctx) {
        // VMFS requires the full path to vmx or everything but the datastore is ignored
        VMPathName = fmt.Sprintf("%s/%s/%s.vmx", config.VMPathName, config.VMFullName, config.ID)
    }

    s := &types.VirtualMachineConfigSpec{
        Name: fullName,
        Uuid: config.BiosUUID,
        Files: &types.VirtualMachineFileInfo{
            VmPathName: VMPathName,
        },
        NumCPUs:             config.NumCPUs,
        CpuHotAddEnabled:    &config.VMForkEnabled, // this disables vNUMA when true
        MemoryMB:            config.MemoryMB,
        MemoryHotAddEnabled: &config.VMForkEnabled,

        ExtraConfig: []types.BaseOptionValue{
            // lets us see the UUID for the containerfs disk (hidden from daemon)
            &types.OptionValue{Key: "disk.EnableUUID", Value: "true"},
            // needed to avoid the questions that occur when attaching multiple disks with the same uuid (bugzilla 1362918)
            &types.OptionValue{Key: "answer.msg.disk.duplicateUUID", Value: "Yes"},
            // needed to avoid the question that occurs when opening a file-backed serial port
            &types.OptionValue{Key: "answer.msg.serial.file.open", Value: "Append"},

            &types.OptionValue{Key: "sched.mem.lpage.maxSharedPages", Value: "256"},
            // seems to be needed to avoid children hanging shortly after fork
            &types.OptionValue{Key: "vmotion.checkpointSVGAPrimarySize", Value: "4194304"},

            // trying this out - if it works then we need to determine if we can rely on serial0 being the correct index.
            &types.OptionValue{Key: "serial0.hardwareFlowControl", Value: "TRUE"},

            // https://enatai-jira.eng.vmware.com/browse/BON-257
            // Hotadd memory above 3 GB not working
            &types.OptionValue{Key: "memory.noHotAddOver4GB", Value: "FALSE"},
            &types.OptionValue{Key: "memory.maxGrow", Value: "512"},

            // http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2030189
            &types.OptionValue{Key: "tools.remindInstall", Value: "FALSE"},
            &types.OptionValue{Key: "tools.upgrade.policy", Value: "manual"},
        },
    }

    // encode the config as optionvalues
    cfg := map[string]string{}
    extraconfig.Encode(extraconfig.MapSink(cfg), config.Metadata)
    metaCfg := vmomi.OptionValueFromMap(cfg)

    // merge it with the spec
    s.ExtraConfig = append(s.ExtraConfig, metaCfg...)

    vmcs := &VirtualMachineConfigSpec{
        Session:                  session,
        VirtualMachineConfigSpec: s,
        config:                   config,
    }

    log.Debugf("Virtual machine config spec created: %+v", vmcs)

    return vmcs, nil
}
func (t *tether) Start() error { defer trace.End(trace.Begin("main tether loop")) t.setup() defer t.cleanup() // initial entry, so seed this t.reload <- true for _ = range t.reload { log.Info("Loading main configuration") // load the config - this modifies the structure values in place extraconfig.Decode(t.src, t.config) logConfig(t.config) if err := t.ops.SetHostname(stringid.TruncateID(t.config.ID), t.config.Name); err != nil { detail := fmt.Sprintf("failed to set hostname: %s", err) log.Error(detail) // we don't attempt to recover from this - it's a fundemental misconfiguration // so just exit return errors.New(detail) } // process the networks then publish any dynamic data for _, v := range t.config.Networks { if err := t.ops.Apply(v); err != nil { detail := fmt.Sprintf("failed to apply network endpoint config: %s", err) log.Error(detail) return errors.New(detail) } } extraconfig.Encode(t.sink, t.config) // process the sessions and launch if needed for id, session := range t.config.Sessions { log.Debugf("Processing config for session %s", session.ID) var proc = session.Cmd.Process // check if session is alive and well if proc != nil && proc.Signal(syscall.Signal(0)) == nil { log.Debugf("Process for session %s is already running (pid: %d)", session.ID, proc.Pid) continue } // check if session has never been started if proc == nil { log.Infof("Launching process for session %s", session.ID) err := t.launch(session) if err != nil { detail := fmt.Sprintf("failed to launch %s for %s: %s", session.Cmd.Path, id, err) log.Error(detail) // TODO: check if failure to launch this is fatal to everything in this containerVM return errors.New(detail) } // TODO: decide how to handle restart - probably needs to glue into the child reaping } // handle exited session // TODO } for name, ext := range t.extensions { log.Info("Passing config to " + name) err := ext.Reload(t.config) if err != nil { log.Errorf("Failed to cleanly reload config for extension %s: %s", name, err) return err } } } return nil }
// NewVirtualMachineConfigSpec returns a VirtualMachineConfigSpec
func NewVirtualMachineConfigSpec(ctx context.Context, session *session.Session, config *VirtualMachineConfigSpecConfig) (*VirtualMachineConfigSpec, error) {
    defer trace.End(trace.Begin(config.ID))

    VMPathName := config.VMPathName
    if !session.IsVSAN(ctx) {
        // VMFS requires the full path to vmx or everything but the datastore is ignored
        VMPathName = fmt.Sprintf("%s/%s/%[2]s.vmx", config.VMPathName, config.ID)
    }

    log.Debugf("Adding metadata to the configspec: %+v", config.Metadata)
    // TEMPORARY

    s := &types.VirtualMachineConfigSpec{
        Name: config.ID,
        Files: &types.VirtualMachineFileInfo{
            VmPathName: VMPathName,
        },
        NumCPUs:             config.NumCPUs,
        CpuHotAddEnabled:    &config.VMForkEnabled, // this disables vNUMA when true
        MemoryMB:            config.MemoryMB,
        MemoryHotAddEnabled: &config.VMForkEnabled,

        // needed to cause the disk uuid to propagate into linux for presentation via /dev/disk/by-id/
        ExtraConfig: []types.BaseOptionValue{
            // lets us see the UUID for the containerfs disk (hidden from daemon)
            &types.OptionValue{Key: "disk.EnableUUID", Value: "true"},
            // needed to avoid the questions that occur when attaching multiple disks with the same uuid (bugzilla 1362918)
            &types.OptionValue{Key: "answer.msg.disk.duplicateUUID", Value: "Yes"},
            &types.OptionValue{Key: "answer.msg.serial.file.open", Value: "Replace"},

            &types.OptionValue{Key: "sched.mem.lpage.maxSharedPages", Value: "256"},
            // seems to be needed to avoid children hanging shortly after fork
            &types.OptionValue{Key: "vmotion.checkpointSVGAPrimarySize", Value: "4194304"},

            // trying this out - if it works then we need to determine if we can rely on serial0 being the correct index.
            &types.OptionValue{Key: "serial0.hardwareFlowControl", Value: "TRUE"},

            // https://enatai-jira.eng.vmware.com/browse/BON-257
            &types.OptionValue{Key: "memory.noHotAddOver4GB", Value: "FALSE"},
            &types.OptionValue{Key: "memory.maxGrow", Value: "512"},

            // http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2030189
            &types.OptionValue{Key: "tools.remindInstall", Value: "FALSE"},
            &types.OptionValue{Key: "tools.upgrade.policy", Value: "manual"},
        },
    }

    // encode the config as optionvalues
    cfg := map[string]string{}
    extraconfig.Encode(extraconfig.MapSink(cfg), config.Metadata)
    metaCfg := extraconfig.OptionValueFromMap(cfg)

    // merge it with the spec
    s.ExtraConfig = append(s.ExtraConfig, metaCfg...)

    return &VirtualMachineConfigSpec{
        Session:                  session,
        VirtualMachineConfigSpec: s,
        config:                   config,
    }, nil
}
func (c *Configuration) Encode() {
    extraconfig.Encode(c.sink, c)
}
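// A minimal sketch (not from the source): the sink handed to Encode does not have to be
// guestinfo-backed - an in-memory map works as well, which is how the tests and logConfig
// snippets above inspect the flattened key/value form. The ExampleEncodeToMap name is
// hypothetical; the executor.ExecutorConfig literal and the extraconfig calls mirror the
// fixtures already shown in this collection.
func ExampleEncodeToMap() {
    cfg := executor.ExecutorConfig{
        Common: executor.Common{ID: "deadbeef", Name: "configtest"},
    }

    // encode into a plain map sink and dump the resulting keys, as logConfig does
    store := map[string]string{}
    extraconfig.Encode(extraconfig.MapSink(store), cfg)

    for k, v := range store {
        log.Debugf("%s: %s", k, v)
    }
}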
func (t *tether) Flush() error {
    defer trace.End(trace.Begin("tether.Flush"))

    extraconfig.Encode(t.sink, t.config)

    return nil
}
func (t *tether) Start() error { defer trace.End(trace.Begin("main tether loop")) t.setup() defer t.cleanup() // initial entry, so seed this t.reload <- true for _ = range t.reload { log.Info("Loading main configuration") // load the config - this modifies the structure values in place extraconfig.Decode(t.src, t.config) logConfig(t.config) short := t.config.ID if len(short) > shortLen { short = short[:shortLen] } if err := t.ops.SetHostname(short, t.config.Name); err != nil { detail := fmt.Sprintf("failed to set hostname: %s", err) log.Error(detail) // we don't attempt to recover from this - it's a fundemental misconfiguration // so just exit return errors.New(detail) } // process the networks then publish any dynamic data for _, v := range t.config.Networks { if err := t.ops.Apply(v); err != nil { detail := fmt.Sprintf("failed to apply network endpoint config: %s", err) log.Error(detail) return errors.New(detail) } } extraconfig.Encode(t.sink, t.config) //process the filesystem mounts - this is performed after networks to allow for network mounts for k, v := range t.config.Mounts { if v.Source.Scheme != "label" { detail := fmt.Sprintf("unsupported volume mount type for %s: %s", k, v.Source.Scheme) log.Error(detail) return errors.New(detail) } // this could block indefinitely while waiting for a volume to present t.ops.MountLabel(context.Background(), v.Source.Path, v.Path) } // process the sessions and launch if needed for id, session := range t.config.Sessions { log.Debugf("Processing config for session %s", session.ID) var proc = session.Cmd.Process // check if session is alive and well if proc != nil && proc.Signal(syscall.Signal(0)) == nil { log.Debugf("Process for session %s is already running (pid: %d)", session.ID, proc.Pid) continue } // check if session has never been started or is configured for restart if proc == nil || session.Restart { if proc == nil { log.Infof("Launching process for session %s", session.ID) } else { session.Diagnostics.ResurrectionCount++ // FIXME: we cannot have this embedded knowledge of the extraconfig encoding pattern, but not // currently sure how to expose it neatly via a utility function extraconfig.EncodeWithPrefix(t.sink, session, fmt.Sprintf("guestinfo..sessions|%s", session.ID)) log.Warnf("Re-launching process for session %s (count: %d)", session.ID, session.Diagnostics.ResurrectionCount) session.Cmd = *restartableCmd(&session.Cmd) } err := t.launch(session) if err != nil { detail := fmt.Sprintf("failed to launch %s for %s: %s", session.Cmd.Path, id, err) log.Error(detail) // TODO: check if failure to launch this is fatal to everything in this containerVM // for now failure to launch at all is terminal return errors.New(detail) } continue } log.Warnf("Process for session %s has exited (%d) and is not configured for restart", session.ID, session.ExitStatus) } for name, ext := range t.extensions { log.Info("Passing config to " + name) err := ext.Reload(t.config) if err != nil { log.Errorf("Failed to cleanly reload config for extension %s: %s", name, err) return err } } } return nil }