Example #1
// NewService creates a new resource for managing services
// using systemd on a GNU/Linux system
func NewService(name string) (Resource, error) {
	if !util.IsRunningSystemd() {
		return nil, ErrNoSystemd
	}

	s := &Service{
		Base: Base{
			Name:              name,
			Type:              "service",
			State:             "running",
			Require:           make([]string, 0),
			PresentStatesList: []string{"present", "running"},
			AbsentStatesList:  []string{"absent", "stopped"},
			Concurrent:        true,
			Subscribe:         make(TriggerMap),
		},
		Enable: true,
		unit:   fmt.Sprintf("%s.service", name),
	}

	// Set resource properties
	s.PropertyList = []Property{
		&ResourceProperty{
			PropertyName:         "enable",
			PropertySetFunc:      s.setEnable,
			PropertyIsSyncedFunc: s.isEnableSynced,
		},
	}

	return s, nil
}
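The constructor above guards resource creation behind util.IsRunningSystemd() and returns the ErrNoSystemd sentinel instead of a half-initialized object. A minimal caller sketch under that assumption (buildServiceResource is a hypothetical helper; NewService, Resource, and ErrNoSystemd are the identifiers from the example above):

// Hypothetical caller: treat a host without systemd as "nothing to manage"
// rather than a hard failure. Assumes the package shown in the example.
func buildServiceResource(name string) (Resource, error) {
	svc, err := NewService(name)
	if err == ErrNoSystemd {
		log.Printf("skipping service resource %q: systemd not available", name)
		return nil, nil
	}
	return svc, err
}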
Example #2
func TestJournalLink(t *testing.T) {
	if !sd_util.IsRunningSystemd() {
		t.Skip("Systemd is not running on the host.")
	}

	if _, err := os.Stat(journalDir); os.IsNotExist(err) {
		t.Skip("Persistent journaling disabled.")
	}

	image := getInspectImagePath()

	ctx := testutils.NewRktRunCtx()
	defer ctx.Cleanup()

	rktCmd := fmt.Sprintf("%s prepare --insecure-options=image %s", ctx.Cmd(), image)
	uuid := runRktAndGetUUID(t, rktCmd)

	rktCmd = fmt.Sprintf("%s run-prepared %s", ctx.Cmd(), uuid)
	spawnAndWaitOrFail(t, rktCmd, 0)

	machineID := strings.Replace(uuid, "-", "", -1)
	journalPath := filepath.Join("/var/log/journal", machineID)

	link, err := os.Readlink(journalPath)
	if err != nil {
		t.Fatalf("failed to read journal link %q", journalPath)
	}

	podJournal := filepath.Join(ctx.DataDir(), "pods/run", uuid, "stage1/rootfs/var/log/journal/", machineID)
	if link != podJournal {
		t.Fatalf("unexpected target of journal link: %q. Expected %q", link, podJournal)
	}
}
Example #3
File: service.go Project: 40a/mgmt
func (obj *ServiceType) Apply() bool {
	log.Printf("%v[%v]: Apply", obj.GetType(), obj.GetName())

	if !util.IsRunningSystemd() {
		log.Fatal("Systemd is not running.")
	}

	conn, err := systemd.NewSystemdConnection() // needs root access
	if err != nil {
		log.Fatal("Failed to connect to systemd: ", err)
	}
	defer conn.Close()

	var service = fmt.Sprintf("%v.service", obj.Name) // systemd name
	var files = []string{service}                     // the service represented in a list
	if obj.Startup == "enabled" {
		_, _, err = conn.EnableUnitFiles(files, false, true)

	} else if obj.Startup == "disabled" {
		_, err = conn.DisableUnitFiles(files, false)
	} else {
		err = nil
	}
	if err != nil {
		log.Printf("Unable to change startup status: %v", err)
		return false
	}

	result := make(chan string, 1) // catch result information

	if obj.State == "running" {
		_, err := conn.StartUnit(service, "fail", result)
		if err != nil {
			log.Fatal("Failed to start unit: ", err)
			return false
		}
	} else if obj.State == "stopped" {
		_, err = conn.StopUnit(service, "fail", result)
		if err != nil {
			log.Fatal("Failed to stop unit: ", err)
			return false
		}
	} else {
		log.Fatal("Unknown state: ", obj.State)
	}

	status := <-result
	if status == "" {
		log.Fatal("Result is empty")
		return false
	}
	if status != "done" {
		log.Fatal("Unknown return string: ", status)
		return false
	}

	// XXX: also set enabled on boot

	return true
}
Example #4
func TestService(t *testing.T) {
	if !util.IsRunningSystemd() {
		return
	}

	L := newLuaState()
	defer L.Close()

	const code = `
	svc = service.new("nginx")
	`

	if err := L.DoString(code); err != nil {
		t.Fatal(err)
	}

	svc := luaResource(L, "svc").(*Service)
	errorIfNotEqual(t, "service", svc.Type)
	errorIfNotEqual(t, "nginx", svc.Name)
	errorIfNotEqual(t, "running", svc.State)
	errorIfNotEqual(t, []string{}, svc.After)
	errorIfNotEqual(t, []string{}, svc.Before)
	errorIfNotEqual(t, []string{"present", "running"}, svc.PresentStates)
	errorIfNotEqual(t, []string{"absent", "stopped"}, svc.AbsentStates)
	errorIfNotEqual(t, true, svc.Enable)
}
Example #5
func main() {
	flag.Parse()

	exists, err := dirExists(*outputDir)
	if err != nil {
		log.Fatal(err)
	}

	if !exists {
		if err := os.Mkdir(*outputDir, 0755); err != nil {
			log.Fatal(err)
		}
	}

	cfg := client.Config{
		Endpoints: []string{*endpoint},
		Transport: client.DefaultTransport,
		// set timeout per request to fail fast when the target endpoint is unavailable
		HeaderTimeoutPerRequest: time.Second,
	}

	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}

	if err := c.Sync(context.Background()); err != nil {
		log.Fatal(err)
	}

	kapi := client.NewKeysAPI(c)

	resp, err := generateConfig(kapi)
	if err != nil {
		log.Fatal(err)
	}
	if systemdutil.IsRunningSystemd() {
		err := daemon.SdNotify("READY=1")
		if err != nil {
			log.Printf("failed to notify systemd for readiness: %v", err)
			if err == daemon.SdNotifyNoSocket {
				log.Printf("forgot to set Type=notify in systemd service file?")
			}
		}
	}
	if *watch {
		for {
			resp, err = generateConfigWatcher(kapi, resp)
			if err != nil {
				log.Fatal(err)
			}
		}
	}
	os.Exit(0)
}
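This example uses the older go-systemd daemon API, where SdNotify takes only the state string, returns an error, and reports a missing notify socket via daemon.SdNotifyNoSocket. Later go-systemd releases changed the signature to SdNotify(unsetEnvironment bool, state string) (bool, error), as the etcd startEtcdOrProxyV2 example further down shows. A hedged sketch of the same readiness guard written against that newer signature:

	// Sketch only, assuming a newer go-systemd where SdNotify returns (sent, err).
	if systemdutil.IsRunningSystemd() {
		sent, err := daemon.SdNotify(false, "READY=1")
		if err != nil {
			log.Printf("failed to notify systemd for readiness: %v", err)
		} else if !sent {
			log.Printf("readiness not sent; forgot to set Type=notify in the unit file?")
		}
	}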
Example #6
File: service.go Project: 40a/mgmt
func (obj *ServiceType) StateOK() bool {
	if obj.isStateOK { // cache the state
		return true
	}

	if !util.IsRunningSystemd() {
		log.Fatal("Systemd is not running.")
	}

	conn, err := systemd.NewSystemdConnection() // needs root access
	if err != nil {
		log.Fatal("Failed to connect to systemd: ", err)
	}
	defer conn.Close()

	var service = fmt.Sprintf("%v.service", obj.Name) // systemd name

	loadstate, err := conn.GetUnitProperty(service, "LoadState")
	if err != nil {
		log.Printf("Failed to get load state: %v", err)
		return false
	}

	// NOTE: we have to compare variants with other variants, they are really strings...
	var notFound = (loadstate.Value == dbus.MakeVariant("not-found"))
	if notFound {
		log.Printf("Failed to find service: %v", service)
		return false
	}

	// XXX: check service "enabled at boot" or not status...

	//conn.GetUnitProperties(service)
	activestate, err := conn.GetUnitProperty(service, "ActiveState")
	if err != nil {
		log.Fatal("Failed to get active state: ", err)
	}

	var running = (activestate.Value == dbus.MakeVariant("active"))

	if obj.State == "running" {
		if !running {
			return false // we are in the wrong state
		}
	} else if obj.State == "stopped" {
		if running {
			return false
		}
	} else {
		log.Fatal("Unknown state: ", obj.State)
	}

	return true // all is good, no state change needed
}
Example #7
func (s *System) detectService() {
	switch {
	case util.IsRunningSystemd():
		s.NewService = NewServiceDbus
		dbus, err := dbus.New()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		s.Dbus = dbus
	case isUbuntu():
		s.NewService = NewServiceUpstart
	default:
		s.NewService = NewServiceInit
	}
}
Example #8
func setupSocketActivation() (net.Listener, error) {
	if !util.IsRunningSystemd() {
		return nil, nil
	}
	listenFds := activation.Files(false)
	if len(listenFds) > 1 {
		return nil, fmt.Errorf("expected only one socket from systemd, got %d", len(listenFds))
	}
	var listener net.Listener
	if len(listenFds) == 1 {
		l, err := net.FileListener(listenFds[0])
		if err != nil {
			return nil, err
		}
		listener = l
	}
	return listener, nil
}
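setupSocketActivation returns a nil listener (with a nil error) when the process was not handed a socket by systemd, so the caller needs a fallback path. A small hypothetical caller sketch (getListener and addr are made-up names; setupSocketActivation is the function above):

// Hypothetical caller: prefer the listener passed in by systemd socket
// activation, otherwise open our own on the configured address.
func getListener(addr string) (net.Listener, error) {
	l, err := setupSocketActivation()
	if err != nil {
		return nil, err
	}
	if l != nil {
		return l, nil // fd inherited from systemd
	}
	return net.Listen("tcp", addr)
}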
Example #9
// NewService creates a new resource for managing services
// using systemd on a GNU/Linux system
func NewService(name string) (Resource, error) {
	if !util.IsRunningSystemd() {
		return nil, ErrNoSystemd
	}

	s := &Service{
		Base: Base{
			Name:          name,
			Type:          "service",
			State:         "running",
			After:         make([]string, 0),
			Before:        make([]string, 0),
			PresentStates: []string{"present", "running"},
			AbsentStates:  []string{"absent", "stopped"},
		},
		Enable: true,
		unit:   fmt.Sprintf("%s.service", name),
	}

	return s, nil
}
Example #10
func New(c *cli.Context) *System {
	system := &System{
		NewFile:     NewDefFile,
		NewAddr:     NewDefAddr,
		NewPort:     NewDefPort,
		NewUser:     NewDefUser,
		NewGroup:    NewDefGroup,
		NewCommand:  NewDefCommand,
		NewDNS:      NewDefDNS,
		NewProcess:  NewDefProcess,
		NewGossfile: NewDefGossfile,
	}

	if util.IsRunningSystemd() {
		system.NewService = NewServiceDbus
		dbus, err := dbus.New()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		system.Dbus = dbus
	} else {
		system.NewService = NewServiceInit
	}

	switch {
	case c.GlobalString("package") == "rpm":
		system.NewPackage = NewRpmPackage
	case c.GlobalString("package") == "deb":
		system.NewPackage = NewDebPackage
	default:
		system.NewPackage = detectPackage()
	}

	return system
}
Example #11
File: kvm.go Project: nhlfr/rkt
func linkJournal(s1Root, machineID string) error {
	if !util.IsRunningSystemd() {
		return nil
	}

	absS1Root, err := filepath.Abs(s1Root)
	if err != nil {
		return err
	}

	// /var/log/journal doesn't exist on the host, don't do anything
	if _, err := os.Stat(journalDir); os.IsNotExist(err) {
		return nil
	}

	machineJournalDir := filepath.Join(journalDir, machineID)
	podJournalDir := filepath.Join(absS1Root, machineJournalDir)

	hostMachineID, err := util.GetMachineID()
	if err != nil {
		return err
	}

	// unlikely, machine ID is random (== pod UUID)
	if hostMachineID == machineID {
		return fmt.Errorf("host and pod machine IDs are equal (%s)", machineID)
	}

	fi, err := os.Lstat(machineJournalDir)
	switch {
	case os.IsNotExist(err):
		// good, we'll create the symlink
	case err != nil:
		return err
	// unlikely, machine ID is random (== pod UUID)
	default:
		if fi.IsDir() {
			if err := os.Remove(machineJournalDir); err != nil {
				return err
			}
		}

		link, err := os.Readlink(machineJournalDir)
		if err != nil {
			return err
		}

		if link == podJournalDir {
			return nil
		} else {
			if err := os.Remove(machineJournalDir); err != nil {
				return err
			}
		}
	}

	if err := os.Symlink(podJournalDir, machineJournalDir); err != nil {
		return err
	}

	return nil
}
Example #12
File: init.go Project: joshix/rkt
func getContainerSubCgroup(machineID string, canMachinedRegister, unified bool) (string, error) {
	var fromUnit bool

	if util.IsRunningSystemd() {
		var err error
		if fromUnit, err = util.RunningFromSystemService(); err != nil {
			return "", errwrap.Wrap(errors.New("could not determine if we're running from a unit file"), err)
		}
	}

	if fromUnit {
		slice, err := util.GetRunningSlice()
		if err != nil {
			return "", errwrap.Wrap(errors.New("could not get slice name"), err)
		}
		slicePath, err := common.SliceToPath(slice)
		if err != nil {
			return "", errwrap.Wrap(errors.New("could not convert slice name to path"), err)
		}
		unit, err := util.CurrentUnitName()
		if err != nil {
			return "", errwrap.Wrap(errors.New("could not get unit name"), err)
		}
		subcgroup := filepath.Join(slicePath, unit)

		if unified {
			return filepath.Join(subcgroup, "payload"), nil
		}

		return subcgroup, nil
	}

	escapedmID := strings.Replace(machineID, "-", "\\x2d", -1)
	machineDir := "machine-" + escapedmID + ".scope"

	if canMachinedRegister {
		// we are not in the final cgroup yet: systemd-nspawn will move us
		// to the correct cgroup later during registration so we can't
		// look it up in /proc/self/cgroup
		return filepath.Join("machine.slice", machineDir), nil
	}

	if unified {
		subcgroup, err := v2.GetOwnCgroupPath()
		if err != nil {
			return "", errwrap.Wrap(errors.New("could not get own v2 cgroup path"), err)
		}
		return subcgroup, nil
	}

	// when registration is disabled the container will be directly
	// under the current cgroup so we can look it up in /proc/self/cgroup
	ownV1CgroupPath, err := v1.GetOwnCgroupPath("name=systemd")
	if err != nil {
		return "", errwrap.Wrap(errors.New("could not get own v1 cgroup path"), err)
	}

	// systemd-nspawn won't work if we are in the root cgroup. In addition,
	// we want all rkt instances to be in distinct cgroups. Create a
	// subcgroup and add ourselves to it.
	return filepath.Join(ownV1CgroupPath, machineDir), nil
}
Example #13
File: init.go Project: joshix/rkt
// getArgsEnv returns the nspawn or lkvm args and env according to the flavor
// as the first two return values respectively.
func getArgsEnv(p *stage1commontypes.Pod, flavor string, canMachinedRegister bool, debug bool, n *networking.Networking, insecureOptions stage1initcommon.Stage1InsecureOptions) ([]string, []string, error) {
	var args []string
	env := os.Environ()

	// We store the pod's flavor so we can later garbage collect it correctly
	if err := os.Symlink(flavor, filepath.Join(p.Root, stage1initcommon.FlavorFile)); err != nil {
		return nil, nil, errwrap.Wrap(errors.New("failed to create flavor symlink"), err)
	}

	// set hostname inside pod
	// According to systemd manual (https://www.freedesktop.org/software/systemd/man/hostname.html) :
	// "The /etc/hostname file configures the name of the local system that is set
	// during boot using the sethostname system call"
	if hostname == "" {
		hostname = stage1initcommon.GetMachineID(p)
	}
	hostnamePath := filepath.Join(common.Stage1RootfsPath(p.Root), "etc/hostname")
	if err := ioutil.WriteFile(hostnamePath, []byte(hostname), 0644); err != nil {
		return nil, nil, fmt.Errorf("error writing %s, %s", hostnamePath, err)
	}

	// systemd-nspawn needs /etc/machine-id to link the container's journal
	// to the host. Since systemd-v230, /etc/machine-id is mandatory, see
	// https://github.com/systemd/systemd/commit/e01ff70a77e781734e1e73a2238af2e9bf7967a8
	mPath := filepath.Join(common.Stage1RootfsPath(p.Root), "etc", "machine-id")
	machineID := strings.Replace(p.UUID.String(), "-", "", -1)

	switch flavor {
	case "kvm":
		if privateUsers != "" {
			return nil, nil, fmt.Errorf("flag --private-users cannot be used with an lkvm stage1")
		}

		// kernel and hypervisor binaries are located relative to the working directory
		// of init (/var/lib/rkt/..../uuid)
		// TODO: move to path.go
		kernelPath := filepath.Join(common.Stage1RootfsPath(p.Root), "bzImage")
		netDescriptions := kvm.GetNetworkDescriptions(n)

		cpu, mem := kvm.GetAppsResources(p.Manifest.Apps)

		// Parse hypervisor
		hv, err := KvmCheckHypervisor(common.Stage1RootfsPath(p.Root))
		if err != nil {
			return nil, nil, err
		}

		// Set start command for hypervisor
		StartCmd := hvlkvm.StartCmd
		switch hv {
		case "lkvm":
			StartCmd = hvlkvm.StartCmd
		case "qemu":
			StartCmd = hvqemu.StartCmd
		default:
			return nil, nil, fmt.Errorf("unrecognized hypervisor")
		}

		hvStartCmd := StartCmd(
			common.Stage1RootfsPath(p.Root),
			p.UUID.String(),
			kernelPath,
			netDescriptions,
			cpu,
			mem,
			debug,
		)

		if hvStartCmd == nil {
			return nil, nil, fmt.Errorf("no hypervisor")
		}

		args = append(args, hvStartCmd...)

		// lkvm requires $HOME to be defined,
		// see https://github.com/coreos/rkt/issues/1393
		if os.Getenv("HOME") == "" {
			env = append(env, "HOME=/root")
		}

		if err := linkJournal(common.Stage1RootfsPath(p.Root), machineID); err != nil {
			return nil, nil, errwrap.Wrap(errors.New("error linking pod's journal"), err)
		}

		// use only dynamic libraries provided in the image
		// from systemd v231 there's a new internal libsystemd-shared-v231.so
		// which is present in /usr/lib/systemd
		env = append(env, "LD_LIBRARY_PATH="+filepath.Join(common.Stage1RootfsPath(p.Root), "usr/lib/systemd"))

		return args, env, nil

	case "coreos":
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), interpBin))
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), nspawnBin))
		args = append(args, "--boot")             // Launch systemd in the pod
		args = append(args, "--notify-ready=yes") // From systemd v231

		if context := os.Getenv(common.EnvSELinuxContext); context != "" {
			args = append(args, fmt.Sprintf("-Z%s", context))
		}

		if context := os.Getenv(common.EnvSELinuxMountContext); context != "" {
			args = append(args, fmt.Sprintf("-L%s", context))
		}

		if canMachinedRegister {
			args = append(args, fmt.Sprintf("--register=true"))
		} else {
			args = append(args, fmt.Sprintf("--register=false"))
		}

		// use only dynamic libraries provided in the image
		// from systemd v231 there's a new internal libsystemd-shared-v231.so
		// which is present in /usr/lib/systemd
		env = append(env, "LD_LIBRARY_PATH="+
			filepath.Join(common.Stage1RootfsPath(p.Root), "usr/lib")+":"+
			filepath.Join(common.Stage1RootfsPath(p.Root), "usr/lib/systemd"))

	case "src":
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), interpBin))
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), nspawnBin))
		args = append(args, "--boot")             // Launch systemd in the pod
		args = append(args, "--notify-ready=yes") // From systemd v231

		if context := os.Getenv(common.EnvSELinuxContext); context != "" {
			args = append(args, fmt.Sprintf("-Z%s", context))
		}

		if context := os.Getenv(common.EnvSELinuxMountContext); context != "" {
			args = append(args, fmt.Sprintf("-L%s", context))
		}

		if canMachinedRegister {
			args = append(args, fmt.Sprintf("--register=true"))
		} else {
			args = append(args, fmt.Sprintf("--register=false"))
		}

		// use only dynamic libraries provided in the image
		// from systemd v231 there's a new internal libsystemd-shared-v231.so
		// which is present in /usr/lib/systemd
		env = append(env, "LD_LIBRARY_PATH="+
			filepath.Join(common.Stage1RootfsPath(p.Root), "usr/lib")+":"+
			filepath.Join(common.Stage1RootfsPath(p.Root), "usr/lib/systemd"))

	case "host":
		hostNspawnBin, err := common.LookupPath("systemd-nspawn", os.Getenv("PATH"))
		if err != nil {
			return nil, nil, err
		}

		// Check dynamically which version is installed on the host
		// Support version >= 220
		versionBytes, err := exec.Command(hostNspawnBin, "--version").CombinedOutput()
		if err != nil {
			return nil, nil, errwrap.Wrap(fmt.Errorf("unable to probe %s version", hostNspawnBin), err)
		}
		versionStr := strings.SplitN(string(versionBytes), "\n", 2)[0]
		var version int
		n, err := fmt.Sscanf(versionStr, "systemd %d", &version)
		if err != nil {
			return nil, nil, fmt.Errorf("cannot parse version: %q", versionStr)
		}
		if n != 1 || version < 220 {
			return nil, nil, fmt.Errorf("rkt needs systemd-nspawn >= 220. %s version not supported: %v", hostNspawnBin, versionStr)
		}

		// Copy systemd, bash, etc. in stage1 at run-time
		if err := installAssets(); err != nil {
			return nil, nil, errwrap.Wrap(errors.New("cannot install assets from the host"), err)
		}

		args = append(args, hostNspawnBin)
		args = append(args, "--boot") // Launch systemd in the pod
		args = append(args, fmt.Sprintf("--register=true"))

		if version >= 231 {
			args = append(args, "--notify-ready=yes") // From systemd v231
		}

		if context := os.Getenv(common.EnvSELinuxContext); context != "" {
			args = append(args, fmt.Sprintf("-Z%s", context))
		}

		if context := os.Getenv(common.EnvSELinuxMountContext); context != "" {
			args = append(args, fmt.Sprintf("-L%s", context))
		}

	default:
		return nil, nil, fmt.Errorf("unrecognized stage1 flavor: %q", flavor)
	}

	machineIDBytes := append([]byte(machineID), '\n')
	if err := ioutil.WriteFile(mPath, machineIDBytes, 0644); err != nil {
		log.FatalE("error writing /etc/machine-id", err)
	}

	// link journal only if the host is running systemd
	if util.IsRunningSystemd() {
		args = append(args, "--link-journal=try-guest")

		keepUnit, err := util.RunningFromSystemService()
		if err != nil {
			if err == dlopen.ErrSoNotFound {
				log.Print("warning: libsystemd not found even though systemd is running. Cgroup limits set by the environment (e.g. a systemd service) won't be enforced.")
			} else {
				return nil, nil, errwrap.Wrap(errors.New("error determining if we're running from a system service"), err)
			}
		}

		if keepUnit {
			args = append(args, "--keep-unit")
		}
	} else {
		args = append(args, "--link-journal=no")
	}

	if !debug {
		args = append(args, "--quiet")             // silence most nspawn output (log_warning is currently not covered by this)
		env = append(env, "SYSTEMD_LOG_LEVEL=err") // silence log_warning too
	}

	env = append(env, "SYSTEMD_NSPAWN_CONTAINER_SERVICE=rkt")
	// TODO (alepuccetti) remove this line when rkt will use cgroup namespace
	// If the kernel has the cgroup namespace enabled, systemd v232 will use it by default.
	// This was introduced by https://github.com/systemd/systemd/pull/3809 and it will cause
	// problems in rkt when cgns is enabled and cgroup-v1 is used. For more information see
	// https://github.com/systemd/systemd/pull/3589#discussion_r70277625.
	// The following line tells systemd-nspawn not to use cgroup namespace using the environment variable
	// introduced by https://github.com/systemd/systemd/pull/3809.
	env = append(env, "SYSTEMD_NSPAWN_USE_CGNS=no")

	if insecureOptions.DisablePaths {
		env = append(env, "SYSTEMD_NSPAWN_API_VFS_WRITABLE=yes")
	}

	if len(privateUsers) > 0 {
		args = append(args, "--private-users="+privateUsers)
	}

	nsargs, err := stage1initcommon.PodToNspawnArgs(p, insecureOptions)
	if err != nil {
		return nil, nil, errwrap.Wrap(errors.New("failed to generate nspawn args"), err)
	}
	args = append(args, nsargs...)

	// Arguments to systemd
	args = append(args, "--")
	args = append(args, "--default-standard-output=tty") // redirect all service logs straight to tty
	if !debug {
		args = append(args, "--log-target=null") // silence systemd output inside pod
		args = append(args, "--show-status=0")   // silence systemd initialization status output
	}

	return args, env, nil
}
Example #14
func Main() {
	cfg := NewConfig()
	err := cfg.Parse(os.Args[1:])
	if err != nil {
		plog.Errorf("error verifying flags, %v. See 'etcd --help'.", err)
		switch err {
		case errUnsetAdvertiseClientURLsFlag:
			plog.Errorf("When listening on specific address(es), this etcd process must advertise accessible url(s) to each connected client.")
		}
		os.Exit(1)
	}
	setupLogging(cfg)

	var stopped <-chan struct{}

	GoMaxProcs := 1
	if envMaxProcs, err := strconv.Atoi(os.Getenv("GOMAXPROCS")); err == nil {
		GoMaxProcs = envMaxProcs
	}
	plog.Infof("setting maximum number of CPUs to %d, total number of available CPUs is %d", GoMaxProcs, runtime.NumCPU())
	runtime.GOMAXPROCS(GoMaxProcs)

	// TODO: check whether fields are set instead of whether fields have default value
	if cfg.name != defaultName && cfg.initialCluster == initialClusterFromName(defaultName) {
		cfg.initialCluster = initialClusterFromName(cfg.name)
	}

	if cfg.dir == "" {
		cfg.dir = fmt.Sprintf("%v.etcd", cfg.name)
		plog.Warningf("no data-dir provided, using default data-dir ./%s", cfg.dir)
	}

	which := identifyDataDirOrDie(cfg.dir)
	if which != dirEmpty {
		plog.Noticef("the server is already initialized as %v before, starting as etcd %v...", which, which)
		switch which {
		case dirMember:
			stopped, err = startEtcd(cfg)
		case dirProxy:
			err = startProxy(cfg)
		default:
			plog.Panicf("unhandled dir type %v", which)
		}
	} else {
		shouldProxy := cfg.isProxy()
		if !shouldProxy {
			stopped, err = startEtcd(cfg)
			if err == discovery.ErrFullCluster && cfg.shouldFallbackToProxy() {
				plog.Noticef("discovery cluster full, falling back to %s", fallbackFlagProxy)
				shouldProxy = true
			}
		}
		if shouldProxy {
			err = startProxy(cfg)
		}
	}

	if err != nil {
		switch err {
		case discovery.ErrDuplicateID:
			plog.Errorf("member %q has previously registered with discovery service token (%s).", cfg.name, cfg.durl)
			plog.Errorf("But etcd could not find valid cluster configuration in the given data dir (%s).", cfg.dir)
			plog.Infof("Please check the given data dir path if the previous bootstrap succeeded")
			plog.Infof("or use a new discovery token if the previous bootstrap failed.")
			os.Exit(1)
		case discovery.ErrDuplicateName:
			plog.Errorf("member with duplicated name has registered with discovery service token(%s).", cfg.durl)
			plog.Errorf("please check (cURL) the discovery token for more information.")
			plog.Errorf("please do not reuse the discovery token and generate a new one to bootstrap the cluster.")
		default:
			plog.Fatalf("%v", err)
		}
	}

	osutil.HandleInterrupts()

	if systemdutil.IsRunningSystemd() {
		// At this point, the initialization of etcd is done.
		// The listeners are listening on the TCP ports and ready
		// for accepting connections.
		// The http server is probably ready for serving incoming
		// connections. If it is not, the connection might be pending
		// for less than one second.
		err := daemon.SdNotify("READY=1")
		if err != nil {
			plog.Errorf("failed to notify systemd for readiness")
		}
	}

	<-stopped
	osutil.Exit(0)
}
Example #15
// getArgsEnv returns the nspawn or lkvm args and env according to the flavor used
func getArgsEnv(p *stage1commontypes.Pod, flavor string, debug bool, n *networking.Networking) ([]string, []string, error) {
	var args []string
	env := os.Environ()

	// We store the pod's flavor so we can later garbage collect it correctly
	if err := os.Symlink(flavor, filepath.Join(p.Root, stage1initcommon.FlavorFile)); err != nil {
		return nil, nil, errwrap.Wrap(errors.New("failed to create flavor symlink"), err)
	}

	// set hostname inside pod
	// According to systemd manual (https://www.freedesktop.org/software/systemd/man/hostname.html) :
	// "The /etc/hostname file configures the name of the local system that is set
	// during boot using the sethostname system call"
	if hostname == "" {
		hostname = stage1initcommon.GetMachineID(p)
	}
	hostnamePath := filepath.Join(common.Stage1RootfsPath(p.Root), "etc/hostname")
	if err := ioutil.WriteFile(hostnamePath, []byte(hostname), 0644); err != nil {
		return nil, nil, fmt.Errorf("error writing %s, %s", hostnamePath, err)
	}

	switch flavor {
	case "kvm":
		if privateUsers != "" {
			return nil, nil, fmt.Errorf("flag --private-users cannot be used with an lkvm stage1")
		}

		// kernel and lkvm are relative paths, because init has /var/lib/rkt/..../uuid as its working directory
		// TODO: move to path.go
		kernelPath := filepath.Join(common.Stage1RootfsPath(p.Root), "bzImage")
		lkvmPath := filepath.Join(common.Stage1RootfsPath(p.Root), "lkvm")
		netDescriptions := kvm.GetNetworkDescriptions(n)
		lkvmNetArgs, err := kvm.GetKVMNetArgs(netDescriptions)
		if err != nil {
			return nil, nil, err
		}

		cpu, mem := kvm.GetAppsResources(p.Manifest.Apps)

		kernelParams := []string{
			"console=hvc0",
			"init=/usr/lib/systemd/systemd",
			"no_timer_check",
			"noreplace-smp",
			"systemd.default_standard_error=journal+console",
			"systemd.default_standard_output=journal+console",
			// "systemd.default_standard_output=tty",
			"tsc=reliable",
			"MACHINEID=" + p.UUID.String(),
		}

		if debug {
			kernelParams = append(kernelParams, []string{
				"debug",
				"systemd.log_level=debug",
				"systemd.show_status=true",
				// "systemd.confirm_spawn=true",
			}...)
		} else {
			kernelParams = append(kernelParams, "quiet")
		}

		args = append(args, []string{
			"./" + lkvmPath, // relative path
			"run",
			"--name", "rkt-" + p.UUID.String(),
			"--no-dhcp", // speed bootup
			"--cpu", strconv.FormatInt(cpu, 10),
			"--mem", strconv.FormatInt(mem, 10),
			"--console=virtio",
			"--kernel", kernelPath,
			"--disk", "stage1/rootfs", // relative to run/pods/uuid dir this is a place where systemd resides
			// MACHINEID will be available as environment variable
			"--params", strings.Join(kernelParams, " "),
		}...,
		)
		args = append(args, lkvmNetArgs...)

		if debug {
			args = append(args, "--debug")
		}

		// host volume sharing with 9p
		nsargs := stage1initcommon.VolumesToKvmDiskArgs(p.Manifest.Volumes)
		args = append(args, nsargs...)

		// lkvm requires $HOME to be defined,
		// see https://github.com/coreos/rkt/issues/1393
		if os.Getenv("HOME") == "" {
			env = append(env, "HOME=/root")
		}

		return args, env, nil

	case "coreos":
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), interpBin))
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), nspawnBin))
		args = append(args, "--boot") // Launch systemd in the pod

		if context := os.Getenv(common.EnvSELinuxContext); context != "" {
			args = append(args, fmt.Sprintf("-Z%s", context))
		}

		if context := os.Getenv(common.EnvSELinuxMountContext); context != "" {
			args = append(args, fmt.Sprintf("-L%s", context))
		}

		if machinedRegister() {
			args = append(args, fmt.Sprintf("--register=true"))
		} else {
			args = append(args, fmt.Sprintf("--register=false"))
		}

		// use only dynamic libraries provided in the image
		env = append(env, "LD_LIBRARY_PATH="+filepath.Join(common.Stage1RootfsPath(p.Root), "usr/lib"))

	case "src":
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), nspawnBin))
		args = append(args, "--boot") // Launch systemd in the pod

		if context := os.Getenv(common.EnvSELinuxContext); context != "" {
			args = append(args, fmt.Sprintf("-Z%s", context))
		}

		if context := os.Getenv(common.EnvSELinuxMountContext); context != "" {
			args = append(args, fmt.Sprintf("-L%s", context))
		}

		if machinedRegister() {
			args = append(args, fmt.Sprintf("--register=true"))
		} else {
			args = append(args, fmt.Sprintf("--register=false"))
		}

	case "host":
		hostNspawnBin, err := common.LookupPath("systemd-nspawn", os.Getenv("PATH"))
		if err != nil {
			return nil, nil, err
		}

		// Check dynamically which version is installed on the host
		// Support version >= 220
		versionBytes, err := exec.Command(hostNspawnBin, "--version").CombinedOutput()
		if err != nil {
			return nil, nil, errwrap.Wrap(fmt.Errorf("unable to probe %s version", hostNspawnBin), err)
		}
		versionStr := strings.SplitN(string(versionBytes), "\n", 2)[0]
		var version int
		n, err := fmt.Sscanf(versionStr, "systemd %d", &version)
		if err != nil {
			return nil, nil, fmt.Errorf("cannot parse version: %q", versionStr)
		}
		if n != 1 || version < 220 {
			return nil, nil, fmt.Errorf("rkt needs systemd-nspawn >= 220. %s version not supported: %v", hostNspawnBin, versionStr)
		}

		// Copy systemd, bash, etc. in stage1 at run-time
		if err := installAssets(); err != nil {
			return nil, nil, errwrap.Wrap(errors.New("cannot install assets from the host"), err)
		}

		args = append(args, hostNspawnBin)
		args = append(args, "--boot") // Launch systemd in the pod
		args = append(args, fmt.Sprintf("--register=true"))

		if context := os.Getenv(common.EnvSELinuxContext); context != "" {
			args = append(args, fmt.Sprintf("-Z%s", context))
		}

		if context := os.Getenv(common.EnvSELinuxMountContext); context != "" {
			args = append(args, fmt.Sprintf("-L%s", context))
		}

	default:
		return nil, nil, fmt.Errorf("unrecognized stage1 flavor: %q", flavor)
	}

	// link journal only if the host is running systemd
	if util.IsRunningSystemd() {
		// we write /etc/machine-id here because systemd-nspawn needs it to link
		// the container's journal to the host
		mPath := filepath.Join(common.Stage1RootfsPath(p.Root), "etc", "machine-id")
		mID := strings.Replace(p.UUID.String(), "-", "", -1)

		if err := ioutil.WriteFile(mPath, []byte(mID), 0644); err != nil {
			log.FatalE("error writing /etc/machine-id", err)
		}

		args = append(args, "--link-journal=try-guest")

		keepUnit, err := util.RunningFromSystemService()
		if err != nil {
			if err == util.ErrSoNotFound {
				log.Print("warning: libsystemd not found even though systemd is running. Cgroup limits set by the environment (e.g. a systemd service) won't be enforced.")
			} else {
				return nil, nil, errwrap.Wrap(errors.New("error determining if we're running from a system service"), err)
			}
		}

		if keepUnit {
			args = append(args, "--keep-unit")
		}
	}

	if !debug {
		args = append(args, "--quiet")             // silence most nspawn output (log_warning is currently not covered by this)
		env = append(env, "SYSTEMD_LOG_LEVEL=err") // silence log_warning too
	}

	env = append(env, "SYSTEMD_NSPAWN_CONTAINER_SERVICE=rkt")

	if len(privateUsers) > 0 {
		args = append(args, "--private-users="+privateUsers)
	}

	nsargs, err := stage1initcommon.PodToNspawnArgs(p)
	if err != nil {
		return nil, nil, errwrap.Wrap(errors.New("failed to generate nspawn args"), err)
	}
	args = append(args, nsargs...)

	// Arguments to systemd
	args = append(args, "--")
	args = append(args, "--default-standard-output=tty") // redirect all service logs straight to tty
	if !debug {
		args = append(args, "--log-target=null") // silence systemd output inside pod
		args = append(args, "--show-status=0")   // silence systemd initialization status output
	}

	return args, env, nil
}
Example #16
// CheckApply checks the resource state and applies the resource if the bool
// input is true. It returns whether the state check passed, along with any error.
func (obj *SvcRes) CheckApply(apply bool) (checkOK bool, err error) {
	if !systemdUtil.IsRunningSystemd() {
		return false, fmt.Errorf("Systemd is not running.")
	}

	conn, err := systemd.NewSystemdConnection() // needs root access
	if err != nil {
		return false, errwrap.Wrapf(err, "Failed to connect to systemd")
	}
	defer conn.Close()

	var svc = fmt.Sprintf("%s.service", obj.Name) // systemd name

	loadstate, err := conn.GetUnitProperty(svc, "LoadState")
	if err != nil {
		return false, errwrap.Wrapf(err, "Failed to get load state")
	}

	// NOTE: we have to compare variants with other variants, they are really strings...
	var notFound = (loadstate.Value == dbus.MakeVariant("not-found"))
	if notFound {
		return false, fmt.Errorf("Failed to find svc: %s", svc)
	}

	// XXX: check svc "enabled at boot" or not status...

	//conn.GetUnitProperties(svc)
	activestate, err := conn.GetUnitProperty(svc, "ActiveState")
	if err != nil {
		return false, errwrap.Wrapf(err, "Failed to get active state")
	}

	var running = (activestate.Value == dbus.MakeVariant("active"))
	var stateOK = ((obj.State == "") || (obj.State == "running" && running) || (obj.State == "stopped" && !running))
	var startupOK = true        // XXX: DETECT AND SET
	var refresh = obj.Refresh() // do we have a pending reload to apply?

	if stateOK && startupOK && !refresh {
		return true, nil // we are in the correct state
	}

	// state is not okay, no work done, exit, but without error
	if !apply {
		return false, nil
	}

	// apply portion
	log.Printf("%s[%s]: Apply", obj.Kind(), obj.GetName())
	var files = []string{svc} // the svc represented in a list
	if obj.Startup == "enabled" {
		_, _, err = conn.EnableUnitFiles(files, false, true)

	} else if obj.Startup == "disabled" {
		_, err = conn.DisableUnitFiles(files, false)
	}

	if err != nil {
		return false, errwrap.Wrapf(err, "Unable to change startup status")
	}

	// XXX: do we need to use a buffered channel here?
	result := make(chan string, 1) // catch result information

	if obj.State == "running" {
		_, err = conn.StartUnit(svc, "fail", result)
		if err != nil {
			return false, errwrap.Wrapf(err, "Failed to start unit")
		}
		if refresh {
			log.Printf("%s[%s]: Skipping reload, due to pending start", obj.Kind(), obj.GetName())
		}
		refresh = false // we did a start, so a reload is not needed
	} else if obj.State == "stopped" {
		_, err = conn.StopUnit(svc, "fail", result)
		if err != nil {
			return false, errwrap.Wrapf(err, "Failed to stop unit")
		}
		if refresh {
			log.Printf("%s[%s]: Skipping reload, due to pending stop", obj.Kind(), obj.GetName())
		}
		refresh = false // we did a stop, so a reload is not needed
	}

	status := <-result
	if status == "" {
		return false, fmt.Errorf("Systemd service action result is empty")
	}
	if status != "done" {
		return false, fmt.Errorf("Unknown systemd return string: %v", status)
	}

	if refresh { // we need to reload the service
		// XXX: run a svc reload here!
		log.Printf("%s[%s]: Reloading...", obj.Kind(), obj.GetName())
	}

	// XXX: also set enabled on boot

	return false, nil // success
}
Example #17
					"UsageBytes":      bounded(1*mb, 10*gb),
					"WorkingSetBytes": bounded(1*mb, 10*gb),
					"RSSBytes":        bounded(1*mb, 1*gb),
					"PageFaults":      bounded(1000, 1E9),
					"MajorPageFaults": bounded(0, 100000),
				}),
				"Rootfs":             BeNil(),
				"Logs":               BeNil(),
				"UserDefinedMetrics": BeEmpty(),
			})
			systemContainers := gstruct.Elements{
				"kubelet": sysContExpectations,
				"runtime": sysContExpectations,
			}
			// The Kubelet only manages the 'misc' system container if the host is not running systemd.
			if !systemdutil.IsRunningSystemd() {
				framework.Logf("Host not running systemd; expecting 'misc' system container.")
				systemContainers["misc"] = sysContExpectations
			}
			// Expectations for pods.
			podExpectations := gstruct.MatchAllFields(gstruct.Fields{
				"PodRef":    gstruct.Ignore(),
				"StartTime": recent(maxStartAge),
				"Containers": gstruct.MatchAllElements(summaryObjectID, gstruct.Elements{
					"busybox-container": gstruct.MatchAllFields(gstruct.Fields{
						"Name":      Equal("busybox-container"),
						"StartTime": recent(maxStartAge),
						"CPU": ptrMatchAllFields(gstruct.Fields{
							"Time":                 recent(maxStatsAge),
							"UsageNanoCores":       bounded(100000, 100000000),
							"UsageCoreNanoSeconds": bounded(10000000, 1000000000),
Example #18
func TestServiceFile(t *testing.T) {
	if !sd_util.IsRunningSystemd() {
		t.Skip("Systemd is not running on the host.")
	}

	ctx := testutils.NewRktRunCtx()
	defer ctx.Cleanup()

	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	conn, err := sd_dbus.New()
	if err != nil {
		t.Fatal(err)
	}

	imageFile := getInspectImagePath()

	image, err := filepath.Abs(imageFile)
	if err != nil {
		t.Fatal(err)
	}
	opts := "-- --print-msg=HelloWorld --sleep=1000"

	cmd := fmt.Sprintf("%s --insecure-options=image run --mds-register=false --set-env=MESSAGE_LOOP=1000 %s %s", ctx.Cmd(), image, opts)
	props := []sd_dbus.Property{
		sd_dbus.PropExecStart(strings.Split(cmd, " "), false),
	}
	target := fmt.Sprintf("rkt-testing-transient-%d.service", r.Int())

	reschan := make(chan string)
	_, err = conn.StartTransientUnit(target, "replace", props, reschan)
	if err != nil {
		t.Fatal(err)
	}

	job := <-reschan
	if job != "done" {
		t.Fatal("Job is not done:", job)
	}

	units, err := conn.ListUnits()
	if err != nil {
		t.Fatal(err)
	}

	var found bool
	for _, u := range units {
		if u.Name == target {
			found = true
			if u.ActiveState != "active" {
				t.Fatalf("Test unit %s not active: %s (target: %s)", u.Name, u.ActiveState, target)
			}
		}
	}

	if !found {
		t.Fatalf("Test unit not found in list")
	}

	// Run the unit for 10 seconds. You can check the logs manually in journalctl
	time.Sleep(10 * time.Second)

	// Stop the unit
	_, err = conn.StopUnit(target, "replace", reschan)
	if err != nil {
		t.Fatal(err)
	}

	// wait for StopUnit job to complete
	<-reschan

	units, err = conn.ListUnits()
	if err != nil {
		t.Fatal(err)
	}

	found = false
	for _, u := range units {
		if u.Name == target {
			found = true
		}
	}

	if found {
		t.Fatalf("Test unit found in list, should be stopped")
	}
}
Example #19
func UseSystemd() bool {
	if !systemdUtil.IsRunningSystemd() {
		return false
	}

	connLock.Lock()
	defer connLock.Unlock()

	if theConn == nil {
		var err error
		theConn, err = systemdDbus.New()
		if err != nil {
			return false
		}

		// Assume we have StartTransientUnit
		hasStartTransientUnit = true

		// But if we get UnknownMethod error we don't
		if _, err := theConn.StartTransientUnit("test.scope", "invalid", nil, nil); err != nil {
			if dbusError, ok := err.(dbus.Error); ok {
				if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" {
					hasStartTransientUnit = false
					return hasStartTransientUnit
				}
			}
		}

		// Ensure the scope name we use doesn't exist. Use the Pid to
		// avoid collisions between multiple libcontainer users on a
		// single host.
		scope := fmt.Sprintf("libcontainer-%d-systemd-test-default-dependencies.scope", os.Getpid())
		testScopeExists := true
		for i := 0; i <= testScopeWait; i++ {
			if _, err := theConn.StopUnit(scope, "replace", nil); err != nil {
				if dbusError, ok := err.(dbus.Error); ok {
					if strings.Contains(dbusError.Name, "org.freedesktop.systemd1.NoSuchUnit") {
						testScopeExists = false
						break
					}
				}
			}
			time.Sleep(time.Millisecond)
		}

		// Bail out if we can't kill this scope without testing for DefaultDependencies
		if testScopeExists {
			return hasStartTransientUnit
		}

		// Assume StartTransientUnit on a scope allows DefaultDependencies
		hasTransientDefaultDependencies = true
		ddf := newProp("DefaultDependencies", false)
		if _, err := theConn.StartTransientUnit(scope, "replace", []systemdDbus.Property{ddf}, nil); err != nil {
			if dbusError, ok := err.(dbus.Error); ok {
				if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") {
					hasTransientDefaultDependencies = false
				}
			}
		}

		// Not critical because of the stop unit logic above.
		theConn.StopUnit(scope, "replace", nil)

		// Assume StartTransientUnit on a scope allows Delegate
		hasDelegate = true
		dl := newProp("Delegate", true)
		if _, err := theConn.StartTransientUnit(scope, "replace", []systemdDbus.Property{dl}, nil); err != nil {
			if dbusError, ok := err.(dbus.Error); ok {
				if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") {
					hasDelegate = false
				}
			}
		}

		// Assume we have the ability to start a transient unit as a slice
		// This was broken until systemd v229, but has been back-ported on RHEL environments >= 219
		// For details, see: https://bugzilla.redhat.com/show_bug.cgi?id=1370299
		hasStartTransientSliceUnit = true

		// To ensure simple clean-up, we create a slice off the root with no hierarchy
		slice := fmt.Sprintf("libcontainer_%d_systemd_test_default.slice", os.Getpid())
		if _, err := theConn.StartTransientUnit(slice, "replace", nil, nil); err != nil {
			if _, ok := err.(dbus.Error); ok {
				hasStartTransientSliceUnit = false
			}
		}

		for i := 0; i <= testSliceWait; i++ {
			if _, err := theConn.StopUnit(slice, "replace", nil); err != nil {
				if dbusError, ok := err.(dbus.Error); ok {
					if strings.Contains(dbusError.Name, "org.freedesktop.systemd1.NoSuchUnit") {
						hasStartTransientSliceUnit = false
						break
					}
				}
			} else {
				break
			}
			time.Sleep(time.Millisecond)
		}

		// Not critical because of the stop unit logic above.
		theConn.StopUnit(scope, "replace", nil)
		theConn.StopUnit(slice, "replace", nil)
	}
	return hasStartTransientUnit
}
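The newProp helper called above for the DefaultDependencies and Delegate probes is not part of this excerpt; a likely minimal definition, assuming the go-systemd dbus.Property type and godbus's MakeVariant, is:

// Presumed shape of the helper used above: wrap a name/value pair in a
// systemd D-Bus property.
func newProp(name string, value interface{}) systemdDbus.Property {
	return systemdDbus.Property{
		Name:  name,
		Value: dbus.MakeVariant(value),
	}
}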
Example #20
File: service.go Project: 40a/mgmt
// Service watcher
func (obj *ServiceType) Watch() {
	if obj.IsWatching() {
		return
	}
	obj.SetWatching(true)
	defer obj.SetWatching(false)

	// obj.Name: service name
	//vertex := obj.GetVertex()         // stored with SetVertex
	if !util.IsRunningSystemd() {
		log.Fatal("Systemd is not running.")
	}

	conn, err := systemd.NewSystemdConnection() // needs root access
	if err != nil {
		log.Fatal("Failed to connect to systemd: ", err)
	}
	defer conn.Close()

	bus, err := dbus.SystemBus()
	if err != nil {
		log.Fatal("Failed to connect to bus: ", err)
	}

	// XXX: will this detect new units?
	bus.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
		"type='signal',interface='org.freedesktop.systemd1.Manager',member='Reloading'")
	buschan := make(chan *dbus.Signal, 10)
	bus.Signal(buschan)

	var service = fmt.Sprintf("%v.service", obj.Name) // systemd name
	var send = false                                  // send event?
	var dirty = false
	var invalid = false              // does the service exist or not?
	var previous bool                // previous invalid value
	set := conn.NewSubscriptionSet() // no error should be returned
	subChannel, subErrors := set.Subscribe()
	var activeSet = false

	for {
		// XXX: watch for an event for new units...
		// XXX: detect if startup enabled/disabled value changes...

		previous = invalid
		invalid = false

		// firstly, does service even exist or not?
		loadstate, err := conn.GetUnitProperty(service, "LoadState")
		if err != nil {
			log.Printf("Failed to get property: %v", err)
			invalid = true
		}

		if !invalid {
			var notFound = (loadstate.Value == dbus.MakeVariant("not-found"))
			if notFound { // XXX: in the loop we'll handle changes better...
				log.Printf("Failed to find service: %v", service)
				invalid = true // XXX ?
			}
		}

		if previous != invalid { // if invalid changed, send signal
			send = true
			dirty = true
		}

		if invalid {
			log.Printf("Waiting for: %v", service) // waiting for service to appear...
			if activeSet {
				activeSet = false
				set.Remove(service) // no return value should ever occur
			}

			obj.SetState(typeWatching) // reset
			select {
			case <-buschan: // XXX wait for new units event to unstick
				obj.SetConvergedState(typeConvergedNil)
				// loop so that we can see the changed invalid signal
				log.Printf("Service[%v]->DaemonReload()", service)

			case event := <-obj.events:
				obj.SetConvergedState(typeConvergedNil)
				if ok := obj.ReadEvent(&event); !ok {
					return // exit
				}
				if event.GetActivity() {
					dirty = true
				}
				send = true
			case <-TimeAfterOrBlock(obj.ctimeout):
				obj.SetConvergedState(typeConvergedTimeout)
				obj.converged <- true
				continue
			}
		} else {
			if !activeSet {
				activeSet = true
				set.Add(service) // no return value should ever occur
			}

			log.Printf("Watching: %v", service) // attempting to watch...
			obj.SetState(typeWatching)          // reset
			select {
			case event := <-subChannel:

				log.Printf("Service event: %+v", event)
				// NOTE: the value returned is a map for some reason...
				if event[service] != nil {
					// event[service].ActiveState is not nil
					if event[service].ActiveState == "active" {
						log.Printf("Service[%v]->Started()", service)
					} else if event[service].ActiveState == "inactive" {
						log.Printf("Service[%v]->Stopped!()", service)
					} else {
						log.Fatal("Unknown service state: ", event[service].ActiveState)
					}
				} else {
					// service stopped (and ActiveState is nil...)
					log.Printf("Service[%v]->Stopped", service)
				}
				send = true
				dirty = true

			case err := <-subErrors:
				obj.SetConvergedState(typeConvergedNil) // XXX ?
				log.Println("error:", err)
				log.Fatal(err)
				//vertex.events <- fmt.Sprintf("service: %v", "error") // XXX: how should we handle errors?

			case event := <-obj.events:
				obj.SetConvergedState(typeConvergedNil)
				if ok := obj.ReadEvent(&event); !ok {
					return // exit
				}
				if event.GetActivity() {
					dirty = true
				}
				send = true
			}
		}

		if send {
			send = false
			if dirty {
				dirty = false
				obj.isStateOK = false // something made state dirty
			}
			Process(obj) // XXX: rename this function
		}

	}
}
Example #21
// CheckApply is run to check the state and, if apply is true, to apply the
// necessary changes to reach the desired state. This is run before Watch and
// again if Watch finds a change occurring to the state.
func (obj *NspawnRes) CheckApply(apply bool) (checkok bool, err error) {
	if global.DEBUG {
		log.Printf("%s[%s]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)
	}

	// this resource depends on systemd ensure that it's running
	if !systemdUtil.IsRunningSystemd() {
		return false, errors.New("Systemd is not running.")
	}

	// connect to org.freedesktop.machine1.Manager
	conn, err := machined.New()
	if err != nil {
		return false, errwrap.Wrapf(err, "Failed to connect to dbus")
	}

	// compare the current state with the desired state and perform the
	// appropriate action
	var exists = true
	properties, err := conn.GetProperties(obj.GetName())
	if err != nil {
		if err, ok := err.(dbus.Error); ok && err.Name !=
			"org.freedesktop.machine1.NoSuchMachine" {
			return false, err
		}
		exists = false
		// if we could not successfully get the properties because
		// there's no such machine the machine is stopped
		// error if we need the image ignore if we don't
		if _, err = conn.GetImage(obj.GetName()); err != nil && obj.State != stopped {
			return false, fmt.Errorf(
				"No machine or image named '%s'",
				obj.GetName())
		}
	}
	if global.DEBUG {
		log.Printf("%s[%s]: properties: %v", obj.Kind(), obj.GetName(), properties)
	}
	// if the machine doesn't exist and is supposed to
	// be stopped or the state matches we're done
	if !exists && obj.State == stopped || properties["State"] == obj.State {
		if global.DEBUG {
			log.Printf("%s[%s]: CheckApply() in valid state", obj.Kind(), obj.GetName())
		}
		obj.isStateOK = true // state is ok
		return true, nil
	}

	// end of state checking. if we're here, checkok is false
	if !apply {
		return false, nil
	}

	if global.DEBUG {
		log.Printf("%s[%s]: CheckApply() applying '%s' state", obj.Kind(), obj.GetName(), obj.State)
	}

	if obj.State == running {
		// start the machine using svc resource
		log.Printf("%s[%s]: Starting machine", obj.Kind(), obj.GetName())
		// assume state had to be changed at this point, ignore checkOK
		if _, err := obj.svc.CheckApply(apply); err != nil {
			return false, errwrap.Wrapf(err, "Nested svc failed")
		}
	}
	if obj.State == stopped {
		// terminate the machine with
		// org.freedesktop.machine1.Manager.KillMachine
		log.Printf("%s[%s]: Stopping machine", obj.Kind(), obj.GetName())
		if err := conn.TerminateMachine(obj.GetName()); err != nil {
			return false, errwrap.Wrapf(err, "Failed to stop machine")
		}
	}

	obj.isStateOK = true // state is now good
	return false, nil
}
Example #22
func startEtcdOrProxyV2() {
	grpc.EnableTracing = false

	cfg := newConfig()
	defaultInitialCluster := cfg.InitialCluster

	err := cfg.parse(os.Args[1:])
	if err != nil {
		plog.Errorf("error verifying flags, %v. See 'etcd --help'.", err)
		switch err {
		case embed.ErrUnsetAdvertiseClientURLsFlag:
			plog.Errorf("When listening on specific address(es), this etcd process must advertise accessible url(s) to each connected client.")
		}
		os.Exit(1)
	}
	setupLogging(cfg)

	var stopped <-chan struct{}
	var errc <-chan error

	plog.Infof("etcd Version: %s\n", version.Version)
	plog.Infof("Git SHA: %s\n", version.GitSHA)
	plog.Infof("Go Version: %s\n", runtime.Version())
	plog.Infof("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)

	GoMaxProcs := runtime.GOMAXPROCS(0)
	plog.Infof("setting maximum number of CPUs to %d, total number of available CPUs is %d", GoMaxProcs, runtime.NumCPU())

	// TODO: check whether fields are set instead of whether fields have default value
	defaultHost, defaultHostErr := cfg.IsDefaultHost()
	defaultHostOverride := defaultHost == "" || defaultHostErr == nil
	if (defaultHostOverride || cfg.Name != embed.DefaultName) && cfg.InitialCluster == defaultInitialCluster {
		cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
	}

	if cfg.Dir == "" {
		cfg.Dir = fmt.Sprintf("%v.etcd", cfg.Name)
		plog.Warningf("no data-dir provided, using default data-dir ./%s", cfg.Dir)
	}

	which := identifyDataDirOrDie(cfg.Dir)
	if which != dirEmpty {
		plog.Noticef("the server is already initialized as %v before, starting as etcd %v...", which, which)
		switch which {
		case dirMember:
			stopped, errc, err = startEtcd(&cfg.Config)
		case dirProxy:
			err = startProxy(cfg)
		default:
			plog.Panicf("unhandled dir type %v", which)
		}
	} else {
		shouldProxy := cfg.isProxy()
		if !shouldProxy {
			stopped, errc, err = startEtcd(&cfg.Config)
			if derr, ok := err.(*etcdserver.DiscoveryError); ok && derr.Err == discovery.ErrFullCluster {
				if cfg.shouldFallbackToProxy() {
					plog.Noticef("discovery cluster full, falling back to %s", fallbackFlagProxy)
					shouldProxy = true
				}
			}
		}
		if shouldProxy {
			err = startProxy(cfg)
		}
	}

	if err != nil {
		if derr, ok := err.(*etcdserver.DiscoveryError); ok {
			switch derr.Err {
			case discovery.ErrDuplicateID:
				plog.Errorf("member %q has previously registered with discovery service token (%s).", cfg.Name, cfg.Durl)
				plog.Errorf("But etcd could not find valid cluster configuration in the given data dir (%s).", cfg.Dir)
				plog.Infof("Please check the given data dir path if the previous bootstrap succeeded")
				plog.Infof("or use a new discovery token if the previous bootstrap failed.")
			case discovery.ErrDuplicateName:
				plog.Errorf("member with duplicated name has registered with discovery service token(%s).", cfg.Durl)
				plog.Errorf("please check (cURL) the discovery token for more information.")
				plog.Errorf("please do not reuse the discovery token and generate a new one to bootstrap the cluster.")
			default:
				plog.Errorf("%v", err)
				plog.Infof("discovery token %s was used, but failed to bootstrap the cluster.", cfg.Durl)
				plog.Infof("please generate a new discovery token and try to bootstrap again.")
			}
			os.Exit(1)
		}

		if strings.Contains(err.Error(), "include") && strings.Contains(err.Error(), "--initial-cluster") {
			plog.Infof("%v", err)
			if cfg.InitialCluster == cfg.InitialClusterFromName(cfg.Name) {
				plog.Infof("forgot to set --initial-cluster flag?")
			}
			if types.URLs(cfg.APUrls).String() == embed.DefaultInitialAdvertisePeerURLs {
				plog.Infof("forgot to set --initial-advertise-peer-urls flag?")
			}
			if cfg.InitialCluster == cfg.InitialClusterFromName(cfg.Name) && len(cfg.Durl) == 0 {
				plog.Infof("if you want to use discovery service, please set --discovery flag.")
			}
			os.Exit(1)
		}
		plog.Fatalf("%v", err)
	}

	osutil.HandleInterrupts()

	if systemdutil.IsRunningSystemd() {
		// At this point, the initialization of etcd is done.
		// The listeners are listening on the TCP ports and ready
		// for accepting connections. The etcd instance should be
		// joined with the cluster and ready to serve incoming
		// connections.
		sent, err := daemon.SdNotify(false, "READY=1")
		if err != nil {
			plog.Errorf("failed to notify systemd for readiness: %v", err)
		}
		if !sent {
			plog.Errorf("forgot to set Type=notify in systemd service file?")
		}
	}

	select {
	case lerr := <-errc:
		// fatal out on listener errors
		plog.Fatal(lerr)
	case <-stopped:
	}

	osutil.Exit(0)
}
Example #23
// Watch watches for state changes and sends a message to the bus if there is a change.
func (obj *NspawnRes) Watch(processChan chan event.Event) error {
	if obj.IsWatching() {
		return nil
	}
	obj.SetWatching(true)
	defer obj.SetWatching(false)
	cuid := obj.converger.Register()
	defer cuid.Unregister()

	var startup bool
	Startup := func(block bool) <-chan time.Time {
		if block {
			return nil // blocks forever
		}
		// 1/2 the resolution of converged timeout
		return time.After(time.Duration(500) * time.Millisecond)
	}

	// this resource depends on systemd ensure that it's running
	if !systemdUtil.IsRunningSystemd() {
		return fmt.Errorf("Systemd is not running.")
	}

	// create a private message bus
	bus, err := util.SystemBusPrivateUsable()
	if err != nil {
		return errwrap.Wrapf(err, "Failed to connect to bus")
	}

	// add a match rule to match messages going through the message bus
	call := bus.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
		fmt.Sprintf("type='signal',interface='%s',eavesdrop='true'",
			dbusInterface))
	// <-call.Done
	if err := call.Err; err != nil {
		return err
	}
	buschan := make(chan *dbus.Signal, 10)
	bus.Signal(buschan)

	var send = false
	var exit = false
	var dirty = false

	for {
		obj.SetState(ResStateWatching)
		select {
		case event := <-buschan:
			// process org.freedesktop.machine1 events for this resource's name
			if event.Body[0] == obj.GetName() {
				log.Printf("%s[%s]: Event received: %v", obj.Kind(), obj.GetName(), event.Name)
				if event.Name == machineNew {
					log.Printf("%s[%s]: Machine started", obj.Kind(), obj.GetName())
				} else if event.Name == machineRemoved {
					log.Printf("%s[%s]: Machine stopped", obj.Kind(), obj.GetName())
				} else {
					return fmt.Errorf("Unknown event: %s", event.Name)
				}
				send = true
				dirty = true
			}

		case event := <-obj.Events():
			cuid.SetConverged(false)
			if exit, send = obj.ReadEvent(&event); exit {
				return nil // exit
			}

		case <-cuid.ConvergedTimer():
			cuid.SetConverged(true) // converged!
			continue

		case <-Startup(startup):
			cuid.SetConverged(false)
			send = true
			dirty = true
		}

		// do all our event sending all together to avoid duplicate msgs
		if send || !obj.isStateOK {
			startup = true // startup finished
			send = false
			// only invalid state on certain types of events
			if dirty {
				dirty = false
				obj.isStateOK = false // something made state dirty
			}
			if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
				return err // we exit or bubble up a NACK...
			}
		}
	}
}
Example #24
func TestSocketActivation(t *testing.T) {
	if !sd_util.IsRunningSystemd() {
		t.Skip("Systemd is not running on the host.")
	}

	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	port, err := randomFreePort(t)
	if err != nil {
		t.Fatal(err)
	}

	echoImage := patchTestACI("rkt-inspect-echo.aci",
		"--exec=/echo-socket-activated",
		"--ports=test-port,protocol=tcp,port=80,socketActivated=true")
	defer os.Remove(echoImage)

	ctx := testutils.NewRktRunCtx()
	defer ctx.Cleanup()

	conn, err := sd_dbus.New()
	if err != nil {
		t.Fatal(err)
	}

	rktTestingEchoService := `
	[Unit]
	Description=Socket-activated echo server

	[Service]
	ExecStart=%s
	KillMode=process
	`

	rnd := r.Int()

	// Write unit files directly to runtime system units directory
	// (/run/systemd/system) to avoid calling LinkUnitFiles - it is buggy in
	// systemd v219 as it does not work with absolute paths.
	unitsDir := "/run/systemd/system"

	cmd := fmt.Sprintf("%s --insecure-options=image run --port=test-port:%d --mds-register=false %s", ctx.Cmd(), port, echoImage)
	serviceContent := fmt.Sprintf(rktTestingEchoService, cmd)
	serviceTargetBase := fmt.Sprintf("rkt-testing-socket-activation-%d.service", rnd)
	serviceTarget := filepath.Join(unitsDir, serviceTargetBase)

	if err := ioutil.WriteFile(serviceTarget, []byte(serviceContent), 0666); err != nil {
		t.Fatal(err)
	}
	defer os.Remove(serviceTarget)

	rktTestingEchoSocket := `
	[Unit]
	Description=Socket-activated netcat server socket

	[Socket]
	ListenStream=%d
	`
	socketContent := fmt.Sprintf(rktTestingEchoSocket, port)
	socketTargetBase := fmt.Sprintf("rkt-testing-socket-activation-%d.socket", rnd)
	socketTarget := filepath.Join(unitsDir, socketTargetBase)

	if err := ioutil.WriteFile(socketTarget, []byte(socketContent), 0666); err != nil {
		t.Fatal(err)
	}
	defer os.Remove(socketTarget)

	reschan := make(chan string)
	doJob := func() {
		job := <-reschan
		if job != "done" {
			t.Fatal("Job is not done:", job)
		}
	}

	if _, err := conn.StartUnit(socketTargetBase, "replace", reschan); err != nil {
		t.Fatal(err)
	}
	doJob()

	defer func() {
		if _, err := conn.StopUnit(socketTargetBase, "replace", reschan); err != nil {
			t.Fatal(err)
		}
		doJob()

		if _, err := conn.StopUnit(serviceTargetBase, "replace", reschan); err != nil {
			t.Fatal(err)
		}
		doJob()
	}()

	expected := "HELO\n"
	sockConn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", port))
	if err != nil {
		t.Fatal(err)
	}

	if _, err := fmt.Fprintf(sockConn, expected); err != nil {
		t.Fatal(err)
	}

	answer, err := bufio.NewReader(sockConn).ReadString('\n')
	if err != nil {
		t.Fatal(err)
	}

	if answer != expected {
		t.Fatalf("Expected %q, Got %q", expected, answer)
	}

	return
}
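StartUnit and StopUnit queue a job and report its result on the channel passed in; the doJob closure above simply waits for "done". The same pattern as a reusable helper, sketched under the assumption of the coreos/go-systemd dbus package ("example.service" is a placeholder unit name):

package main

import (
	"fmt"
	"log"

	sd_dbus "github.com/coreos/go-systemd/dbus"
)

// startAndWait queues a start job for the unit and blocks until systemd
// reports the job result, which should be "done" on success.
func startAndWait(conn *sd_dbus.Conn, unit string) error {
	reschan := make(chan string)
	if _, err := conn.StartUnit(unit, "replace", reschan); err != nil {
		return err
	}
	if res := <-reschan; res != "done" {
		return fmt.Errorf("job for %s finished with result %q", unit, res)
	}
	return nil
}

func main() {
	conn, err := sd_dbus.New() // needs root access
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := startAndWait(conn, "example.service"); err != nil {
		log.Fatal(err)
	}
}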
Example #25
0
File: etcd.go Project: oywc410/MYPG
func Main() {
	// Load the configuration items set on the command line
	cfg := NewConfig()
	err := cfg.Parse(os.Args[1:])
	if err != nil {
		plog.Errorf("error verifying flags, %v. See 'etcd --help'.", err)
		switch err {
		case errUnsetAdvertiseClientURLsFlag:
			plog.Errorf("When listening on specific address(es), this etcd process must advertise accessible url(s) to each connected client.")
		}
		os.Exit(1)
	}
	// Record the configuration information in the log
	setupLogging(cfg)

	var stopped <-chan struct{}

	plog.Infof("etcd Version: %s\n", version.Version)
	plog.Infof("Git SHA: %s\n", version.GitSHA)
	plog.Infof("Go Version: %s\n", runtime.Version())
	plog.Infof("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)

	GoMaxProcs := runtime.GOMAXPROCS(0)
	plog.Infof("setting maximum number of CPUs to %d, total number of available CPUs is %d", GoMaxProcs, runtime.NumCPU())

	// TODO: check whether fields are set instead of whether fields have default value
	if cfg.name != defaultName && cfg.initialCluster == initialClusterFromName(defaultName) {
		cfg.initialCluster = initialClusterFromName(cfg.name)
	}

	if cfg.dir == "" {
		cfg.dir = fmt.Sprintf("%v.etcd", cfg.name)
		plog.Warningf("no data-dir provided, using default data-dir ./%s", cfg.dir)
	}
	// Identify the data directory type (member, proxy, or empty)
	which := identifyDataDirOrDie(cfg.dir)
	if which != dirEmpty {
		plog.Noticef("the server is already initialized as %v before, starting as etcd %v...", which, which)
		switch which {
		case dirMember:
			stopped, err = startEtcd(cfg)
		case dirProxy:
			err = startProxy(cfg)
		default:
			plog.Panicf("unhandled dir type %v", which)
		}
	} else { // when the dir type is empty (new data --???--)
		shouldProxy := cfg.isProxy() // proxy mode?
		if !shouldProxy {
			stopped, err = startEtcd(cfg)
			if derr, ok := err.(*etcdserver.DiscoveryError); ok && derr.Err == discovery.ErrFullCluster {
				if cfg.shouldFallbackToProxy() {
					plog.Noticef("discovery cluster full, falling back to %s", fallbackFlagProxy)
					shouldProxy = true
				}
			}
		}
		if shouldProxy { // proxy mode
			// Do not join the etcd consensus cluster; purely forward requests as a proxy.
			err = startProxy(cfg)
		}
	}

	if err != nil {
		if derr, ok := err.(*etcdserver.DiscoveryError); ok {
			switch derr.Err {
			case discovery.ErrDuplicateID:
				plog.Errorf("member %q has previously registered with discovery service token (%s).", cfg.name, cfg.durl)
				plog.Errorf("But etcd could not find valid cluster configuration in the given data dir (%s).", cfg.dir)
				plog.Infof("Please check the given data dir path if the previous bootstrap succeeded")
				plog.Infof("or use a new discovery token if the previous bootstrap failed.")
			case discovery.ErrDuplicateName:
				plog.Errorf("member with duplicated name has registered with discovery service token(%s).", cfg.durl)
				plog.Errorf("please check (cURL) the discovery token for more information.")
				plog.Errorf("please do not reuse the discovery token and generate a new one to bootstrap the cluster.")
			default:
				plog.Errorf("%v", err)
				plog.Infof("discovery token %s was used, but failed to bootstrap the cluster.", cfg.durl)
				plog.Infof("please generate a new discovery token and try to bootstrap again.")
			}
			os.Exit(1)
		}

		if strings.Contains(err.Error(), "include") && strings.Contains(err.Error(), "--initial-cluster") {
			plog.Infof("%v", err)
			if cfg.initialCluster == initialClusterFromName(cfg.name) {
				plog.Infof("forgot to set --initial-cluster flag?")
			}
			if types.URLs(cfg.apurls).String() == defaultInitialAdvertisePeerURLs {
				plog.Infof("forgot to set --initial-advertise-peer-urls flag?")
			}
			if cfg.initialCluster == initialClusterFromName(cfg.name) && len(cfg.durl) == 0 {
				plog.Infof("if you want to use discovery service, please set --discovery flag.")
			}
			os.Exit(1)
		}
		plog.Fatalf("%v", err)
	}

	osutil.HandleInterrupts()

	if systemdutil.IsRunningSystemd() {
		// At this point, the initialization of etcd is done.
		// The listeners are listening on the TCP ports and ready
		// for accepting connections.
		// The http server is probably ready for serving incoming
		// connections. If it is not, the connection might be pending
		// for less than one second.
		err := daemon.SdNotify("READY=1")
		if err != nil {
			plog.Errorf("failed to notify systemd for readiness: %v", err)
			if err == daemon.SdNotifyNoSocket {
				plog.Errorf("forgot to set Type=notify in systemd service file?")
			}
		}
	}

	<-stopped
	osutil.Exit(0)
}
Example #26
0
// Watch is the primary listener for this resource and it outputs events.
func (obj *SvcRes) Watch(processChan chan event.Event) error {
	if obj.IsWatching() {
		return nil
	}
	obj.SetWatching(true)
	defer obj.SetWatching(false)
	cuid := obj.converger.Register()
	defer cuid.Unregister()

	var startup bool
	Startup := func(block bool) <-chan time.Time {
		if block {
			return nil // blocks forever
			//return make(chan time.Time) // blocks forever
		}
		return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
	}

	// obj.Name: svc name
	if !systemdUtil.IsRunningSystemd() {
		return fmt.Errorf("Systemd is not running.")
	}

	conn, err := systemd.NewSystemdConnection() // needs root access
	if err != nil {
		return errwrap.Wrapf(err, "Failed to connect to systemd")
	}
	defer conn.Close()

	// if we share the bus with others, we will get each others messages!!
	bus, err := util.SystemBusPrivateUsable() // don't share the bus connection!
	if err != nil {
		return errwrap.Wrapf(err, "Failed to connect to bus")
	}

	// XXX: will this detect new units?
	bus.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
		"type='signal',interface='org.freedesktop.systemd1.Manager',member='Reloading'")
	buschan := make(chan *dbus.Signal, 10)
	bus.Signal(buschan)

	var svc = fmt.Sprintf("%s.service", obj.Name) // systemd name
	var send = false                              // send event?
	var exit = false
	var invalid = false              // does the svc exist or not?
	var previous bool                // previous invalid value
	set := conn.NewSubscriptionSet() // no error should be returned
	subChannel, subErrors := set.Subscribe()
	var activeSet = false

	for {
		// XXX: watch for an event for new units...
		// XXX: detect if startup enabled/disabled value changes...

		previous = invalid
		invalid = false

		// firstly, does svc even exist or not?
		loadstate, err := conn.GetUnitProperty(svc, "LoadState")
		if err != nil {
			log.Printf("Failed to get property: %v", err)
			invalid = true
		}

		if !invalid {
			var notFound = (loadstate.Value == dbus.MakeVariant("not-found"))
			if notFound { // XXX: in the loop we'll handle changes better...
				log.Printf("Failed to find svc: %s", svc)
				invalid = true // XXX: ?
			}
		}

		if previous != invalid { // if invalid changed, send signal
			send = true
			obj.StateOK(false) // dirty
		}

		if invalid {
			log.Printf("Waiting for: %s", svc) // waiting for svc to appear...
			if activeSet {
				activeSet = false
				set.Remove(svc) // no return value should ever occur
			}

			obj.SetState(ResStateWatching) // reset
			select {
			case <-buschan: // XXX: wait for new units event to unstick
				cuid.SetConverged(false)
				// loop so that we can see the changed invalid signal
				log.Printf("Svc[%s]->DaemonReload()", svc)

			case event := <-obj.Events():
				cuid.SetConverged(false)
				if exit, send = obj.ReadEvent(&event); exit {
					return nil // exit
				}

			case <-cuid.ConvergedTimer():
				cuid.SetConverged(true) // converged!
				continue

			case <-Startup(startup):
				cuid.SetConverged(false)
				send = true
				obj.StateOK(false) // dirty
			}
		} else {
			if !activeSet {
				activeSet = true
				set.Add(svc) // no return value should ever occur
			}

			log.Printf("Watching: %s", svc) // attempting to watch...
			obj.SetState(ResStateWatching)  // reset
			select {
			case event := <-subChannel:

				log.Printf("Svc event: %+v", event)
				// NOTE: the value returned is a map for some reason...
				if event[svc] != nil {
					// event[svc].ActiveState is not nil

					switch event[svc].ActiveState {
					case "active":
						log.Printf("Svc[%s]->Started", svc)
					case "inactive":
						log.Printf("Svc[%s]->Stopped", svc)
					case "reloading":
						log.Printf("Svc[%s]->Reloading", svc)
					default:
						log.Fatalf("Unknown svc state: %s", event[svc].ActiveState)
					}
				} else {
					// svc stopped (and ActiveState is nil...)
					log.Printf("Svc[%s]->Stopped", svc)
				}
				send = true
				obj.StateOK(false) // dirty

			case err := <-subErrors:
				cuid.SetConverged(false)
				return errwrap.Wrapf(err, "Unknown %s[%s] error", obj.Kind(), obj.GetName())

			case event := <-obj.Events():
				cuid.SetConverged(false)
				if exit, send = obj.ReadEvent(&event); exit {
					return nil // exit
				}

			case <-cuid.ConvergedTimer():
				cuid.SetConverged(true) // converged!
				continue

			case <-Startup(startup):
				cuid.SetConverged(false)
				send = true
				obj.StateOK(false) // dirty
			}
		}

		if send {
			startup = true // startup finished
			send = false
			if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
				return err // we exit or bubble up a NACK...
			}
		}
	}
}
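The SubscriptionSet used above delivers per-unit state changes as maps of unit name to status, with a nil entry when the unit goes away. A trimmed-down sketch of watching a single unit's ActiveState, assuming the coreos/go-systemd dbus package; the unit name and log text are illustrative:

package main

import (
	"log"

	systemd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := systemd.New() // needs root access
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	svc := "nginx.service" // placeholder unit

	set := conn.NewSubscriptionSet()
	set.Add(svc)
	statusChan, errChan := set.Subscribe()

	for {
		select {
		case update := <-statusChan:
			// The map only contains units whose state changed.
			if status, ok := update[svc]; ok && status != nil {
				log.Printf("%s is now %s", svc, status.ActiveState)
			} else if ok {
				log.Printf("%s disappeared", svc)
			}
		case err := <-errChan:
			log.Fatalf("subscription error: %v", err)
		}
	}
}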
Example #27
0
// Service watcher
func (obj ServiceType) Watch(v *Vertex) {
	// obj.Name: service name

	if !util.IsRunningSystemd() {
		log.Fatal("Systemd is not running.")
	}

	conn, err := systemd.NewSystemdConnection() // needs root access
	if err != nil {
		log.Fatal("Failed to connect to systemd: ", err)
	}
	defer conn.Close()

	bus, err := dbus.SystemBus()
	if err != nil {
		log.Fatal("Failed to connect to bus: %v\n", err)
	}

	// XXX: will this detect new units?
	bus.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
		"type='signal',interface='org.freedesktop.systemd1.Manager',member='Reloading'")
	buschan := make(chan *dbus.Signal, 10)
	bus.Signal(buschan)

	var service = fmt.Sprintf("%v.service", obj.Name) // systemd name
	var send = false                                  // send event?
	var invalid = false                               // does the service exist or not?
	var previous bool                                 // previous invalid value
	set := conn.NewSubscriptionSet()                  // no error should be returned
	subChannel, subErrors := set.Subscribe()
	var activeSet = false

	for {
		// XXX: watch for an event for new units...
		// XXX: detect if startup enabled/disabled value changes...

		previous = invalid
		invalid = false

		// firstly, does service even exist or not?
		loadstate, err := conn.GetUnitProperty(service, "LoadState")
		if err != nil {
			log.Printf("Failed to get property: %v\n", err)
			invalid = true
		}

		if !invalid {
			var notFound = (loadstate.Value == dbus.MakeVariant("not-found"))
			if notFound { // XXX: in the loop we'll handle changes better...
				log.Printf("Failed to find service: %v\n", service)
				invalid = true // XXX ?
			}
		}

		if previous != invalid { // if invalid changed, send signal
			send = true
		}

		if invalid {
			log.Printf("Waiting for: %v\n", service) // waiting for service to appear...
			if activeSet {
				activeSet = false
				set.Remove(service) // no return value should ever occur
			}

			select {
			case <-buschan: // XXX wait for new units event to unstick
				// loop so that we can see the changed invalid signal
				log.Printf("Service[%v]->DaemonReload()\n", service)

			case exit := <-obj.Events:
				if exit == "exit" {
					return
				} else {
					log.Fatal("Unknown event: %v\n", exit)
				}
			}
		} else {
			if !activeSet {
				activeSet = true
				set.Add(service) // no return value should ever occur
			}

			log.Printf("Watching: %v\n", service) // attempting to watch...
			select {
			case event := <-subChannel:

				log.Printf("Service event: %+v\n", event)
				// NOTE: the value returned is a map for some reason...
				if event[service] != nil {
					// event[service].ActiveState is not nil
					if event[service].ActiveState == "active" {
						log.Printf("Service[%v]->Started()\n", service)
					} else if event[service].ActiveState == "inactive" {
						log.Printf("Service[%v]->Stopped!()\n", service)
					} else {
						log.Fatal("Unknown service state: ", event[service].ActiveState)
					}
				} else {
					// service stopped (and ActiveState is nil...)
					log.Printf("Service[%v]->Stopped\n", service)
				}
				send = true

			case err := <-subErrors:
				log.Println("error:", err)
				log.Fatal(err)
				v.Events <- fmt.Sprintf("service: %v", "error")

			case exit := <-obj.Events:
				if exit == "exit" {
					return
				} else {
					log.Fatal("Unknown event: %v\n", exit)
				}
			}
		}

		if send {
			send = false
			//log.Println("Sending event!")
			v.Events <- fmt.Sprintf("service(%v): %v", obj.Name, "event!") // FIXME: use struct
		}
	}
}
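Both watchers above first ask systemd whether the unit exists at all by reading its LoadState property and comparing it against the "not-found" variant. The same check as a standalone sketch, assuming the coreos/go-systemd dbus and godbus/dbus packages (the unit name is a placeholder):

package main

import (
	"fmt"
	"log"

	systemd "github.com/coreos/go-systemd/dbus"
	godbus "github.com/godbus/dbus"
)

// unitExists reports whether systemd can locate the given unit.
func unitExists(conn *systemd.Conn, unit string) (bool, error) {
	prop, err := conn.GetUnitProperty(unit, "LoadState")
	if err != nil {
		return false, err
	}
	// LoadState is "not-found" for units systemd does not know about.
	return prop.Value != godbus.MakeVariant("not-found"), nil
}

func main() {
	conn, err := systemd.New() // needs root access
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ok, err := unitExists(conn, "nginx.service") // placeholder unit
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("unit exists:", ok)
}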
func UseSystemd() bool {
	if !systemdUtil.IsRunningSystemd() {
		return false
	}

	connLock.Lock()
	defer connLock.Unlock()

	if theConn == nil {
		var err error
		theConn, err = systemdDbus.New()
		if err != nil {
			return false
		}

		// Assume we have StartTransientUnit
		hasStartTransientUnit = true

		// But if we get UnknownMethod error we don't
		if _, err := theConn.StartTransientUnit("test.scope", "invalid", nil, nil); err != nil {
			if dbusError, ok := err.(dbus.Error); ok {
				if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" {
					hasStartTransientUnit = false
					return hasStartTransientUnit
				}
			}
		}

		// Ensure the scope name we use doesn't exist. Use the Pid to
		// avoid collisions between multiple libcontainer users on a
		// single host.
		scope := fmt.Sprintf("libcontainer-%d-systemd-test-default-dependencies.scope", os.Getpid())
		testScopeExists := true
		for i := 0; i <= testScopeWait; i++ {
			if _, err := theConn.StopUnit(scope, "replace", nil); err != nil {
				if dbusError, ok := err.(dbus.Error); ok {
					if strings.Contains(dbusError.Name, "org.freedesktop.systemd1.NoSuchUnit") {
						testScopeExists = false
						break
					}
				}
			}
			time.Sleep(time.Millisecond)
		}

		// Bail out if we can't kill this scope without testing for DefaultDependencies
		if testScopeExists {
			return hasStartTransientUnit
		}

		// Assume StartTransientUnit on a scope allows DefaultDependencies
		hasTransientDefaultDependencies = true
		ddf := newProp("DefaultDependencies", false)
		if _, err := theConn.StartTransientUnit(scope, "replace", []systemdDbus.Property{ddf}, nil); err != nil {
			if dbusError, ok := err.(dbus.Error); ok {
				if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") {
					hasTransientDefaultDependencies = false
				}
			}
		}

		// Not critical because of the stop unit logic above.
		theConn.StopUnit(scope, "replace", nil)
	}
	return hasStartTransientUnit
}
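The feature probes above build transient units out of systemdDbus.Property values; newProp is referenced but not shown in this excerpt. A plausible definition, together with a minimal transient-scope probe, sketched under the assumption of the coreos/go-systemd dbus and godbus/dbus packages:

package main

import (
	"fmt"
	"log"
	"os"

	systemdDbus "github.com/coreos/go-systemd/dbus"
	"github.com/godbus/dbus"
)

// newProp wraps a name/value pair as a systemd D-Bus property.
func newProp(name string, value interface{}) systemdDbus.Property {
	return systemdDbus.Property{Name: name, Value: dbus.MakeVariant(value)}
}

func main() {
	conn, err := systemdDbus.New() // needs root access
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Use the pid so concurrent runs do not fight over the same scope name.
	scope := fmt.Sprintf("probe-%d.scope", os.Getpid())
	props := []systemdDbus.Property{newProp("DefaultDependencies", false)}

	if _, err := conn.StartTransientUnit(scope, "replace", props, nil); err != nil {
		// systemd may reject a scope with no PIDs attached; this probe only
		// cares about whether the property itself is accepted.
		log.Printf("StartTransientUnit: %v", err)
	}
	// Best-effort cleanup; the scope may already be gone.
	conn.StopUnit(scope, "replace", nil)
}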
func TestSocketProxyd(t *testing.T) {
	if !sd_util.IsRunningSystemd() {
		t.Skip("Systemd is not running on the host.")
	}

	socketProxydPath := "/lib/systemd/systemd-socket-proxyd"
	if _, err := os.Stat(socketProxydPath); os.IsNotExist(err) {
		t.Skip("systemd-socket-proxyd is not installed.")
	}

	ctx := testutils.NewRktRunCtx()
	defer ctx.Cleanup()

	iface, _, err := testutils.GetNonLoIfaceWithAddrs(netlink.FAMILY_V4)
	if err != nil {
		t.Fatalf("Error while getting non-lo host interface: %v\n", err)
	}
	if iface.Name == "" {
		t.Skipf("Cannot run test without non-lo host interface")
	}

	nt := networkTemplateT{
		Name:   "ptp0",
		Type:   "ptp",
		IpMasq: true,
		Master: iface.Name,
		Ipam: &ipamTemplateT{
			Type:   "host-local",
			Subnet: "192.168.0.0/24",
			Routes: []map[string]string{
				{"dst": "0.0.0.0/0"},
			},
		},
	}

	netDir := prepareTestNet(t, ctx, nt)
	defer os.RemoveAll(netDir)

	port, err := randomFreePort(t)
	if err != nil {
		t.Fatal(err)
	}

	echoImage := patchTestACI("rkt-inspect-echo.aci",
		"--exec=/echo-socket-activated",
		"--ports=test-port,protocol=tcp,port=80,socketActivated=true")
	defer os.Remove(echoImage)

	conn, err := sd_dbus.New()
	if err != nil {
		t.Fatal(err)
	}

	rktTestingEchoService := `
	[Unit]
	Description=Socket-activated echo server

	[Service]
	ExecStart=%s
	KillMode=process
	`

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	rnd := r.Int()

	// Write unit files directly to runtime system units directory
	// (/run/systemd/system) to avoid calling LinkUnitFiles - it is buggy in
	// systemd v219 as it does not work with absolute paths.
	unitsDir := "/run/systemd/system"
	containerIP := "192.168.0.101"

	cmd := fmt.Sprintf("%s --insecure-options=image --debug run --net=%s:IP=%s --port=test-port:%d --mds-register=false %s",
		ctx.Cmd(), nt.Name, containerIP, port, echoImage)

	serviceContent := fmt.Sprintf(rktTestingEchoService, cmd)
	serviceTargetBase := fmt.Sprintf("rkt-testing-socket-activation-%d.service", rnd)
	serviceTarget := filepath.Join(unitsDir, serviceTargetBase)

	if err := ioutil.WriteFile(serviceTarget, []byte(serviceContent), 0666); err != nil {
		t.Fatal(err)
	}
	defer os.Remove(serviceTarget)

	rktTestingEchoSocket := `
	[Unit]
	Description=Socket-activated netcat server socket

	[Socket]
	ListenStream=%d

	[Install]
	WantedBy=sockets.target
	`

	socketContent := fmt.Sprintf(rktTestingEchoSocket, port)
	socketTargetBase := fmt.Sprintf("proxy-to-rkt-testing-socket-activation-%d.socket", rnd)
	socketTarget := filepath.Join(unitsDir, socketTargetBase)

	if err := ioutil.WriteFile(socketTarget, []byte(socketContent), 0666); err != nil {
		t.Fatal(err)
	}
	defer os.Remove(socketTarget)

	proxyToRktTestingEchoService := `
	[Unit]
	Requires=%s
	After=%s

	[Service]
	ExecStart=%s %s:%d
	`

	proxyContent := fmt.Sprintf(proxyToRktTestingEchoService, serviceTargetBase, serviceTargetBase,
		socketProxydPath, containerIP, port)
	proxyContentBase := fmt.Sprintf("proxy-to-rkt-testing-socket-activation-%d.service", rnd)
	proxyTarget := filepath.Join(unitsDir, proxyContentBase)

	if err := ioutil.WriteFile(proxyTarget, []byte(proxyContent), 0666); err != nil {
		t.Fatal(err)
	}
	defer os.Remove(proxyTarget)

	reschan := make(chan string)
	doJob := func() {
		job := <-reschan
		if job != "done" {
			t.Fatal("Job is not done:", job)
		}
	}

	if _, err := conn.StartUnit(socketTargetBase, "replace", reschan); err != nil {
		t.Fatal(err)
	}
	doJob()

	defer func() {
		if _, err := conn.StopUnit(socketTargetBase, "replace", reschan); err != nil {
			t.Fatal(err)
		}
		doJob()

		if _, err := conn.StopUnit(serviceTargetBase, "replace", reschan); err != nil {
			t.Fatal(err)
		}
		doJob()

		if _, err := conn.StopUnit(proxyContentBase, "replace", reschan); err != nil {
			t.Fatal(err)
		}
		doJob()
	}()

	expected := "HELO\n"
	sockConn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", port))
	if err != nil {
		t.Fatal(err)
	}

	if _, err := fmt.Fprintf(sockConn, expected); err != nil {
		t.Fatal(err)
	}

	answer, err := bufio.NewReader(sockConn).ReadString('\n')
	if err != nil {
		t.Fatal(err)
	}

	if answer != expected {
		t.Fatalf("Expected %q, Got %q", expected, answer)
	}

	return
}
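randomFreePort is a helper from the surrounding test suite and is not shown here. A plausible reimplementation, sketched only for illustration, lets the kernel pick an unused port by listening on port 0 and then releasing it:

package main

import (
	"fmt"
	"log"
	"net"
)

// freePort asks the kernel for an unused TCP port by binding to port 0.
// There is an inherent race: the port can be taken again after Close.
func freePort() (int, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := freePort()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("free port:", port)
}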