func TestFSGID(t *testing.T) { gid, err := getfsgid() if err != nil { t.Fatal(err) } if int(gid) != os.Getegid() { t.Fatalf("getfsgid() returned unexpected results (%d!=%d)", gid, os.Getegid()) } t.Logf("fsgid = %v\n", gid) }
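getfsgid is not shown above; one common way to implement it on Linux (a sketch, assuming the syscall package is imported) relies on the fact that setfsgid(2) returns the previous filesystem gid, so calling it with an invalid gid of -1 reads the current value without changing it:

// Sketch of a getfsgid helper (an assumption, not the original implementation).
// setfsgid(2) returns the previous fsgid; -1 is invalid, so nothing is changed.
func getfsgid() (int32, error) {
	id, _, errno := syscall.Syscall(syscall.SYS_SETFSGID, uintptr(^uint32(0)), 0, 0)
	if errno != 0 {
		return -1, errno
	}
	return int32(id), nil
}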
// lookupGid resolves a group name to a numeric id; the current egid is returned on failure. func lookupGid(groupname string) uint32 { g, err := user.LookupGroup(groupname) if err != nil { log.Printf("Error resolving gid for %v: %v\n", groupname, err) return uint32(os.Getegid()) } gid, err := strconv.ParseUint(g.Gid, 10 /* base */, 32 /* bits */) if err != nil { log.Printf("Error resolving gid for %v: %v\n", groupname, err) return uint32(os.Getegid()) } return uint32(gid) }
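A hypothetical usage sketch for lookupGid (assuming os/exec and syscall are imported and the caller has the privileges needed to change group ids): start a child process whose gid is resolved from a group name, falling back to the current egid exactly as lookupGid does.

// runAsGroup runs a command with the gid resolved from a group name.
// Setting Credential fails with EPERM unless the parent may change ids.
func runAsGroup(group, name string, arg ...string) ([]byte, error) {
	cmd := exec.Command(name, arg...)
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Credential: &syscall.Credential{
			Uid: uint32(os.Geteuid()),
			Gid: lookupGid(group),
		},
	}
	return cmd.CombinedOutput()
}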
func TestChangingTheUserWithNobody(t *testing.T) { t.Parallel() //!TODO: find out if this test is possible at all. // If not, delete it from here. t.Skip("Setting the UID and GID is not supported for some reason") nobody, err := user.Lookup("nobody") if err != nil { if _, ok := err.(user.UnknownUserError); ok { t.Skip("This system does not have the nobody user." + " Skipping the test since it requires it") } else { t.Errorf("Error getting the nobody user: %s", err) } } tempDir, cleanup := testutils.GetTestFolder(t) defer cleanup() targetPidFile := filepath.Join(tempDir, "pidfile") cfg := getCfg(config.System{ User: nobody.Name, Pidfile: targetPidFile, }) err = SetupEnv(cfg) if err != nil { t.Errorf("There was an error when setting gid and uid to %s's. %s", nobody.Name, err) } currentEuid := os.Geteuid() uidOfNobody, err := strconv.Atoi(nobody.Uid) if err != nil { t.Errorf("Error converting UID [%s] to int: %s", nobody.Uid, err) } if uidOfNobody != currentEuid { t.Errorf("The current user id was not set to nobody's. "+ "Expected %d but it was %d", uidOfNobody, currentEuid) } currentEgid := os.Getegid() gidOfNobody, err := strconv.Atoi(nobody.Gid) if err != nil { t.Errorf("Error converting GID [%s] to int: %s", nobody.Gid, err) } if gidOfNobody != currentEgid { t.Errorf("The current group id was not set to nobody's. "+ "Expected %d but it was %d", gidOfNobody, currentEgid) } testutils.ShouldntFail(t, os.Remove(targetPidFile)) }
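For context on the skip above: before Go 1.16, syscall.Setuid and syscall.Setgid on Linux returned an error because the runtime could not apply the change to every thread, which is the most likely reason the test cannot pass. A sketch of the privilege drop the test expects SetupEnv to perform (an assumption; the real implementation is not shown):

// dropPrivileges is a hypothetical helper illustrating the expected drop.
// The gid must be changed first, while the process still has the privileges
// to do so; the uid is given up last.
func dropPrivileges(uid, gid int) error {
	if err := syscall.Setgid(gid); err != nil {
		return err
	}
	return syscall.Setuid(uid)
}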
func main() { var err error var l *p.Logger flag.Parse() rsrv.user = p.OsUsers.Uid2User(os.Geteuid()) rsrv.group = p.OsUsers.Gid2Group(os.Getegid()) rsrv.blksz = *blksize rsrv.blkchan = make(chan []byte, 2048) rsrv.zero = make([]byte, rsrv.blksz) root := new(RFile) err = root.Add(nil, "/", rsrv.user, nil, p.DMDIR|0777, root) if err != nil { goto error } l = p.NewLogger(*logsz) rsrv.srv = srv.NewFileSrv(&root.File) rsrv.srv.Dotu = true rsrv.srv.Debuglevel = *debug rsrv.srv.Start(rsrv.srv) rsrv.srv.Id = "ramfs" rsrv.srv.Log = l err = rsrv.srv.StartNetListener("tcp", *addr) if err != nil { goto error } return error: log.Println(fmt.Sprintf("Error: %s", err)) }
func CurrentProcessInfo() *ProcessInfo { var hasTty bool cwd, _ := os.Getwd() grp, _ := os.Getgroups() // no syscall.Getsid() wrapper on Linux? sid, _, _ := syscall.RawSyscall(syscall.SYS_GETSID, 0, 0, 0) if fh, err := os.Open("/dev/tty"); err == nil { hasTty = true fh.Close() } return &ProcessInfo{ Ppid: os.Getppid(), Pid: os.Getpid(), Uid: os.Getuid(), Euid: os.Geteuid(), Gid: os.Getgid(), Egid: os.Getegid(), Pgrp: syscall.Getpgrp(), Sid: int(sid), Dir: cwd, Groups: grp, Args: os.Args, Env: os.Environ(), HasTty: hasTty, } }
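The ProcessInfo type populated above is not shown; a definition consistent with the os and syscall calls used to fill it (field types inferred, so treat it as a sketch) would be:

// ProcessInfo as implied by CurrentProcessInfo; the real declaration may
// differ, but these field names and types match the assignments above.
type ProcessInfo struct {
	Ppid   int
	Pid    int
	Uid    int
	Euid   int
	Gid    int
	Egid   int
	Pgrp   int
	Sid    int
	Dir    string
	Groups []int
	Args   []string
	Env    []string
	HasTty bool
}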
// Expose CPU stats using KVM. func (c *statCollector) Update(ch chan<- prometheus.Metric) (err error) { if os.Geteuid() != 0 && os.Getegid() != 2 { return errors.New("caller should be either root user or kmem group to access /dev/mem") } var errbuf *C.char kd := C.kvm_open(nil, nil, nil, C.O_RDONLY, errbuf) if errbuf != nil { return errors.New("failed to call kvm_open()") } defer C.kvm_close(kd) // The cp_time variable is an array of CPUSTATES long integers -- in // the same format as the kern.cp_time sysctl. According to the // comments in sys/kern/kern_clock.c, the frequency of this timer will // be stathz (or hz, if stathz is zero). clockrate, err := getClockRate() if err != nil { return err } ncpus := C.kvm_getncpus(kd) for i := 0; i < int(ncpus); i++ { pcpu := C.kvm_getpcpu(kd, C.int(i)) cp_time := ((*C.struct_pcpu)(unsafe.Pointer(pcpu))).pc_cp_time c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(i), "mode": "user"}).Set(float64(cp_time[C.CP_USER]) / clockrate) c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(i), "mode": "nice"}).Set(float64(cp_time[C.CP_NICE]) / clockrate) c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(i), "mode": "system"}).Set(float64(cp_time[C.CP_SYS]) / clockrate) c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(i), "mode": "interrupt"}).Set(float64(cp_time[C.CP_INTR]) / clockrate) c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(i), "mode": "idle"}).Set(float64(cp_time[C.CP_IDLE]) / clockrate) } c.cpu.Collect(ch) return err }
// Expose CPU stats using KVM. func (c *statCollector) Update(ch chan<- prometheus.Metric) (err error) { if os.Geteuid() != 0 && os.Getegid() != 2 { return errors.New("caller should be either root user or kmem group to access /dev/mem") } var errbuf *C.char kd := C.kvm_open(nil, nil, nil, C.O_RDONLY, errbuf) if errbuf != nil { return errors.New("failed to call kvm_open()") } defer C.kvm_close(kd) ncpus := C.kvm_getncpus(kd) for i := 0; i < int(ncpus); i++ { pcpu := C.kvm_getpcpu(kd, C.int(i)) cp_time := ((*C.struct_pcpu)(unsafe.Pointer(pcpu))).pc_cp_time c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(i), "mode": "user"}).Set(float64(cp_time[C.CP_USER])) c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(i), "mode": "nice"}).Set(float64(cp_time[C.CP_NICE])) c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(i), "mode": "system"}).Set(float64(cp_time[C.CP_SYS])) c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(i), "mode": "interrupt"}).Set(float64(cp_time[C.CP_INTR])) c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(i), "mode": "idle"}).Set(float64(cp_time[C.CP_IDLE])) } c.cpu.Collect(ch) return err }
func main() { flagUsername := flag.String("username", "nobody", "username for the unprivileged child") isChild, r, w, _, err := privsep.MaybeBecomeChild() if err != nil { log.Fatalf("MaybeBecomeChild failed: %s", err) } who := "parent" if isChild { who = "child" } log.Printf("%s: pid=%d uid=%d euid=%d gid=%d egid=%d", who, os.Getpid(), os.Getuid(), os.Geteuid(), os.Getgid(), os.Getegid()) if isChild { child(r, w) return } if os.Getuid() != 0 { log.Print("Warning: this example only works when run as the root user") } _, r, w, err = privsep.CreateChild(*flagUsername, os.Args[0], nil, nil) if err != nil { log.Fatalf("CreateChild failed: %s", err) } parent(r, w) }
func main() { flagUsername := flag.String("username", "nobody", "username for the unprivileged child") isChild, _, _, files, err := privsep.MaybeBecomeChild() if err != nil { log.Fatalf("MaybeBecomeChild failed: %s", err) } who := "parent" if isChild { who = "child" } log.Printf("%s: pid=%d uid=%d euid=%d gid=%d egid=%d", who, os.Getpid(), os.Getuid(), os.Geteuid(), os.Getgid(), os.Getegid()) if isChild { if len(files) < 1 { log.Fatalf("no extra files: %v", files) } l, err := net.FileListener(files[0]) if err != nil { log.Fatalf("FileListener: %s", err) } child(l) return } if os.Getuid() != 0 { log.Print("Warning: this example only works when run as the root user") } addr := "localhost:1111" laddr, err := net.ResolveTCPAddr("tcp", addr) if err != nil { log.Fatalf("resolve %s: %s", addr, err) } l, err := net.ListenTCP("tcp", laddr) if err != nil { log.Fatalf("listen %s: %s", laddr, err) } sock, err := l.File() if err != nil { log.Fatalf("fd: %s", err) } proc, _, _, err := privsep.CreateChild(*flagUsername, os.Args[0], nil, []*os.File{sock}) if err != nil { log.Fatalf("CreateChild failed: %s", err) } sock.Close() // tidy up so child doesn't run forever defer proc.Kill() parent(laddr) }
func mkroot() (*srv.File, error) { root := new(srv.File) err := root.Add(nil, "/", p.OsUsers.Uid2User(os.Geteuid()), p.OsUsers.Gid2Group(os.Getegid()), p.DMDIR|0555, nil) if err != nil { return nil, err } return root, nil }
func TestGroupName(t *testing.T) { if _, err := osgroup.GroupName(0); err != nil { t.Fatal(err) } gid := os.Getegid() name, err := osgroup.GroupName(gid) if err != nil { t.Fatal(err) } t.Logf("Your group's ID is %d, its name is %q.", gid, name) }
// Returns permissions in symbolic format. func (i Info) Permissions() string { if i.Uid == os.Geteuid() { return encodePerms(uint8(i.Perm >> KeyPerm(16))) } else { fsgid, err := getfsgid() if (err == nil && i.Gid == int(fsgid)) || i.Gid == os.Getegid() { return encodePerms(uint8(i.Perm >> KeyPerm(8))) } } return encodePerms(uint8(i.Perm)) }
func StartCPUProfile() { filename := "cpu-" + strconv.Itoa(os.Getegid()) + ".pprof" f, err := os.Create(filename) if err != nil { glog.Fatal("record cpu profile failed: ", err) } pprof.StartCPUProfile(f) //time.Sleep(time.Duration(sec) * time.Second) fmt.Printf("create cpu profile %s \n", filename) }
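StartCPUProfile never stops the profiler itself (the sleep is commented out), and the profile is only finalized once pprof.StopCPUProfile runs; a minimal companion the caller would need before exiting:

// StopCPUProfile stops profiling and lets pprof finish writing the file
// created by StartCPUProfile above.
func StopCPUProfile() {
	pprof.StopCPUProfile()
}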
func setupElfAuxv(u models.Usercorn) ([]Elf64Auxv, error) { // set up AT_RANDOM var tmp [16]byte if _, err := rand.Read(tmp[:]); err != nil { return nil, err } randAddr, err := u.PushBytes(tmp[:]) if err != nil { return nil, err } // insert platform string platformAddr, err := u.PushBytes([]byte(u.Loader().Arch() + "\x00")) if err != nil { return nil, err } // main auxv table auxv := []Elf64Auxv{ // TODO: set/track a page size somewhere - on Arch.OS? {ELF_AT_PAGESZ, 4096}, {ELF_AT_BASE, u.InterpBase()}, {ELF_AT_FLAGS, 0}, {ELF_AT_ENTRY, uint64(u.BinEntry())}, {ELF_AT_UID, uint64(os.Getuid())}, {ELF_AT_EUID, uint64(os.Geteuid())}, {ELF_AT_GID, uint64(os.Getgid())}, {ELF_AT_EGID, uint64(os.Getegid())}, {ELF_AT_PLATFORM, platformAddr}, {ELF_AT_CLKTCK, 100}, // 100hz, totally fake {ELF_AT_RANDOM, randAddr}, {ELF_AT_NULL, 0}, } // add phdr information if present in binary phdrOff, _, phdrCount := u.Loader().Header() segments, _ := u.Loader().Segments() for _, s := range segments { if s.ContainsPhys(phdrOff) { phdrOff += s.Addr break } } phdrEnt := 56 if u.Bits() == 32 { phdrEnt = 56 // FIXME } if phdrOff > 0 { auxv = append(auxv, []Elf64Auxv{ {ELF_AT_PHDR, phdrOff}, {ELF_AT_PHENT, uint64(phdrEnt)}, {ELF_AT_PHNUM, uint64(phdrCount)}, }...) } return auxv, nil }
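The FIXME above reflects that a 32-bit Elf32_Phdr is 32 bytes rather than the 56 bytes of an Elf64_Phdr. The Elf64Auxv type is declared elsewhere in the loader; a shape consistent with how the entries are built here (an assumption) is simply a pair of 64-bit words:

// Assumed layout of one auxiliary-vector entry: a type tag and a value,
// with the table terminated by an ELF_AT_NULL entry as above.
type Elf64Auxv struct {
	Type  uint64
	Value uint64
}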
func main() { var err error flag.Parse() rsrv.user = p.OsUsers.Uid2User(os.Geteuid()) rsrv.group = p.OsUsers.Gid2Group(os.Getegid()) rsrv.blksz = *blksize rsrv.blkchan = make(chan []byte, 2048) rsrv.zero = make([]byte, rsrv.blksz) root := new(RFile) err = root.Add(nil, "/", rsrv.user, nil, p.DMDIR|0777, root) if err != nil { log.Println(fmt.Sprintf("Error: %s", err)) return } l := p.NewLogger(*logsz) rsrv.srv = srv.NewFileSrv(&root.File) rsrv.srv.Dotu = true rsrv.srv.Debuglevel = *debug rsrv.srv.Start(rsrv.srv) rsrv.srv.Id = "ramfs" rsrv.srv.Log = l cert := make([]tls.Certificate, 1) cert[0].Certificate = [][]byte{testCertificate} cert[0].PrivateKey = testPrivateKey ls, oerr := tls.Listen("tcp", *addr, &tls.Config{ Rand: rand.Reader, Certificates: cert, CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA}, InsecureSkipVerify: true, }) if oerr != nil { log.Println("can't listen:", oerr) return } err = rsrv.srv.StartListener(ls) if err != nil { log.Println(fmt.Sprintf("Error: %s", err)) return } return }
// NewTemplateResource creates a TemplateResource. func NewTemplateResource(path string, config Config) (*TemplateResource, error) { if config.StoreClient == nil { return nil, errors.New("A valid StoreClient is required.") } // Set the default uid and gid so we can determine if it was // unset from configuration. tc := &TemplateResourceConfig{TemplateResource{Uid: -1, Gid: -1}} log.Debug("Loading template resource from " + path) _, err := toml.DecodeFile(path, &tc) if err != nil { return nil, fmt.Errorf("Cannot process template resource %s - %s", path, err.Error()) } tr := tc.TemplateResource tr.keepStageFile = config.KeepStageFile tr.noop = config.Noop tr.storeClient = config.StoreClient tr.funcMap = newFuncMap() tr.store = memkv.New() tr.syncOnly = config.SyncOnly addFuncs(tr.funcMap, tr.store.FuncMap) if config.Prefix != "" { tr.Prefix = config.Prefix } tr.Prefix = filepath.Join("/", tr.Prefix) if tr.Src == "" { return nil, ErrEmptySrc } if tr.Uid == -1 { tr.Uid = os.Geteuid() } if tr.Gid == -1 { tr.Gid = os.Getegid() } tr.Src = filepath.Join(config.TemplateDir, tr.Src) return &tr, nil }
// RunDir returns the directory where generated binary files should be put. // In case a safe directory isn't found, one will be created. func RunDir() (rundir string, err os.Error) { tempdir := os.TempDir() euid := os.Geteuid() stat, err := os.Stat(tempdir) if err != nil || !stat.IsDirectory() || !canWrite(stat, euid, os.Getegid()) { return "", os.NewError("can't write on directory: " + tempdir) } hostname, err := os.Hostname() if err != nil { return "", os.NewError("can't get hostname: " + err.String()) } prefix := "gorun-" + hostname + "-" + strconv.Itoa(euid) suffix := runtime.GOOS + "_" + runtime.GOARCH prefixi := prefix var i uint for { rundir = filepath.Join(tempdir, prefixi, suffix) // A directory is only considered safe if the owner matches the // user running the script and its permissions prevent someone // else from writing on it. stat, err := os.Stat(rundir) if err == nil && stat.IsDirectory() && stat.Permission() == 0700 && stat.Uid == euid { return rundir, nil } eNoEntError := os.ENOENT if os.Getenv("GOOS") == "windows" { eNoEntError = os.NewError("The system cannot find the path specified.") } if perr, ok := err.(*os.PathError); ok && perr.Error.String() == eNoEntError.String() { err := os.MkdirAll(rundir, 0700) if err == nil { return rundir, nil } } i++ prefixi = prefix + "-" + strconv.Uitoa(i) } panic("unreachable") }
func startDaemon(np string) { // setup args for daemon call args := []string{ fmt.Sprintf("--name=%s", np), "--noconfig", fmt.Sprintf("--errlog=/var/log/npdaemon-%s.log", np), fmt.Sprintf("--output=/var/log/%s.log", np), fmt.Sprintf("--pidfile=/var/run/%s.pid", np), "--unsafe", "--", fmt.Sprintf("/usr/local/bin/%s", np), } // append extra args to args args = append(args, extraArgs...) // start process proc, err := os.StartProcess("/usr/bin/daemon", args, &os.ProcAttr{ Files: []*os.File{os.Stdin, os.Stdout, os.Stderr}, Sys: &syscall.SysProcAttr{ Credential: &syscall.Credential{ Uid: uint32(os.Geteuid()), Gid: uint32(os.Getegid()), }, }, }) if err != nil { fmt.Printf("os/exec returned an error: '%s'\n", err) os.Exit(1) } // wait for daemon to be ready _, err = proc.Wait() if err != nil { fmt.Printf("proc.Wait() failed. %s\n", err) os.Exit(1) } }
// RunDir returns the directory where generated binary files should be put. // In case a safe directory isn't found, one will be created. func RunDir() (rundir string, err error) { tempdir := os.TempDir() euid := os.Geteuid() stat, err := os.Stat(tempdir) if err != nil || !stat.IsDir() || !canWrite(stat, euid, os.Getegid()) { return "", errors.New("can't write on directory: " + tempdir) } hostname, err := os.Hostname() if err != nil { return "", errors.New("can't get hostname: " + err.Error()) } prefix := "gorun-" + hostname + "-" + strconv.Itoa(euid) suffix := runtime.GOOS + "_" + runtime.GOARCH prefixi := prefix var i uint64 for { rundir = filepath.Join(tempdir, prefixi, suffix) // A directory is only considered safe if the owner matches the // user running the script and its permissions prevent someone // else from writing on it. stat, err := os.Stat(rundir) if err == nil && stat.IsDir() && stat.Mode().Perm() == 0700 && sysStat(stat).Uid == uint32(euid) { return rundir, nil } if os.IsNotExist(err) { err := os.MkdirAll(rundir, 0700) if err == nil { return rundir, nil } } i++ prefixi = prefix + "-" + strconv.FormatUint(i, 10) } panic("unreachable") }
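RunDir leans on two helpers that are not shown, sysStat and canWrite; minimal sketches under the assumption of a Unix platform (the originals may differ):

// sysStat exposes the platform-specific stat data behind a FileInfo.
func sysStat(fi os.FileInfo) *syscall.Stat_t {
	return fi.Sys().(*syscall.Stat_t)
}

// canWrite reports whether a process with the given euid/egid may write
// to the file, based on its mode bits and ownership.
func canWrite(fi os.FileInfo, euid, egid int) bool {
	st := sysStat(fi)
	perm := fi.Mode().Perm()
	switch {
	case st.Uid == uint32(euid):
		return perm&0200 != 0 // owner write bit
	case st.Gid == uint32(egid):
		return perm&0020 != 0 // group write bit
	default:
		return perm&0002 != 0 // world write bit
	}
}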
func main() { globalFlagset.Parse(os.Args[1:]) args := globalFlagset.Args() if len(args) > 0 { fmt.Fprintln(os.Stderr, "Wrong parameters") os.Exit(1) } if globalFlags.PrintNoNewPrivs { r1, _, err := syscall.Syscall( syscall.SYS_PRCTL, uintptr(unix.PR_GET_NO_NEW_PRIVS), uintptr(0), uintptr(0), ) fmt.Printf("no_new_privs: %v err: %v\n", r1, err) } if globalFlags.CheckMknod != "" { /* format: c:5:2:name */ dev := strings.SplitN(globalFlags.CheckMknod, ":", 4) if len(dev) < 4 { fmt.Fprintln(os.Stderr, "Not enough parameters for mknod") os.Exit(1) } typ := dev[0] major, err := strconv.Atoi(dev[1]) if err != nil { fmt.Fprintln(os.Stderr, "Wrong major") os.Exit(1) } minor, err := strconv.Atoi(dev[2]) if err != nil { fmt.Fprintln(os.Stderr, "Wrong minor") os.Exit(1) } nodeName := dev[3] majorMinor := device.Makedev(uint(major), uint(minor)) mode := uint32(0777) switch typ { case "c": mode |= syscall.S_IFCHR case "b": mode |= syscall.S_IFBLK default: fmt.Fprintln(os.Stderr, "Wrong device node type") os.Exit(1) } if err := syscall.Mknod(nodeName, mode, int(majorMinor)); err != nil { fmt.Fprintf(os.Stderr, "mknod %s: fail: %v\n", nodeName, err) os.Exit(1) } else { fmt.Printf("mknod %s: succeed\n", nodeName) os.Exit(0) } } if globalFlags.SilentSigterm { terminateCh := make(chan os.Signal, 1) signal.Notify(terminateCh, syscall.SIGTERM) go func() { <-terminateCh os.Exit(0) }() } if globalFlags.PreSleep >= 0 { time.Sleep(time.Duration(globalFlags.PreSleep) * time.Second) } if globalFlags.ReadStdin { reader := bufio.NewReader(os.Stdin) fmt.Printf("Enter text:\n") text, _ := reader.ReadString('\n') fmt.Printf("Received text: %s\n", text) } if globalFlags.CheckTty { fd := int(os.Stdin.Fd()) var termios syscall.Termios _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TCGETS, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) if err == 0 { fmt.Printf("stdin is a terminal\n") } else { fmt.Printf("stdin is not a terminal\n") } } if globalFlags.CheckPath { envBytes, err := ioutil.ReadFile("/proc/self/environ") if err != nil { fmt.Fprintf(os.Stderr, "Error reading environment from \"/proc/self/environ\": %v\n", err) os.Exit(1) } for _, v := range bytes.Split(envBytes, []byte{0}) { if len(v) == 0 { continue } if strings.HasPrefix(string(v), "PATH=") { if strings.Contains(string(v), "\n") { fmt.Fprintf(os.Stderr, "Malformed PATH: found new line") os.Exit(1) } else { fmt.Printf("PATH is good\n") os.Exit(0) } } else { continue } } fmt.Fprintf(os.Stderr, "PATH not found") os.Exit(1) } if globalFlags.PrintExec { fmt.Fprintf(os.Stdout, "inspect execed as: %s\n", os.Args[0]) } if globalFlags.PrintMsg != "" { fmt.Fprintf(os.Stdout, "%s\n", globalFlags.PrintMsg) messageLoopStr := os.Getenv("MESSAGE_LOOP") messageLoop, err := strconv.Atoi(messageLoopStr) if err == nil { for i := 0; i < messageLoop; i++ { time.Sleep(time.Second) fmt.Fprintf(os.Stdout, "%s\n", globalFlags.PrintMsg) } } } if globalFlags.PrintEnv != "" { fmt.Fprintf(os.Stdout, "%s=%s\n", globalFlags.PrintEnv, os.Getenv(globalFlags.PrintEnv)) } if globalFlags.PrintCapsPid >= 0 { caps, err := capability.NewPid(globalFlags.PrintCapsPid) if err != nil { fmt.Fprintf(os.Stderr, "Cannot get caps: %v\n", err) os.Exit(1) } fmt.Printf("Capability set: effective: %s (%s)\n", caps.StringCap(capability.EFFECTIVE), globalFlags.SuffixMsg) fmt.Printf("Capability set: permitted: %s (%s)\n", caps.StringCap(capability.PERMITTED), globalFlags.SuffixMsg) fmt.Printf("Capability set: inheritable: %s (%s)\n", caps.StringCap(capability.INHERITABLE), globalFlags.SuffixMsg) 
fmt.Printf("Capability set: bounding: %s (%s)\n", caps.StringCap(capability.BOUNDING), globalFlags.SuffixMsg) if capStr := os.Getenv("CAPABILITY"); capStr != "" { capInt, err := strconv.Atoi(capStr) if err != nil { fmt.Fprintf(os.Stderr, "Environment variable $CAPABILITY is not a valid capability number: %v\n", err) os.Exit(1) } c := capability.Cap(capInt) if caps.Get(capability.BOUNDING, c) { fmt.Printf("%v=enabled (%s)\n", c.String(), globalFlags.SuffixMsg) } else { fmt.Printf("%v=disabled (%s)\n", c.String(), globalFlags.SuffixMsg) } } } if globalFlags.PrintUser { fmt.Printf("User: uid=%d euid=%d gid=%d egid=%d\n", os.Getuid(), os.Geteuid(), os.Getgid(), os.Getegid()) } if globalFlags.PrintGroups { gids, err := os.Getgroups() if err != nil { fmt.Fprintf(os.Stderr, "Error getting groups: %v\n", err) os.Exit(1) } // getgroups(2): It is unspecified whether the effective group ID of // the calling process is included in the returned list. (Thus, an // application should also call getegid(2) and add or remove the // resulting value.) egid := os.Getegid() if !in(gids, egid) { gids = append(gids, egid) sort.Ints(gids) } var b bytes.Buffer for _, gid := range gids { b.WriteString(fmt.Sprintf("%d ", gid)) } fmt.Printf("Groups: %s\n", b.String()) } if globalFlags.WriteFile { fileName := os.Getenv("FILE") if globalFlags.FileName != "" { fileName = globalFlags.FileName } content := os.Getenv("CONTENT") if globalFlags.Content != "" { content = globalFlags.Content } err := ioutil.WriteFile(fileName, []byte(content), 0600) if err != nil { fmt.Fprintf(os.Stderr, "Cannot write to file %q: %v\n", fileName, err) os.Exit(1) } } if globalFlags.ReadFile { fileName := os.Getenv("FILE") if globalFlags.FileName != "" { fileName = globalFlags.FileName } dat, err := ioutil.ReadFile(fileName) if err != nil { fmt.Fprintf(os.Stderr, "Cannot read file %q: %v\n", fileName, err) os.Exit(1) } fmt.Print("<<<") fmt.Print(string(dat)) fmt.Print(">>>\n") } if globalFlags.StatFile { fileName := os.Getenv("FILE") if globalFlags.FileName != "" { fileName = globalFlags.FileName } fi, err := os.Stat(fileName) if err != nil { fmt.Fprintf(os.Stderr, "Cannot stat file %q: %v\n", fileName, err) os.Exit(1) } fmt.Printf("%s: mode: %s\n", fileName, fi.Mode().String()) fmt.Printf("%s: user: %v\n", fileName, fi.Sys().(*syscall.Stat_t).Uid) fmt.Printf("%s: group: %v\n", fileName, fi.Sys().(*syscall.Stat_t).Gid) } if globalFlags.PrintCwd { wd, err := os.Getwd() if err != nil { fmt.Fprintf(os.Stderr, "Cannot get working directory: %v\n", err) os.Exit(1) } fmt.Printf("cwd: %s\n", wd) } if globalFlags.Sleep >= 0 { time.Sleep(time.Duration(globalFlags.Sleep) * time.Second) } if globalFlags.PrintMemoryLimit { memCgroupPath, err := cgroup.GetOwnCgroupPath("memory") if err != nil { fmt.Fprintf(os.Stderr, "Error getting own memory cgroup path: %v\n", err) os.Exit(1) } // we use /proc/1/root to escape the chroot we're in and read our // memory limit limitPath := filepath.Join("/proc/1/root/sys/fs/cgroup/memory", memCgroupPath, "memory.limit_in_bytes") limit, err := ioutil.ReadFile(limitPath) if err != nil { fmt.Fprintf(os.Stderr, "Can't read memory.limit_in_bytes\n") os.Exit(1) } fmt.Printf("Memory Limit: %s\n", string(limit)) } if globalFlags.PrintCPUQuota { cpuCgroupPath, err := cgroup.GetOwnCgroupPath("cpu") if err != nil { fmt.Fprintf(os.Stderr, "Error getting own cpu cgroup path: %v\n", err) os.Exit(1) } // we use /proc/1/root to escape the chroot we're in and read our // cpu quota periodPath := filepath.Join("/proc/1/root/sys/fs/cgroup/cpu", 
cpuCgroupPath, "cpu.cfs_period_us") periodBytes, err := ioutil.ReadFile(periodPath) if err != nil { fmt.Fprintf(os.Stderr, "Can't read cpu.cpu_period_us\n") os.Exit(1) } quotaPath := filepath.Join("/proc/1/root/sys/fs/cgroup/cpu", cpuCgroupPath, "cpu.cfs_quota_us") quotaBytes, err := ioutil.ReadFile(quotaPath) if err != nil { fmt.Fprintf(os.Stderr, "Can't read cpu.cpu_quota_us\n") os.Exit(1) } period, err := strconv.Atoi(strings.Trim(string(periodBytes), "\n")) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } quota, err := strconv.Atoi(strings.Trim(string(quotaBytes), "\n")) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } quotaMilliCores := quota * 1000 / period fmt.Printf("CPU Quota: %s\n", strconv.Itoa(quotaMilliCores)) } if globalFlags.CheckCgroupMounts { rootCgroupPath := "/proc/1/root/sys/fs/cgroup" testPaths := []string{rootCgroupPath} // test a couple of controllers if they're available if _, err := os.Stat(filepath.Join(rootCgroupPath, "memory")); err == nil { testPaths = append(testPaths, filepath.Join(rootCgroupPath, "memory")) } if _, err := os.Stat(filepath.Join(rootCgroupPath, "cpu")); err == nil { testPaths = append(testPaths, filepath.Join(rootCgroupPath, "cpu")) } for _, p := range testPaths { if err := syscall.Mkdir(filepath.Join(p, "test"), 0600); err == nil || err != syscall.EROFS { fmt.Fprintf(os.Stderr, "check-cgroups: FAIL (%v)", err) os.Exit(1) } } fmt.Println("check-cgroups: SUCCESS") } if globalFlags.PrintNetNS { ns, err := os.Readlink("/proc/self/ns/net") if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("NetNS: %s\n", ns) } if globalFlags.PrintIPv4 != "" { iface := globalFlags.PrintIPv4 ips, err := testutils.GetIPsv4(iface) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } if len(ips) == 0 { fmt.Fprintf(os.Stderr, "No IPv4 found for interface %+v:\n", iface) os.Exit(1) } fmt.Printf("%v IPv4: %s\n", iface, ips[0]) } if globalFlags.PrintDefaultGWv4 { gw, err := testutils.GetDefaultGWv4() if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("DefaultGWv4: %s\n", gw) } if globalFlags.PrintDefaultGWv6 { gw, err := testutils.GetDefaultGWv6() if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("DefaultGWv6: %s\n", gw) } if globalFlags.PrintGWv4 != "" { // TODO: GetGW not implemented yet iface := globalFlags.PrintGWv4 gw, err := testutils.GetGWv4(iface) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("%v GWv4: %s\n", iface, gw) } if globalFlags.PrintIPv6 != "" { // TODO } if globalFlags.PrintGWv6 != "" { // TODO } if globalFlags.PrintHostname { hostname, err := os.Hostname() if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("Hostname: %s\n", hostname) } if globalFlags.ServeHTTP != "" { err := testutils.HTTPServe(globalFlags.ServeHTTP, globalFlags.ServeHTTPTimeout) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } } if globalFlags.GetHTTP != "" { body, err := testutils.HTTPGet(globalFlags.GetHTTP) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("HTTP-Get received: %s\n", body) } if globalFlags.PrintIfaceCount { ifaceCount, err := testutils.GetIfaceCount() if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("Interface count: %d\n", ifaceCount) } if globalFlags.PrintAppAnnotation != "" { mdsUrl, appName := os.Getenv("AC_METADATA_URL"), os.Getenv("AC_APP_NAME") body, err := 
testutils.HTTPGet(fmt.Sprintf("%s/acMetadata/v1/apps/%s/annotations/%s", mdsUrl, appName, globalFlags.PrintAppAnnotation)) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("Annotation %s=%s\n", globalFlags.PrintAppAnnotation, body) } if globalFlags.CheckMountNS { appMountNS, err := os.Readlink("/proc/self/ns/mnt") if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } s1MountNS, err := os.Readlink("/proc/1/ns/mnt") if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } if appMountNS != s1MountNS { fmt.Println("check-mountns: DIFFERENT") } else { fmt.Println("check-mountns: IDENTICAL") os.Exit(1) } } os.Exit(globalFlags.ExitCode) }
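The in helper used by the PrintGroups branch of the function above (and by the later variant of this program) is not shown; a minimal version consistent with its use is:

// in reports whether x occurs in xs; used to avoid duplicating the egid
// when printing the group list.
func in(xs []int, x int) bool {
	for _, v := range xs {
		if v == x {
			return true
		}
	}
	return false
}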
func stats(request prago.Request) { if !AuthenticateSysadmin(GetUser(request)) { render403(request) return } stats := [][2]string{} stats = append(stats, [2]string{"App name", request.App().Data()["appName"].(string)}) stats = append(stats, [2]string{"App version", request.App().Data()["version"].(string)}) port := request.App().Data()["port"].(int) stats = append(stats, [2]string{"Port", fmt.Sprintf("%d", port)}) developmentModeStr := "false" if request.App().DevelopmentMode { developmentModeStr = "true" } stats = append(stats, [2]string{"Development mode", developmentModeStr}) stats = append(stats, [2]string{"Started at", request.App().StartedAt.Format(time.RFC3339)}) stats = append(stats, [2]string{"Go version", runtime.Version()}) stats = append(stats, [2]string{"Compiler", runtime.Compiler}) stats = append(stats, [2]string{"GOARCH", runtime.GOARCH}) stats = append(stats, [2]string{"GOOS", runtime.GOOS}) stats = append(stats, [2]string{"GOMAXPROCS", fmt.Sprintf("%d", runtime.GOMAXPROCS(-1))}) configStats := request.App().Config.Export() osStats := [][2]string{} osStats = append(osStats, [2]string{"EGID", fmt.Sprintf("%d", os.Getegid())}) osStats = append(osStats, [2]string{"EUID", fmt.Sprintf("%d", os.Geteuid())}) osStats = append(osStats, [2]string{"GID", fmt.Sprintf("%d", os.Getgid())}) osStats = append(osStats, [2]string{"Page size", fmt.Sprintf("%d", os.Getpagesize())}) osStats = append(osStats, [2]string{"PID", fmt.Sprintf("%d", os.Getpid())}) osStats = append(osStats, [2]string{"PPID", fmt.Sprintf("%d", os.Getppid())}) wd, _ := os.Getwd() osStats = append(osStats, [2]string{"Working directory", wd}) hostname, _ := os.Hostname() osStats = append(osStats, [2]string{"Hostname", hostname}) var mStats runtime.MemStats runtime.ReadMemStats(&mStats) memStats := [][2]string{} memStats = append(memStats, [2]string{"Alloc", fmt.Sprintf("%d", mStats.Alloc)}) memStats = append(memStats, [2]string{"TotalAlloc", fmt.Sprintf("%d", mStats.TotalAlloc)}) memStats = append(memStats, [2]string{"Sys", fmt.Sprintf("%d", mStats.Sys)}) memStats = append(memStats, [2]string{"Lookups", fmt.Sprintf("%d", mStats.Lookups)}) memStats = append(memStats, [2]string{"Mallocs", fmt.Sprintf("%d", mStats.Mallocs)}) memStats = append(memStats, [2]string{"Frees", fmt.Sprintf("%d", mStats.Frees)}) memStats = append(memStats, [2]string{"HeapAlloc", fmt.Sprintf("%d", mStats.HeapAlloc)}) memStats = append(memStats, [2]string{"HeapSys", fmt.Sprintf("%d", mStats.HeapSys)}) memStats = append(memStats, [2]string{"HeapIdle", fmt.Sprintf("%d", mStats.HeapIdle)}) memStats = append(memStats, [2]string{"HeapInuse", fmt.Sprintf("%d", mStats.HeapInuse)}) memStats = append(memStats, [2]string{"HeapReleased", fmt.Sprintf("%d", mStats.HeapReleased)}) memStats = append(memStats, [2]string{"HeapObjects", fmt.Sprintf("%d", mStats.HeapObjects)}) memStats = append(memStats, [2]string{"StackInuse", fmt.Sprintf("%d", mStats.StackInuse)}) memStats = append(memStats, [2]string{"StackSys", fmt.Sprintf("%d", mStats.StackSys)}) memStats = append(memStats, [2]string{"MSpanInuse", fmt.Sprintf("%d", mStats.MSpanInuse)}) memStats = append(memStats, [2]string{"MSpanSys", fmt.Sprintf("%d", mStats.MSpanSys)}) memStats = append(memStats, [2]string{"MCacheInuse", fmt.Sprintf("%d", mStats.MCacheInuse)}) memStats = append(memStats, [2]string{"MCacheSys", fmt.Sprintf("%d", mStats.MCacheSys)}) memStats = append(memStats, [2]string{"BuckHashSys", fmt.Sprintf("%d", mStats.BuckHashSys)}) memStats = append(memStats, [2]string{"GCSys", fmt.Sprintf("%d", 
mStats.GCSys)}) memStats = append(memStats, [2]string{"OtherSys", fmt.Sprintf("%d", mStats.OtherSys)}) memStats = append(memStats, [2]string{"NextGC", fmt.Sprintf("%d", mStats.NextGC)}) memStats = append(memStats, [2]string{"LastGC", fmt.Sprintf("%d", mStats.LastGC)}) memStats = append(memStats, [2]string{"PauseTotalNs", fmt.Sprintf("%d", mStats.PauseTotalNs)}) memStats = append(memStats, [2]string{"NumGC", fmt.Sprintf("%d", mStats.NumGC)}) environmentStats := [][2]string{} for _, e := range os.Environ() { pair := strings.Split(e, "=") environmentStats = append(environmentStats, [2]string{pair[0], pair[1]}) } request.SetData("stats", stats) request.SetData("configStats", configStats) request.SetData("osStats", osStats) request.SetData("memStats", memStats) request.SetData("environmentStats", environmentStats) request.SetData("admin_yield", "admin_stats") prago.Render(request, 200, "admin_layout") }
// Main runs the probe func probeMain() { var ( targets = []string{fmt.Sprintf("localhost:%d", xfer.AppPort)} token = flag.String("token", "default-token", "probe token") httpListen = flag.String("http.listen", "", "listen address for HTTP profiling and instrumentation server") publishInterval = flag.Duration("publish.interval", 3*time.Second, "publish (output) interval") spyInterval = flag.Duration("spy.interval", time.Second, "spy (scan) interval") spyProcs = flag.Bool("processes", true, "report processes (needs root)") procRoot = flag.String("proc.root", "/proc", "location of the proc filesystem") useConntrack = flag.Bool("conntrack", true, "also use conntrack to track connections") insecure = flag.Bool("insecure", false, "(SSL) explicitly allow \"insecure\" SSL connections and transfers") logPrefix = flag.String("log.prefix", "<probe>", "prefix for each log line") logLevel = flag.String("log.level", "info", "logging threshold level: debug|info|warn|error|fatal|panic") dockerEnabled = flag.Bool("docker", false, "collect Docker-related attributes for processes") dockerInterval = flag.Duration("docker.interval", 10*time.Second, "how often to update Docker attributes") dockerBridge = flag.String("docker.bridge", "docker0", "the docker bridge name") kubernetesEnabled = flag.Bool("kubernetes", false, "collect kubernetes-related attributes for containers, should only be enabled on the master node") kubernetesAPI = flag.String("kubernetes.api", "", "Address of kubernetes master api") kubernetesInterval = flag.Duration("kubernetes.interval", 10*time.Second, "how often to do a full resync of the kubernetes data") weaveRouterAddr = flag.String("weave.router.addr", "127.0.0.1:6784", "IP address & port of the Weave router") weaveDNSTarget = flag.String("weave.hostname", fmt.Sprintf("scope.weave.local:%d", xfer.AppPort), "Hostname to lookup in weaveDNS") ) flag.Parse() setLogLevel(*logLevel) setLogFormatter(*logPrefix) // Setup in memory metrics sink inm := metrics.NewInmemSink(time.Minute, 2*time.Minute) sig := metrics.DefaultInmemSignal(inm) defer sig.Stop() metrics.NewGlobal(metrics.DefaultConfig("scope-probe"), inm) defer log.Info("probe exiting") if *spyProcs && os.Getegid() != 0 { log.Warn("-process=true, but that requires root to find everything") } rand.Seed(time.Now().UnixNano()) probeID := strconv.FormatInt(rand.Int63(), 16) var ( hostName = hostname.Get() hostID = hostName // TODO(pb): we should sanitize the hostname ) log.Infof("probe starting, version %s, ID %s", version, probeID) go check() if len(flag.Args()) > 0 { targets = flag.Args() } log.Infof("publishing to: %s", strings.Join(targets, ", ")) probeConfig := appclient.ProbeConfig{ Token: *token, ProbeID: probeID, Insecure: *insecure, } clients := appclient.NewMultiAppClient(func(hostname, endpoint string) (appclient.AppClient, error) { return appclient.NewAppClient( probeConfig, hostname, endpoint, xfer.ControlHandlerFunc(controls.HandleControlRequest), ) }) defer clients.Stop() resolver := appclient.NewResolver(targets, net.LookupIP, clients.Set) defer resolver.Stop() processCache := process.NewCachingWalker(process.NewWalker(*procRoot)) scanner := procspy.NewConnectionScanner(processCache) endpointReporter := endpoint.NewReporter(hostID, hostName, *spyProcs, *useConntrack, scanner) defer endpointReporter.Stop() p := probe.New(*spyInterval, *publishInterval, clients) p.AddTicker(processCache) p.AddReporter( endpointReporter, host.NewReporter(hostID, hostName), process.NewReporter(processCache, hostID, 
process.GetDeltaTotalJiffies), ) p.AddTagger(probe.NewTopologyTagger(), host.NewTagger(hostID, probeID)) if *dockerEnabled { if err := report.AddLocalBridge(*dockerBridge); err != nil { log.Errorf("Docker: problem with bridge %s: %v", *dockerBridge, err) } if registry, err := docker.NewRegistry(*dockerInterval, clients); err == nil { defer registry.Stop() p.AddTagger(docker.NewTagger(registry, processCache)) p.AddReporter(docker.NewReporter(registry, hostID, p)) } else { log.Errorf("Docker: failed to start registry: %v", err) } } if *kubernetesEnabled { if client, err := kubernetes.NewClient(*kubernetesAPI, *kubernetesInterval); err == nil { defer client.Stop() p.AddReporter(kubernetes.NewReporter(client)) } else { log.Errorf("Kubernetes: failed to start client: %v", err) log.Errorf("Kubernetes: make sure to run Scope inside a POD with a service account or provide a valid kubernetes.api url") } } if *weaveRouterAddr != "" { client := weave.NewClient(sanitize.URL("http://", 6784, "")(*weaveRouterAddr)) weave := overlay.NewWeave(hostID, client) defer weave.Stop() p.AddTagger(weave) p.AddReporter(weave) dockerBridgeIP, err := getFirstAddressOf(*dockerBridge) if err != nil { log.Println("Error getting docker bridge ip:", err) } else { weaveDNSLookup := appclient.LookupUsing(dockerBridgeIP + ":53") weaveResolver := appclient.NewResolver([]string{*weaveDNSTarget}, weaveDNSLookup, clients.Set) defer weaveResolver.Stop() } } if *httpListen != "" { go func() { log.Infof("Profiling data being exported to %s", *httpListen) log.Infof("go tool pprof http://%s/debug/pprof/{profile,heap,block}", *httpListen) log.Infof("Profiling endpoint %s terminated: %v", *httpListen, http.ListenAndServe(*httpListen, nil)) }() } p.Start() defer p.Stop() common.SignalHandlerLoop() }
func main() { var ( targets = []string{fmt.Sprintf("localhost:%d", xfer.AppPort), fmt.Sprintf("scope.weave.local:%d", xfer.AppPort)} token = flag.String("token", "default-token", "probe token") httpListen = flag.String("http.listen", "", "listen address for HTTP profiling and instrumentation server") publishInterval = flag.Duration("publish.interval", 3*time.Second, "publish (output) interval") spyInterval = flag.Duration("spy.interval", time.Second, "spy (scan) interval") spyProcs = flag.Bool("processes", true, "report processes (needs root)") dockerEnabled = flag.Bool("docker", false, "collect Docker-related attributes for processes") dockerInterval = flag.Duration("docker.interval", 10*time.Second, "how often to update Docker attributes") dockerBridge = flag.String("docker.bridge", "docker0", "the docker bridge name") kubernetesEnabled = flag.Bool("kubernetes", false, "collect kubernetes-related attributes for containers, should only be enabled on the master node") kubernetesAPI = flag.String("kubernetes.api", "http://localhost:8080", "Address of kubernetes master api") kubernetesInterval = flag.Duration("kubernetes.interval", 10*time.Second, "how often to do a full resync of the kubernetes data") weaveRouterAddr = flag.String("weave.router.addr", "", "IP address or FQDN of the Weave router") procRoot = flag.String("proc.root", "/proc", "location of the proc filesystem") printVersion = flag.Bool("version", false, "print version number and exit") useConntrack = flag.Bool("conntrack", true, "also use conntrack to track connections") insecure = flag.Bool("insecure", false, "(SSL) explicitly allow \"insecure\" SSL connections and transfers") logPrefix = flag.String("log.prefix", "<probe>", "prefix for each log line") ) flag.Parse() if *printVersion { fmt.Println(version) return } // Setup in memory metrics sink inm := metrics.NewInmemSink(time.Minute, 2*time.Minute) sig := metrics.DefaultInmemSignal(inm) defer sig.Stop() metrics.NewGlobal(metrics.DefaultConfig("scope-probe"), inm) if !strings.HasSuffix(*logPrefix, " ") { *logPrefix += " " } log.SetPrefix(*logPrefix) defer log.Print("probe exiting") if *spyProcs && os.Getegid() != 0 { log.Printf("warning: -process=true, but that requires root to find everything") } rand.Seed(time.Now().UnixNano()) probeID := strconv.FormatInt(rand.Int63(), 16) var ( hostName = probe.Hostname() hostID = hostName // TODO(pb): we should sanitize the hostname ) log.Printf("probe starting, version %s, ID %s", version, probeID) addrs, err := net.InterfaceAddrs() if err != nil { log.Fatal(err) } localNets := report.Networks{} for _, addr := range addrs { // Not all addrs are IPNets. 
if ipNet, ok := addr.(*net.IPNet); ok { localNets = append(localNets, ipNet) } } if len(flag.Args()) > 0 { targets = flag.Args() } log.Printf("publishing to: %s", strings.Join(targets, ", ")) factory := func(hostname, endpoint string) (string, xfer.Publisher, error) { id, publisher, err := xfer.NewHTTPPublisher(hostname, endpoint, *token, probeID, *insecure) if err != nil { return "", nil, err } return id, xfer.NewBackgroundPublisher(publisher), nil } publishers := xfer.NewMultiPublisher(factory) defer publishers.Stop() clients := xfer.NewMultiAppClient(xfer.ProbeConfig{ Token: *token, ProbeID: probeID, Insecure: *insecure, }, xfer.ControlHandlerFunc(controls.HandleControlRequest), xfer.NewAppClient) defer clients.Stop() resolver := xfer.NewStaticResolver(targets, publishers.Set, clients.Set) defer resolver.Stop() endpointReporter := endpoint.NewReporter(hostID, hostName, *spyProcs, *useConntrack) defer endpointReporter.Stop() processCache := process.NewCachingWalker(process.NewWalker(*procRoot)) p := probe.New(*spyInterval, *publishInterval, publishers) p.AddTicker(processCache) p.AddReporter( endpointReporter, host.NewReporter(hostID, hostName, localNets), process.NewReporter(processCache, hostID), ) p.AddTagger(probe.NewTopologyTagger(), host.NewTagger(hostID, probeID)) if *dockerEnabled { if err := report.AddLocalBridge(*dockerBridge); err != nil { log.Printf("Docker: problem with bridge %s: %v", *dockerBridge, err) } if registry, err := docker.NewRegistry(*dockerInterval); err == nil { defer registry.Stop() p.AddTagger(docker.NewTagger(registry, processCache)) p.AddReporter(docker.NewReporter(registry, hostID, p)) } else { log.Printf("Docker: failed to start registry: %v", err) } } if *kubernetesEnabled { if client, err := kubernetes.NewClient(*kubernetesAPI, *kubernetesInterval); err == nil { defer client.Stop() p.AddReporter(kubernetes.NewReporter(client)) } else { log.Printf("Kubernetes: failed to start client: %v", err) } } if *weaveRouterAddr != "" { weave := overlay.NewWeave(hostID, *weaveRouterAddr) defer weave.Stop() p.AddTicker(weave) p.AddTagger(weave) p.AddReporter(weave) } if *httpListen != "" { go func() { log.Printf("Profiling data being exported to %s", *httpListen) log.Printf("go tool pprof http://%s/debug/pprof/{profile,heap,block}", *httpListen) log.Printf("Profiling endpoint %s terminated: %v", *httpListen, http.ListenAndServe(*httpListen, nil)) }() } p.Start() defer p.Stop() log.Printf("%s", <-interrupt()) }
package myfs import ( "crypto/sha1" "encoding/json" "os" "time" "bazil.org/fuse" "bazil.org/fuse/fs" "dss/util" ) // TODO: should eventually get rid of these... var uid = os.Geteuid() var gid = os.Getegid() type NamedNode interface { fs.Node getName() string setName(name string) getVid() []byte getLastVid() []byte setVid([]byte) isDir() bool isArchive() bool //setDirty(dirty bool) } // Generic information for files and directories type Node struct {
func (k *PosixKernel) Getegid() int { return os.Getegid() }
// Main runs the probe func probeMain(flags probeFlags) { setLogLevel(flags.logLevel) setLogFormatter(flags.logPrefix) // Setup in memory metrics sink inm := metrics.NewInmemSink(time.Minute, 2*time.Minute) sig := metrics.DefaultInmemSignal(inm) defer sig.Stop() metrics.NewGlobal(metrics.DefaultConfig("scope-probe"), inm) defer log.Info("probe exiting") if flags.spyProcs && os.Getegid() != 0 { log.Warn("--probe.process=true, but that requires root to find everything") } rand.Seed(time.Now().UnixNano()) var ( probeID = strconv.FormatInt(rand.Int63(), 16) hostName = hostname.Get() hostID = hostName // TODO(pb): we should sanitize the hostname ) log.Infof("probe starting, version %s, ID %s", version, probeID) log.Infof("command line: %v", os.Args) checkpointFlags := map[string]string{} if flags.kubernetesEnabled { checkpointFlags["kubernetes_enabled"] = "true" } go check(checkpointFlags) var targets = []string{} if flags.token != "" { // service mode if len(flag.Args()) == 0 { targets = append(targets, defaultServiceHost) } } else if !flags.noApp { targets = append(targets, fmt.Sprintf("localhost:%d", xfer.AppPort)) } targets = append(targets, flag.Args()...) log.Infof("publishing to: %s", strings.Join(targets, ", ")) probeConfig := appclient.ProbeConfig{ Token: flags.token, ProbeVersion: version, ProbeID: probeID, Insecure: flags.insecure, } clients := appclient.NewMultiAppClient(func(hostname, endpoint string) (appclient.AppClient, error) { return appclient.NewAppClient( probeConfig, hostname, endpoint, xfer.ControlHandlerFunc(controls.HandleControlRequest), ) }) defer clients.Stop() dnsLookupFn := net.LookupIP if flags.resolver != "" { dnsLookupFn = appclient.LookupUsing(flags.resolver) } resolver := appclient.NewResolver(targets, dnsLookupFn, clients.Set) defer resolver.Stop() p := probe.New(flags.spyInterval, flags.publishInterval, clients) hostReporter := host.NewReporter(hostID, hostName, probeID, version, clients) defer hostReporter.Stop() p.AddReporter(hostReporter) p.AddTagger(probe.NewTopologyTagger(), host.NewTagger(hostID)) var processCache *process.CachingWalker var scanner procspy.ConnectionScanner if flags.procEnabled { processCache = process.NewCachingWalker(process.NewWalker(flags.procRoot)) scanner = procspy.NewConnectionScanner(processCache) p.AddTicker(processCache) p.AddReporter(process.NewReporter(processCache, hostID, process.GetDeltaTotalJiffies)) } endpointReporter := endpoint.NewReporter(hostID, hostName, flags.spyProcs, flags.useConntrack, flags.procEnabled, scanner) defer endpointReporter.Stop() p.AddReporter(endpointReporter) if flags.dockerEnabled { // Don't add the bridge in Kubernetes since container IPs are global and // shouldn't be scoped if !flags.kubernetesEnabled { if err := report.AddLocalBridge(flags.dockerBridge); err != nil { log.Errorf("Docker: problem with bridge %s: %v", flags.dockerBridge, err) } } if registry, err := docker.NewRegistry(flags.dockerInterval, clients, true, hostID); err == nil { defer registry.Stop() if flags.procEnabled { p.AddTagger(docker.NewTagger(registry, processCache)) } p.AddReporter(docker.NewReporter(registry, hostID, probeID, p)) } else { log.Errorf("Docker: failed to start registry: %v", err) } } if flags.kubernetesEnabled { if client, err := kubernetes.NewClient(flags.kubernetesAPI, flags.kubernetesInterval); err == nil { defer client.Stop() reporter := kubernetes.NewReporter(client, clients, probeID, hostID, p) defer reporter.Stop() p.AddReporter(reporter) p.AddTagger(reporter) } else { log.Errorf("Kubernetes: failed 
to start client: %v", err) log.Errorf("Kubernetes: make sure to run Scope inside a POD with a service account or provide a valid kubernetes.api url") } } if flags.weaveAddr != "" { client := weave.NewClient(sanitize.URL("http://", 6784, "")(flags.weaveAddr)) weave := overlay.NewWeave(hostID, client) defer weave.Stop() p.AddTagger(weave) p.AddReporter(weave) dockerBridgeIP, err := network.GetFirstAddressOf(flags.dockerBridge) if err != nil { log.Println("Error getting docker bridge ip:", err) } else { weaveDNSLookup := appclient.LookupUsing(dockerBridgeIP + ":53") weaveResolver := appclient.NewResolver([]string{flags.weaveHostname}, weaveDNSLookup, clients.Set) defer weaveResolver.Stop() } } pluginRegistry, err := plugins.NewRegistry( flags.pluginsRoot, pluginAPIVersion, map[string]string{ "probe_id": probeID, "api_version": pluginAPIVersion, }, ) if err != nil { log.Errorf("plugins: problem loading: %v", err) } else { defer pluginRegistry.Close() p.AddReporter(pluginRegistry) } if flags.httpListen != "" { go func() { log.Infof("Profiling data being exported to %s", flags.httpListen) log.Infof("go tool pprof http://%s/debug/pprof/{profile,heap,block}", flags.httpListen) log.Infof("Profiling endpoint %s terminated: %v", flags.httpListen, http.ListenAndServe(flags.httpListen, nil)) }() } p.Start() defer p.Stop() common.SignalHandlerLoop() }
func Test_DefaultProvider_Getegid(t *testing.T) { uip := GetDefaultProvider() assert.Equal(t, os.Getegid(), uip.Getegid()) }
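The test only pins down that the default provider delegates to the os package; one plausible shape for it (hypothetical names, purely to illustrate the contract being tested) is:

// A sketch of a user-info provider that simply wraps the os package.
type userInfoProvider interface {
	Getegid() int
}

type osProvider struct{}

func (osProvider) Getegid() int { return os.Getegid() }

func GetDefaultProvider() userInfoProvider { return osProvider{} }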
func main() { var ( targets = []string{fmt.Sprintf("localhost:%d", xfer.AppPort), fmt.Sprintf("scope.weave.local:%d", xfer.AppPort)} token = flag.String("token", "default-token", "probe token") httpListen = flag.String("http.listen", "", "listen address for HTTP profiling and instrumentation server") publishInterval = flag.Duration("publish.interval", 3*time.Second, "publish (output) interval") spyInterval = flag.Duration("spy.interval", time.Second, "spy (scan) interval") prometheusEndpoint = flag.String("prometheus.endpoint", "/metrics", "Prometheus metrics exposition endpoint (requires -http.listen)") spyProcs = flag.Bool("processes", true, "report processes (needs root)") dockerEnabled = flag.Bool("docker", false, "collect Docker-related attributes for processes") dockerInterval = flag.Duration("docker.interval", 10*time.Second, "how often to update Docker attributes") dockerBridge = flag.String("docker.bridge", "docker0", "the docker bridge name") weaveRouterAddr = flag.String("weave.router.addr", "", "IP address or FQDN of the Weave router") procRoot = flag.String("proc.root", "/proc", "location of the proc filesystem") printVersion = flag.Bool("version", false, "print version number and exit") useConntrack = flag.Bool("conntrack", true, "also use conntrack to track connections") logPrefix = flag.String("log.prefix", "<probe>", "prefix for each log line") ) flag.Parse() if *printVersion { fmt.Println(version) return } if !strings.HasSuffix(*logPrefix, " ") { *logPrefix += " " } log.SetPrefix(*logPrefix) defer log.Print("probe exiting") if *spyProcs && os.Getegid() != 0 { log.Printf("warning: -process=true, but that requires root to find everything") } var ( hostName = hostname() hostID = hostName // TODO(pb): we should sanitize the hostname probeID = hostName // TODO(pb): does this need to be a random string instead? ) log.Printf("probe starting, version %s, ID %s", version, probeID) addrs, err := net.InterfaceAddrs() if err != nil { log.Fatal(err) } localNets := report.Networks{} for _, addr := range addrs { // Not all addrs are IPNets. 
if ipNet, ok := addr.(*net.IPNet); ok { localNets = append(localNets, ipNet) } } if len(flag.Args()) > 0 { targets = flag.Args() } log.Printf("publishing to: %s", strings.Join(targets, ", ")) factory := func(endpoint string) (string, xfer.Publisher, error) { id, publisher, err := xfer.NewHTTPPublisher(endpoint, *token, probeID) if err != nil { return "", nil, err } return id, xfer.NewBackgroundPublisher(publisher), nil } publishers := xfer.NewMultiPublisher(factory) defer publishers.Stop() resolver := newStaticResolver(targets, publishers.Set) defer resolver.Stop() endpointReporter := endpoint.NewReporter(hostID, hostName, *spyProcs, *useConntrack) defer endpointReporter.Stop() processCache := process.NewCachingWalker(process.NewWalker(*procRoot)) var ( tickers = []Ticker{processCache} reporters = []Reporter{endpointReporter, host.NewReporter(hostID, hostName, localNets), process.NewReporter(processCache, hostID)} taggers = []Tagger{newTopologyTagger(), host.NewTagger(hostID)} ) dockerTagger, dockerReporter, dockerRegistry := func() (*docker.Tagger, *docker.Reporter, docker.Registry) { if !*dockerEnabled { return nil, nil, nil } if err := report.AddLocalBridge(*dockerBridge); err != nil { log.Printf("Docker: problem with bridge %s: %v", *dockerBridge, err) return nil, nil, nil } registry, err := docker.NewRegistry(*dockerInterval) if err != nil { log.Printf("Docker: failed to start registry: %v", err) return nil, nil, nil } return docker.NewTagger(registry, processCache), docker.NewReporter(registry, hostID), registry }() if dockerTagger != nil { taggers = append(taggers, dockerTagger) } if dockerReporter != nil { reporters = append(reporters, dockerReporter) } if dockerRegistry != nil { defer dockerRegistry.Stop() } if *weaveRouterAddr != "" { weave := overlay.NewWeave(hostID, *weaveRouterAddr) tickers = append(tickers, weave) taggers = append(taggers, weave) reporters = append(reporters, weave) } if *httpListen != "" { go func() { log.Printf("Profiling data being exported to %s", *httpListen) log.Printf("go tool pprof http://%s/debug/pprof/{profile,heap,block}", *httpListen) if *prometheusEndpoint != "" { log.Printf("exposing Prometheus endpoint at %s%s", *httpListen, *prometheusEndpoint) http.Handle(*prometheusEndpoint, makePrometheusHandler()) } log.Printf("Profiling endpoint %s terminated: %v", *httpListen, http.ListenAndServe(*httpListen, nil)) }() } quit, done := make(chan struct{}), sync.WaitGroup{} done.Add(2) defer func() { done.Wait() }() // second, wait for the main loops to be killed defer close(quit) // first, kill the main loops var rpt syncReport rpt.swap(report.MakeReport()) go func() { defer done.Done() spyTick := time.Tick(*spyInterval) for { select { case <-spyTick: start := time.Now() for _, ticker := range tickers { if err := ticker.Tick(); err != nil { log.Printf("error doing ticker: %v", err) } } localReport := rpt.copy() localReport = localReport.Merge(doReport(reporters)) localReport = Apply(localReport, taggers) rpt.swap(localReport) if took := time.Since(start); took > *spyInterval { log.Printf("report generation took too long (%s)", took) } case <-quit: return } } }() go func() { defer done.Done() var ( pubTick = time.Tick(*publishInterval) p = xfer.NewReportPublisher(publishers) ) for { select { case <-pubTick: publishTicks.WithLabelValues().Add(1) localReport := rpt.swap(report.MakeReport()) localReport.Window = *publishInterval if err := p.Publish(localReport); err != nil { log.Printf("publish: %v", err) } case <-quit: return } } }() log.Printf("%s", 
<-interrupt()) }
// LiveProcs is similar to `man 1 fuser`; it takes a prefix and returns a map // of PIDs of any processes accessing files with the prefix. // A process is considered to be accessing a file if it has an open file // descriptor directly referencing the file, has an open Unix socket // referencing a file, or has a file mapped into memory. // This operation is inherently racy (both false positives and false negatives // are possible) and hence this should be considered an approximation only. // TODO(jonboulle): map filename(string) -> []int(pids) instead func LiveProcs(prefix string) (map[int][]string, error) { if os.Getegid() != 0 { return nil, ErrNotRoot } skts, err := unixSocketsWithPrefix(prefix) if err != nil { return nil, err } ps, err := ioutil.ReadDir(procfs) if err != nil { return nil, err } pids := make(map[int][]string) self := os.Getpid() for _, p := range ps { pid, err := strconv.Atoi(p.Name()) if err != nil { continue } if pid == self { continue } // Parse file descriptors pdir := filepath.Join(procfs, p.Name(), "fd") fds, err := ioutil.ReadDir(pdir) switch { case err == nil: case os.IsNotExist(err): // assume we're too late continue default: return nil, err } links := make([]string, len(fds)) for i, fd := range fds { links[i] = path.Join(pdir, fd.Name()) } for _, path := range fdsWithPrefix(links, prefix, skts) { if _, ok := pids[pid]; !ok { pids[pid] = make([]string, 0) } pids[pid] = append(pids[pid], path) } // Parse maps mfile := filepath.Join(procfs, p.Name(), "maps") mfh, err := os.Open(mfile) switch { case err == nil: case os.IsNotExist(err): // assume we're too late continue default: return nil, err } paths, err := mmapsWithPrefix(mfh, prefix) mfh.Close() if err != nil { return nil, err } for _, path := range paths { if _, ok := pids[pid]; !ok { pids[pid] = make([]string, 0) } pids[pid] = append(pids[pid], path) } } return pids, nil }
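A small usage sketch for LiveProcs (the helper name is hypothetical): log which processes still reference files under a directory before tearing it down. Note that LiveProcs refuses to run unless the effective gid is 0, per the check above.

// warnIfBusy logs every process that still holds files under dir.
func warnIfBusy(dir string) error {
	procs, err := LiveProcs(dir)
	if err != nil {
		return err
	}
	for pid, paths := range procs {
		log.Printf("pid %d still references: %v", pid, paths)
	}
	return nil
}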
func main() { globalFlagset.Parse(os.Args[1:]) args := globalFlagset.Args() if len(args) > 0 { fmt.Fprintln(os.Stderr, "Wrong parameters") os.Exit(1) } if globalFlags.PreSleep >= 0 { time.Sleep(time.Duration(globalFlags.PreSleep) * time.Second) } if globalFlags.ReadStdin { reader := bufio.NewReader(os.Stdin) fmt.Printf("Enter text:\n") text, _ := reader.ReadString('\n') fmt.Printf("Received text: %s\n", text) } if globalFlags.CheckTty { fd := int(os.Stdin.Fd()) var termios syscall.Termios _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TCGETS, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) if err == 0 { fmt.Printf("stdin is a terminal\n") } else { fmt.Printf("stdin is not a terminal\n") } } if globalFlags.PrintExec { fmt.Fprintf(os.Stdout, "inspect execed as: %s\n", os.Args[0]) } if globalFlags.PrintMsg != "" { fmt.Fprintf(os.Stdout, "%s\n", globalFlags.PrintMsg) messageLoopStr := os.Getenv("MESSAGE_LOOP") messageLoop, err := strconv.Atoi(messageLoopStr) if err == nil { for i := 0; i < messageLoop; i++ { time.Sleep(time.Second) fmt.Fprintf(os.Stdout, "%s\n", globalFlags.PrintMsg) } } } if globalFlags.PrintEnv != "" { fmt.Fprintf(os.Stdout, "%s=%s\n", globalFlags.PrintEnv, os.Getenv(globalFlags.PrintEnv)) } if globalFlags.PrintCapsPid >= 0 { caps, err := capability.NewPid(globalFlags.PrintCapsPid) if err != nil { fmt.Fprintf(os.Stderr, "Cannot get caps: %v\n", err) os.Exit(1) } fmt.Printf("Capability set: effective: %s\n", caps.StringCap(capability.EFFECTIVE)) fmt.Printf("Capability set: permitted: %s\n", caps.StringCap(capability.PERMITTED)) fmt.Printf("Capability set: inheritable: %s\n", caps.StringCap(capability.INHERITABLE)) fmt.Printf("Capability set: bounding: %s\n", caps.StringCap(capability.BOUNDING)) if capStr := os.Getenv("CAPABILITY"); capStr != "" { capInt, err := strconv.Atoi(capStr) if err != nil { fmt.Fprintf(os.Stderr, "Environment variable $CAPABILITY is not a valid capability number: %v\n", err) os.Exit(1) } c := capability.Cap(capInt) if caps.Get(capability.BOUNDING, c) { fmt.Printf("%v=enabled\n", c.String()) } else { fmt.Printf("%v=disabled\n", c.String()) } } } if globalFlags.PrintUser { fmt.Printf("User: uid=%d euid=%d gid=%d egid=%d\n", os.Getuid(), os.Geteuid(), os.Getgid(), os.Getegid()) } if globalFlags.PrintGroups { gids, err := os.Getgroups() if err != nil { fmt.Fprintf(os.Stderr, "Error getting groups: %v\n", err) os.Exit(1) } // getgroups(2): It is unspecified whether the effective group ID of // the calling process is included in the returned list. (Thus, an // application should also call getegid(2) and add or remove the // resulting value.) 
egid := os.Getegid() if !in(gids, egid) { gids = append(gids, egid) sort.Ints(gids) } var b bytes.Buffer for _, gid := range gids { b.WriteString(fmt.Sprintf("%d ", gid)) } fmt.Printf("Groups: %s\n", b.String()) } if globalFlags.WriteFile { fileName := os.Getenv("FILE") if globalFlags.FileName != "" { fileName = globalFlags.FileName } content := os.Getenv("CONTENT") if globalFlags.Content != "" { content = globalFlags.Content } err := ioutil.WriteFile(fileName, []byte(content), 0600) if err != nil { fmt.Fprintf(os.Stderr, "Cannot write to file %q: %v\n", fileName, err) os.Exit(1) } } if globalFlags.ReadFile { fileName := os.Getenv("FILE") if globalFlags.FileName != "" { fileName = globalFlags.FileName } dat, err := ioutil.ReadFile(fileName) if err != nil { fmt.Fprintf(os.Stderr, "Cannot read file %q: %v\n", fileName, err) os.Exit(1) } fmt.Print("<<<") fmt.Print(string(dat)) fmt.Print(">>>\n") } if globalFlags.CheckCwd != "" { wd, err := os.Getwd() if err != nil { fmt.Fprintf(os.Stderr, "Cannot get working directory: %v\n", err) os.Exit(1) } if wd != globalFlags.CheckCwd { fmt.Fprintf(os.Stderr, "Working directory: %q. Expected: %q.\n", wd, globalFlags.CheckCwd) os.Exit(1) } } if globalFlags.Sleep >= 0 { time.Sleep(time.Duration(globalFlags.Sleep) * time.Second) } if globalFlags.PrintMemoryLimit { memCgroupPath, err := cgroup.GetOwnCgroupPath("memory") if err != nil { fmt.Fprintf(os.Stderr, "Error getting own memory cgroup path: %v\n", err) os.Exit(1) } // we use /proc/1/root to escape the chroot we're in and read our // memory limit limitPath := filepath.Join("/proc/1/root/sys/fs/cgroup/memory", memCgroupPath, "memory.limit_in_bytes") limit, err := ioutil.ReadFile(limitPath) if err != nil { fmt.Fprintf(os.Stderr, "Can't read memory.limit_in_bytes\n") os.Exit(1) } fmt.Printf("Memory Limit: %s\n", string(limit)) } if globalFlags.PrintCPUQuota { cpuCgroupPath, err := cgroup.GetOwnCgroupPath("cpu") if err != nil { fmt.Fprintf(os.Stderr, "Error getting own cpu cgroup path: %v\n", err) os.Exit(1) } // we use /proc/1/root to escape the chroot we're in and read our // cpu quota periodPath := filepath.Join("/proc/1/root/sys/fs/cgroup/cpu", cpuCgroupPath, "cpu.cfs_period_us") periodBytes, err := ioutil.ReadFile(periodPath) if err != nil { fmt.Fprintf(os.Stderr, "Can't read cpu.cpu_period_us\n") os.Exit(1) } quotaPath := filepath.Join("/proc/1/root/sys/fs/cgroup/cpu", cpuCgroupPath, "cpu.cfs_quota_us") quotaBytes, err := ioutil.ReadFile(quotaPath) if err != nil { fmt.Fprintf(os.Stderr, "Can't read cpu.cpu_quota_us\n") os.Exit(1) } period, err := strconv.Atoi(strings.Trim(string(periodBytes), "\n")) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } quota, err := strconv.Atoi(strings.Trim(string(quotaBytes), "\n")) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } quotaMilliCores := quota * 1000 / period fmt.Printf("CPU Quota: %s\n", strconv.Itoa(quotaMilliCores)) } if globalFlags.CheckCgroupMounts { rootCgroupPath := "/proc/1/root/sys/fs/cgroup" testPaths := []string{rootCgroupPath} // test a couple of controllers if they're available if cgroup.IsIsolatorSupported("memory") { testPaths = append(testPaths, filepath.Join(rootCgroupPath, "memory")) } if cgroup.IsIsolatorSupported("cpu") { testPaths = append(testPaths, filepath.Join(rootCgroupPath, "cpu")) } for _, p := range testPaths { if err := syscall.Mkdir(filepath.Join(p, "test"), 0600); err == nil || err != syscall.EROFS { fmt.Println("check-cgroups: FAIL") os.Exit(1) } } fmt.Println("check-cgroups: SUCCESS") } if 
globalFlags.PrintNetNS { ns, err := os.Readlink("/proc/self/ns/net") if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("NetNS: %s\n", ns) } if globalFlags.PrintIPv4 != "" { iface := globalFlags.PrintIPv4 ips, err := testutils.GetIPsv4(iface) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("%v IPv4: %s\n", iface, ips[0]) } if globalFlags.PrintDefaultGWv4 { gw, err := testutils.GetDefaultGWv4() if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("DefaultGWv4: %s\n", gw) } if globalFlags.PrintDefaultGWv6 { gw, err := testutils.GetDefaultGWv6() if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("DefaultGWv6: %s\n", gw) } if globalFlags.PrintGWv4 != "" { // TODO: GetGW not implemented yet iface := globalFlags.PrintGWv4 gw, err := testutils.GetGWv4(iface) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("%v GWv4: %s\n", iface, gw) } if globalFlags.PrintIPv6 != "" { // TODO } if globalFlags.PrintGWv6 != "" { // TODO } if globalFlags.ServeHttp != "" { err := testutils.HttpServe(globalFlags.ServeHttp, globalFlags.ServeHttpTimeout) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } } if globalFlags.GetHttp != "" { body, err := testutils.HttpGet(globalFlags.GetHttp) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("HTTP-Get received: %s\n", body) } os.Exit(globalFlags.ExitCode) }