Example #1
func main() {

	app := cli.NewApp()
	app.Name = Name
	app.Version = Version
	app.Author = "KUWASHIMA Yuichiro"
	app.Email = "*****@*****.**"
	app.Usage = "viagra [-d minutes] PID"

	app.Flags = GlobalFlags
	app.Commands = Commands
	app.CommandNotFound = CommandNotFound

	app.Action = func(c *cli.Context) {
		var err error
		var min int64
		min, err = strconv.ParseInt(c.String("duration"), 10, 64)
		if err != nil {
			println("Duration is not a number")
			return
		}

		if len(c.Args()) > 0 {
			var proc int
			var preNice int
			proc, err = strconv.Atoi(c.Args()[len(c.Args())-1])
			if err != nil {
				println("Proc no parse error")
				return
			}
			preNice, err = syscall.Getpriority(syscall.PRIO_PROCESS, proc)
			if err != nil {
				println("Unknown PID")
				return
			}
			err = syscall.Setpriority(syscall.PRIO_PROCESS, proc, 20)
			if err != nil {
				println("Setpriority failed")
				return
			}
			println("Power up!")
			timer := time.NewTimer(time.Duration(min) * time.Minute) // -d is given in minutes (see Usage)
			<-timer.C
			println("Time is up!")
			if err = syscall.Setpriority(syscall.PRIO_PROCESS, proc, preNice); err != nil {
				println("Failed to restore priority")
			}
		}
	}

	app.Run(os.Args)
}
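This example boosts the target PID and only restores the saved nice value after the timer fires, so a failure in between would leave the boost in place. Below is a minimal sketch of the same boost-and-restore pattern using defer; the helper name and its pid and d parameters are illustrative, not part of the original:

// Sketch: temporarily boost a process and restore its previous priority on
// any exit path. Mirrors the example above; pid and d are placeholders.
func boostTemporarily(pid int, d time.Duration) error {
	prev, err := syscall.Getpriority(syscall.PRIO_PROCESS, pid)
	if err != nil {
		return err // unknown PID
	}
	if err := syscall.Setpriority(syscall.PRIO_PROCESS, pid, 20); err != nil {
		return err
	}
	// Restore the saved priority no matter how this function returns.
	defer syscall.Setpriority(syscall.PRIO_PROCESS, pid, prev)
	time.Sleep(d)
	return nil
}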
Example #2
func scannerDaemon(rootDirectoryName string, cacheDirectoryName string,
	configuration *Configuration, fsChannel chan<- *FileSystem,
	logger *log.Logger) {
	runtime.LockOSThread()
	loweredPriority := false
	var oldFS FileSystem
	for {
		fs, err := scanFileSystem(rootDirectoryName, cacheDirectoryName,
			configuration, &oldFS)
		if err != nil {
			if err.Error() == "DisableScan" {
				disableScanAcknowledge <- true
				<-disableScanAcknowledge
				continue
			}
			logger.Printf("Error scanning: %s\n", err)
		} else {
			oldFS.InodeTable = fs.InodeTable
			oldFS.DirectoryInode = fs.DirectoryInode
			fsChannel <- fs
			runtime.GC()
			if !loweredPriority {
				syscall.Setpriority(syscall.PRIO_PROCESS, 0, 15)
				loweredPriority = true
			}
		}
	}
}
Example #3
func scannerDaemon(rootDirectoryName string, cacheDirectoryName string,
	ctx *fsrateio.FsRateContext, fsChannel chan *FileSystem) {
	if runtime.GOMAXPROCS(0) < 2 {
		runtime.GOMAXPROCS(2)
	}
	runtime.LockOSThread()
	loweredPriority := false
	var oldFS FileSystem
	for {
		fs, err := scanFileSystem(rootDirectoryName, cacheDirectoryName, ctx,
			&oldFS)
		if err != nil {
			fmt.Printf("Error scanning\t%s\n", err)
		} else {
			oldFS.RegularInodeTable = fs.RegularInodeTable
			oldFS.SymlinkInodeTable = fs.SymlinkInodeTable
			oldFS.InodeTable = fs.InodeTable
			fsChannel <- fs
			if !loweredPriority {
				syscall.Setpriority(syscall.PRIO_PROCESS, 0, 10)
				loweredPriority = true
			}
		}
	}
}
Example #4
func main() {
	log.SetFlags(0)
	log.SetPrefix("nice: ")

	flag.Usage = usage
	flag.Parse()

	xprio, err := syscall.Getpriority(syscall.PRIO_PROCESS, 0)
	ck(err)

	if flag.NArg() < 1 {
		fmt.Println(xprio)
		os.Exit(0)
	}

	err = syscall.Setpriority(syscall.PRIO_PROCESS, 0, *prio+xprio)
	ck(err)

	args := flag.Args()
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	ck(cmd.Run())
}
Example #5
func runDocker(cmd *exec.Cmd) {
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		SendError(err, "Failed to get docker piped stdout", nil)
		Logger.Println(err)
		Logger.Println("Cannotget docker piped stdout")
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		SendError(err, "Failed to get docker piped stdout", nil)
		Logger.Println(err)
		Logger.Println("Cannotget docker piped stdout")
	}

	//open file to log docker logs
	dockerLog := path.Join(LogDir, DockerLogFileName)
	f, err := os.OpenFile(dockerLog, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		SendError(err, "Failed to set docker log file", nil)
		Logger.Println(err)
		Logger.Println("Cannot set docker log to", dockerLog)
	} else {
		go io.Copy(f, stdout)
		go io.Copy(f, stderr)
		defer f.Close()
	}

	Logger.Println("Starting docker daemon:", cmd.Args)
	if err := cmd.Start(); err != nil {
		SendError(err, "Failed to start docker daemon", nil)
		Logger.Println("Cannot start docker daemon:", err)
	}
	DockerProcess = cmd.Process
	Logger.Printf("Docker daemon (PID:%d) has been started", DockerProcess.Pid)

	syscall.Setpriority(syscall.PRIO_PROCESS, DockerProcess.Pid, RenicePriority)

	exit_renice := make(chan int)

	go decreaseDockerChildProcessPriority(exit_renice)

	if err := cmd.Wait(); err != nil {
		Logger.Println("Docker daemon died with error:", err)
		out, tailErr := exec.Command("tail", "-n", "10", dockerLog).Output()
		if tailErr != nil {
			SendError(tailErr, "Failed to tail docker logs when docker terminates unexpectedly", nil)
			Logger.Printf("Failed to tail docker logs when docker terminates unexpectedly: %s", err)
			SendError(err, "Docker daemon terminates unexpectedly", nil)
		} else {
			extra := map[string]interface{}{"docker-log": string(out)}
			SendError(err, "Docker daemon terminates unexpectedly", extra)
			Logger.Printf("\n=======DOCKER LOGS BEGIN========\n%s=======DOCKER LOGS END========\n", string(out))
		}
	} else {
		Logger.Print("Docker daemon exited")
	}
	exit_renice <- 1
	DockerProcess = nil
}
Example #6
func init() {
	setrlimit(syscall.RLIMIT_AS, &syscall.Rlimit{RLIMIT_AS, RLIMIT_AS})
	setrlimit(syscall.RLIMIT_CORE, &syscall.Rlimit{RLIMIT_CORE, RLIMIT_CORE})
	setrlimit(syscall.RLIMIT_CPU, &syscall.Rlimit{RLIMIT_CPU, RLIMIT_CPU})
	setrlimit(syscall.RLIMIT_DATA, &syscall.Rlimit{RLIMIT_DATA, RLIMIT_DATA})
	setrlimit(syscall.RLIMIT_FSIZE, &syscall.Rlimit{RLIMIT_FSIZE, RLIMIT_FSIZE})
	setrlimit(syscall.RLIMIT_NOFILE, &syscall.Rlimit{RLIMIT_NOFILE, RLIMIT_NOFILE})
	setrlimit(syscall.RLIMIT_STACK, &syscall.Rlimit{RLIMIT_STACK, RLIMIT_STACK})
	runtime.GOMAXPROCS(1)
	if err := syscall.Setpriority(syscall.PRIO_PROCESS, 0, 19); err != nil {
		panic(err)
	}
}
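The init function above relies on a setrlimit helper that is not shown. A plausible shape for it, assuming it simply wraps syscall.Setrlimit and panics on failure, in the same way the Setpriority error is handled:

// Assumed helper (not from the original): apply a resource limit or panic.
func setrlimit(resource int, rlim *syscall.Rlimit) {
	if err := syscall.Setrlimit(resource, rlim); err != nil {
		panic(err)
	}
}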
Example #7
func decreaseDockerChildProcessPriority(exit_renice chan int) {
	for {
		select {
		case <-exit_renice:
			return
		default:
			out, err := exec.Command("ps", "axo", "pid,ppid,ni").Output()
			if err != nil {
				SendError(err, "Failed to run ps command", nil)
				time.Sleep(ReniceSleepTime * time.Second)
				continue
			}
			lines := strings.Split(string(out), "\n")
			ppids := []int{DockerProcess.Pid}
			for _, line := range lines {
				items := strings.Fields(line)
				if len(items) != 3 {
					continue
				}
				pid, err := strconv.Atoi(items[0])
				if err != nil {
					continue
				}
				ppid, err := strconv.Atoi(items[1])
				if err != nil {
					continue
				}
				ni, err := strconv.Atoi(items[2])
				if err != nil {
					continue
				}
				if ni != RenicePriority {
					continue
				}
				if pid == DockerProcess.Pid {
					continue
				}
				for _, _ppid := range ppids {
					if ppid == _ppid {
						syscall.Setpriority(syscall.PRIO_PROCESS, pid, 0)
						ppids = append(ppids, pid)
						break
					}
				}
			}
			time.Sleep(ReniceSleepTime * time.Second)
		}
	}
}
Example #8
func main() {

	syscall.Close(2)
	syscall.Open("/var/log/pam_script_ses_open.err", syscall.O_CREAT|syscall.O_APPEND|syscall.O_WRONLY, 0660)

	// Voodoo: Ensure that code runs in the same thread with the high priority.
	// <pwaller> I did this because you can see threads that don't have the
	// highest priority. Hopefully this helps?
	runtime.LockOSThread()

	me := os.Getpid()
	const HIGHEST_PRIORITY = -20
	err := syscall.Setpriority(syscall.PRIO_PROCESS, me, HIGHEST_PRIORITY)
	if err != nil {
		log.Println("Setpriority() ->", err)
	}

	start := time.Now()
	defer func() {
		// Include the time in milliseconds.
		timeMillis := time.Since(start).Seconds() * 1000
		s := fmt.Sprintf("$PAM_USER $PAM_SERVICE %f $PAM_RHOST", timeMillis)
		log.Println(os.ExpandEnv(s))
	}()

	if !isDataboxUser() {
		log.Println("Skip non-databox user")
		// skip non-databox login
		return
	}

	if pamUser == "" {
		Fatal("PAM_USER not set. Abort.")
	}

	go TimeoutAbort()

	verifyMountNamespace()

	protectProc()

	initCgroup()

	home := path.Join("/var/lib/cobalt/home/", pamUser)
	tmpDir := mktmpdir(home)
	initMounts(home, tmpDir)
}
Example #9
func main() {
	flag.Usage = usage
	flag.Parse()
	if flag.NArg() < 1 {
		usage()
	}

	which := syscall.PRIO_PROCESS
	if *gflag {
		which = syscall.PRIO_PGRP
	} else if *uflag {
		which = syscall.PRIO_USER
	}

	for _, arg := range flag.Args() {
		var n int
		var err error

		id := -1
		if *uflag {
			// Assign to the outer err (do not shadow it), so a failed
			// lookup is caught by the check below.
			var p *user.User
			p, err = user.Lookup(arg)
			if err == nil {
				n, err = strconv.Atoi(p.Uid)
			}
		} else {
			n, err = strconv.Atoi(arg)
		}
		if err == nil {
			id = n
		}

		if id < 0 {
			ek(fmt.Errorf("bad %q", arg))
			continue
		}

		prio, err := syscall.Getpriority(which, id)
		if ek(err) {
			continue
		}

		ek(syscall.Setpriority(which, id, prio+*inc))
	}

	os.Exit(status)
}
Example #10
func unshareAndBind(workingRootDir string) bool {
	if *unshare {
		// Re-exec myself using the unshare syscall while on a locked thread.
		// This hack is required because syscall.Unshare() operates on only one
		// thread in the process, and Go switches execution between threads
		// randomly. Thus, the namespace can be suddenly switched for running
		// code. This is an aspect of Go that was not well thought out.
		runtime.LockOSThread()
		if err := wsyscall.UnshareMountNamespace(); err != nil {
			fmt.Fprintf(os.Stderr, "Unable to unshare mount namesace: %s\n",
				err)
			return false
		}
		// Ensure the process is slightly niced. Since the Linux implementation
		// of setpriority(2) only applies to a thread, not the whole process
		// (contrary to the POSIX specification), do this in the pinned OS
		// thread so that the whole process (after exec) will be niced.
		syscall.Setpriority(syscall.PRIO_PROCESS, 0, 1)
		args := append(os.Args, "-unshare=false")
		if err := syscall.Exec(args[0], args, os.Environ()); err != nil {
			fmt.Fprintf(os.Stderr, "Unable to Exec:%s: %s\n", args[0], err)
			return false
		}
	}
	syscall.Unmount(workingRootDir, 0)
	err := wsyscall.Mount(*rootDir, workingRootDir, "", wsyscall.MS_BIND, "")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to bind mount %s to %s: %s\n",
			*rootDir, workingRootDir, err)
		return false
	}
	// Clean up -unshare=false so that a subsequent re-exec starts from scratch.
	args := make([]string, 0, len(os.Args)-1)
	for _, arg := range os.Args {
		if arg != "-unshare=false" {
			args = append(args, arg)
		}
	}
	os.Args = args
	return true
}
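The comment in this example is the key detail: on Linux, setpriority(2) affects only the calling thread, so the renice is done on a locked OS thread that then execs. A minimal sketch of that pinning pattern in isolation (the function name and error handling are assumptions, not from the original):

// Sketch: renice the current OS thread and keep the goroutine pinned to it,
// so later work (such as an exec) runs at the adjusted priority.
func nicePinnedThread() error {
	runtime.LockOSThread()
	// Deliberately no UnlockOSThread: the calling goroutine should keep
	// running on this reniced thread.
	return syscall.Setpriority(syscall.PRIO_PROCESS, 0, 1)
}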
Example #11
func main() {
	flag.Parse()
	var err error
	bytesPerSecond, blocksPerSecond, err := fsbench.GetReadSpeed(*rootDir)
	if err != nil {
		fmt.Printf("Error! %s\n", err)
		return
	}
	var configuration scanner.Configuration
	configuration.ScanFilter, err = filter.NewFilter(nil)
	if err != nil {
		fmt.Printf("Unable to create empty filter\t%s\n", err)
		os.Exit(1)
	}
	configuration.FsScanContext = fsrateio.NewReaderContext(bytesPerSecond,
		blocksPerSecond, 0)
	if *scanSpeed != 0 {
		configuration.FsScanContext.GetContext().SetSpeedPercent(*scanSpeed)
	}
	fmt.Println(configuration.FsScanContext)
	syscall.Setpriority(syscall.PRIO_PROCESS, 0, 10)
	var prev_fs *scanner.FileSystem
	for iter := 0; *numScans < 0 || iter < *numScans; iter++ {
		timeStart := time.Now()
		fs, err := scanner.ScanFileSystem(*rootDir, *objectCache,
			&configuration)
		timeStop := time.Now()
		if iter > 0 {
			fmt.Println()
		}
		if err != nil {
			fmt.Printf("Error! %s\n", err)
			return
		}
		fmt.Print(fs)
		fmt.Printf("Total scanned: %s,\t",
			format.FormatBytes(fs.TotalDataBytes))
		bytesPerSecond := uint64(float64(fs.TotalDataBytes) /
			timeStop.Sub(timeStart).Seconds())
		fmt.Printf("%s/s\n", format.FormatBytes(bytesPerSecond))
		if prev_fs != nil {
			if !scanner.CompareFileSystems(prev_fs, fs, os.Stdout) {
				fmt.Println("Scan results different from last run")
			}
		}
		runtime.GC() // Clean up before showing memory statistics.
		memstats.WriteMemoryStats(os.Stdout)
		if *debugFile != "" {
			file, err := os.Create(*debugFile)
			if err != nil {
				fmt.Printf("Error! %s\n", err)
				return
			}
			w := bufio.NewWriter(file)
			fs.DebugWrite(w, "")
			w.Flush()
			file.Close()
		}
		if *gobFile != "" {
			file, err := os.Create(*gobFile)
			if err != nil {
				fmt.Printf("Error creating: %s\t%s\n", *gobFile, err)
				os.Exit(1)
			}
			encoder := gob.NewEncoder(file)
			encoderStartTime := time.Now()
			encoder.Encode(fs)
			fmt.Printf("Encoder time: %s\n", time.Since(encoderStartTime))
			file.Close()
		}
		prev_fs = fs
		time.Sleep(time.Duration(*interval) * time.Second)
	}
}
Example #12
func lowerProcessPrio() {
	syscall.Setpriority(syscall.PRIO_PROCESS, 0, 19)
}
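lowerProcessPrio discards the Setpriority error. A checked variant is sketched below (an assumption, not from the original); with PRIO_PROCESS, a who of 0 means the calling process (on Linux, specifically the calling thread), and 19 is the lowest scheduling priority:

// Assumed variant of the helper above that surfaces the error.
func lowerProcessPrioChecked() error {
	if err := syscall.Setpriority(syscall.PRIO_PROCESS, 0, 19); err != nil {
		return fmt.Errorf("setpriority: %w", err)
	}
	return nil
}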
Example #13
func main() {
	dockerBinPath := path.Join(DockerDir, DockerBinaryName)
	dockerNewBinPath := path.Join(DockerDir, DockerNewBinaryName)
	dockerNewBinSigPath := path.Join(DockerDir, DockerNewBinarySigName)
	configFilePath := path.Join(TutumHome, ConfigFileName)
	keyFilePath := path.Join(TutumHome, KeyFileName)
	certFilePath := path.Join(TutumHome, CertFileName)
	caFilePath := path.Join(TutumHome, CAFileName)
	ngrokPath := path.Join(DockerDir, NgrokBinaryName)
	ngrokLogPath := path.Join(LogDir, NgrokLogName)
	ngrokConfPath := path.Join(TutumHome, NgrokConfName)

	_ = os.MkdirAll(TutumHome, 0755)
	_ = os.MkdirAll(DockerDir, 0755)
	_ = os.MkdirAll(LogDir, 0755)

	ParseFlag()
	SetLogger(path.Join(LogDir, TutumLogFileName))
	CreatePidFile(TutumPidFile)

	PrepareFiles(configFilePath, dockerBinPath, keyFilePath, certFilePath)
	SetConfigFile(configFilePath)

	regUrl := utils.JoinURL(Conf.TutumHost, RegEndpoint)
	if Conf.TutumUUID == "" {
		os.RemoveAll(keyFilePath)
		os.RemoveAll(certFilePath)
		os.RemoveAll(caFilePath)

		if !*FlagStandalone {
			Logger.Printf("Registering in Tutum via POST: %s...\n", regUrl)
			PostToTutum(regUrl, caFilePath, configFilePath)
		}
	}

	if *FlagStandalone {
		commonName := Conf.CertCommonName
		if commonName == "" {
			commonName = "*"
		}
		CreateCerts(keyFilePath, certFilePath, commonName)
	} else {
		CreateCerts(keyFilePath, certFilePath, Conf.CertCommonName)
	}

	if !*FlagStandalone {
		Logger.Printf("Registering in Tutum via PATCH: %s...\n",
			regUrl+Conf.TutumUUID)
		err := PatchToTutum(regUrl, caFilePath, certFilePath, configFilePath)
		if err != nil {
			Logger.Printf("TutumUUID (%s) is invalid, trying to allocate a new one...\n", Conf.TutumUUID)
			Conf.TutumUUID = ""
			SaveConf(configFilePath, Conf)

			os.RemoveAll(keyFilePath)
			os.RemoveAll(certFilePath)
			os.RemoveAll(caFilePath)

			Logger.Printf("Registering in Tutum via POST: %s...\n", regUrl)
			PostToTutum(regUrl, caFilePath, configFilePath)

			CreateCerts(keyFilePath, certFilePath, Conf.CertCommonName)

			Logger.Printf("Registering in Tutum via PATCH: %s...\n",
				regUrl+Conf.TutumUUID)
			if err = PatchToTutum(regUrl, caFilePath, certFilePath, configFilePath); err != nil {
				SendError(err, "Registion HTTP error", nil)
			}
		}
	}

	if err := SaveConf(configFilePath, Conf); err != nil {
		SendError(err, "Failed to save config to the conf file", nil)
		Logger.Fatalln(err)
	}

	DownloadDocker(DockerBinaryURL, dockerBinPath)
	HandleSig()
	syscall.Setpriority(syscall.PRIO_PROCESS, os.Getpid(), RenicePriority)

	Logger.Println("Starting docker daemon...")
	StartDocker(dockerBinPath, keyFilePath, certFilePath, caFilePath)

	if !*FlagStandalone {
		if *FlagSkipNatTunnel {
			Logger.Println("Skip NAT tunnel")
		} else {
			Logger.Println("Loading NAT tunnel module...")
			go NatTunnel(regUrl, ngrokPath, ngrokLogPath, ngrokConfPath, NodePublicIp)
		}
	}

	if !*FlagStandalone {
		Logger.Println("Verifying the registration with Tutum...")
		go VerifyRegistration(regUrl)
	}

	Logger.Println("Docker server started. Entering maintenance loop")
	for {
		time.Sleep(HeartBeatInterval * time.Second)
		UpdateDocker(dockerBinPath, dockerNewBinPath, dockerNewBinSigPath, keyFilePath, certFilePath, caFilePath)

		// try to restart docker daemon if it dies somehow
		if DockerProcess == nil {
			time.Sleep(HeartBeatInterval * time.Second)
			if DockerProcess == nil && !ScheduleToTerminateDocker {
				Logger.Println("Respawning docker daemon")
				StartDocker(dockerBinPath, keyFilePath, certFilePath, caFilePath)
			}
		}
	}
}
Example #14
func main() {
	dockerBinPath := path.Join(DockerDir, DockerBinaryName)
	dockerNewBinPath := path.Join(DockerDir, DockerNewBinaryName)
	dockerNewBinSigPath := path.Join(DockerDir, DockerNewBinarySigName)
	configFilePath := path.Join(AgentHome, ConfigFileName)
	keyFilePath := path.Join(AgentHome, KeyFileName)
	certFilePath := path.Join(AgentHome, CertFileName)
	caFilePath := path.Join(AgentHome, CAFileName)
	ngrokPath := path.Join(DockerDir, NgrokBinaryName)
	ngrokLogPath := path.Join(LogDir, NgrokLogName)
	ngrokConfPath := path.Join(AgentHome, NgrokConfName)

	_ = os.MkdirAll(AgentHome, 0755)
	_ = os.MkdirAll(DockerDir, 0755)
	_ = os.MkdirAll(LogDir, 0755)

	ParseFlag()

	if *FlagVersion {
		fmt.Println(VERSION)
		return
	}
	SetLogger(path.Join(LogDir, AgentLogFileName))
	Logger.Print("Running dockercloud-agent: version ", VERSION)
	CreatePidFile(AgentPidFile)

	PrepareFiles(configFilePath, dockerBinPath, keyFilePath, certFilePath)
	SetConfigFile(configFilePath)

	regUrl := utils.JoinURL(Conf.Host, RegEndpoint)
	if Conf.UUID == "" {
		os.RemoveAll(keyFilePath)
		os.RemoveAll(certFilePath)
		os.RemoveAll(caFilePath)

		if !*FlagStandalone {
			Logger.Printf("Registering in Docker Cloud via POST: %s", regUrl)
			RegPost(regUrl, caFilePath, configFilePath)
		}
	}

	if *FlagStandalone {
		commonName := Conf.CertCommonName
		if commonName == "" {
			commonName = "*"
		}
		CreateCerts(keyFilePath, certFilePath, commonName)
	} else {
		CreateCerts(keyFilePath, certFilePath, Conf.CertCommonName)
	}

	if utils.FileExist(dockerBinPath) {
		DockerClientVersion = GetDockerClientVersion(dockerBinPath)
	}

	if !*FlagStandalone {
		Logger.Printf("Registering in Docker Cloud via PATCH: %s",
			regUrl+Conf.UUID)
		err := RegPatch(regUrl, caFilePath, certFilePath, configFilePath)
		if err != nil {
			Logger.Printf("PATCH error %s :either UUID (%s) or Token is invalid", err.Error(), Conf.UUID)
			Conf.UUID = ""
			SaveConf(configFilePath, Conf)

			os.RemoveAll(keyFilePath)
			os.RemoveAll(certFilePath)
			os.RemoveAll(caFilePath)

			Logger.Printf("Registering in Docker Cloud via POST: %s", regUrl)
			RegPost(regUrl, caFilePath, configFilePath)

			CreateCerts(keyFilePath, certFilePath, Conf.CertCommonName)
			DownloadDocker(DockerBinaryURL, dockerBinPath)

			Logger.Printf("Registering in Docker Cloud via PATCH: %s",
				regUrl+Conf.UUID)
			if err = RegPatch(regUrl, caFilePath, certFilePath, configFilePath); err != nil {
				SendError(err, "Registion HTTP error", nil)
			}
		}
	}

	if err := SaveConf(configFilePath, Conf); err != nil {
		SendError(err, "Failed to save config to the conf file", nil)
		Logger.Fatalln(err)
	}

	DownloadDocker(DockerBinaryURL, dockerBinPath)
	CreateDockerSymlink(dockerBinPath, DockerSymbolicLink)
	HandleSig()
	syscall.Setpriority(syscall.PRIO_PROCESS, os.Getpid(), RenicePriority)

	Logger.Println("Initializing docker daemon")
	StartDocker(dockerBinPath, keyFilePath, certFilePath, caFilePath)

	if !*FlagStandalone {
		if *FlagSkipNatTunnel {
			Logger.Println("Skip NAT tunnel")
		} else {
			Logger.Println("Loading NAT tunnel module")
			go NatTunnel(regUrl, ngrokPath, ngrokLogPath, ngrokConfPath, Conf.UUID)
		}
	}

	if !*FlagStandalone {
		Logger.Println("Verifying the registration with Docker Cloud")
		go VerifyRegistration(regUrl)
	}

	Logger.Println("Docker server started. Entering maintenance loop")
	for {
		time.Sleep(HeartBeatInterval * time.Second)
		UpdateDocker(dockerBinPath, dockerNewBinPath, dockerNewBinSigPath, keyFilePath, certFilePath, caFilePath)

		// try to restart docker daemon if it dies somehow
		if DockerProcess == nil {
			time.Sleep(HeartBeatInterval * time.Second)
			if DockerProcess == nil && !ScheduleToTerminateDocker {
				Logger.Println("Respawning docker daemon")
				StartDocker(dockerBinPath, keyFilePath, certFilePath, caFilePath)
			}
		}
	}
}
Example #15
func main() {
	flag.Parse()
	var err error
	bytesPerSecond, blocksPerSecond, err := fsbench.GetReadSpeed(*rootDir)
	if err != nil {
		fmt.Printf("Error! %s\n", err)
		return
	}
	ctx := fsrateio.NewContext(bytesPerSecond, blocksPerSecond)
	if *scanSpeed != 0 {
		ctx.SetSpeedPercent(*scanSpeed)
	}
	fmt.Println(ctx)
	syscall.Setpriority(syscall.PRIO_PROCESS, 0, 10)
	var prev_fs *scanner.FileSystem
	sleepDuration, _ := time.ParseDuration(fmt.Sprintf("%ds", *interval))
	for iter := 0; *numScans < 0 || iter < *numScans; iter++ {
		timeStart := time.Now()
		fs, err := scanner.ScanFileSystem(*rootDir, *objectCache, ctx)
		timeStop := time.Now()
		if iter > 0 {
			fmt.Println()
		}
		if err != nil {
			fmt.Printf("Error! %s\n", err)
			return
		}
		fmt.Print(fs)
		fmt.Printf("Total scanned: %s,\t",
			fsrateio.FormatBytes(fs.TotalDataBytes))
		bytesPerSecond := uint64(float64(fs.TotalDataBytes) /
			timeStop.Sub(timeStart).Seconds())
		fmt.Printf("%s/s\n", fsrateio.FormatBytes(bytesPerSecond))
		if prev_fs != nil {
			if !scanner.Compare(prev_fs, fs, os.Stdout) {
				fmt.Println("Scan results different from last run")
			}
		}
		runtime.GC() // Clean up before showing memory statistics.
		memstats.WriteMemoryStats(os.Stdout)
		if *debugFile != "" {
			file, err := os.Create(*debugFile)
			if err != nil {
				fmt.Printf("Error! %s\n", err)
				return
			}
			w := bufio.NewWriter(file)
			fs.DebugWrite(w, "")
			w.Flush()
			file.Close()
		}
		if *rpcFile != "" {
			file, err := os.Create(*rpcFile)
			if err != nil {
				fmt.Printf("Error creating: %s\t%s\n", *rpcFile, err)
				os.Exit(1)
			}
			encoder := gob.NewEncoder(file)
			encoder.Encode(fs)
			file.Close()
		}
		prev_fs = fs
		time.Sleep(sleepDuration)
	}
}
Example #16
// main
func main() {
	flag.Usage = usage
	flag.Parse()
	if *Debug {
		*Verbose = true
	}

	const Day = 24 * time.Hour

	// Check interval
	if *Interval < time.Second {
		log.Fatalf("Interval must be >= 1 Second")
	}
	if *Interval >= Day {
		if (*Interval % Day) != 0 {
			log.Fatalf("Interval %s isn't a whole number of days", *Interval)
		}
	} else {
		if (Day % *Interval) != 0 {
			log.Fatalf("Interval %s doesn't divide a day exactly", *Interval)
		}
	}

	// Make output directory
	err := os.MkdirAll(*LogDirectory, 0750)
	if err != nil {
		log.Fatalf("Failed to make log directory %q: %s", *LogDirectory, err)
	}

	if *Cpus <= 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	} else {
		runtime.GOMAXPROCS(*Cpus)
	}

	if *UseSyslog {
		w, err := syslog.New(syslog.LOG_INFO, BaseName)
		if err != nil {
			log.Fatalf("Failed to start syslog: %s", err)
		}
		log.SetFlags(0)
		log.SetOutput(w)
	}

	// Setup profiling if desired
	if *CpuProfile != "" {
		log.Printf("Starting cpu profiler on %q", *CpuProfile)
		f, err := os.Create(*CpuProfile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	// Set priority
	if err := syscall.Setpriority(syscall.PRIO_PGRP, 0, *Priority); err != nil {
		log.Printf("Failed to set priority %d: %v", *Priority, err)
	}

	a := NewAccounting()
	config := func(Group int, IpVersion byte, Direction IpDirection, MaskBits int) {
		if Group > 0 {
			log.Printf("Monitoring NFLog multicast group %d for IPv%d %s mask /%d", Group, IpVersion, Direction, MaskBits)
			NewNfLog(Group, IpVersion, Direction, MaskBits, a)
		}
	}

	config(*IPv4DestGroup, 4, IpDest, *IPv4PrefixLength)
	config(*IPv4SourceGroup, 4, IpSource, *IPv4PrefixLength)
	config(*IPv6DestGroup, 6, IpDest, *IPv6PrefixLength)
	config(*IPv6SourceGroup, 6, IpSource, *IPv6PrefixLength)

	if nflogs.Count() == 0 {
		log.Fatal("Not monitoring any groups - exiting")
	}

	// Loop forever accounting stuff
	a.Start()

	// Exit neatly on interrupt
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT)
	signal.Notify(ch, syscall.SIGTERM)
	signal.Notify(ch, syscall.SIGQUIT)
	s := <-ch
	log.Printf("%s received - shutting down", s)
	if *MemProfile != "" {
		log.Printf("Writing memory profile %q\n", *MemProfile)
		f, err := os.Create(*MemProfile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.WriteHeapProfile(f)
		f.Close()
		return
	}
	a.Stop()
	nflogs.Stop()
	log.Printf("Exit")
}