Example no. 1
func isTimestampValid(signed_on string) error {
	timestamp, err := time.Parse(time.RFC3339, signed_on)
	if err != nil {
		return err
	}

	current_time := time.Now()

	max_time_skew := current_time.Add(5 * time.Minute)
	max_time_offset := current_time.Add(-60 * time.Minute)

	log.Debug("Current:", current_time)
	log.Debug("Timestamp:", timestamp)
	log.Debug("Skew", max_time_skew)
	log.Debug("Offset", max_time_offset)

	if timestamp.Sub(max_time_skew) > 0 {
		err := "Timestamp max skew validation error"
		log.Warn(err)
		return errors.New(err)
	}

	if timestamp.Sub(max_time_offset) < 0 {
		err := "Timestamp max offset validation error"
		log.Warn(err)
		return errors.New(err)
	}

	return nil
}
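A minimal call-site sketch (same package assumed): an RFC 3339 string inside the accepted [-60m, +5m] window validates, an older one trips the max-offset check.

now := time.Now()

// A freshly produced timestamp falls inside the accepted window.
if err := isTimestampValid(now.Format(time.RFC3339)); err != nil {
	log.Warn(err)
}

// A two-hour-old timestamp trips the max-offset check.
stale := now.Add(-2 * time.Hour).Format(time.RFC3339)
if err := isTimestampValid(stale); err != nil {
	log.Warn(err) // "Timestamp max offset validation error"
}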
Example no. 2
// killCgroupProcesses freezes then iterates over all the processes inside the
// manager's cgroups sending a SIGKILL to each process then waiting for them to
// exit.
func killCgroupProcesses(m cgroups.Manager) error {
	var procs []*os.Process
	if err := m.Freeze(configs.Frozen); err != nil {
		logrus.Warn(err)
	}
	pids, err := m.GetPids()
	if err != nil {
		m.Freeze(configs.Thawed)
		return err
	}
	for _, pid := range pids {
		if p, err := os.FindProcess(pid); err == nil {
			procs = append(procs, p)
			if err := p.Kill(); err != nil {
				logrus.Warn(err)
			}
		}
	}
	if err := m.Freeze(configs.Thawed); err != nil {
		logrus.Warn(err)
	}
	for _, p := range procs {
		if _, err := p.Wait(); err != nil {
			logrus.Warn(err)
		}
	}
	return nil
}
Example no. 3
// removeEncryption tears down the security associations programmed between
// localIP and remoteIP and, for the first index set, the matching security
// policies.
func removeEncryption(localIP, remoteIP net.IP, em *encrMap) error {
	em.Lock()
	indices, ok := em.nodes[remoteIP.String()]
	em.Unlock()
	if !ok {
		return nil
	}
	for i, idxs := range indices {
		dir := reverse
		if i == 0 {
			dir = bidir
		}
		fSA, rSA, err := programSA(localIP, remoteIP, idxs, nil, dir, false)
		if err != nil {
			logrus.Warn(err)
		}
		if i != 0 {
			continue
		}
		err = programSP(fSA, rSA, false)
		if err != nil {
			logrus.Warn(err)
		}
	}
	return nil
}
Example no. 4
func getHandler(w http.ResponseWriter, r *http.Request) *toadError {
	if r.Method == "GET" {
		log.Warn("Receiving GET file request")
		// take the filename and ask the chain for its hash
		params, err := parseURL(r.URL.String())
		if err != nil {
			return &toadError{err, "error parsing URL", 400}
		}
		fileName := params["fileName"]

		log.WithField("=>", fileName).Warn("Looking for filename:")
		hash, err := tscore.GetInfos(fileName)
		if err != nil {
			return &toadError{err, "error getting namereg info", 400}
		}

		log.WithField("=>", hash).Warn("Found corresponding hash:")
		log.Warn("Getting it from IPFS...")
		contents, err := tscore.GetFile(fileName, hash)
		if err != nil {
			return &toadError{err, "error getting file", 400}
		}
		w.Write(contents) //outputfile

		if err := os.Remove(fileName); err != nil {
			return &toadError{err, "error removing file", 400}
		}

		log.Warn("Congratulations, you have successfully retreived you file from the toadserver")
	}
	return nil
}
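getHandler returns a *toadError instead of writing errors itself, which implies an adapter somewhere that converts it into a plain http.HandlerFunc. A hedged sketch of such an adapter, assuming toadError carries the error, message, and status code seen in the struct literals above (the field names here are guesses, not the real type):

// handleErrors adapts an error-returning handler to http.HandlerFunc
// (sketch; toadError's actual fields may be named differently).
func handleErrors(h func(http.ResponseWriter, *http.Request) *toadError) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if e := h(w, r); e != nil {
			log.Warn(e.msg, ": ", e.err)
			http.Error(w, e.msg, e.code)
		}
	}
}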
Example no. 5
func addUserData(ctx CommandContext, userdatas []string, serverID string) {
	for i := range userdatas {
		keyValue := strings.SplitN(userdatas[i], "=", 2)
		if len(keyValue) != 2 || keyValue[1] == "" {
			logrus.Warn("Bad format: ", userdatas[i])
			continue
		}
		var data []byte
		var err error

		// Set userdata: a leading '@' means the value is a file to read
		if keyValue[1][0] == '@' {
			data, err = ioutil.ReadFile(keyValue[1][1:])
			if err != nil {
				logrus.Warn("ReadFile: ", err)
				continue
			}
		} else {
			data = []byte(keyValue[1])
		}
		if err = ctx.API.PatchUserdata(serverID, keyValue[0], data); err != nil {
			logrus.Warn("PatchUserdata: ", err)
			continue
		}
	}
}
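For reference, a hedged sketch of the two value forms the loop accepts; ctx is assumed to come from the surrounding command, and the server ID and paths are illustrative:

userdatas := []string{
	"env=production",          // literal value, stored as-is
	"cloud-init=@/tmp/config", // leading '@': read the value from this file
}
addUserData(ctx, userdatas, "d67cbfa2")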
Example no. 6
// checkCgroupCPU reads the cpu information from the cpu cgroup mount point.
func checkCgroupCPU(quiet bool) cgroupCPUInfo {
	mountPoint, err := cgroups.FindCgroupMountpoint("cpu")
	if err != nil {
		if !quiet {
			logrus.Warn(err)
		}
		return cgroupCPUInfo{}
	}

	cpuShares := cgroupEnabled(mountPoint, "cpu.shares")
	if !quiet && !cpuShares {
		logrus.Warn("Your kernel does not support cgroup cpu shares")
	}

	cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us")
	if !quiet && !cpuCfsPeriod {
		logrus.Warn("Your kernel does not support cgroup cfs period")
	}

	cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us")
	if !quiet && !cpuCfsQuota {
		logrus.Warn("Your kernel does not support cgroup cfs quotas")
	}
	return cgroupCPUInfo{
		CPUShares:    cpuShares,
		CPUCfsPeriod: cpuCfsPeriod,
		CPUCfsQuota:  cpuCfsQuota,
	}
}
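The cgroupEnabled helper isn't shown in these examples; a plausible minimal implementation, sketched here as an assumption, simply checks that the named control file exists under the mount point:

// cgroupEnabled reports whether the named control file exists under the
// given cgroup mount point (a sketch of the helper assumed above).
func cgroupEnabled(mountPoint, name string) bool {
	_, err := os.Stat(path.Join(mountPoint, name))
	return err == nil
}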
Example no. 7
// Run runs the synchronization job on the specified repositories. The
// command's From option overrides any per-repository starting index.
//
// This function starts NumIndexProcs indexing goroutines and NumFetchProcs
// fetching goroutines, and won't return until the whole job is done or a
// fatal error occurs.
//
// Isolated errors (failure to retrieve a particular item, or failure to write
// to the backend) will not interrupt the job. Only the inability to list items
// from GitHub can interrupt it prematurely (such as in case of rate limiting).
func (s *syncCmd) Run(repos []*storage.Repository) {
	for _, r := range repos {
		for i := 0; i != s.options.NumIndexProcs; i++ {
			s.wgIndex.Add(1)
			go s.indexingProc(r)
		}

		for i := 0; i != s.options.NumFetchProcs; i++ {
			s.wgFetch.Add(1)
			go s.fetchingProc(r)
		}

		// The command-line `--from` option overrides the repository settings
		// defined in the configuration.
		from := s.options.From
		if from == 0 {
			from = r.RepositoryConfig.StartIndex
		}
		if err := s.fetchRepositoryItems(r, from, s.options.SleepPerPage, s.options.State); err != nil {
			log.Errorf("error syncing repository %s issues: %v", r.PrettyName(), err)
		}

		// When fetchRepositoryItems is done, all data to fetch has been queued.
		close(s.toFetch)

		// When the fetchingProc is done, all data to index has been queued.
		s.wgFetch.Wait()
		log.Warn("done fetching GitHub API data")
		close(s.toIndex)

		// Wait until indexing completes.
		s.wgIndex.Wait()
		log.Warn("done indexing documents in Elastic Search")
	}
}
Example no. 8
func (zr *ZookeeperReporter) Connect() (zk.State, error) {
	if zr.ZKConnection != nil {
		state := zr.ZKConnection.State()
		switch state {
		case zk.StateUnknown, zk.StateConnectedReadOnly, zk.StateExpired, zk.StateAuthFailed, zk.StateConnecting:
			{
				//Disconnect, and let Reconnection happen
				log.Warn("Zookeeper Connection is in BAD State [", state, "] Reconnect")
				zr.ZKConnection.Close()
			}
		case zk.StateConnected, zk.StateHasSession:
			{
				log.Debug("Zookeeper Connection of [", zr.ServiceName, "][", zr.InstanceID, "] connected(", state, "), nothing to do.")
				return state, nil
			}
		case zk.StateDisconnected:
			{
				log.Info("Reporter Connection is Disconnected -> Reconnection")
			}
		}
	}
	conn, _, err := zk.Connect(zr.ZKHosts, 10*time.Second)
	if err != nil {
		zr.ZKConnection = nil
		log.Warn("Unable to Connect to ZooKeeper (", err, ")")
		return zk.StateDisconnected, err
	}
	zr.ZKConnection = conn
	var zkLogger ZKDebugLogger
	zr.ZKConnection.SetLogger(zkLogger)
	state := zr.ZKConnection.State()
	return state, nil
}
Example no. 9
func checkCgroupCPU(quiet bool) *cgroupCPUInfo {
	info := &cgroupCPUInfo{}
	mountPoint, err := cgroups.FindCgroupMountpoint("cpu")
	if err != nil {
		if !quiet {
			logrus.Warn(err)
		}
		return info
	}

	info.CPUShares = cgroupEnabled(mountPoint, "cpu.shares")
	if !quiet && !info.CPUShares {
		logrus.Warn("Your kernel does not support cgroup cpu shares")
	}

	info.CPUCfsPeriod = cgroupEnabled(mountPoint, "cpu.cfs_period_us")
	if !quiet && !info.CPUCfsPeriod {
		logrus.Warn("Your kernel does not support cgroup cfs period")
	}

	info.CPUCfsQuota = cgroupEnabled(mountPoint, "cpu.cfs_quota_us")
	if !quiet && !info.CPUCfsQuota {
		logrus.Warn("Your kernel does not support cgroup cfs quotas")
	}
	return info
}
Example no. 10
func processTable(sess *r.Session, table string) {
	rows, err := r.Db(*env).Table(table).Run(sess)
	if err != nil {
		log.Fatal("Couldn't fetch rows for table ", table)
	}
	defer rows.Close()

	var doc map[string]interface{}
	var size uint64
	var id string
	var ok bool

	// WIP
	for rows.Next(&doc) {
		if size, ok = doc["size"].(uint64); !ok {
			if id, ok = doc["id"].(string); !ok {
				log.Warn("Found a document without ID! ", doc)
				continue
			}
			sizeTerm, err := r.Db(*env).Table(table).Get(id).
				CoerceTo("string").CoerceTo("binary").Count().Run(sess)
			if err != nil {
				log.Warn("Couldn't fetch the size of document ", id, ": ", err)
				continue
			}
			if err = sizeTerm.One(&size); err != nil {
				log.Warn("Couldn't compute the size of document ", id, ". It might've been deleted. ", err)
				continue
			}
		}
		fmt.Println(doc)
		return
	}
}
Example no. 11
func (sc *ServerConfig) root(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")

	logrus.WithFields(logrus.Fields{"client": sc.requestIp(req), "version": "root"}).Debugf("OK: %s", "/")

	answers := sc.answers()

	m := make(map[string]interface{})
	for _, k := range answers.Versions() {
		url, err := sc.router.Get("Version").URL("version", k)
		if err == nil {
			m[k] = url.String()
		} else {
			logrus.Warn("Error: ", err.Error())
		}
	}

	// If latest isn't in the list, pretend it is
	_, ok := m["latest"]
	if !ok {
		url, err := sc.router.Get("Version").URL("version", "latest")
		if err == nil {
			m["latest"] = (*url).String()
		} else {
			logrus.Warn("Error: ", err.Error())
		}
	}

	respondSuccess(w, req, m)
}
Example no. 12
func bridgeCleanup(config *networkConfiguration, logErr bool) {
	var err error

	bridgeName := config.BridgeName
	tableName := "bridge_nw_subnets"
	gwName := fmt.Sprintf("%s_gw0", bridgeName)
	gwIP := config.AddressIPv4.String()
	pfAnchor := fmt.Sprintf("_auto/docker/%s", bridgeName)
	tableAnchor := fmt.Sprintf("_auto/docker/%s", tableName)

	err = exec.Command("/usr/sbin/pfctl", "-a", pfAnchor, "-F", "all").Run()
	if err != nil && logErr {
		logrus.Warn("cannot flush firewall rules")
	}
	err = exec.Command("/usr/sbin/ifconfig", gwName, "unplumb").Run()
	if err != nil && logErr {
		logrus.Warn("cannot remove gateway interface")
	}
	err = exec.Command("/usr/sbin/dladm", "delete-vnic",
		"-t", gwName).Run()
	if err != nil && logErr {
		logrus.Warn("cannot delete vnic")
	}
	err = exec.Command("/usr/sbin/dladm", "delete-etherstub",
		"-t", config.BridgeNameInternal).Run()
	if err != nil && logErr {
		logrus.Warn("cannot delete etherstub")
	}
	err = exec.Command("/usr/sbin/pfctl", "-a", tableAnchor, "-t", tableName, "-T", "delete", gwIP).Run()
	if err != nil && logErr {
		logrus.Warnf("cannot remove bridge network '%s' from PF table", bridgeName)
	}
}
Example no. 13
//MountebankSetup is used to post mountebank setup configuration to a mountebank endpoint.
func MountebankSetup(endpoint string, config string) error {
	request, err := http.NewRequest("POST", endpoint, strings.NewReader(config))
	if err != nil {
		log.Warn("Error creating request for mountebank setup: ", err.Error())
		return err
	}

	request.Header.Add("Content-Type", "application/json")

	client := &http.Client{}
	resp, err := client.Do(request)
	if err != nil {
		log.Warn("Error returned by client.Do in mountebank setup: ", err.Error())
		return err
	}
	defer resp.Body.Close()

	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Warn("Error reading response body in mountebank setup: ", err.Error())
		return err
	}
	log.Info("Mountebank response: ", string(out))

	return nil
}
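A hedged usage sketch: mountebank conventionally listens on port 2525 and creates imposters via POST /imposters, so a call might look like the following (the endpoint and imposter config here are illustrative):

config := `{"port": 4545, "protocol": "http"}`
if err := MountebankSetup("http://localhost:2525/imposters", config); err != nil {
	log.Warn("mountebank setup failed: ", err)
}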
Example no. 14
func readDatabase() (Database, error) {
	kvdb := kv.Instance()

	db := Database{Status: api.StatusInit,
		NodeEntries: make(map[string]NodeEntry)}

	kv, err := kvdb.Get("cluster/database")
	if err != nil && !strings.Contains(err.Error(), "Key not found") {
		log.Warn("Warning, could not read cluster database")
		goto done
	}

	if kv == nil || bytes.Equal(kv.Value, []byte("{}")) {
		log.Info("Cluster is uninitialized...")
		err = nil
		goto done
	} else {
		err = json.Unmarshal(kv.Value, &db)
		if err != nil {
			log.Warn("Fatal, Could not parse cluster database ", kv)
			goto done
		}
	}

done:
	return db, err
}
Example no. 15
func (m *Mesos) loadState() (sj state.State, err error) {

	log.Debug("loadState() called")

	defer func() {
		if rec := recover(); rec != nil {
			err = errors.New("can't connect to Mesos")
		}
	}()

	mh := m.getLeader()
	if mh.Ip == "" {
		log.Warn("No master in zookeeper")
		return sj, errors.New("No master in zookeeper")
	}

	log.Infof("Zookeeper leader: %s:%s", mh.Ip, mh.PortString)

	log.Info("reloading from master ", mh.Ip)
	sj = m.loadFromMaster(mh.Ip, mh.PortString)

	if rip := leaderIP(sj.Leader); rip != mh.Ip {
		log.Warn("master changed to ", rip)
		sj = m.loadFromMaster(rip, mh.PortString)
	}

	return sj, err
}
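Note that the deferred recover above can only change the function's result because the return values are named; an assignment to a plain local err would happen after the return values were copied and be lost. A standalone illustration of the mechanism (fmt assumed imported):

func safeDiv(a, b int) (q int, err error) {
	defer func() {
		if rec := recover(); rec != nil {
			// visible to the caller only because err is a named return value
			err = fmt.Errorf("recovered: %v", rec)
		}
	}()
	q = a / b // panics when b == 0
	return q, nil
}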
Example no. 16
func (c *ClusterManager) heartBeat() {
	for {
		time.Sleep(2 * time.Second)

		// myInfo := c.getInfo()
		// ubcast.Push(NodeUpdate, &myInfo)

		// Process heartbeats from other nodes...
		for id, info := range c.nodeInfo {
			if info.Status == StatusOk && time.Since(info.Timestamp) > 10*time.Second {
				log.Warn("Detected node ", id, " to be offline.")

				info.Status = StatusOffline
				c.nodeInfo[id] = info

				for e := c.listeners.Front(); e != nil; e = e.Next() {
					err := e.Value.(ClusterListener).Leave(&info)
					if err != nil {
						log.Warn("Failed to notify ",
							e.Value.(ClusterListener).String())
					}
				}
			}
		}
	}
}
Example no. 17
// mrtFileOpen opens filename for appending, creating any missing parent
// directories. When interval is non-zero, filename is interpreted as a
// time layout and expanded with the current time.
func mrtFileOpen(filename string, interval uint64) (*os.File, error) {
	realname := filename
	if interval != 0 {
		realname = time.Now().Format(filename)
	}

	i := len(realname)
	for i > 0 && os.IsPathSeparator(realname[i-1]) {
		// skip trailing path separators
		i--
	}
	j := i

	for j > 0 && !os.IsPathSeparator(realname[j-1]) {
		j--
	}

	if j > 0 {
		if err := os.MkdirAll(realname[0:j-1], 0755); err != nil {
			log.Warn(err)
			return nil, err
		}
	}

	file, err := os.OpenFile(realname, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
	if err != nil {
		log.Warn(err)
	}
	return file, err
}
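A hedged call-site sketch: with a non-zero interval the filename goes through time.Now().Format, so Go's reference date (Mon Jan 2 15:04:05 MST 2006) supplies the placeholder syntax; the path below is illustrative only.

f, err := mrtFileOpen("/var/log/mrt/rib.2006-01-02_15-04", 300)
if err == nil {
	defer f.Close()
	// append MRT records to f here
}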
Example no. 18
// List returns a list of sorted changes
func (cl FileChangelist) List() []Change {
	var changes []Change
	dir, err := os.Open(cl.dir)
	if err != nil {
		return changes
	}
	defer dir.Close()
	fileInfos, err := dir.Readdir(0)
	if err != nil {
		return changes
	}
	sort.Sort(fileChanges(fileInfos))
	for _, f := range fileInfos {
		if f.IsDir() {
			continue
		}
		raw, err := ioutil.ReadFile(path.Join(cl.dir, f.Name()))
		if err != nil {
			logrus.Warn(err.Error())
			continue
		}
		c := &TufChange{}
		err = json.Unmarshal(raw, c)
		if err != nil {
			logrus.Warn(err.Error())
			continue
		}
		changes = append(changes, c)
	}
	return changes
}
Example no. 19
func (zc *zkflagCheck) Connect() (zk.State, error) {
	if zc.Connection != nil {
		state := zc.Connection.State()
		switch state {
		case zk.StateUnknown, zk.StateConnectedReadOnly, zk.StateExpired, zk.StateAuthFailed, zk.StateConnecting:
			{
				//Disconnect, and let Reconnection happen
				log.Warn("ZKFlag Connection is in BAD State [", state, "] Reconnect")
				zc.Connection.Close()
			}
		case zk.StateConnected, zk.StateHasSession:
			{
				log.Debug("ZKFlag Connection established(", state, "), nothing to do.")
				return state, nil
			}
		case zk.StateDisconnected:
			{
				log.Info("ZKFlag Connection is Disconnected -> Reconnection")
			}
		}
	}
	conn, _, err := zk.Connect(zc.Hosts, 10*time.Second)
	if err != nil {
		zc.Connection = nil
		log.Warn("Unable to Connect to ZKFlag (", err, ")")
		return zk.StateDisconnected, err
	}
	zc.Connection = conn
	var zkLogger ZKDebugLogger
	zc.Connection.SetLogger(zkLogger)
	state := zc.Connection.State()
	return state, nil
}
Example no. 20
//DoPost spawns the xavi listener as specified by the payload
func (SpawnListenerDef) DoPost(kvs kvstore.KVStore, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	body, err := ioutil.ReadAll(req.Body)
	req.Body.Close()
	if err != nil {
		log.Warn("Error reading request body: ", err.Error())
		resp.WriteHeader(http.StatusBadRequest)
		return nil, err
	}
	log.Info(fmt.Sprintf("Post request with payload %s", string(body)))

	spawnSpec := new(spawn)
	err = json.Unmarshal(body, spawnSpec)
	if err != nil {
		log.Warn("Error unmarshaling request body")
		resp.WriteHeader(http.StatusBadRequest)
		return nil, err
	}

	log.Info("Spawning a process - form command")
	var pout, perror bytes.Buffer
	cmd := exec.Command("xavi", "listen", "-ln", spawnSpec.ListenerName, "-address", spawnSpec.Address)
	cmd.Stderr = &perror
	cmd.Stdout = &pout

	log.Info("run command")
	err = cmd.Start()
	if err != nil {
		log.Warn("error running command: ", err.Error())
		resp.WriteHeader(http.StatusInternalServerError)
		return nil, err
	}

	pid := cmd.Process.Pid
	log.Info("started process - pid is: ", pid)
	addPid(pid)

	return fmt.Sprintf("started process %d", pid), nil
}
Example no. 21
func (r *remote) getLastEventTimestamp() int64 {
	t := time.Now()

	fi, err := os.Stat(r.eventTsPath)
	if err != nil || fi.Size() == 0 {
		return t.Unix()
	}

	f, err := os.Open(r.eventTsPath)
	if err != nil {
		logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err)
		return t.Unix()
	}
	defer f.Close()

	b := make([]byte, fi.Size())
	n, err := f.Read(b)
	if err != nil || n != len(b) {
		logrus.Warnf("libcontainerd: Unable to read last event ts: %v", err)
		return t.Unix()
	}

	t.UnmarshalText(b)

	return t.Unix()
}
Example no. 22
// checkCgroupCPU reads the cpu information from the cpu cgroup mount point.
func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo {
	mountPoint, ok := cgMounts["cpu"]
	if !ok {
		if !quiet {
			logrus.Warnf("Unable to find cpu cgroup in mounts")
		}
		return cgroupCPUInfo{}
	}

	cpuShares := cgroupEnabled(mountPoint, "cpu.shares")
	if !quiet && !cpuShares {
		logrus.Warn("Your kernel does not support cgroup cpu shares")
	}

	cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us")
	if !quiet && !cpuCfsPeriod {
		logrus.Warn("Your kernel does not support cgroup cfs period")
	}

	cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us")
	if !quiet && !cpuCfsQuota {
		logrus.Warn("Your kernel does not support cgroup cfs quotas")
	}
	return cgroupCPUInfo{
		CPUShares:    cpuShares,
		CPUCfsPeriod: cpuCfsPeriod,
		CPUCfsQuota:  cpuCfsQuota,
	}
}
Example no. 23
// HTTP handler to allow clients to add/update schedules
func (a *API) HandlePUT(w http.ResponseWriter, r *http.Request) {

	log.Debug("Handling PUT")

	userID := r.Header.Get("user_id")

	defer r.Body.Close()

	log.Debug("going to decode schedule")
	decoder := json.NewDecoder(r.Body)
	proposal := types.Schedule{}
	err := decoder.Decode(&proposal)
	if err != nil {
		log.Warn("failed to decode proposed schedule: ", err)
		handleError(w, UnmarshalError)
		return
	}

	log.Debug("proposal: ", proposal)
	err = a.db.Put(userID, proposal)
	if err != nil {
		log.Warn("error putting: ", err)
		handleError(w, err)
		return
	}

	w.WriteHeader(http.StatusOK)
}
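For reference, a hedged client-side sketch of the request this handler expects: a JSON-encoded types.Schedule in the body and the user identifier in a user_id header (the URL and values are illustrative):

payload, _ := json.Marshal(proposal) // a types.Schedule value
req, _ := http.NewRequest("PUT", "http://localhost:8080/schedules", bytes.NewReader(payload))
req.Header.Set("user_id", "alice")
resp, err := http.DefaultClient.Do(req)
if err == nil {
	resp.Body.Close()
}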
Example no. 24
func (ns *NerveService) Run(stop <-chan bool) {
	defer servicesWaitGroup.Done()
	log.Debug("Service Running [", ns.Name, "]")
Loop:
	for {
		// Here The job to check, and report
		status, err := ns.Watcher.Check()
		if err != nil {
			log.Warn("Check error for Service [", ns.Name, "] [", err, "]")
		}
		ns.Reporter.Report(status)

		// Wait for the stop signal
		select {
		case hasToStop := <-stop:
			if hasToStop {
				log.Debug("Nerve: Service [", ns.Name, "] Run Close Signal Received")
			} else {
				log.Debug("Nerve: Service [", ns.Name, "] Run Close Signal Received (but a strange false one)")
			}
			break Loop
		default:
			time.Sleep(time.Millisecond * time.Duration(ns.CheckInterval))
		}
	}
	err := ns.Reporter.Destroy()
	if err != nil {
		log.Warn("Service [", ns.Name, "] has detected an error when destroying Reporter (", err, ")")
	}
	log.Debug("Service [", ns.Name, "] stopped")
}
Example no. 25
func (a arriba) handleMessageEvent(ev *slack.MessageEvent) {
	logrus.Debugf("Message received %+v", ev)
	if a.botID == "" {
		logrus.Warn("Received message event before finishing initialization")
		return
	}
	if ev.Channel == "" {
		logrus.Warn("Received message with empty channel")
		return
	}
	switch ev.Channel[0] {
	case 'C', 'G':
		// Public and private (group) channels
		smsg, ok := a.extractChannelStandupMsg(ev.Msg)
		if !ok {
			return
		}
		logrus.Infof("Received standup message in channel %s: %+v", ev.Channel, smsg)
		// Garbage-collect old messages
		a.removeOldMessages(ev.Msg.Channel)
		if smsg.text == "" {
			a.sendStatus(ev.Msg.Channel)
		} else {
			a.updateLastStandup(ev.Msg.Channel, ev.Msg.User, smsg)
		}

	case 'D':
		// Direct messages are not supported yet
	}
}
Example no. 26
// NewDriver returns a new windows driver, called from NewDriver of execdriver.
func NewDriver(root, initPath string, options []string) (*Driver, error) {

	for _, option := range options {
		key, val, err := parsers.ParseKeyValueOpt(option)
		if err != nil {
			return nil, err
		}
		key = strings.ToLower(key)
		switch key {

		case "dummy":
			switch val {
			case "1":
				dummyMode = true
				logrus.Warn("Using dummy mode in Windows exec driver. This is for development use only!")
			}

		case "forcekill":
			switch val {
			case "1":
				forceKill = true
				logrus.Warn("Using force kill mode in Windows exec driver. This is for testing purposes only.")
			}

		default:
			return nil, fmt.Errorf("Unrecognised exec driver option %s\n", key)
		}
	}

	return &Driver{
		root:             root,
		initPath:         initPath,
		activeContainers: make(map[string]*activeContainer),
	}, nil
}
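A hedged construction sketch showing the option syntax the parser accepts (the paths are illustrative):

driver, err := NewDriver(`c:\docker`, `c:\docker\init`, []string{"dummy=1"})
if err != nil {
	logrus.Fatal(err)
}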
Example no. 27
// New returns a new SysInfo, using the filesystem to detect which features the kernel supports.
func New(quiet bool) *SysInfo {
	sysInfo := &SysInfo{}
	if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil {
		if !quiet {
			logrus.Warnf("Your kernel does not support cgroup memory limit: %v", err)
		}
	} else {
		// If memory cgroup is mounted, MemoryLimit is always enabled.
		sysInfo.MemoryLimit = true

		_, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes"))
		sysInfo.SwapLimit = err1 == nil
		if !sysInfo.SwapLimit && !quiet {
			logrus.Warn("Your kernel does not support swap memory limit.")
		}

		_, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.oom_control"))
		sysInfo.OomKillDisable = err == nil
		if !sysInfo.OomKillDisable && !quiet {
			logrus.Warnf("Your kernel does not support oom control.")
		}
	}

	if cgroupCpuMountpoint, err := cgroups.FindCgroupMountpoint("cpu"); err != nil {
		if !quiet {
			logrus.Warnf("%v", err)
		}
	} else {
		_, err1 := ioutil.ReadFile(path.Join(cgroupCpuMountpoint, "cpu.cfs_quota_us"))
		sysInfo.CpuCfsQuota = err1 == nil
		if !sysInfo.CpuCfsQuota && !quiet {
			logrus.Warn("Your kernel does not support cgroup cfs quotas")
		}
	}

	// Check if ipv4_forward is disabled.
	if data, err := ioutil.ReadFile("/proc/sys/net/ipv4/ip_forward"); os.IsNotExist(err) {
		sysInfo.IPv4ForwardingDisabled = true
	} else {
		enabled, _ := strconv.Atoi(strings.TrimSpace(string(data)))
		sysInfo.IPv4ForwardingDisabled = enabled == 0
	}

	// Check if AppArmor is supported.
	if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) {
		sysInfo.AppArmor = false
	} else {
		sysInfo.AppArmor = true
	}

	// Check if Devices cgroup is mounted, it is hard requirement for container security.
	if _, err := cgroups.FindCgroupMountpoint("devices"); err != nil {
		logrus.Fatalf("Error mounting devices cgroup: %v", err)
	}

	return sysInfo
}
Example no. 28
// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point.
func checkCgroupBlkioInfo(quiet bool) cgroupBlkioInfo {
	mountPoint, err := cgroups.FindCgroupMountpoint("blkio")
	if err != nil {
		if !quiet {
			logrus.Warn(err)
		}
		return cgroupBlkioInfo{}
	}

	weight := cgroupEnabled(mountPoint, "blkio.weight")
	if !quiet && !weight {
		logrus.Warn("Your kernel does not support cgroup blkio weight")
	}

	weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device")
	if !quiet && !weightDevice {
		logrus.Warn("Your kernel does not support cgroup blkio weight_device")
	}

	readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device")
	if !quiet && !readBpsDevice {
		logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device")
	}

	writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device")
	if !quiet && !writeBpsDevice {
		logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device")
	}
	return cgroupBlkioInfo{
		BlkioWeight:         weight,
		BlkioWeightDevice:   weightDevice,
		BlkioReadBpsDevice:  readBpsDevice,
		BlkioWriteBpsDevice: writeBpsDevice,
	}
}
Example no. 29
func killCgroupProcs(c libcontainer.Container) {
	var procs []*os.Process
	if err := c.Pause(); err != nil {
		logrus.Warn(err)
	}
	pids, err := c.Processes()
	if err != nil {
		// don't care about children if we can't get them; this is mostly because the cgroup has already been deleted
		logrus.Warnf("Failed to get processes from container %s: %v", c.ID(), err)
	}
	for _, pid := range pids {
		if p, err := os.FindProcess(pid); err == nil {
			procs = append(procs, p)
			if err := p.Kill(); err != nil {
				logrus.Warn(err)
			}
		}
	}
	if err := c.Resume(); err != nil {
		logrus.Warn(err)
	}
	for _, p := range procs {
		if _, err := p.Wait(); err != nil {
			logrus.Warn(err)
		}
	}
}
Example no. 30
// signalAllProcesses freezes then iterates over all the processes inside the
// manager's cgroups sending the given signal to each process, then waiting
// for them to exit.
func signalAllProcesses(m cgroups.Manager, s os.Signal) error {
	var procs []*os.Process
	if err := m.Freeze(configs.Frozen); err != nil {
		logrus.Warn(err)
	}
	pids, err := m.GetAllPids()
	if err != nil {
		m.Freeze(configs.Thawed)
		return err
	}
	for _, pid := range pids {
		p, err := os.FindProcess(pid)
		if err != nil {
			logrus.Warn(err)
			continue
		}
		procs = append(procs, p)
		if err := p.Signal(s); err != nil {
			logrus.Warn(err)
		}
	}
	if err := m.Freeze(configs.Thawed); err != nil {
		logrus.Warn(err)
	}
	for _, p := range procs {
		if _, err := p.Wait(); err != nil {
			logrus.Warn(err)
		}
	}
	return nil
}
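A hedged call site, assuming a configured cgroups.Manager named mgr: passing syscall.SIGKILL reproduces the kill-and-wait behaviour of the earlier killCgroupProcesses example, and any other os.Signal (e.g. syscall.SIGTERM) can be passed instead.

if err := signalAllProcesses(mgr, syscall.SIGKILL); err != nil {
	logrus.Warn(err)
}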