Example #1
0
func newRawContainerHandler(name string, cgroupSubsystems *cgroupSubsystems, machineInfoFactory info.MachineInfoFactory) (container.ContainerHandler, error) {
	fsInfo, err := fs.NewFsInfo()
	if err != nil {
		return nil, err
	}
	cHints, err := getContainerHintsFromFile(*argContainerHints)
	if err != nil {
		return nil, err
	}
	var networkInterface *networkInterface
	var externalMounts []mount
	for _, container := range cHints.AllHosts {
		if name == container.FullName {
			networkInterface = container.NetworkInterface
			externalMounts = container.Mounts
			break
		}
	}
	return &rawContainerHandler{
		name: name,
		cgroup: &cgroups.Cgroup{
			Parent: "/",
			Name:   name,
		},
		cgroupSubsystems:   cgroupSubsystems,
		machineInfoFactory: machineInfoFactory,
		stopWatcher:        make(chan error),
		watches:            make(map[string]struct{}),
		fsInfo:             fsInfo,
		networkInterface:   networkInterface,
		externalMounts:     externalMounts,
	}, nil
}
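The handler above looks the container up in a hints file (getContainerHintsFromFile) and copies its NetworkInterface and Mounts. The hint types themselves are not part of these examples; the sketch below reconstructs plausible definitions from the fields the handlers access (AllHosts, FullName, NetworkInterface, Mounts, plus the veth fields used in Example #5). The field layout and JSON tags are assumptions for illustration, not the actual cAdvisor definitions.

// Hypothetical hint types, inferred from how cHints is used in these examples.
type networkInterface struct {
	VethHost  string `json:"veth_host"`
	VethChild string `json:"veth_child"`
}

// mount maps a host directory into the container; the field names are assumed.
type mount struct {
	HostDir      string `json:"host_dir"`
	ContainerDir string `json:"container_dir"`
}

type containerHint struct {
	FullName         string            `json:"full_path"`
	NetworkInterface *networkInterface `json:"network_interface,omitempty"`
	Mounts           []mount           `json:"mounts,omitempty"`
}

type containerHints struct {
	AllHosts []containerHint `json:"all_hosts"`
}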
Example #2
0
// New takes a memory storage and returns a new manager.
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool) (Manager, error) {
	if memoryCache == nil {
		return nil, fmt.Errorf("manager requires memory storage")
	}

	// Detect the container we are running on.
	selfContainer, err := cgroups.GetThisCgroupDir("cpu")
	if err != nil {
		return nil, err
	}
	glog.Infof("cAdvisor running in container: %q", selfContainer)

	dockerInfo, err := docker.DockerInfo()
	if err != nil {
		glog.Warningf("Unable to connect to Docker: %v", err)
	}
	context := fs.Context{DockerRoot: docker.RootDir(), DockerInfo: dockerInfo}
	fsInfo, err := fs.NewFsInfo(context)
	if err != nil {
		return nil, err
	}

	// If cAdvisor was started with the host's rootfs mounted, assume that it is
	// running in its own namespaces.
	inHostNamespace := false
	if _, err := os.Stat("/rootfs/proc"); os.IsNotExist(err) {
		inHostNamespace = true
	}

	newManager := &manager{
		containers:               make(map[namespacedContainerName]*containerData),
		quitChannels:             make([]chan error, 0, 2),
		memoryCache:              memoryCache,
		fsInfo:                   fsInfo,
		cadvisorContainer:        selfContainer,
		inHostNamespace:          inHostNamespace,
		startupTime:              time.Now(),
		maxHousekeepingInterval:  maxHousekeepingInterval,
		allowDynamicHousekeeping: allowDynamicHousekeeping,
		ignoreMetrics:            ignoreMetrics.MetricSet,
	}

	machineInfo, err := getMachineInfo(sysfs, fsInfo, inHostNamespace)
	if err != nil {
		return nil, err
	}
	newManager.machineInfo = *machineInfo
	glog.Infof("Machine: %+v", newManager.machineInfo)

	versionInfo, err := getVersionInfo()
	if err != nil {
		return nil, err
	}
	glog.Infof("Version: %+v", *versionInfo)

	newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy())
	return newManager, nil
}
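For context, here is how the constructor above might be wired up by a caller. This is a minimal sketch: newMemoryCache and newRealSysFs are assumed stand-ins for whatever the surrounding program uses to build the cache and sysfs implementations, and the interval values are illustrative; only the New call itself is taken from the example.

// Minimal wiring sketch, assuming newMemoryCache and newRealSysFs are helpers
// provided by the surrounding program; only the New call comes from the example.
func startManager() (Manager, error) {
	memoryCache := newMemoryCache() // *memory.InMemoryCache (assumed helper)
	sysFs := newRealSysFs()         // sysfs.SysFs (assumed helper)

	// Housekeep at most every 60s and allow the interval to adapt dynamically.
	containerManager, err := New(memoryCache, sysFs, 60*time.Second, true)
	if err != nil {
		return nil, err
	}
	// Start is assumed to be part of the returned Manager interface.
	if err := containerManager.Start(); err != nil {
		return nil, err
	}
	return containerManager, nil
}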
Example #3
0
func getMachineInfo(sysFs sysfs.SysFs) (*info.MachineInfo, error) {
	// Get CPU information from /proc/cpuinfo.
	cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo")
	if err != nil {
		return nil, err
	}
	clockSpeed, err := getClockSpeed(cpuinfo)
	if err != nil {
		return nil, err
	}

	// Get the amount of usable memory from /proc/meminfo.
	out, err := ioutil.ReadFile("/proc/meminfo")
	if err != nil {
		return nil, err
	}

	memoryCapacity, err := getMemoryCapacity(out)
	if err != nil {
		return nil, err
	}

	fsInfo, err := fs.NewFsInfo()
	if err != nil {
		return nil, err
	}
	filesystems, err := fsInfo.GetGlobalFsInfo()
	if err != nil {
		return nil, err
	}

	diskMap, err := sysinfo.GetBlockDeviceInfo(sysFs)
	if err != nil {
		return nil, err
	}

	netDevices, err := sysinfo.GetNetworkDevices(sysFs)
	if err != nil {
		return nil, err
	}

	topology, numCores, err := getTopology(sysFs, string(cpuinfo))
	if err != nil {
		return nil, err
	}

	machineInfo := &info.MachineInfo{
		NumCores:       numCores,
		CpuFrequency:   clockSpeed,
		MemoryCapacity: memoryCapacity,
		DiskMap:        diskMap,
		NetworkDevices: netDevices,
		Topology:       topology,
	}

	for _, fs := range filesystems {
		machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, Capacity: fs.Capacity})
	}

	return machineInfo, nil
}
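getClockSpeed and getMemoryCapacity are used above but not listed here. Below is a plausible sketch of getMemoryCapacity, assuming the same MemTotal parsing that the older inline version in Example #7 performs (a regex match on /proc/meminfo followed by a kB-to-bytes conversion); the real helper may differ in detail.

// Hypothetical helper mirroring the inline parsing shown in Example #7.
// Assumes the "regexp", "strconv", and "fmt" packages are imported.
var memoryCapacityRegexp = regexp.MustCompile(`MemTotal:\s*([0-9]+) kB`)

func getMemoryCapacity(b []byte) (int64, error) {
	matches := memoryCapacityRegexp.FindSubmatch(b)
	if len(matches) != 2 {
		return -1, fmt.Errorf("failed to find memory capacity in output: %q", string(b))
	}
	memoryCapacity, err := strconv.ParseInt(string(matches[1]), 10, 64)
	if err != nil {
		return -1, err
	}
	// /proc/meminfo reports the value in kB; convert it to bytes.
	return memoryCapacity * 1024, nil
}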
Example #4
0
// New takes a memory storage and returns a new manager.
func New(memoryStorage *memory.InMemoryStorage, sysfs sysfs.SysFs) (Manager, error) {
	if memoryStorage == nil {
		return nil, fmt.Errorf("manager requires memory storage")
	}

	// Detect the container we are running on.
	selfContainer, err := cgroups.GetThisCgroupDir("cpu")
	if err != nil {
		return nil, err
	}
	glog.Infof("cAdvisor running in container: %q", selfContainer)

	context := fs.Context{DockerRoot: docker.RootDir()}
	fsInfo, err := fs.NewFsInfo(context)
	if err != nil {
		return nil, err
	}
	newManager := &manager{
		containers:        make(map[namespacedContainerName]*containerData),
		quitChannels:      make([]chan error, 0, 2),
		memoryStorage:     memoryStorage,
		fsInfo:            fsInfo,
		cadvisorContainer: selfContainer,
		startupTime:       time.Now(),
	}

	machineInfo, err := getMachineInfo(sysfs, fsInfo)
	if err != nil {
		return nil, err
	}
	newManager.machineInfo = *machineInfo
	glog.Infof("Machine: %+v", newManager.machineInfo)

	versionInfo, err := getVersionInfo()
	if err != nil {
		return nil, err
	}
	newManager.versionInfo = *versionInfo
	glog.Infof("Version: %+v", newManager.versionInfo)

	newManager.eventHandler = events.NewEventManager()

	// Register Docker container factory.
	err = docker.Register(newManager, fsInfo)
	if err != nil {
		glog.Errorf("Docker container factory registration failed: %v.", err)
	}

	// Register the raw driver.
	err = raw.Register(newManager, fsInfo)
	if err != nil {
		glog.Errorf("Registration of the raw container factory failed: %v", err)
	}

	return newManager, nil
}
Example #5
0
func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory) (container.ContainerHandler, error) {
	// Create the cgroup paths.
	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
	for key, val := range cgroupSubsystems.MountPoints {
		cgroupPaths[key] = path.Join(val, name)
	}

	fsInfo, err := fs.NewFsInfo()
	if err != nil {
		return nil, err
	}
	cHints, err := getContainerHintsFromFile(*argContainerHints)
	if err != nil {
		return nil, err
	}

	// Generate the equivalent libcontainer state for this container.
	libcontainerState := dockerlibcontainer.State{
		CgroupPaths: cgroupPaths,
	}

	hasNetwork := false
	var externalMounts []mount
	for _, container := range cHints.AllHosts {
		if name == container.FullName {
			libcontainerState.NetworkState = network.NetworkState{
				VethHost:  container.NetworkInterface.VethHost,
				VethChild: container.NetworkInterface.VethChild,
			}
			hasNetwork = true
			externalMounts = container.Mounts
			break
		}
	}

	return &rawContainerHandler{
		name: name,
		cgroup: &cgroups.Cgroup{
			Parent: "/",
			Name:   name,
		},
		cgroupSubsystems:   cgroupSubsystems,
		machineInfoFactory: machineInfoFactory,
		stopWatcher:        make(chan error),
		watches:            make(map[string]struct{}),
		cgroupWatches:      make(map[string]struct{}),
		cgroupPaths:        cgroupPaths,
		libcontainerState:  libcontainerState,
		fsInfo:             fsInfo,
		hasNetwork:         hasNetwork,
		externalMounts:     externalMounts,
	}, nil
}
Example #6
0
func newDockerContainerHandler(
	client *docker.Client,
	name string,
	machineInfoFactory info.MachineInfoFactory,
	dockerRootDir string,
	usesAufsDriver bool,
	cgroupSubsystems *containerLibcontainer.CgroupSubsystems,
) (container.ContainerHandler, error) {
	fsInfo, err := fs.NewFsInfo()
	if err != nil {
		return nil, err
	}

	// Create the cgroup paths.
	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
	for key, val := range cgroupSubsystems.MountPoints {
		cgroupPaths[key] = path.Join(val, name)
	}

	id := ContainerNameToDockerId(name)
	stateDir := DockerStateDir()
	handler := &dockerContainerHandler{
		id:                     id,
		client:                 client,
		name:                   name,
		machineInfoFactory:     machineInfoFactory,
		libcontainerConfigPath: path.Join(stateDir, id, "container.json"),
		libcontainerStatePath:  path.Join(stateDir, id, "state.json"),
		libcontainerPidPath:    path.Join(stateDir, id, "pid"),
		cgroupPaths:            cgroupPaths,
		cgroup: cgroups.Cgroup{
			Parent: "/",
			Name:   name,
		},
		usesAufsDriver: usesAufsDriver,
		fsInfo:         fsInfo,
	}
	handler.storageDirs = append(handler.storageDirs, path.Join(dockerRootDir, pathToAufsDir, path.Base(name)))

	// We assume that if Inspect fails then the container is not known to docker.
	ctnr, err := client.InspectContainer(id)
	if err != nil {
		return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
	}

	// Add the name and bare ID as aliases of the container.
	handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"))
	handler.aliases = append(handler.aliases, id)

	return handler, nil
}
Example #7
0
func getMachineInfo() (*info.MachineInfo, error) {
	// Get the number of CPUs from /proc/cpuinfo.
	out, err := ioutil.ReadFile("/proc/cpuinfo")
	if err != nil {
		return nil, err
	}
	numCores := len(numCpuRegexp.FindAll(out, -1))
	if numCores == 0 {
		return nil, fmt.Errorf("failed to count cores in output: %s", string(out))
	}

	// Get the amount of usable memory from /proc/meminfo.
	out, err = ioutil.ReadFile("/proc/meminfo")
	if err != nil {
		return nil, err
	}
	matches := memoryCapacityRegexp.FindSubmatch(out)
	if len(matches) != 2 {
		return nil, fmt.Errorf("failed to find memory capacity in output: %s", string(out))
	}
	memoryCapacity, err := strconv.ParseInt(string(matches[1]), 10, 64)
	if err != nil {
		return nil, err
	}

	// Capacity is in KB, convert it to bytes.
	memoryCapacity = memoryCapacity * 1024

	fsInfo, err := fs.NewFsInfo()
	if err != nil {
		return nil, err
	}
	filesystems, err := fsInfo.GetGlobalFsInfo()
	if err != nil {
		return nil, err
	}

	machineInfo := &info.MachineInfo{
		NumCores:       numCores,
		MemoryCapacity: memoryCapacity,
	}
	for _, fs := range filesystems {
		machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, Capacity: fs.Capacity})
	}

	return machineInfo, nil
}
Example #8
0
func newDockerContainerHandler(
	client *docker.Client,
	name string,
	machineInfoFactory info.MachineInfoFactory,
	dockerRootDir string,
	usesAufsDriver bool,
) (container.ContainerHandler, error) {
	fsInfo, err := fs.NewFsInfo()
	if err != nil {
		return nil, err
	}
	handler := &dockerContainerHandler{
		client:               client,
		name:                 name,
		machineInfoFactory:   machineInfoFactory,
		libcontainerStateDir: path.Join(dockerRootDir, pathToLibcontainerState),
		cgroup: cgroups.Cgroup{
			Parent: "/",
			Name:   name,
		},
		usesAufsDriver: usesAufsDriver,
		fsInfo:         fsInfo,
	}
	handler.storageDirs = append(handler.storageDirs, path.Join(dockerRootDir, pathToAufsDir, path.Base(name)))
	if handler.isDockerRoot() {
		return handler, nil
	}
	id := containerNameToDockerId(name)
	handler.id = id
	ctnr, err := client.InspectContainer(id)
	// We assume that if Inspect fails then the container is not known to docker.
	if err != nil {
		return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
	}
	handler.aliases = append(handler.aliases, path.Join("/docker", ctnr.Name))
	return handler, nil
}
Example #9
0
// New takes a memory storage and returns a new manager.
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, ignoreMetricsSet container.MetricSet) (Manager, error) {
	if memoryCache == nil {
		return nil, fmt.Errorf("manager requires memory storage")
	}

	// Detect the container we are running on.
	selfContainer, err := cgroups.GetThisCgroupDir("cpu")
	if err != nil {
		return nil, err
	}
	glog.Infof("cAdvisor running in container: %q", selfContainer)

	dockerStatus, err := docker.Status()
	if err != nil {
		glog.Warningf("Unable to connect to Docker: %v", err)
	}
	newManager := &manager{
		containers:               make(map[namespacedContainerName]*containerData),
		quitChannels:             make([]chan error, 0, 2),
		memoryCache:              memoryCache,
		cadvisorContainer:        selfContainer,
		startupTime:              time.Now(),
		maxHousekeepingInterval:  maxHousekeepingInterval,
		allowDynamicHousekeeping: allowDynamicHousekeeping,
		ignoreMetrics:            ignoreMetricsSet,
		containerWatchers:        []watcher.ContainerWatcher{},
	}
	hyperStatus, err := newManager.HyperInfo()
	if err != nil {
		glog.Warningf("Unable to connect to Docker: %v", err)
	}
	rktPath, err := rkt.RktPath()
	if err != nil {
		glog.Warningf("unable to connect to Rkt api service: %v", err)
	}

	context := fs.Context{
		Docker: fs.DockerContext{
			Root:         docker.RootDir(),
			Driver:       dockerStatus.Driver,
			DriverStatus: dockerStatus.DriverStatus,
		},
		Hyper: fs.HyperContext{
			Root:         hyper.RootDir(hyperStatus),
			Driver:       hyperStatus.Driver,
			DriverStatus: hyperStatus.DriverStatus,
		},
		RktPath: rktPath,
	}
	fsInfo, err := fs.NewFsInfo(context)
	if err != nil {
		return nil, err
	}

	// If cAdvisor was started with the host's rootfs mounted, assume that it is
	// running in its own namespaces.
	inHostNamespace := false
	if _, err := os.Stat("/rootfs/proc"); os.IsNotExist(err) {
		inHostNamespace = true
	}

	// Register for new subcontainers.
	eventsChannel := make(chan watcher.ContainerEvent, 16)

	newManager.fsInfo = fsInfo
	newManager.eventsChannel = eventsChannel
	newManager.inHostNamespace = inHostNamespace

	machineInfo, err := machine.Info(sysfs, fsInfo, inHostNamespace)
	if err != nil {
		return nil, err
	}
	newManager.machineInfo = *machineInfo
	glog.Infof("Machine: %+v", newManager.machineInfo)

	versionInfo, err := getVersionInfo()
	if err != nil {
		return nil, err
	}
	glog.Infof("Version: %+v", *versionInfo)

	newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy())
	return newManager, nil
}