func collectEvents(cURL *url.URL, dnURL *url.URL, start time.Time, end time.Time) {

	cAdvisorClient, err := client.NewClient(cURL.String())
	if err != nil {
		glog.Errorf("tried to make client and got error %v", err)
		return
	}
	params := "?all_events=true&subcontainers=true&start_time=" + start.Format(time.RFC3339) + "&end_time=" + end.Format(time.RFC3339)
	einfo, err := cAdvisorClient.EventStaticInfo(params)
	if err != nil {
		glog.Errorf("got error retrieving event info: %v", err)
		return
	}

	var dataPoints DataPointList

	// The JSON returned by the events API is almost in the proper format. We just need to:
	//     add the container alias
	//     rename "timestamp" to "time"
	//     remove "event_data"
	//     add source_type: event

	for idx, ev := range einfo {
		glog.V(3).Infof("static einfo %v: %v", idx, ev)
		hdr := &DataPointHeader{ev.Timestamp, ev.ContainerName, getContainerAlias(cAdvisorClient, ev.ContainerName), "event", config.DataSource}
		dataPoints = append(dataPoints,
			&EventDataPoint{*hdr, "container_event", ev.EventType},
		)
	}

	sendDataPoints(dataPoints, dnURL)
}
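The DataPointList, DataPointHeader, and EventDataPoint types above are project-specific, not part of the cAdvisor client. A minimal sketch consistent with how this example constructs them (field names and the interface slice are assumptions):

// Hypothetical definitions inferred from the composite literals above; the
// real project may differ. config.DataSource is assumed to be a string.
type DataPointHeader struct {
	Time           time.Time
	ContainerName  string
	ContainerAlias string
	SourceType     string // "event" here, presumably "metric" elsewhere
	DataSource     string
}

type EventDataPoint struct {
	DataPointHeader
	Metric string // "container_event"
	Value  info.EventType
}

// DataPointList holds points of mixed concrete types.
type DataPointList []interface{}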
Example 2
func monitor(address string, requestChannel chan *request, responseChannel chan *response) {

	client, err := cadvisorClient.NewClient(address)

	if err != nil {
		fmt.Print(err.Error())
		return
	}

	containerInfoRequest := info.ContainerInfoRequest{
		NumStats: 1,
	}
	for signal := range requestChannel {

		containerInfo, err := client.ContainerInfo("/", &containerInfoRequest)

		if err != nil {
			fmt.Print(err.Error())
			return
		}

		if len(containerInfo.Stats) == 0 {
			// no samples returned for this container yet; skip this signal
			continue
		}

		//	spew.Dump(containerInfo.Stats[0].Memory)
		//	spew.Dump(containerInfo.Stats[0].Cpu)

		responseChannel <- &response{
			memoryStats: containerInfo.Stats[0].Memory,
			cpuStats:    containerInfo.Stats[0].Cpu,
			identifier:  address,
			timestamp:   signal.timestamp,
		}

	}

}
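The request and response types here are defined elsewhere in the project. A sketch of how monitor might be driven, assuming a request carries only a timestamp:

// Hypothetical wiring for the monitor goroutine above.
requestChannel := make(chan *request)
responseChannel := make(chan *response)
go monitor("http://localhost:8080/", requestChannel, responseChannel)

requestChannel <- &request{timestamp: time.Now()} // trigger one poll
r := <-responseChannel
fmt.Printf("%s at %v: %+v\n", r.identifier, r.timestamp, r.memoryStats)
close(requestChannel) // lets monitor's range loop exit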
Example 3
func cpuTotalUsage() {
	staticClient, err := client.NewClient("http://localhost:8080/")
	if err != nil {
		log.Fatalf("CpuUsage monitor tried to make client and got error %v", err)
		return
	}

	//exec a shell on the host machine to get a docker container id
	cmd := exec.Command("/bin/sh", "-c", "docker ps -q")
	short_id, err := cmd.Output()
	if err != nil {
		log.Fatalf("CpuUsage monitor failed to run docker ps: %v", err)
	}

	// Assumes a single running container; TrimSpace (from the strings
	// package) drops the trailing newline before splicing the id in.
	cmd = exec.Command("/bin/sh", "-c", "docker inspect -f '{{.Id}}' "+strings.TrimSpace(string(short_id)))
	full_id, err := cmd.Output()
	if err != nil {
		log.Fatalf("CpuUsage monitor failed to run docker inspect: %v", err)
	}
	fmt.Printf("Cpu monitor found the docker container ID %v\n", string(full_id))

	//container names have the form "/docker/<container id>"
	containerName := "/docker/" + strings.TrimSpace(string(full_id))
	query := &info.ContainerInfoRequest{}

	//get the ContainerInfo structure through the client
	cInfo, err := staticClient.ContainerInfo(containerName, query)
	if err != nil {
		log.Fatalf("Cpu monitor get ContainerInfo err %v", err)
	}
	fmt.Printf("Cpu monitor got container info for %v, now computing cpu usage\n", cInfo.Name)

	for i := 1; i < len(cInfo.Stats); i++ {
		cur := cInfo.Stats[i]
		prev := cInfo.Stats[i-1]
		//interval between the two samples, in nanoseconds
		f := float64((cur.Timestamp).Sub(prev.Timestamp).Nanoseconds())
		fmt.Printf("Container total usage: %.02f\n", float64(cur.Cpu.Usage.Total-prev.Cpu.Usage.Total)/f)
	}
}
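Cpu.Usage.Total is cumulative CPU time in nanoseconds, so the ratio printed above is the average number of cores in use over the sample interval. A small helper (a sketch, not part of cAdvisor) that turns two samples into the whole-machine percentage that Example 17 below computes inline:

// cpuPercent returns CPU usage between two consecutive cAdvisor samples as
// a percentage of the whole machine; numCores comes from MachineInfo.
func cpuPercent(prev, cur *info.ContainerStats, numCores int) float64 {
	deltaUsage := float64(cur.Cpu.Usage.Total - prev.Cpu.Usage.Total) // ns of CPU time
	deltaWall := float64(cur.Timestamp.Sub(prev.Timestamp).Nanoseconds())
	return (deltaUsage / deltaWall) / float64(numCores) * 100
}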
Example 4
func collectMetrics(cURL *url.URL, dnURL *url.URL, sendToDataNode bool) {

	cAdvisorClient, err := client.NewClient(cURL.String())
	if err != nil {
		glog.Errorf("tried to make cAdvisor client and got error: %v", err)
		return
	}

	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}

	cInfos, err := cAdvisorClient.AllDockerContainers(request)

	if err != nil {
		glog.Errorf("unable to get info on all docker containers: %v", err)
		return
	}

	var dataPoints DataPointList

	// the loop variable is renamed so it does not shadow the info package
	for _, cinfo := range cInfos {
		updateContainerAlias(cinfo.Name, getAliasSafely(cinfo.Aliases))
		if sendToDataNode {
			dataPoints = append(dataPoints, allDataPoints(cinfo)...)
		}
	}

	if sendToDataNode {
		glog.Info("Collecting Metrics")
		sendDataPoints(dataPoints, dnURL)
	}
}
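getAliasSafely is another project helper; judging by its name and the Aliases slice it receives, it presumably guards against containers that have no aliases, e.g. (hypothetical implementation):

func getAliasSafely(aliases []string) string {
	if len(aliases) > 0 {
		return aliases[0]
	}
	return "" // container has no registered alias
}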
Example 5
func cpuTotalUsage() {
	staticClient, err := client.NewClient("http://localhost:8080/")
	if err != nil {
		log.Fatalf("CpuUsage monitor tried to make client and got error %v", err)
		return
	}

	//exec a shell on the host machine to get a docker container id
	cmd := exec.Command("/bin/sh", "-c", "docker ps -q")
	short_id, err := cmd.Output()
	if err != nil {
		log.Fatalf("CpuUsage monitor failed to run docker ps: %v", err)
	}

	// Assumes a single running container; TrimSpace drops the trailing newline.
	cmd = exec.Command("/bin/sh", "-c", "docker inspect -f '{{.Id}}' "+strings.TrimSpace(string(short_id)))
	full_id, err := cmd.Output()
	if err != nil {
		log.Fatalf("CpuUsage monitor failed to run docker inspect: %v", err)
	}
	fmt.Printf("Cpu monitor found the docker container ID %v\n", string(full_id))

	//container names have the form "/docker/<container id>"
	containerName := "/docker/" + strings.TrimSpace(string(full_id))
	query := &info.ContainerInfoRequest{}

	//get the ContainerInfo structure through the client
	cInfo, err := staticClient.ContainerInfo(containerName, query)
	if err != nil {
		log.Fatalf("Cpu monitor get ContainerInfo err %v", err)
	}
	fmt.Printf("Cpu monitor got container info for %v, now reading cpu load\n", cInfo.Name)

	for i := 1; i < len(cInfo.Stats); i++ {
		cur := cInfo.Stats[i]
		fmt.Printf("Container cpu load : %.02f \n", float64(cur.Cpu.LoadAverage)/1000)
	}
}
Example 6
func main() {

	if len(os.Args) < 2 {
		log.Fatalf("command must have one parameter!\n")
	}
	var testingProject = os.Args[1] //"docker"  or  "rkt"
	if testingProject != "docker" && testingProject != "rkt" {
		log.Fatalf("command %v %v is not correct!\n", os.Args[0], os.Args[1])
	}

	var containers []string
	client, err := client.NewClient("http://localhost:8080/")
	if err != nil {
		log.Fatalf("tried to make client and got error %v\n", err)
		return
	}

	switch testingProject {
	case "docker":
		containers, err = adaptor.GetDockerContainers()
	case "rkt":
		containers, err = adaptor.GetRktContainers()
	default:
		return
	}
	if err != nil {
		log.Fatalf("getContainerName fail, error: %v\n", err)
		return
	}
	mInfo, err := client.MachineInfo()
	if err != nil {
		log.Fatalf("MachineInfo fail, error: %v\n", err)
	}

	// collect one CpuUsageInfo per container so the output file holds every
	// container rather than only the last one marshalled
	usages := make([]*CpuUsageInfo, 0, len(containers))
	for _, container := range containers {
		fmt.Printf("container %v's cpu info: \n", container)
		cInfo, err := getContainerInfo(client, container)
		if err != nil {
			fmt.Printf("getContainerInfo fail and got error %v\n", err)
			return
		}
		cpuUsageInfo := new(CpuUsageInfo)
		cpuUsageInfo.ContainerID = cInfo.Name
		if err = getCpu(cInfo, mInfo, cpuUsageInfo); err != nil {
			log.Fatalf("getCpu fail, error: %v\n", err)
		}
		fmt.Println(cpuUsageInfo)
		usages = append(usages, cpuUsageInfo)
	}

	jsonString, err := json.Marshal(usages)
	if err != nil {
		log.Fatalf("convert to json err, error: %v\n", err)
	}
	if err = ioutil.WriteFile("./"+testingProject+"_cpu.json", jsonString, 0666); err != nil {
		log.Fatalf("ioutil.WriteFile fail, error: %v\n", err)
	}

}
Example 7
// Gets a client to the cAdvisor being tested.
func (self *realFramework) Client() *client.Client {
	if self.cadvisorClient == nil {
		cadvisorClient, err := client.NewClient(self.Hostname().FullHostname())
		if err != nil {
			self.t.Fatalf("Failed to instantiate the cAdvisor client: %v", err)
		}
		self.cadvisorClient = cadvisorClient
	}
	return self.cadvisorClient
}
Example 8
// TODO: move this into the kubelet itself
func MonitorCAdvisor(k *Kubelet, cp uint) {
	defer util.HandleCrash()
	// TODO: Monitor this connection, reconnect if needed?
	glog.V(1).Infof("Trying to create cadvisor client.")
	cadvisorClient, err := cadvisor.NewClient("http://127.0.0.1:" + strconv.Itoa(int(cp)))
	if err != nil {
		glog.Errorf("Error on creating cadvisor client: %v", err)
		return
	}
	glog.V(1).Infof("Successfully created cadvisor client.")
	k.SetCadvisorClient(cadvisorClient)
}
Example 9
// NewCAdvisor creates an instance of CAdvisor
func NewCAdvisor(url string, numstats int) (*CAdvisor, error) {
	client, err := client.NewClient(url)
	if err != nil {
		return nil, err
	}
	log.Printf("[INFO] [cadvisor] Create cAdvisor client using: %s", url)
	return &CAdvisor{
		Client:   client,
		NumStats: numstats,
		Query:    &info.ContainerInfoRequest{NumStats: numstats},
	}, nil
}
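A usage sketch for the constructor above; the URL and stats count are placeholders:

ca, err := NewCAdvisor("http://localhost:8080/", 60)
if err != nil {
	log.Fatalf("[ERROR] [cadvisor] %v", err)
}
// Query is preset with NumStats, so callers can reuse it for every lookup.
rootInfo, err := ca.Client.ContainerInfo("/", ca.Query)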
Example 10
func (self *cadvisorSource) GetAllContainers(host Host, start, end time.Time) (subcontainers []*api.Container, root *api.Container, err error) {
	url := fmt.Sprintf("http://%s:%d/", host.IP, host.Port)
	client, err := cadvisorClient.NewClient(url)
	if err != nil {
		return
	}
	subcontainers, root, err = self.getAllContainers(client, start, end)
	if err != nil {
		glog.Errorf("failed to get stats from cadvisor %q - %v\n", url, err)
	}
	return
}
Example 11
func main() {

	if len(os.Args) < 2 {
		fmt.Printf("command must have one parameter!\n")
		return
	}
	var testingProject = os.Args[1] //"docker"  or  "rkt"
	if testingProject != "docker" && testingProject != "rkt" {
		fmt.Printf("command %v %v is not correct!\n", os.Args[0], os.Args[1])
		return
	}

	var containers []string
	client, err := client.NewClient("http://localhost:8080/")
	if err != nil {
		fmt.Printf("tried to make client and got error %v\n", err)
		return
	}

	switch testingProject {
	case "docker":
		containers, err = adaptor.GetDockerContainers()
	case "rkt":
		containers, err = adaptor.GetRktContainers()
	default:
		return
	}
	if err != nil {
		fmt.Printf("getContainerName fail, error: %v\n", err)
		return
	}

	var jsonString string = ""
	for _, container := range containers {
		//fmt.Printf("container %v's memory info: \n", container)
		cinfo, err := getContainerInfo(client, container)
		if err != nil {
			fmt.Printf("getContainerInfo fail and got error %v\n", err)
			return
		}
		temp, err := getMemory(cinfo)
		if err != nil {
			fmt.Printf("getMemory failed, error: %v\n", err)
		}
		// note: this appends one JSON document per container, so the output
		// file is a stream of objects rather than a single JSON value
		jsonString = jsonString + temp
	}

	err = ioutil.WriteFile("./"+testingProject+"_memory.json", []byte(jsonString), 0666)
	if err != nil {
		fmt.Printf("ioutil.WriteFile faile, error: %v\n", err)
	}
}
Example 12
func startCadvisorCollector(c *conf.Conf) {
	for _, config := range c.Cadvisor {
		cClient, err := client.NewClient(config.URL)
		if err != nil {
			slog.Warningf("Could not start collector for URL [%s] due to err: %v", config.URL, err)
			continue
		}
		// copy the loop variable so each closure captures its own config
		// rather than the shared iteration variable
		config := config
		collectors = append(collectors, &IntervalCollector{
			F: func() (opentsdb.MultiDataPoint, error) {
				return c_cadvisor(cClient, &config)
			},
			name: "cadvisor",
		})
	}
}
Example 13
// Get node information from cAdvisor.
func (self *CadvisorSource) GetMachineInfo(host Host) (machineInfo *cadvisor.MachineInfo, err error) {
	url := fmt.Sprintf("http://%s:%d/", host.IP, host.Port)
	client, err := cadvisorClient.NewClient(url)
	if err != nil {
		glog.Errorf("Failed to create cAdvisor client: %s", err)
		return nil, fmt.Errorf("Failed to create cAdvisor client: %s", err)
	}
	machineInfo, err = client.MachineInfo()
	if err != nil {
		glog.Errorf("failed to get stats from cadvisor %q - %v\n", url, err)
		return nil, fmt.Errorf("failed to get stats from cadvisor %q - %v\n", url, err)
	}
	return
}
Example 14
// Build list of work
func (swc *scrapWorkCache) buildWorkList(URLList []string) {
	for _, serverURL := range URLList {
		cadvisorClient, localERR := client.NewClient(serverURL)
		if localERR != nil {
			glog.Errorf("Failed connect to server: %v\n", localERR)
			continue
		}

		swc.addWork(&scrapWork2{
			serverURL:  serverURL,
			collector:  NewCadvisorCollector(newCadvisorInfoProvider(cadvisorClient), nameToLabel),
			chRecvOnly: make(chan datapoint.Datapoint),
		})
	}
}
Example 15
func staticClientExample() {
	staticClient, err := client.NewClient("http://localhost:8080/")
	if err != nil {
		glog.Errorf("tried to make client and got error %v", err)
		return
	}
	einfo, err := staticClient.EventStaticInfo("?oom_events=true")
	if err != nil {
		glog.Errorf("got error retrieving event info: %v", err)
		return
	}
	for idx, ev := range einfo {
		glog.Infof("static einfo %v: %v", idx, ev)
	}
}
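EventStaticInfo takes the raw query string for cAdvisor's events endpoint; the first example on this page builds the same kind of string with all_events, subcontainers, start_time, and end_time. Other event-type filters follow the same pattern, for instance (assuming a cAdvisor version that exposes creation events):

einfo, err := staticClient.EventStaticInfo("?creation_events=true&subcontainers=true")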
Example 16
func main() {
	client, err := client.NewClient("http://localhost:8080/")
	if err != nil {
		fmt.Printf("tried to make client and got error %v", err)
		return
	}

	cinfos, err := GetAllContainer(client)
	if err != nil {
		fmt.Printf("tried to SubcontainersInfo and got error %v", err)
		return
	}
	GetContainerMemoryUsage(cinfos)
	fmt.Printf("\n")
	GetContainerMemoryWorkingSet(cinfos)

}
Example 17
func cpuOverallUsage() {
	staticClient, err := client.NewClient("http://localhost:8080/")
	if err != nil {
		log.Fatalf("Cpu monitor tried to make client and got error %v", err)
		return
	}

	//exec a shell on the host machine to get a docker container id
	cmd := exec.Command("/bin/sh", "-c", "docker ps -q")
	short_id, err := cmd.Output()
	if err != nil {
		log.Fatalf("Cpu monitor failed to run docker ps: %v", err)
	}

	// Assumes a single running container; TrimSpace drops the trailing newline.
	cmd = exec.Command("/bin/sh", "-c", "docker inspect -f '{{.Id}}' "+strings.TrimSpace(string(short_id)))
	full_id, err := cmd.Output()
	if err != nil {
		log.Fatalf("Cpu monitor failed to run docker inspect: %v", err)
	}
	fmt.Printf("Cpu monitor found the docker container ID %v\n", string(full_id))

	//container names have the form "/docker/<container id>"
	containerName := "/docker/" + strings.TrimSpace(string(full_id))
	query := &info.ContainerInfoRequest{}

	//get the ContainerInfo structure through the client
	cInfo, err := staticClient.ContainerInfo(containerName, query)
	if err != nil {
		log.Fatalf("Cpu monitor get ContainerInfo err %v", err)
	}

	mInfo, err := staticClient.MachineInfo()
	if err != nil {
		log.Fatalf("Cpu monitor try to get MachineInfo and got err %v", err)
		return
	}

	fmt.Println("Cpu monitor get container Info container name: %v ,now start get overall cpu usage", cInfo.Name)

	if len(cInfo.Stats) >= 2 {
		// guard first: indexing into Stats panics when fewer than two samples exist
		cur := cInfo.Stats[len(cInfo.Stats)-1]
		prev := cInfo.Stats[len(cInfo.Stats)-2]
		rawUsage := float64(cur.Cpu.Usage.Total - prev.Cpu.Usage.Total)
		intervalInNs := float64((cur.Timestamp).Sub(prev.Timestamp).Nanoseconds())
		cpuUsage := ((rawUsage / intervalInNs) / float64(mInfo.NumCores)) * 100
		fmt.Printf("cpuUsage %.02f \n", cpuUsage)
	}

}
Example 18
func streamingClientExample(url string) {
	streamingClient, err := client.NewClient("http://localhost:8080/")
	if err != nil {
		glog.Errorf("tried to make client and got error %v", err)
		return
	}
	einfo := make(chan *info.Event)
	go func() {
		// shadow err locally; assigning the outer err from this goroutine
		// would be a data race
		if err := streamingClient.EventStreamingInfo(url, einfo); err != nil {
			glog.Errorf("got error retrieving event info: %v", err)
			return
		}
	}()
	for ev := range einfo {
		glog.Infof("streaming einfo: %v\n", ev)
	}
}
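Note that the url argument is the query string for the streaming events endpoint, not a server address. A caller would pass something like the following (parameter names as used by cAdvisor's event API):

streamingClientExample("?creation_events=true&stream=true")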
Example 19
func TestBasicCadvisor(t *testing.T) {
	response := []cadvisor_api.ContainerInfo{}
	data, err := json.Marshal(&response)
	require.NoError(t, err)
	handler := util.FakeHandler{
		StatusCode:   200,
		RequestBody:  "",
		ResponseBody: string(data),
		T:            t,
	}
	server := httptest.NewServer(&handler)
	defer server.Close()
	cadvisorClient, err := client.NewClient(server.URL)
	require.NoError(t, err)
	cadvisorSource := &cadvisorSource{}
	subcontainer, root, err := cadvisorSource.getAllContainers(cadvisorClient, time.Now(), time.Now().Add(time.Minute), time.Second, false)
	require.NoError(t, err)
	assert.Len(t, subcontainer, 0)
	assert.Nil(t, root)
}
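FakeHandler comes from the kubernetes test utilities; the same canned-response technique needs nothing beyond the standard library, for instance:

// Serve an empty container list and point a cAdvisor client at it.
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("[]"))
}))
defer server.Close()
cadvisorClient, err := client.NewClient(server.URL)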
Example 20
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	verflag.PrintAndExitIfRequested()

	if *runonce {
		exclusiveFlag := "invalid option: --runonce and %s are mutually exclusive"
		if len(etcdServerList) > 0 {
			glog.Fatalf(exclusiveFlag, "--etcd_servers")
		}
		if *enableServer {
			glog.Infof("--runonce is set, disabling server")
			*enableServer = false
		}
	}

	etcd.SetLogger(util.NewLogger("etcd "))

	// Make an API client if possible.
	if len(apiServerList) < 1 {
		glog.Info("No api servers specified.")
	} else {
		if apiClient, err := getApiserverClient(); err != nil {
			glog.Errorf("Unable to make apiserver client: %v", err)
		} else {
			// Send events to APIserver if there is a client.
			glog.Infof("Sending events to APIserver.")
			record.StartRecording(apiClient.Events(""), "kubelet")
		}
	}

	// Log the events locally too.
	record.StartLogging(glog.Infof)

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: *allowPrivileged,
	})

	dockerClient, err := docker.NewClient(getDockerEndpoint())
	if err != nil {
		glog.Fatal("Couldn't connect to docker.")
	}

	hostname := getHostname()

	if *rootDirectory == "" {
		glog.Fatal("Invalid root directory path.")
	}
	*rootDirectory = path.Clean(*rootDirectory)
	if err := os.MkdirAll(*rootDirectory, 0750); err != nil {
		glog.Fatalf("Error creating root directory: %v", err)
	}

	// source of all configuration
	cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)

	// define file config source
	if *config != "" {
		kconfig.NewSourceFile(*config, *fileCheckFrequency, cfg.Channel("file"))
	}

	// define url config source
	if *manifestURL != "" {
		kconfig.NewSourceURL(*manifestURL, *httpCheckFrequency, cfg.Channel("http"))
	}

	// define etcd config source and initialize etcd client
	var etcdClient *etcd.Client
	if len(etcdServerList) > 0 {
		etcdClient = etcd.NewClient(etcdServerList)
	} else if *etcdConfigFile != "" {
		var err error
		etcdClient, err = etcd.NewClientFromFile(*etcdConfigFile)
		if err != nil {
			glog.Fatalf("Error with etcd config file: %v", err)
		}
	}

	if etcdClient != nil {
		glog.Infof("Watching for etcd configs at %v", etcdClient.GetCluster())
		kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(hostname), etcdClient, cfg.Channel("etcd"))
	}

	// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
	// up into "per source" synchronizations

	k := kubelet.NewMainKubelet(
		getHostname(),
		dockerClient,
		etcdClient,
		*rootDirectory,
		*networkContainerImage,
		*syncFrequency,
		float32(*registryPullQPS),
		*registryBurst,
		*minimumGCAge,
		*maxContainerCount)

	k.BirthCry()

	go func() {
		util.Forever(func() {
			err := k.GarbageCollectContainers()
			if err != nil {
				glog.Errorf("Garbage collect failed: %v", err)
			}
		}, time.Minute*1)
	}()

	go func() {
		defer util.HandleCrash()
		// TODO: Monitor this connection, reconnect if needed?
		glog.V(1).Infof("Trying to create cadvisor client.")
		cadvisorClient, err := cadvisor.NewClient("http://127.0.0.1:4194")
		if err != nil {
			glog.Errorf("Error on creating cadvisor client: %v", err)
			return
		}
		glog.V(1).Infof("Successfully created cadvisor client.")
		k.SetCadvisorClient(cadvisorClient)
	}()

	// TODO: These should probably become more plugin-ish: register a factory func
	// in each checker's init(), iterate those here.
	health.AddHealthChecker(health.NewExecHealthChecker(k))
	health.AddHealthChecker(health.NewHTTPHealthChecker(&http.Client{}))
	health.AddHealthChecker(&health.TCPHealthChecker{})

	// process pods and exit.
	if *runonce {
		if _, err := k.RunOnce(cfg.Updates()); err != nil {
			glog.Fatalf("--runonce failed: %v", err)
		}
		return
	}

	// start the kubelet
	go util.Forever(func() { k.Run(cfg.Updates()) }, 0)

	// start the kubelet server
	if *enableServer {
		go util.Forever(func() {
			kubelet.ListenAndServeKubeletServer(k, cfg.Channel("http"), net.IP(address), *port, *enableDebuggingHandlers)
		}, 0)
	}

	// runs forever
	select {}
}
Example 21
func (s *HostStatsHandler) Handle(key string, initialMessage string, incomingMessages <-chan string, response chan<- common.Message) {
	defer backend.SignalHandlerClosed(key, response)

	c, err := client.NewClient(config.Config.CAdvisorUrl)
	if err != nil {
		log.WithFields(log.Fields{"error": err}).Error("Couldn't get CAdvisor client.")
		return
	}

	requestUrl, err := url.Parse(initialMessage)
	if err != nil {
		log.WithFields(log.Fields{"error": err, "message": initialMessage}).Error("Couldn't parse url from message.")
		return
	}

	tokenString := requestUrl.Query().Get("token")

	resourceId := ""

	token, err := parseRequestToken(tokenString, config.Config.ParsedPublicKey)
	if err == nil {
		resourceIdInterface, found := token.Claims["resourceId"]
		if found {
			resourceIdVal, ok := resourceIdInterface.(string)
			if ok {
				resourceId = resourceIdVal
			}
		}
	}

	reader, writer := io.Pipe()

	go func(w *io.PipeWriter) {
		for {
			_, ok := <-incomingMessages
			if !ok {
				w.Close()
				return
			}
		}
	}(writer)

	go func(r *io.PipeReader) {
		scanner := bufio.NewScanner(r)
		for scanner.Scan() {
			text := scanner.Text()
			message := common.Message{
				Key:  key,
				Type: common.Body,
				Body: text,
			}
			response <- message
		}
		if err := scanner.Err(); err != nil {
			log.WithFields(log.Fields{"error": err}).Error("Error with the container stat scanner.")
		}
	}(reader)

	count := config.Config.NumStats

	for {
		machineInfo, err := c.MachineInfo()
		if err != nil {
			log.WithFields(log.Fields{"error": err}).Error("Error getting machine info.")
			return
		}

		memLimit := machineInfo.MemoryCapacity

		infos := []info.ContainerInfo{}

		cInfo, err := c.ContainerInfo("", &info.ContainerInfoRequest{
			NumStats: count,
		})
		if err != nil {
			return
		}

		infos = append(infos, *cInfo)

		err = writeAggregatedStats(resourceId, nil, "host", infos, uint64(memLimit), writer)
		if err != nil {
			return
		}

		time.Sleep(1 * time.Second)
		count = 1
	}

}
Example 22
func TestDetailedCadvisor(t *testing.T) {
	rootContainer := api.Container{
		Name: "/",
		Spec: cadvisor_api.ContainerSpec{
			CreationTime: time.Now(),
			HasCpu:       true,
			HasMemory:    true,
		},
		Stats: []*cadvisor_api.ContainerStats{
			{
				Timestamp: time.Now(),
			},
		},
	}
	subContainers := []api.Container{
		{
			Name: "a",
			Spec: cadvisor_api.ContainerSpec{
				CreationTime: time.Now(),
				HasCpu:       true,
				HasMemory:    true,
			},
			Stats: []*cadvisor_api.ContainerStats{
				{
					Timestamp: time.Now(),
				},
			},
		},
		{
			Name: "b",
			Spec: cadvisor_api.ContainerSpec{
				CreationTime: time.Now(),
				HasCpu:       true,
				HasMemory:    true,
			},
			Stats: []*cadvisor_api.ContainerStats{
				{
					Timestamp: time.Now(),
				},
			},
		},
	}

	response := []cadvisor_api.ContainerInfo{
		{
			ContainerReference: cadvisor_api.ContainerReference{
				Name: rootContainer.Name,
			},
			Spec:  rootContainer.Spec,
			Stats: rootContainer.Stats,
		},
	}
	for _, cont := range subContainers {
		response = append(response, cadvisor_api.ContainerInfo{
			ContainerReference: cadvisor_api.ContainerReference{
				Name: cont.Name,
			},
			Spec:  cont.Spec,
			Stats: cont.Stats,
		})
	}
	data, err := json.Marshal(&response)
	require.NoError(t, err)
	handler := util.FakeHandler{
		StatusCode:   200,
		RequestBody:  "",
		ResponseBody: string(data),
		T:            t,
	}
	server := httptest.NewServer(&handler)
	defer server.Close()
	cadvisorClient, err := client.NewClient(server.URL)
	require.NoError(t, err)
	cadvisorSource := &cadvisorSource{}
	subcontainers, root, err := cadvisorSource.getAllContainers(cadvisorClient, time.Now(), time.Now().Add(time.Minute), time.Second, false)
	require.NoError(t, err)
	assert.Len(t, subcontainers, len(subContainers))
	assert.NotNil(t, root)
	assert.True(t, root.Spec.Eq(&rootContainer.Spec))
	for i, stat := range root.Stats {
		assert.True(t, stat.Eq(rootContainer.Stats[i]))
	}
	for i, cont := range subcontainers {
		assert.True(t, subContainers[i].Spec.Eq(&cont.Spec))
		for j, stat := range cont.Stats {
			assert.True(t, subContainers[i].Stats[j].Eq(stat))
		}
	}
}
Example 23
func DockerMetrics(cycle int, cadvisor_addr string) []*types.TimeSeriesData {
	client, err := client.NewClient(cadvisor_addr)
	if err != nil {
		return nil
	}
	request := v1.ContainerInfoRequest{NumStats: 2}
	info, err := client.AllDockerContainers(&request)
	if err != nil {
		return nil
	}
	metrics := []*types.TimeSeriesData{}
	ts := time.Now().Unix()
	for _, i := range info {
		// the rate calculations below need both a current and a previous
		// sample; skip containers that returned fewer than two
		if len(i.Stats) < 2 {
			continue
		}
		// reset per container so one without aliases does not inherit the
		// previous container's name
		cName := "unknown"
		if len(i.Aliases) > 0 {
			cName = i.Aliases[0]
		}
		id := i.Id[:12]
		c := i.Stats[1]
		p := i.Stats[0]
		timeNao := float64(c.Timestamp.Sub(p.Timestamp).Nanoseconds())
		timeSec := c.Timestamp.Sub(p.Timestamp).Seconds()
		metrics = append(metrics,
			&types.TimeSeriesData{
				Metric:    "docker.cpu.load",
				Value:     float64(c.Cpu.LoadAverage),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.cpu.system",
				Value:     float64(c.Cpu.Usage.System-p.Cpu.Usage.System) / timeNao,
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.cpu.user",
				Value:     float64(c.Cpu.Usage.User-p.Cpu.Usage.User) / timeNao,
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.cpu.total",
				Value:     float64(c.Cpu.Usage.Total-p.Cpu.Usage.Total) / timeNao,
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.mem.cache",
				Value:     float64(c.Memory.Cache),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.mem.usage",
				Value:     float64(c.Memory.Usage),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.mem.rss",
				Value:     float64(c.Memory.RSS),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.mem.failcnt",
				Value:     float64(c.Memory.Failcnt),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.mem.workingset",
				Value:     float64(c.Memory.WorkingSet),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.readbytes",
				Value:     float64(c.Network.RxBytes-p.Network.RxBytes) / timeSec,
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "iface": c.Network.Name, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.writebytes",
				Value:     float64(c.Network.TxBytes-p.Network.TxBytes) / timeSec,
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "iface": c.Network.Name, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.readpackets",
				Value:     float64(c.Network.RxPackets-p.Network.RxPackets) / timeSec,
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "iface": c.Network.Name, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.writepackets",
				Value:     float64(c.Network.TxPackets-p.Network.TxPackets) / timeSec,
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "iface": c.Network.Name, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.readerrors",
				Value:     float64(c.Network.RxErrors-p.Network.RxErrors) / timeSec,
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "iface": c.Network.Name, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.writeerrors",
				Value:     float64(c.Network.TxErrors-p.Network.TxErrors) / timeSec,
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "iface": c.Network.Name, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.readdropped",
				Value:     float64(c.Network.RxDropped-p.Network.RxDropped) / timeSec,
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "iface": c.Network.Name, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.writedropped",
				Value:     float64(c.Network.TxDropped-p.Network.TxDropped) / timeSec,
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "iface": c.Network.Name, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.tcp.close",
				Value:     float64(c.Network.Tcp.Close),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.tcp.closewait",
				Value:     float64(c.Network.Tcp.CloseWait),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.tcp.closeing",
				Value:     float64(c.Network.Tcp.Closing),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.tcp.established",
				Value:     float64(c.Network.Tcp.Established),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.tcp.finwait1",
				Value:     float64(c.Network.Tcp.FinWait1),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.tcp.finwait2",
				Value:     float64(c.Network.Tcp.FinWait2),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.tcp.lastack",
				Value:     float64(c.Network.Tcp.LastAck),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.tcp.listen",
				Value:     float64(c.Network.Tcp.Listen),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.tcp.synrecv",
				Value:     float64(c.Network.Tcp.SynRecv),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.tcp.synsent",
				Value:     float64(c.Network.Tcp.SynSent),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
			&types.TimeSeriesData{
				Metric:    "docker.net.tcp.timewait",
				Value:     float64(c.Network.Tcp.TimeWait),
				Cycle:     cycle,
				Timestamp: ts,
				DataType:  "GAUGE",
				Tags:      map[string]string{"name": cName, "id": id},
			},
		)
	}
	return metrics
}
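All the docker.cpu.* values above divide deltas of cumulative nanosecond counters by the elapsed nanoseconds (yielding a fraction of one core), while the docker.net.* rates divide byte and packet deltas by the elapsed seconds. A helper that makes the pattern explicit (illustrative, not from the original code):

// ratePerSec converts two samples of a cumulative counter into a per-second
// rate over the sampling interval.
func ratePerSec(cur, prev uint64, interval time.Duration) float64 {
	return float64(cur-prev) / interval.Seconds()
}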
Example 24
func main() {

	if len(os.Args) < 2 {
		log.Fatalf("command must have one parameter!\n")
	}
	var testingProject = os.Args[1] //"docker"  or  "rkt"
	if testingProject != "docker" && testingProject != "rkt" {
		log.Fatalf("command %v %v is not correct!\n", os.Args[0], os.Args[1])
	}

	var containers []string
	client, err := client.NewClient("http://localhost:8080/")
	if err != nil {
		log.Fatalf("tried to make client and got error %v\n", err)
		return
	}

	switch testingProject {
	case "docker":
		containers, err = adaptor.GetDockerContainers()
	case "rkt":
		containers, err = adaptor.GetRktContainers()
	default:
		return
	}
	if err != nil {
		log.Fatalf("getContainerName fail, error: %v\n", err)
		return
	}

	mInfo, err := client.MachineInfo()
	if err != nil {
		log.Fatalf("MachineInfo fail, error: %v\n", err)
	}
	var jsonString []byte
	for _, container := range containers {
		//Get container info struct from cadvisor client
		cInfo, err := getContainerInfo(client, container)
		if err != nil {
			log.Fatalf("getContainerInfo fail and got error %v\n", err)
			return
		}
		cpuArray := []CpuUsageInfo{}
		cpuUsageInfo := new(CpuUsageInfo)
		cpuUsageInfo.Usage.PerCoreUsage = make(map[string]float64)
		cpuUsageInfo.ContainerID = cInfo.Name

		// Get cpu usage and store  them to result(cpuArray)
		err, result := getCpu(cInfo, mInfo, cpuUsageInfo, cpuArray)
		if err != nil {
			log.Fatalf("Get cpuusage err, error:  %v\n", err)
			return
		}

		// Convert to JSON. Note that jsonString is overwritten on every
		// iteration, so only the last container's data reaches the file below.
		jsonString, err = json.Marshal(result)
		if err != nil {
			log.Fatalf("convert to json err, error:  %v\n", err)
		}

	}

	//Output to the docker_cpu.json (or rkt_cpu.json) file, checking the write error
	err = ioutil.WriteFile("./"+testingProject+"_cpu.json", jsonString, 0666)
	if err != nil {
		log.Fatalf("ioutil.WriteFile fail, error: %v\n", err)
	}

}
Example 25
func (p *prometheusScraper) main(paramDataSendRate, paramNodeServiceDiscoveryRate time.Duration) (err error) {

	kubeClient, err := newKubeClient(p.cfg)
	if err != nil {
		return err
	}

	podToServiceMap := updateServices(kubeClient)
	hostIPtoNameMap, nodeIPs := updateNodes(kubeClient, p.cfg.CadvisorPort)
	p.cfg.CadvisorURL = nodeIPs

	cadvisorServers := make([]*url.URL, len(p.cfg.CadvisorURL))
	for i, serverURL := range p.cfg.CadvisorURL {
		cadvisorServers[i], err = url.Parse(serverURL)
		if err != nil {
			return err
		}
	}

	printVersion()
	cfg, _ := json.MarshalIndent(p.cfg, "", "  ")
	glog.Infof("Scrapper started with following params:\n%v\n", string(cfg))

	scrapWorkCache := newScrapWorkCache(p.cfg, p.forwarder)
	stop := make(chan error, 1)

	scrapWorkCache.setPodToServiceMap(podToServiceMap)
	scrapWorkCache.setHostIPtoNameMap(hostIPtoNameMap)

	scrapWorkCache.buildWorkList(p.cfg.CadvisorURL)

	// Wait on channel input and forward datapoints to SignalFx
	go func() {
		scrapWorkCache.waitAndForward()                // Blocking call!
		stop <- errors.New("all channels were closed") // Stop all timers
	}()

	workPool := workpool.New(runtime.NumCPU(), int32(len(p.cfg.CadvisorURL)+1))

	// Collect data from nodes
	scrapWorkTicker := time.NewTicker(paramDataSendRate)
	go func() {
		for range scrapWorkTicker.C {

			scrapWorkCache.foreachWork(func(i int, w *scrapWork2) bool {
				workPool.PostWork("CollectDataWork", w)
				return true
			})
		}
	}()

	// New nodes and services discovery
	updateNodeAndPodTimer := time.NewTicker(paramNodeServiceDiscoveryRate)
	go func() {

		for range updateNodeAndPodTimer.C {

			podMap := updateServices(kubeClient)
			hostMap, _ := updateNodes(kubeClient, p.cfg.CadvisorPort)

			hostMapCopy := make(map[string]kubeAPI.Node)
			for k, v := range hostMap {
				hostMapCopy[k] = v
			}

			// Remove known nodes
			scrapWorkCache.foreachWork(func(i int, w *scrapWork2) bool {
				delete(hostMapCopy, w.serverURL)
				return true
			})

			if len(hostMapCopy) != 0 {
				scrapWorkCache.setHostIPtoNameMap(hostMap)

				// Add new(remaining) nodes to monitoring
				for serverURL := range hostMapCopy {
					cadvisorClient, localERR := client.NewClient(serverURL)
					if localERR != nil {
						glog.Errorf("Failed connect to server: %v\n", localERR)
						continue
					}

					scrapWorkCache.addWork(&scrapWork2{
						serverURL:  serverURL,
						collector:  NewCadvisorCollector(newCadvisorInfoProvider(cadvisorClient), nameToLabel),
						chRecvOnly: make(chan datapoint.Datapoint),
					})
				}
			}

			scrapWorkCache.setPodToServiceMap(podMap)
		}
	}()

	err = <-stop // Block here till stopped

	updateNodeAndPodTimer.Stop()
	scrapWorkTicker.Stop()

	return
}
Example 26
func (s *ContainerStatsHandler) Handle(key string, initialMessage string, incomingMessages <-chan string, response chan<- common.Message) {
	defer backend.SignalHandlerClosed(key, response)

	requestUrl, err := url.Parse(initialMessage)
	if err != nil {
		log.WithFields(log.Fields{"error": err, "message": initialMessage}).Error("Couldn't parse url from message.")
		return
	}

	tokenString := requestUrl.Query().Get("token")

	containerIds := map[string]string{}

	token, err := parseRequestToken(tokenString, config.Config.ParsedPublicKey)
	if err == nil {
		containerIdsInterface, found := token.Claims["containerIds"]
		if found {
			containerIdsVal, ok := containerIdsInterface.(map[string]interface{})
			if ok {
				for key, val := range containerIdsVal {
					if containerIdsValString, ok := val.(string); ok {
						containerIds[key] = containerIdsValString
					}
				}
			}
		}
	}

	id := ""
	parts := pathParts(requestUrl.Path)
	if len(parts) == 3 {
		id = parts[2]
	}

	container, err := resolveContainer(id)
	if err != nil {
		log.WithFields(log.Fields{"id": id, "error": err}).Error("Couldn't find container for id.")
		return
	}

	c, err := client.NewClient(config.Config.CAdvisorUrl)
	if err != nil {
		log.WithFields(log.Fields{"error": err}).Error("Couldn't get CAdvisor client.")
		return
	}

	reader, writer := io.Pipe()

	go func(w *io.PipeWriter) {
		for {
			_, ok := <-incomingMessages
			if !ok {
				w.Close()
				return
			}
		}
	}(writer)

	go func(r *io.PipeReader) {
		scanner := bufio.NewScanner(r)
		for scanner.Scan() {
			text := scanner.Text()
			message := common.Message{
				Key:  key,
				Type: common.Body,
				Body: text,
			}
			response <- message
		}
		if err := scanner.Err(); err != nil {
			log.WithFields(log.Fields{"error": err}).Error("Error with the container stat scanner.")
		}
	}(reader)

	count := config.Config.NumStats

	for {
		machineInfo, err := c.MachineInfo()
		if err != nil {
			log.WithFields(log.Fields{"error": err}).Error("Error getting machine info.")
			return
		}

		memLimit := machineInfo.MemoryCapacity

		infos := []info.ContainerInfo{}

		if container != "" {
			cInfo, err := c.ContainerInfo(container, &info.ContainerInfoRequest{
				NumStats: count,
			})
			if err != nil {
				return
			}
			infos = append(infos, *cInfo)
		} else {
			cInfos, err := c.AllDockerContainers(&info.ContainerInfoRequest{
				NumStats: count,
			})
			if err != nil {
				return
			}
			infos = append(infos, cInfos...)
		}

		err = writeAggregatedStats(id, containerIds, "container", infos, uint64(memLimit), writer)
		if err != nil {
			return
		}

		time.Sleep(1 * time.Second)
		count = 1
	}

}
Example 27
func (s *StatsHandler) Handle(key string, initialMessage string, incomingMessages <-chan string, response chan<- common.Message) {
	defer backend.SignalHandlerClosed(key, response)

	requestUrl, err := url.Parse(initialMessage)
	if err != nil {
		log.WithFields(log.Fields{"error": err, "message": initialMessage}).Error("Couldn't parse url from message.")
		return
	}

	id := ""
	parts := pathParts(requestUrl.Path)
	if len(parts) == 3 {
		id = parts[2]
	}

	container, err := resolveContainer(id)
	if err != nil {
		log.WithFields(log.Fields{"id": id, "error": err}).Error("Couldn't find container for id.")
		return
	}

	c, err := client.NewClient(config.Config.CAdvisorUrl)
	if err != nil {
		log.WithFields(log.Fields{"error": err}).Error("Couldn't get CAdvisor client.")
		return
	}

	reader, writer := io.Pipe()

	go func(w *io.PipeWriter) {
		for {
			_, ok := <-incomingMessages
			if !ok {
				w.Close()
				return
			}
		}
	}(writer)

	go func(r *io.PipeReader) {
		scanner := bufio.NewScanner(r)
		for scanner.Scan() {
			text := scanner.Text()
			message := common.Message{
				Key:  key,
				Type: common.Body,
				Body: text,
			}
			response <- message
		}
		if err := scanner.Err(); err != nil {
			log.WithFields(log.Fields{"error": err}).Error("Error with the container stat scanner.")
		}
	}(reader)

	count := config.Config.NumStats

	for {
		machineInfo, err := c.MachineInfo()
		if err != nil {
			log.WithFields(log.Fields{"error": err}).Error("Error getting machine info.")
			return
		}

		memLimit := machineInfo.MemoryCapacity

		// the variable is named cInfo so it does not shadow the info package
		cInfo, err := c.ContainerInfo(container, &info.ContainerInfoRequest{
			NumStats: count,
		})
		if err != nil {
			return
		}

		err = writeStats(cInfo, memLimit, writer)
		if err != nil {
			return
		}

		time.Sleep(1 * time.Second)
		count = 1
	}
}