Code example #1
File: artifacts.go  Project: docker-slim/docker-slim
func (p *artifactStore) saveReport() {
	sort.Strings(p.nameList)

	creport := report.ContainerReport{
		Monitors: report.MonitorReports{
			Pt:  p.ptMonReport,
			Fan: p.fanMonReport,
		},
	}

	for _, fname := range p.nameList {
		creport.Image.Files = append(creport.Image.Files, p.rawNames[fname])
	}

	artifactDirName := defaultArtifactDirName
	reportName := defaultReportName

	_, err := os.Stat(artifactDirName)
	if os.IsNotExist(err) {
		os.MkdirAll(artifactDirName, 0777)
		_, err = os.Stat(artifactDirName)
		utils.FailOn(err)
	}

	reportFilePath := filepath.Join(artifactDirName, reportName)
	log.Debug("sensor: monitor - saving report to ", reportFilePath)

	reportData, err := json.MarshalIndent(creport, "", "  ")
	utils.FailOn(err)

	err = ioutil.WriteFile(reportFilePath, reportData, 0644)
	utils.FailOn(err)
}
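
The saveReport flow above is: ensure the artifact directory exists, marshal the report to indented JSON, and write it out. A minimal self-contained sketch of the same pattern, with a stand-in struct instead of the real report.ContainerReport (checking MkdirAll's error directly also removes the need for the stat-after-mkdir dance):

package main

import (
	"encoding/json"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

// toyReport stands in for report.ContainerReport.
type toyReport struct {
	Files []string `json:"files"`
}

func main() {
	dir := "artifacts"
	// MkdirAll is a no-op when the directory already exists.
	if err := os.MkdirAll(dir, 0777); err != nil {
		log.Fatal(err)
	}

	data, err := json.MarshalIndent(toyReport{Files: []string{"/bin/sh"}}, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	if err := ioutil.WriteFile(filepath.Join(dir, "creport.json"), data, 0644); err != nil {
		log.Fatal(err)
	}
}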
Code example #2
File: info.go  Project: docker-slim/docker-slim
func OnInfo(statePath string, clientConfig *config.DockerClient, imageRef string) {
	fmt.Println("docker-slim: [info] image=", imageRef)

	client := dockerclient.New(clientConfig)

	imageInspector, err := image.NewInspector(client, imageRef)
	utils.FailOn(err)

	log.Info("docker-slim: inspecting 'fat' image metadata...")
	err = imageInspector.Inspect()
	utils.FailOn(err)

	_, artifactLocation := utils.PrepareSlimDirs(statePath, imageInspector.ImageInfo.ID)
	imageInspector.ArtifactLocation = artifactLocation

	log.Infof("docker-slim: [%v] 'fat' image size => %v (%v)\n",
		imageInspector.ImageInfo.ID,
		imageInspector.ImageInfo.VirtualSize,
		humanize.Bytes(uint64(imageInspector.ImageInfo.VirtualSize)))

	log.Info("docker-slim: processing 'fat' image info...")
	err = imageInspector.ProcessCollectedData()
	utils.FailOn(err)

	fmt.Println("docker-slim: [info] done.")
}
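
The humanize.Bytes call above comes from github.com/dustin/go-humanize and turns a raw byte count into a readable size string. A quick illustration:

package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.Bytes(82854982))  // "83 MB" (decimal, SI units)
	fmt.Println(humanize.IBytes(82854982)) // "79 MiB" (binary, IEC units)
}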
Code example #3
func (i *Inspector) ProcessCollectedData() error {
	i.processImageName()

	fatImageDockerInstructions, err := dockerfile.ReverseDockerfileFromHistory(i.ApiClient, i.ImageRef)
	if err != nil {
		return err
	}
	fatImageDockerfileLocation := filepath.Join(i.ArtifactLocation, "Dockerfile.fat")
	err = dockerfile.SaveDockerfileData(fatImageDockerfileLocation, fatImageDockerInstructions)
	utils.FailOn(err)

	return nil
}
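
Note that ProcessCollectedData mixes two error styles: the history call returns its error to the caller, while the save call goes through utils.FailOn and aborts the process. If a consistent error contract were preferred, the save step could return as well; a sketch (using fmt.Errorf):

	if err := dockerfile.SaveDockerfileData(fatImageDockerfileLocation, fatImageDockerInstructions); err != nil {
		return fmt.Errorf("docker-slim: error saving Dockerfile.fat: %v", err)
	}
	return nil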
Code example #4
File: monitor.go  Project: docker-slim/docker-slim
func Run(stopChan chan struct{}) <-chan *report.PeMonitorReport {
	log.Info("pemon: starting...")

	//"connection refused" with boot2docker...
	watcher, err := pdiscover.NewAllWatcher(pdiscover.PROC_EVENT_ALL)
	utils.FailOn(err)

	reportChan := make(chan *report.PeMonitorReport, 1)

	go func() {
		peReport := &report.PeMonitorReport{
			Children: make(map[int][]int),
			Parents:  make(map[int]int),
		}

	done:
		for {
			select {
			case <-stopChan:
				log.Info("pemon: stopping...")
				break done
			case ev := <-watcher.Fork:
				peReport.Children[ev.ParentPid] = append(peReport.Children[ev.ParentPid], ev.ChildPid)
				peReport.Parents[ev.ChildPid] = ev.ParentPid
			case <-watcher.Exec:
			case <-watcher.Exit:
			case err := <-watcher.Error:
				utils.FailOn(err)
			}
		}

		reportChan <- peReport
		watcher.Close()
	}()

	return reportChan
}
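
Run hands back a buffered report channel and does the watching in a goroutine, so a caller only needs to drive the stop channel and then block on the result. A minimal caller sketch (the pemon package alias is an assumption):

stopChan := make(chan struct{})
reportChan := pemon.Run(stopChan)

// ... let the monitored workload run ...

close(stopChan)          // any receive on stopChan unblocks the monitor loop
peReport := <-reportChan // blocks until the final report is sent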
Code example #5
File: profile.go  Project: docker-slim/docker-slim
func OnProfile(doDebug bool,
	statePath string,
	clientConfig *config.DockerClient,
	imageRef string,
	doHttpProbe bool,
	httpProbeCmds []config.HttpProbeCmd,
	doShowContainerLogs bool,
	overrides *config.ContainerOverrides,
	volumeMounts map[string]config.VolumeMount,
	excludePaths map[string]bool,
	includePaths map[string]bool,
	continueAfter *config.ContinueAfter) {
	fmt.Printf("docker-slim: [profile] image=%v\n", imageRef)
	doRmFileArtifacts := false

	client := dockerclient.New(clientConfig)

	imageInspector, err := image.NewInspector(client, imageRef)
	utils.FailOn(err)

	log.Info("docker-slim: inspecting 'fat' image metadata...")
	err = imageInspector.Inspect()
	utils.FailOn(err)

	localVolumePath, artifactLocation := utils.PrepareSlimDirs(statePath, imageInspector.ImageInfo.ID)
	imageInspector.ArtifactLocation = artifactLocation

	log.Infof("docker-slim: [%v] 'fat' image size => %v (%v)\n",
		imageInspector.ImageInfo.ID,
		imageInspector.ImageInfo.VirtualSize,
		humanize.Bytes(uint64(imageInspector.ImageInfo.VirtualSize)))

	log.Info("docker-slim: processing 'fat' image info...")
	err = imageInspector.ProcessCollectedData()
	utils.FailOn(err)

	containerInspector, err := container.NewInspector(client,
		imageInspector,
		localVolumePath,
		overrides,
		doShowContainerLogs,
		volumeMounts,
		excludePaths,
		includePaths,
		doDebug)
	utils.FailOn(err)

	log.Info("docker-slim: starting instrumented 'fat' container...")
	err = containerInspector.RunContainer()
	utils.FailOn(err)

	log.Info("docker-slim: watching container monitor...")

	if "probe" == continueAfter.Mode {
		doHttpProbe = true
	}

	if doHttpProbe {
		probe, err := http.NewCustomProbe(containerInspector, httpProbeCmds)
		utils.FailOn(err)
		probe.Start()
		continueAfter.ContinueChan = probe.DoneChan()
	}

	switch continueAfter.Mode {
	case "enter":
		fmt.Println("docker-slim: press <enter> when you are done using the container...")
		creader := bufio.NewReader(os.Stdin)
		_, _, _ = creader.ReadLine()
	case "signal":
		fmt.Println("docker-slim: send SIGUSR1 when you are done using the container...")
		<-continueAfter.ContinueChan
		fmt.Println("docker-slim: got SIGUSR1...")
	case "timeout":
		fmt.Printf("docker-slim: waiting for the target container (%v seconds)...\n", int(continueAfter.Timeout))
		<-time.After(time.Second * continueAfter.Timeout)
		fmt.Printf("docker-slim: done waiting for the target container...")
	case "probe":
		fmt.Println("docker-slim: waiting for the HTTP probe to finish...")
		<-continueAfter.ContinueChan
		fmt.Println("docker-slim: HTTP probe is done...")
	default:
		utils.Fail("unknown continue-after mode")
	}

	containerInspector.FinishMonitoring()

	log.Info("docker-slim: shutting down 'fat' container...")
	err = containerInspector.ShutdownContainer()
	utils.WarnOn(err)

	log.Info("docker-slim: processing instrumented 'fat' container info...")
	err = containerInspector.ProcessCollectedData()
	utils.FailOn(err)

	if doRmFileArtifacts {
		log.Info("docker-slim: removing temporary artifacts...")
		err = utils.RemoveArtifacts(artifactLocation) //TODO: remove only the "files" subdirectory
		utils.WarnOn(err)
	}

	fmt.Println("docker-slim: [profile] done.")
}
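
The continue-after modes above decide when profiling stops. Judging from the int(continueAfter.Timeout) and time.Second * continueAfter.Timeout expressions, Timeout stores a bare seconds count. A hedged sketch of a timeout-mode value (field types inferred from this usage, not confirmed):

continueAfter := &config.ContinueAfter{
	Mode:    "timeout",
	Timeout: 60, // OnProfile multiplies this by time.Second, so it means 60 seconds
}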
Code example #6
File: client.go  Project: docker-slim/docker-slim
func New(config *config.DockerClient) *docker.Client {
	var client *docker.Client
	var err error

	newTLSClient := func(host string, certPath string, verify bool) (*docker.Client, error) {
		var ca []byte

		cert, err := ioutil.ReadFile(filepath.Join(certPath, "cert.pem"))
		if err != nil {
			return nil, err
		}

		key, err := ioutil.ReadFile(filepath.Join(certPath, "key.pem"))
		if err != nil {
			return nil, err
		}

		if verify {
			var err error
			ca, err = ioutil.ReadFile(filepath.Join(certPath, "ca.pem"))
			if err != nil {
				return nil, err
			}
		}

		return docker.NewVersionedTLSClientFromBytes(host, cert, key, ca, "")
	}

	switch {
	case config.Host != "" &&
		config.UseTLS &&
		config.VerifyTLS &&
		config.TLSCertPath != "":
		client, err = newTLSClient(config.Host, config.TLSCertPath, true)
		utils.FailOn(err)
		log.Debug("docker-slim: new Docker client (TLS,verify) [1]")

	case config.Host != "" &&
		config.UseTLS &&
		!config.VerifyTLS &&
		config.TLSCertPath != "":
		client, err = newTLSClient(config.Host, config.TLSCertPath, false)
		utils.FailOn(err)
		log.Debug("docker-slim: new Docker client (TLS,no verify) [2]")

	case config.Host != "" &&
		!config.UseTLS:
		client, err = docker.NewClient(config.Host)
		utils.FailOn(err)
		log.Debug("docker-slim: new Docker client [3]")

	case config.Host == "" &&
		!config.VerifyTLS &&
		config.Env["DOCKER_TLS_VERIFY"] == "1" &&
		config.Env["DOCKER_CERT_PATH"] != "" &&
		config.Env["DOCKER_HOST"] != "":
		client, err = newTLSClient(config.Env["DOCKER_HOST"], config.Env["DOCKER_CERT_PATH"], false)
		utils.FailOn(err)
		log.Debug("docker-slim: new Docker client (TLS,no verify) [4]")

	case config.Env["DOCKER_HOST"] != "":
		client, err = docker.NewClientFromEnv()
		utils.FailOn(err)
		log.Debug("docker-slim: new Docker client (env) [5]")

	case config.Host == "" && config.Env["DOCKER_HOST"] == "":
		config.Host = "unix:///var/run/docker.sock"
		client, err = docker.NewClient(config.Host)
		utils.FailOn(err)
		log.Debug("docker-slim: new Docker client (default) [6]")

	default:
		utils.Fail("no config for Docker client")
	}

	if config.Env["DOCKER_HOST"] == "" {
		if err := os.Setenv("DOCKER_HOST", config.Host); err != nil {
			utils.WarnOn(err)
		}

		log.Debug("docker-slim: configured DOCKER_HOST env var")
	}

	return client
}
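
Callers usually let the environment drive the case selection. A minimal sketch (field names taken from the cases above; the dockerclient import alias is an assumption):

clientConfig := &config.DockerClient{
	Env: map[string]string{
		"DOCKER_HOST":       os.Getenv("DOCKER_HOST"),
		"DOCKER_CERT_PATH":  os.Getenv("DOCKER_CERT_PATH"),
		"DOCKER_TLS_VERIFY": os.Getenv("DOCKER_TLS_VERIFY"),
	},
}
client := dockerclient.New(clientConfig) // falls back to unix:///var/run/docker.sock when nothing is set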
Code example #7
File: monitor.go  Project: docker-slim/docker-slim
func Run(startChan <-chan int,
	stopChan chan struct{},
	appName string,
	appArgs []string,
	dirName string) <-chan *report.PtMonitorReport {
	log.Info("ptmon: starting...")

	sysInfo := system.GetSystemInfo()
	archName := system.MachineToArchName(sysInfo.Machine)
	syscallResolver := system.CallNumberResolver(archName)

	reportChan := make(chan *report.PtMonitorReport, 1)

	go func() {
		ptReport := &report.PtMonitorReport{
			ArchName:     string(archName),
			SyscallStats: map[string]report.SyscallStatInfo{},
		}

		syscallStats := map[int16]uint64{}
		eventChan := make(chan syscallEvent)
		doneMonitoring := make(chan int)

		var app *exec.Cmd

		go func() {
			//Ptrace is not pretty... and it requires that you do all ptrace calls from the same thread
			runtime.LockOSThread()

			var err error
			app, err = target.Start(appName, appArgs, dirName, true)
			utils.FailOn(err)
			targetPid := app.Process.Pid

			log.Debugf("ptmon: target PID ==> %d\n", targetPid)

			var wstat syscall.WaitStatus
			_, err = syscall.Wait4(targetPid, &wstat, 0, nil)
			if err != nil {
				log.Warnf("ptmon: error waiting for %d: %v\n", targetPid, err)
				doneMonitoring <- 1
			}

			log.Debugln("ptmon: initial process status =>", wstat)

			if wstat.Exited() {
				log.Warn("ptmon: app exited (unexpected)")
				doneMonitoring <- 2
			}

			if wstat.Signaled() {
				log.Warn("ptmon: app signalled (unexpected)")
				doneMonitoring <- 3
			}

			syscallReturn := false
			gotCallNum := false
			gotRetVal := false
			var callNum uint64
			var retVal uint64
			for wstat.Stopped() {
				var regs syscall.PtraceRegs

				switch syscallReturn {
				case false:
					if err := syscall.PtraceGetRegs(targetPid, &regs); err != nil {
						log.Fatalf("ptmon: PtraceGetRegs(call): %v", err)
					}

					callNum = regs.Orig_rax
					syscallReturn = true
					gotCallNum = true
				case true:
					if err := syscall.PtraceGetRegs(targetPid, &regs); err != nil {
						log.Fatalf("ptmon: PtraceGetRegs(return): %v", err)
					}

					retVal = regs.Rax
					syscallReturn = false
					gotRetVal = true
				}

				err = syscall.PtraceSyscall(targetPid, 0)
				if err != nil {
					log.Warnf("ptmon: PtraceSyscall error: %v\n", err)
					break
				}
				_, err = syscall.Wait4(targetPid, &wstat, 0, nil)
				if err != nil {
					log.Warnf("ptmon: error waiting 4 %d: %v\n", targetPid, err)
					break
				}

				if gotCallNum && gotRetVal {
					gotCallNum = false
					gotRetVal = false

					eventChan <- syscallEvent{
						callNum: int16(callNum),
						retVal:  retVal,
					}
				}
			}

			log.Infoln("ptmon: monitor is exiting... status=", wstat)
			doneMonitoring <- 0
		}()

	done:
		for {
			select {
			case rc := <-doneMonitoring:
				log.Info("ptmon: done =>", rc)
				break done
			case <-stopChan:
				log.Info("ptmon: stopping...")
				//NOTE: need a better way to stop the target app...
				if err := app.Process.Signal(syscall.SIGTERM); err != nil {
					log.Warnln("ptmon: error stopping target app =>", err)
					if err := app.Process.Kill(); err != nil {
						log.Warnln("ptmon: error killing target app =>", err)
					}
				}
				break done
			case e := <-eventChan:
				ptReport.SyscallCount++

				if _, ok := syscallStats[e.callNum]; ok {
					syscallStats[e.callNum]++
				} else {
					syscallStats[e.callNum] = 1
				}
			}
		}

		log.Debugf("ptmon: executed syscall count = %d\n", ptReport.SyscallCount)
		log.Debugf("ptmon: number of syscalls: %v\n", len(syscallStats))
		for scNum, scCount := range syscallStats {
			log.Debugf("[%v] %v = %v", scNum, syscallResolver(scNum), scCount)
			ptReport.SyscallStats[strconv.FormatInt(int64(scNum), 10)] = report.SyscallStatInfo{
				Number: scNum,
				Name:   syscallResolver(scNum),
				Count:  scCount,
			}
		}

		ptReport.SyscallNum = uint32(len(ptReport.SyscallStats))
		reportChan <- ptReport
	}()

	return reportChan
}
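
The tracing loop relies on a ptrace property: after PtraceSyscall, the tracee stops twice per system call, once on entry (Orig_rax holds the call number) and once on exit (Rax holds the return value), which is why syscallReturn flips on every stop. Driving the monitor follows the same pattern as the other Run functions; a caller sketch (the ptmon alias and target paths are assumptions):

ptmonStartChan := make(chan int, 1)
stopChan := make(chan struct{})
reportChan := ptmon.Run(ptmonStartChan, stopChan, "/opt/app/server", nil, "/opt/app")

// ... exercise the target process ...

close(stopChan)          // asks the monitor to SIGTERM (or kill) the target
ptReport := <-reportChan // final per-syscall statistics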
Code example #8
File: monitor.go  Project: docker-slim/docker-slim
func Run(mountPoint string, stopChan chan struct{}) <-chan *report.FanMonitorReport {
	log.Info("fanmon: starting...")

	nd, err := fanapi.Initialize(fanapi.FAN_CLASS_NOTIF, os.O_RDONLY)
	utils.FailOn(err)
	err = nd.Mark(fanapi.FAN_MARK_ADD|fanapi.FAN_MARK_MOUNT,
		fanapi.FAN_MODIFY|fanapi.FAN_ACCESS|fanapi.FAN_OPEN, -1, mountPoint)
	utils.FailOn(err)

	eventsChan := make(chan *report.FanMonitorReport, 1)

	go func() {
		log.Debug("fanmon: fanRunMonitor worker starting")

		fanReport := &report.FanMonitorReport{
			MonitorPid:       os.Getpid(),
			MonitorParentPid: os.Getppid(),
			ProcessFiles:     make(map[string]map[string]*report.FileInfo),
		}

		eventChan := make(chan Event)
		go func() {
			log.Debug("fanmon: fanRunMonitor worker (monitor) starting")
			var eventID uint32

			for {
				data, err := nd.GetEvent()
				utils.FailOn(err)
				log.Debugf("fanmon: data.Mask =>%x\n", data.Mask)

				if (data.Mask & fanapi.FAN_Q_OVERFLOW) == fanapi.FAN_Q_OVERFLOW {
					log.Debug("fanmon: overflow event")
					continue
				}

				doNotify := false
				isRead := false
				isWrite := false

				if (data.Mask & fanapi.FAN_OPEN) == fanapi.FAN_OPEN {
					log.Debug("fanmon: file open")
					doNotify = true
				}

				if (data.Mask & fanapi.FAN_ACCESS) == fanapi.FAN_ACCESS {
					log.Debug("fanmon: file read")
					isRead = true
					doNotify = true
				}

				if (data.Mask & fanapi.FAN_MODIFY) == fanapi.FAN_MODIFY {
					log.Debug("fanmon: file write")
					isWrite = true
					doNotify = true
				}

				path, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", data.File.Fd()))
				utils.FailOn(err)
				log.Debug("fanmon: file path =>", path)

				data.File.Close()
				if doNotify {
					eventID++
					e := Event{ID: eventID, Pid: data.Pid, File: path, IsRead: isRead, IsWrite: isWrite}
					eventChan <- e
				}
			}
		}()

	done:
		for {
			select {
			case <-stopChan:
				log.Info("fanmon: stopping...")
				break done
			case e := <-eventChan:
				fanReport.EventCount++
				log.Debug("fanmon: event ", fanReport.EventCount)

				if e.ID == 1 {
					//first event represents the main process
					if pinfo, err := getProcessInfo(e.Pid); (err == nil) && (pinfo != nil) {
						fanReport.MainProcess = pinfo
						fanReport.Processes = make(map[string]*report.ProcessInfo)
						fanReport.Processes[strconv.Itoa(int(e.Pid))] = pinfo
					}
				} else {
					if _, ok := fanReport.Processes[strconv.Itoa(int(e.Pid))]; !ok {
						if pinfo, err := getProcessInfo(e.Pid); (err == nil) && (pinfo != nil) {
							fanReport.Processes[strconv.Itoa(int(e.Pid))] = pinfo
						}
					}
				}

				if _, ok := fanReport.ProcessFiles[strconv.Itoa(int(e.Pid))]; !ok {
					fanReport.ProcessFiles[strconv.Itoa(int(e.Pid))] = make(map[string]*report.FileInfo)
				}

				if existingFi, ok := fanReport.ProcessFiles[strconv.Itoa(int(e.Pid))][e.File]; !ok {
					fi := &report.FileInfo{
						EventCount: 1,
						Name:       e.File,
					}

					if e.IsRead {
						fi.ReadCount = 1
					}

					if e.IsWrite {
						fi.WriteCount = 1
					}

					if pi, ok := fanReport.Processes[strconv.Itoa(int(e.Pid))]; ok && (e.File == pi.Path) {
						fi.ExeCount = 1
					}

					fanReport.ProcessFiles[strconv.Itoa(int(e.Pid))][e.File] = fi
				} else {
					existingFi.EventCount++

					if e.IsRead {
						existingFi.ReadCount++
					}

					if e.IsWrite {
						existingFi.WriteCount++
					}

					if pi, ok := fanReport.Processes[strconv.Itoa(int(e.Pid))]; ok && (e.File == pi.Path) {
						existingFi.ExeCount++
					}
				}
			}
		}

		log.Debugf("fanmon: sending report (processed %v events)...\n", fanReport.EventCount)
		eventsChan <- fanReport
	}()

	return eventsChan
}
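
fanotify mount marks (FAN_MARK_MOUNT) require root or CAP_SYS_ADMIN, which is why this monitor runs inside the sensor in the target container. Driving it follows the same channel pattern as the other monitors (the fanmon alias is an assumption):

stopChan := make(chan struct{})
fanReportChan := fanmon.Run("/", stopChan) // watch the whole root mount

// ... workload ...

close(stopChan)
fanReport := <-fanReportChan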
Code example #9
File: main.go  Project: docker-slim/docker-slim
func main() {
	flag.Parse()

	if enableDebug {
		log.SetLevel(log.DebugLevel)
	}

	log.Infof("sensor: args => %#v\n", os.Args)

	dirName, err := os.Getwd()
	utils.WarnOn(err)
	log.Debugf("sensor: cwd => %#v\n", dirName)

	initSignalHandlers()
	defer func() {
		log.Debug("defered cleanup on shutdown...")
		cleanupOnShutdown()
	}()

	log.Debug("sensor: setting up channels...")
	doneChan = make(chan struct{})

	err = ipc.InitChannels()
	utils.FailOn(err)

	cmdChan, err := ipc.RunCmdServer(doneChan)
	utils.FailOn(err)

	monDoneChan := make(chan bool, 1)
	monDoneAckChan := make(chan bool)
	pidsChan := make(chan []int, 1)
	ptmonStartChan := make(chan int, 1)

	log.Info("sensor: waiting for commands...")
doneRunning:
	for {
		select {
		case cmd := <-cmdChan:
			log.Debug("\nsensor: command => ", cmd)
			switch data := cmd.(type) {
			case *messages.StartMonitor:
				if data == nil {
					log.Info("sensor: 'start' command - no data...")
					break
				}

				log.Debugf("sensor: 'start' command (%#v) - starting monitor...\n", data)
				monitor(monDoneChan, monDoneAckChan, pidsChan, ptmonStartChan, data, dirName)

				//target app started by ptmon... (long story :-))
				//TODO: need to get the target app pid to pemon, so it can filter process events
				log.Debugf("sensor: target app started => %v %#v\n", data.AppName, data.AppArgs)
				time.Sleep(3 * time.Second)

			case *messages.StopMonitor:
				log.Debug("sensor: 'stop' command - stopping monitor...")
				break doneRunning
			default:
				log.Debug("sensor: ignoring unknown command => ", cmd)
			}

		case <-time.After(time.Second * 5):
			log.Debug(".")
		}
	}

	monDoneChan <- true
	log.Info("sensor: waiting for monitor to finish...")
	<-monDoneAckChan

	ipc.TryPublishEvt(3, "monitor.finish.completed")

	log.Info("sensor: done!")
}
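
One detail worth noting in the command loop above: time.After allocates a fresh timer on every select iteration, so the five-second heartbeat resets whenever a command arrives. If a steady heartbeat were wanted instead, time.NewTicker is the usual alternative; a sketch:

ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
	select {
	case cmd := <-cmdChan:
		log.Debug("sensor: command => ", cmd)
	case <-ticker.C:
		log.Debug(".") // fires every 5 seconds regardless of command traffic
	}
}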