Example #1
// Trap sets up a simplified signal "trap", appropriate for common
// behavior expected from a vanilla unix command-line tool in general
// (and the Docker engine in particular).
//
// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is
//   skipped and the process is terminated immediately (allows force quit of stuck daemon)
// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit.
//
func Trap(cleanup func()) {
	c := make(chan os.Signal, 1)
	// we will handle INT, TERM, QUIT here
	signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}
	gosignal.Notify(c, signals...)
	go func() {
		interruptCount := uint32(0)
		for sig := range c {
			go func(sig os.Signal) {
				glog.Infof("Processing signal '%v'", sig)
				switch sig {
				case os.Interrupt, syscall.SIGTERM:
					if atomic.LoadUint32(&interruptCount) < 3 {
						// Initiate the cleanup only once
						if atomic.AddUint32(&interruptCount, 1) == 1 {
							// Call the provided cleanup handler
							cleanup()
							os.Exit(0)
						} else {
							return
						}
					} else {
						// 3 SIGTERM/INT signals received; force exit without cleanup
						glog.Infof("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
					}
				case syscall.SIGQUIT:
					DumpStacks()
					glog.Infof("Forcing docker daemon shutdown without cleanup on SIGQUIT")
				}
				// For the SIGINT/TERM and SIGQUIT non-clean shutdown cases, exit with 128 + signal number
				os.Exit(128 + int(sig.(syscall.Signal)))
			}(sig)
		}
	}()
}
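The final os.Exit(128 + int(sig.(syscall.Signal))) follows the usual shell convention for signal-terminated processes (for example, 143 for SIGTERM). A minimal, self-contained sketch of the same clean-then-force shutdown idea, using only the standard library rather than the hyper code above:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)

	go func() {
		sig := <-c // first signal: start a clean shutdown
		go func() {
			fmt.Println("cleaning up after", sig)
			// ... real cleanup work would go here ...
			os.Exit(0)
		}()
		sig = <-c // second signal: force quit with 128 + signal number
		os.Exit(128 + int(sig.(syscall.Signal)))
	}()

	select {} // the daemon's real work would go here
}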
Example #2
func CreateVolume(poolName, volName, dev_id string, size int, restore bool) error {
	glog.Infof("/dev/mapper/%s", volName)
	if _, err := os.Stat("/dev/mapper/" + volName); err == nil {
		return nil
	}
	if !restore {
		parms := fmt.Sprintf("dmsetup message /dev/mapper/%s 0 \"create_thin %s\"", poolName, dev_id)
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			return fmt.Errorf("%s", res)
		}
	}
	parms := fmt.Sprintf("dmsetup create %s --table \"0 %d thin /dev/mapper/%s %s\"", volName, size/512, poolName, dev_id)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}

	if !restore {
		parms = fmt.Sprintf("mkfs.ext4 \"/dev/mapper/%s\"", volName)
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			return fmt.Errorf("%s", res)
		}
	}
	return nil
}
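CreateVolume shells out through /bin/sh -c, which means the pool, volume, and device names are re-parsed by the shell. A small sketch (illustrative helper, not part of the code above) of issuing the same create_thin message by passing the arguments to exec.Command directly, so no shell quoting is involved:

package main

import (
	"fmt"
	"os/exec"
)

// createThin mirrors the first dmsetup invocation in CreateVolume above:
// it sends "create_thin <devID>" to sector 0 of the thin-pool device.
func createThin(poolName, devID string) error {
	cmd := exec.Command("dmsetup", "message",
		"/dev/mapper/"+poolName, "0",
		fmt.Sprintf("create_thin %s", devID))
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("dmsetup message failed: %v: %s", err, out)
	}
	return nil
}

func main() {
	if err := createThin("hyper-volume-pool", "42"); err != nil {
		fmt.Println(err)
	}
}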
Example #3
func (d *Driver) VmMountLayer(id string) error {
	if d.daemon == nil {
		if err := d.Setup(); err != nil {
			return err
		}
	}

	var (
		diffSrc = fmt.Sprintf("%s/diff/%s", d.RootPath(), id)
		volDst  = fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), id)
	)
	podstring, err := MakeMountPod("mac-vm-disk-mount-layer", "puller:latest", id, diffSrc, volDst)
	if err != nil {
		return err
	}
	podId := fmt.Sprintf("pull-%s", utils.RandStr(10, "alpha"))
	vm, ok := d.daemon.VmList[d.pullVm]
	if !ok {
		return fmt.Errorf("can not find VM(%s)", d.pullVm)
	}
	if vm.Status == types.S_VM_IDLE {
		code, cause, err := d.daemon.StartPod(podId, podstring, d.pullVm, nil, false, true, types.VM_KEEP_AFTER_SHUTDOWN)
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			d.daemon.KillVm(d.pullVm)
			return err
		}
		vm := d.daemon.VmList[d.pullVm]
		// wait for cmd finish
		_, _, ret3, err := vm.GetVmChan()
		if err != nil {
			glog.Error(err.Error())
			return err
		}
		subVmStatus := ret3.(chan *types.VmResponse)
		var vmResponse *types.VmResponse
		for {
			vmResponse = <-subVmStatus
			if vmResponse.VmId == d.pullVm {
				if vmResponse.Code == types.E_POD_FINISHED {
					glog.Infof("Got E_POD_FINISHED code response")
					break
				}
			}
		}

		d.daemon.PodList[podId].Vm = d.pullVm
		// release pod from VM
		code, cause, err = d.daemon.StopPod(podId, "no")
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			d.daemon.KillVm(d.pullVm)
			return err
		}
		d.daemon.CleanPod(podId)
	} else {
		glog.Errorf("pull vm should not be associated")
	}
	return nil
}
Example #4
func DumpStacks() {
	buf := make([]byte, 16384)
	buf = buf[:runtime.Stack(buf, true)]
	// Note that if the daemon is started with a less-verbose log-level than "info" (the default), the goroutine
	// traces won't show up in the log.
	glog.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
}
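Because the buffer is fixed at 16 KB and runtime.Stack only writes what fits, a very large dump gets truncated. A small sketch (not from the hyper source) that grows the buffer until the whole trace fits:

package main

import (
	"fmt"
	"runtime"
)

// allStacks doubles the buffer until runtime.Stack reports fewer bytes than
// the buffer can hold, which means the dump is complete rather than truncated.
func allStacks() []byte {
	buf := make([]byte, 16384)
	for {
		n := runtime.Stack(buf, true)
		if n < len(buf) {
			return buf[:n]
		}
		buf = make([]byte, 2*len(buf))
	}
}

func main() {
	fmt.Printf("=== BEGIN goroutine stack dump ===\n%s=== END goroutine stack dump ===\n", allStacks())
}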
Example #5
func (daemon *Daemon) CmdPodStart(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum Pod is 1024!")
	}

	podId := job.Args[0]
	vmId := job.Args[1]

	glog.Infof("pod:%s, vm:%s", podId, vmId)
	// Do the status check for the given pod
	if _, ok := daemon.PodList[podId]; !ok {
		return fmt.Errorf("The pod(%s) can not be found, please create it first", podId)
	}
	var lazy bool = hypervisor.HDriver.SupportLazyMode() && vmId == ""

	code, cause, err := daemon.StartPod(podId, "", vmId, nil, lazy, false, types.VM_KEEP_NONE)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", vmId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #6
func diff(id, parent string) (diff archive.Archive, err error) {

	// create pod

	// start or replace pod
	glog.Infof("Diff between %s and %s", id, parent)
	layerFs := "/tmp/test1"
	if parent == "" {
		archive, err := archive.Tar(layerFs, archive.Uncompressed)
		if err != nil {
			return nil, err
		}
		return ioutils.NewReadCloserWrapper(archive, func() error {
			err := archive.Close()
			return err
		}), nil
	}

	parentFs := "/tmp/test2"

	changes, err := archive.ChangesDirs(layerFs, parentFs)
	if err != nil {
		return nil, err
	}

	archive, err := archive.ExportChanges(layerFs, changes)
	if err != nil {
		return nil, err
	}

	return ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		return err
	}), nil
}
Example #7
// InitDeviceContext will init device info in context
func (ctx *VmContext) InitDeviceContext(spec *pod.UserPod, wg *sync.WaitGroup,
	cInfo []*ContainerInfo, vInfo []*VolumeInfo) {

	ctx.lock.Lock()
	defer ctx.lock.Unlock()

	for i := 0; i < ctx.InterfaceCount; i++ {
		ctx.progress.adding.networks[i] = true
	}

	if cInfo == nil {
		cInfo = []*ContainerInfo{}
	}

	if vInfo == nil {
		vInfo = []*VolumeInfo{}
	}

	ctx.initVolumeMap(spec)

	if glog.V(3) {
		for i, c := range cInfo {
			glog.Infof("#%d Container Info:", i)
			b, err := json.MarshalIndent(c, "...|", "    ")
			if err == nil {
				glog.Info("\n", string(b))
			}
		}
	}

	containers := make([]VmContainer, len(spec.Containers))

	for i, container := range spec.Containers {
		ctx.initContainerInfo(i, &containers[i], &container)
		ctx.setContainerInfo(i, &containers[i], cInfo[i])

		if spec.Tty {
			containers[i].Tty = ctx.attachId
			ctx.attachId++
			ctx.ptys.ttys[containers[i].Tty] = newAttachments(i, true)
		}
	}

	ctx.vmSpec = &VmPod{
		Hostname:   spec.Name,
		Containers: containers,
		Interfaces: nil,
		Routes:     nil,
		ShareDir:   ShareDirTag,
	}

	for _, vol := range vInfo {
		ctx.setVolumeInfo(vol)
	}

	ctx.userSpec = spec
	ctx.wg = wg
}
Example #8
//export DomainDeath_cgo
func DomainDeath_cgo(domid C.uint32_t) {
	defer func() { recover() }() //in case the vmContext or channel has been released
	dom := (uint32)(domid)
	glog.Infof("got xen hypervisor message: domain %d quit", dom)
	if vm, ok := globalDriver.domains[dom]; ok {
		glog.V(1).Infof("Domain %d managed by xen driver, try close it")
		delete(globalDriver.domains, dom)
		vm.Hub <- &hypervisor.VmExit{}
		HyperDomainCleanup(globalDriver.Ctx, vm.DCtx.(*XenContext).ev)
	}
}
Example #9
func (daemon *Daemon) StartVm(vmId string, cpu, mem int, lazy bool, keep int) (*hypervisor.Vm, error) {
	b := &hypervisor.BootConfig{
		CPU:    cpu,
		Memory: mem,
		Kernel: daemon.Kernel,
		Initrd: daemon.Initrd,
		Bios:   daemon.Bios,
		Cbfs:   daemon.Cbfs,
		Vbox:   daemon.VboxImage,
	}

	vm := daemon.NewVm(vmId, cpu, mem, lazy, keep)

	err := vm.Launch(b)
	if err != nil {
		return nil, err
	}
	_, r1, r2, err1 := vm.GetVmChan()
	if err1 != nil {
		return nil, err1
	}
	vmStatus := r1.(chan *types.VmResponse)
	subVmStatus := r2.(chan *types.VmResponse)
	go func() {
		defer func() {
			err := recover()
			if err != nil {
				glog.Warning("panic during send shutdown message to channel")
			}
		}()
		for {
			vmResponse := <-vmStatus
			subVmStatus <- vmResponse
		}
	}()
	var vmResponse *types.VmResponse
	for {
		vmResponse = <-subVmStatus
		glog.V(1).Infof("Get the response from VM, VM id is %s, response code is %d!", vmResponse.VmId, vmResponse.Code)
		if vmResponse.VmId == vmId {
			if vmResponse.Code == types.E_VM_RUNNING {
				glog.Infof("Got E_VM_RUNNING code response")
				break
			} else {
				break
			}
		}
	}
	if vmResponse.Code != types.E_VM_RUNNING {
		return nil, fmt.Errorf("Vbox does not start successfully")
	}
	return vm, nil
}
Example #10
func (proxy *TCPProxy) Run() {
	quit := make(chan bool)
	defer close(quit)
	for {
		client, err := proxy.listener.Accept()
		if err != nil {
			glog.Infof("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err)
			return
		}
		go proxy.clientLoop(client.(*net.TCPConn), quit)
	}
}
Example #11
func VmAssociate(vmId string, hub chan VmEvent, client chan *types.VmResponse,
	wg *sync.WaitGroup, pack []byte) {

	if glog.V(1) {
		glog.Infof("VM %s trying to reload with serialized data: %s", vmId, string(pack))
	}

	pinfo, err := vmDeserialize(pack)
	if err != nil {
		client <- &types.VmResponse{
			VmId:  vmId,
			Code:  types.E_BAD_REQUEST,
			Cause: err.Error(),
		}
		return
	}

	if pinfo.Id != vmId {
		client <- &types.VmResponse{
			VmId:  vmId,
			Code:  types.E_BAD_REQUEST,
			Cause: "VM ID mismatch",
		}
		return
	}

	context, err := pinfo.vmContext(hub, client, wg)
	if err != nil {
		client <- &types.VmResponse{
			VmId:  vmId,
			Code:  types.E_BAD_REQUEST,
			Cause: err.Error(),
		}
		return
	}

	client <- &types.VmResponse{
		VmId: vmId,
		Code: types.E_OK,
	}

	context.DCtx.Associate(context)

	go waitPts(context)
	go connectToInit(context)
	if glog.V(1) {
		go waitConsoleOutput(context)
	}

	context.Become(stateRunning, "RUNNING")

	context.loop()
}
Example #12
// MAINTAINER some text <*****@*****.**>
//
// Sets the maintainer metadata.
func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error {
	if len(args) != 1 {
		return fmt.Errorf("MAINTAINER requires exactly one argument")
	}

	if err := b.BuilderFlags.Parse(); err != nil {
		return err
	}

	b.maintainer = args[0]
	glog.Infof("MAINTAINER is %s", args[0])
	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
}
Example #13
func (proxy *UDPProxy) Run() {
	readBuf := make([]byte, UDPBufSize)
	for {
		read, from, err := proxy.listener.ReadFromUDP(readBuf)
		if err != nil {
			// NOTE: Apparently ReadFrom doesn't return
			// ECONNREFUSED like Read does (see comment in
			// UDPProxy.replyLoop)
			if !isClosedError(err) {
				glog.Infof("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err)
			}
			break
		}

		fromKey := newConnTrackKey(from)
		proxy.connTrackLock.Lock()
		proxyConn, hit := proxy.connTrackTable[*fromKey]
		if !hit {
			proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr)
			if err != nil {
				glog.Infof("Can't proxy a datagram to udp/%s: %s", proxy.backendAddr, err)
				proxy.connTrackLock.Unlock()
				continue
			}
			proxy.connTrackTable[*fromKey] = proxyConn
			go proxy.replyLoop(proxyConn, from, fromKey)
		}
		proxy.connTrackLock.Unlock()
		for i := 0; i != read; {
			written, err := proxyConn.Write(readBuf[i:read])
			if err != nil {
				glog.Infof("Can't proxy a datagram to udp/%s: %s", proxy.backendAddr, err)
				break
			}
			i += written
		}
	}
}
Example #14
// register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error {
	if container.daemon != nil || daemon.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := daemon.ensureName(container); err != nil {
		return err
	}
	if daemon == nil {
		glog.Error("daemon can not be nil")
		return fmt.Errorf("daemon can not be nil")
	}

	container.daemon = daemon

	// Attach to stdout and stderr
	container.stderr = broadcastwriter.New()
	container.stdout = broadcastwriter.New()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	daemon.containers.Add(container.ID, container)

	// don't update the Suffixarray if we're starting up
	// we'll waste time if we update it for every container
	daemon.idIndex.Add(container.ID)

	if container.IsRunning() {
		glog.Infof("killing old running container %s", container.ID)

		container.SetStopped(&ExitStatus{ExitCode: 0})

		if err := container.Unmount(); err != nil {
			glog.V(1).Infof("unmount error %s", err)
		}
		if err := container.ToDisk(); err != nil {
			glog.V(1).Infof("saving stopped state to disk %s", err)
		}
	}

	return nil
}
Example #15
func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
	if r.client == nil || r.request == nil {
		return 0, fmt.Errorf("client and request can't be nil\n")
	}
	isFreshRequest := false
	if r.lastRange != 0 && r.currentResponse == nil {
		readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
		r.request.Header.Set("Range", readRange)
		time.Sleep(5 * time.Second)
	}
	if r.currentResponse == nil {
		r.currentResponse, err = r.client.Do(r.request)
		isFreshRequest = true
	}
	if err != nil && r.failures+1 != r.maxFailures {
		r.cleanUpResponse()
		r.failures++
		time.Sleep(5 * time.Duration(r.failures) * time.Second)
		return 0, nil
	} else if err != nil {
		r.cleanUpResponse()
		return 0, err
	}
	if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 {
		r.cleanUpResponse()
		return 0, io.EOF
	} else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest {
		r.cleanUpResponse()
		return 0, fmt.Errorf("the server doesn't support byte ranges")
	}
	if r.totalSize == 0 {
		r.totalSize = r.currentResponse.ContentLength
	} else if r.totalSize <= 0 {
		r.cleanUpResponse()
		return 0, fmt.Errorf("failed to auto detect content length")
	}
	n, err = r.currentResponse.Body.Read(p)
	r.lastRange += int64(n)
	if err != nil {
		r.cleanUpResponse()
	}
	if err != nil && err != io.EOF {
		glog.Infof("encountered error during pull and clearing it before resume: %s", err)
		err = nil
	}
	return n, err
}
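The reader resumes by re-issuing the request with a Range header and then treating 206 (range honoured) and 416 (already complete) specially. A minimal standard-library sketch of that resume step, with a hypothetical URL and offsets:

package main

import (
	"fmt"
	"net/http"
)

// resumeFrom asks the server for bytes [offset, total] of url and returns the
// response only if the server actually honoured the range (206 Partial Content).
func resumeFrom(url string, offset, total int64) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, total))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusPartialContent {
		resp.Body.Close()
		return nil, fmt.Errorf("server doesn't support byte ranges (status %d)", resp.StatusCode)
	}
	return resp, nil
}

func main() {
	resp, err := resumeFrom("http://example.com/layer.tar", 1024, 4096)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	// continue reading resp.Body from where the previous attempt stopped
}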
Example #16
func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) {
	// By default libdm sends us all the messages including debug ones.
	// We need to filter out messages here and figure out which one
	// should be printed.
	if level > DMLogLevel {
		return
	}

	// FIXME(vbatts) push this back into ./pkg/devicemapper/
	if level <= devicemapper.LogLevelErr {
		glog.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
	} else if level <= devicemapper.LogLevelInfo {
		glog.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
	} else {
		// FIXME(vbatts) push this back into ./pkg/devicemapper/
		glog.V(1).Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
	}
}
Example #17
func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) {
	backend, err := net.DialTCP("tcp", nil, proxy.backendAddr)
	if err != nil {
		glog.Infof("Can't forward traffic to backend tcp/%v: %s\n", proxy.backendAddr, err)
		client.Close()
		return
	}

	event := make(chan int64)
	var broker = func(to, from *net.TCPConn) {
		written, err := io.Copy(to, from)
		if err != nil {
			// If the socket we are writing to is shutdown with
			// SHUT_WR, forward it to the other end of the pipe:
			if err, ok := err.(*net.OpError); ok && err.Err == syscall.EPIPE {
				from.CloseWrite()
			}
		}
		to.CloseRead()
		event <- written
	}

	go broker(client, backend)
	go broker(backend, client)

	var transferred int64 = 0
	for i := 0; i < 2; i++ {
		select {
		case written := <-event:
			transferred += written
		case <-quit:
			// Interrupt the two brokers and "join" them.
			client.Close()
			backend.Close()
			for ; i < 2; i++ {
				transferred += <-event
			}
			return
		}
	}
	client.Close()
	backend.Close()
}
Example #18
func (dms *DevMapperStorage) CreateVolume(daemon *Daemon, podId, shortName string) (*hypervisor.VolumeInfo, error) {
	volName := fmt.Sprintf("%s-%s-%s", dms.VolPoolName, podId, shortName)
	dev_id, _ := daemon.GetVolumeId(podId, volName)
	glog.Infof("DeviceID is %d", dev_id)

	restore := dev_id > 0

	for {
		if !restore {
			dev_id = dms.randDevId()
		}
		dev_id_str := strconv.Itoa(dev_id)

		err := dm.CreateVolume(dms.VolPoolName, volName, dev_id_str, DEFAULT_DM_VOL_SIZE, restore)
		if err != nil && !restore && strings.Contains(err.Error(), "failed: File exists") {
			glog.V(1).Infof("retry for dev_id #%d creating collision: %v", dev_id, err)
			continue
		} else if err != nil {
			glog.V(1).Infof("failed to create dev_id #%d: %v", dev_id, err)
			return nil, err
		}

		glog.V(3).Infof("device (%d) created (restore:%v) for %s: %s", dev_id, restore, podId, volName)
		daemon.SetVolumeId(podId, volName, dev_id_str)
		break
	}

	fstype, err := dm.ProbeFsType("/dev/mapper/" + volName)
	if err != nil {
		fstype = "ext4"
	}

	glog.V(1).Infof("volume %s created with dm as %s", shortName, volName)

	return &hypervisor.VolumeInfo{
		Name:     shortName,
		Filepath: path.Join("/dev/mapper/", volName),
		Fstype:   fstype,
		Format:   "raw",
	}, nil
}
Example #19
File: vm.go Project: neujie/hyper
func (daemon *Daemon) GetVM(vmId string, resource *pod.UserResource, lazy bool, keep int) (*hypervisor.Vm, error) {
	if vmId == "" {
		return daemon.StartVm("", resource.Vcpu, resource.Memory, lazy, keep)
	}

	vm, ok := daemon.VmList[vmId]
	if !ok {
		return nil, fmt.Errorf("The VM %s doesn't exist", vmId)
	}
	/* FIXME: check if any pod is running on this vm? */
	glog.Infof("find vm:%s", vm.Id)
	if resource.Vcpu != vm.Cpu {
		return nil, fmt.Errorf("The new pod's cpu setting is different with the VM's cpu")
	}

	if resource.Memory != vm.Mem {
		return nil, fmt.Errorf("The new pod's memory setting is different with the VM's memory")
	}

	return vm, nil
}
Example #20
func (xc *XenContext) Launch(ctx *hypervisor.VmContext) {
	//    go func(){
	extra := []string{
		"-device", fmt.Sprintf("virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=%d", PCI_AVAILABLE_ADDRESS),
		"-chardev", fmt.Sprintf("socket,id=charch0,path=%s,server,nowait", ctx.HyperSockName),
		"-device", "virtserialport,bus=virtio-serial0.0,nr=1,chardev=charch0,id=channel0,name=sh.hyper.channel.0",
		"-chardev", fmt.Sprintf("socket,id=charch1,path=%s,server,nowait", ctx.TtySockName),
		"-device", "virtserialport,bus=virtio-serial0.0,nr=2,chardev=charch1,id=channel1,name=sh.hyper.channel.1",
		"-fsdev", fmt.Sprintf("local,id=virtio9p,path=%s,security_model=none", ctx.ShareDir),
		"-device", fmt.Sprintf("virtio-9p-pci,fsdev=virtio9p,mount_tag=%s", hypervisor.ShareDirTag),
	}
	domid, ev, err := XlStartDomain(xc.driver.Ctx, ctx.Id, ctx.Boot, ctx.HyperSockName+".test", ctx.TtySockName+".test", ctx.ConsoleSockName, extra)
	if err != nil {
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: err.Error()}
		return
	}
	xc.domId = domid
	xc.ev = ev
	glog.Infof("Start VM as domain %d", domid)
	xc.driver.domains[(uint32)(domid)] = ctx
	//    }()
}
Example #21
func migrateKey() (err error) {
	// Migrate trust key if exists at ~/.docker/key.json and owned by current user
	oldPath := filepath.Join(homedir.Get(), ".docker", defaultTrustKeyFile)
	newPath := filepath.Join(getDaemonConfDir(), defaultTrustKeyFile)
	if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) {
		defer func() {
			// Ensure old path is removed if no error occurred
			if err == nil {
				err = os.Remove(oldPath)
			} else {
				glog.Warningf("Key migration failed, key file not removed at %s", oldPath)
			}
		}()

		// the config directory must be traversable, so it needs the execute bit
		if err := os.MkdirAll(getDaemonConfDir(), os.FileMode(0755)); err != nil {
			return fmt.Errorf("Unable to create daemon configuration directory: %s", err)
		}

		newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
		if err != nil {
			return fmt.Errorf("error creating key file %q: %s", newPath, err)
		}
		defer newFile.Close()

		oldFile, err := os.Open(oldPath)
		if err != nil {
			return fmt.Errorf("error opening key file %q: %s", oldPath, err)
		}
		defer oldFile.Close()

		if _, err := io.Copy(newFile, oldFile); err != nil {
			return fmt.Errorf("error copying key: %s", err)
		}

		glog.Infof("Migrated key from %s to %s", oldPath, newPath)
	}

	return nil
}
Example #22
func (auth *RequestAuthorization) getToken() (string, error) {
	auth.tokenLock.Lock()
	defer auth.tokenLock.Unlock()
	now := time.Now()
	if now.Before(auth.tokenExpiration) {
		glog.V(1).Infof("Using cached token for %s", auth.authConfig.Username)
		return auth.tokenCache, nil
	}

	for _, challenge := range auth.registryEndpoint.AuthChallenges {
		switch strings.ToLower(challenge.Scheme) {
		case "basic":
			// no token necessary
		case "bearer":
			glog.V(1).Infof("Getting bearer token with %s for %s", challenge.Parameters, auth.authConfig.Username)
			params := map[string]string{}
			for k, v := range challenge.Parameters {
				params[k] = v
			}
			params["scope"] = fmt.Sprintf("%s:%s:%s", auth.resource, auth.scope, strings.Join(auth.actions, ","))
			token, err := getToken(auth.authConfig.Username, auth.authConfig.Password, params, auth.registryEndpoint)
			if err != nil {
				return "", err
			}
			auth.tokenCache = token
			auth.tokenExpiration = now.Add(time.Minute)

			return token, nil
		default:
			glog.Infof("Unsupported auth scheme: %q", challenge.Scheme)
		}
	}

	// Do not expire cache since there are no challenges which use a token
	auth.tokenExpiration = time.Now().Add(time.Hour * 24)

	return "", nil
}
Example #23
func (daemon *Daemon) CmdPodStart(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum Pod is 1024!")
	}

	var (
		tag         string              = ""
		ttys        []*hypervisor.TtyIO = []*hypervisor.TtyIO{}
		ttyCallback chan *types.VmResponse
	)

	podId := job.Args[0]
	vmId := job.Args[1]
	if len(job.Args) > 2 {
		tag = job.Args[2]
	}
	if tag != "" {
		glog.V(1).Info("Pod Run with client terminal tag: ", tag)
		ttyCallback = make(chan *types.VmResponse, 1)
		ttys = append(ttys, &hypervisor.TtyIO{
			Stdin:     job.Stdin,
			Stdout:    job.Stdout,
			ClientTag: tag,
			Callback:  ttyCallback,
		})
	}

	glog.Infof("pod:%s, vm:%s", podId, vmId)
	// Do the status check for the given pod
	daemon.PodList.Lock()
	glog.V(2).Infof("lock PodList")
	if _, ok := daemon.PodList.Get(podId); !ok {
		glog.V(2).Infof("unlock PodList")
		daemon.PodList.Unlock()
		return fmt.Errorf("The pod(%s) can not be found, please create it first", podId)
	}
	var lazy bool = hypervisor.HDriver.SupportLazyMode() && vmId == ""

	code, cause, err := daemon.StartPod(podId, "", vmId, nil, lazy, false, types.VM_KEEP_NONE, ttys)
	if err != nil {
		glog.Error(err.Error())
		glog.V(2).Infof("unlock PodList")
		daemon.PodList.Unlock()
		return err
	}

	if len(ttys) > 0 {
		glog.V(2).Infof("unlock PodList")
		daemon.PodList.Unlock()
		<-ttyCallback
		return nil
	}
	defer glog.V(2).Infof("unlock PodList")
	defer daemon.PodList.Unlock()

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", vmId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #24
func (daemon *Daemon) StartPod(podId, podArgs, vmId string, config interface{}, lazy, autoremove bool, keep int) (int, string, error) {
	var (
		podData []byte
		err     error
		mypod   *hypervisor.Pod
		vm      *hypervisor.Vm = nil
	)

	if podArgs == "" {
		var ok bool
		mypod, ok = daemon.PodList[podId]
		if !ok {
			return -1, "", fmt.Errorf("Can not find the POD instance of %s", podId)
		}

		podData, err = daemon.GetPodByName(podId)
		if err != nil {
			return -1, "", err
		}
	} else {
		podData = []byte(podArgs)

		if err := daemon.CreatePod(podId, podArgs, nil, autoremove); err != nil {
			glog.Error(err.Error())
			return -1, "", err
		}

		mypod = daemon.PodList[podId]
	}

	userPod, err := pod.ProcessPodBytes(podData)
	if err != nil {
		return -1, "", err
	}

	defer func() {
		if vm != nil && err != nil && vmId == "" {
			daemon.KillVm(vm.Id)
		}
	}()

	if vmId == "" {
		glog.V(1).Infof("The config: kernel=%s, initrd=%s", daemon.Kernel, daemon.Initrd)
		var (
			cpu = 1
			mem = 128
		)

		if userPod.Resource.Vcpu > 0 {
			cpu = userPod.Resource.Vcpu
		}

		if userPod.Resource.Memory > 0 {
			mem = userPod.Resource.Memory
		}

		b := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: daemon.Kernel,
			Initrd: daemon.Initrd,
			Bios:   daemon.Bios,
			Cbfs:   daemon.Cbfs,
			Vbox:   daemon.VboxImage,
		}

		vm = daemon.NewVm("", cpu, mem, lazy, keep)

		err = vm.Launch(b)
		if err != nil {
			return -1, "", err
		}

		daemon.AddVm(vm)
	} else {
		var ok bool
		vm, ok = daemon.VmList[vmId]
		if !ok {
			err = fmt.Errorf("The VM %s doesn't exist", vmId)
			return -1, "", err
		}
		/* FIXME: check if any pod is running on this vm? */
		glog.Infof("find vm:%s", vm.Id)
		if userPod.Resource.Vcpu != vm.Cpu {
			err = fmt.Errorf("The new pod's cpu setting is different with the VM's cpu")
			return -1, "", err
		}

		if userPod.Resource.Memory != vm.Mem {
			err = fmt.Errorf("The new pod's memory setting is different with the VM's memory")
			return -1, "", err
		}
	}

	fmt.Printf("POD id is %s\n", podId)

	containerInfoList, volumeInfoList, err := daemon.ParsePod(mypod, userPod, vm.Id)
	if err != nil {
		return -1, "", err
	}

	vmResponse := vm.StartPod(mypod, userPod, containerInfoList, volumeInfoList)
	if vmResponse.Data == nil {
		err = fmt.Errorf("VM response data is nil")
		return vmResponse.Code, vmResponse.Cause, err
	}
	data := vmResponse.Data.([]byte)
	err = daemon.UpdateVmData(vm.Id, data)
	if err != nil {
		glog.Error(err.Error())
		return -1, "", err
	}
	// add or update the Vm info for POD
	if err := daemon.UpdateVmByPod(podId, vm.Id); err != nil {
		glog.Error(err.Error())
		return -1, "", err
	}

	// XXX we should not close vmStatus chan, it will be closed in shutdown process
	return vmResponse.Code, vmResponse.Cause, nil
}
Example #25
func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = filepath.Join(b.contextPath, orig)
		destPath   string
	)
	destPath = dest
	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			glog.Error(err.Error())
			return err
		}
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		glog.Error(err.Error())
		return err
	}

	if fi.IsDir() {
		err := copyAsDirectory(origPath, destPath, destExists)
		if err != nil {
			glog.Error(err.Error())
		}
		return err
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving.  First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in / .
		tarDest := destPath
		if strings.HasSuffix(tarDest, "/") {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			glog.Infof("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	if err := system.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
		glog.Error(err.Error())
		return err
	}
	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
		glog.Error(err.Error())
		return err
	}

	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = filepath.Join(destPath, filepath.Base(origPath))
	}
	_ = resPath
	/*
		if err := fixPermissions(origPath, resPath, 0, 0, destExists); err != nil {
			glog.Error(err.Error())
			return err
		}
	*/
	return nil
}
Example #26
func mainDaemon(config, host string, flDisableIptables bool) {
	glog.V(1).Infof("The config file is %s", config)
	if config == "" {
		config = "/etc/hyper/config"
	}
	if _, err := os.Stat(config); err != nil {
		if os.IsNotExist(err) {
			glog.Errorf("Can not find config file(%s)", config)
			return
		}
		glog.Errorf(err.Error())
		return
	}

	os.Setenv("HYPER_CONFIG", config)
	cfg, err := goconfig.LoadConfigFile(config)
	if err != nil {
		glog.Errorf("Read config file (%s) failed, %s", config, err.Error())
		return
	}

	hyperRoot, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Root")

	if hyperRoot == "" {
		hyperRoot = "/var/lib/hyper"
	}
	utils.HYPER_ROOT = hyperRoot
	if _, err := os.Stat(hyperRoot); err != nil {
		if err := os.MkdirAll(hyperRoot, 0755); err != nil {
			glog.Errorf(err.Error())
			return
		}
	}

	storageDriver, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "StorageDriver")
	if storageDriver != "" {
		graphdriver.DefaultDriver = storageDriver
	}

	eng := engine.New(config)
	docker.Init()

	d, err := daemon.NewDaemon(eng)
	if err != nil {
		glog.Errorf("The hyperd create failed, %s", err.Error())
		return
	}

	var drivers []string
	if runtime.GOOS == "darwin" {
		drivers = []string{"vbox"}
	} else {
		driver, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Hypervisor")
		if driver != "" {
			drivers = []string{driver}
		} else {
			drivers = []string{"xen", "kvm", "vbox"}
		}
	}

	for _, dri := range drivers {
		driver := strings.ToLower(dri)
		if hypervisor.HDriver, err = driverloader.Probe(driver); err != nil {
			glog.Warningf("%s", err.Error())
			continue
		} else {
			d.Hypervisor = driver
			glog.Infof("The hypervisor's driver is %s", driver)
			break
		}
	}

	if hypervisor.HDriver == nil {
		glog.Errorf("Please specify the exec driver, such as 'kvm', 'xen' or 'vbox'")
		return
	}

	disableIptables := cfg.MustBool(goconfig.DEFAULT_SECTION, "DisableIptables", false)
	if err = hypervisor.InitNetwork(d.BridgeIface, d.BridgeIP, disableIptables || flDisableIptables); err != nil {
		glog.Errorf("InitNetwork failed, %s", err.Error())
		return
	}

	defaultLog, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Logger")
	defaultLogCfg, _ := cfg.GetSection("Log")
	d.DefaultLogCfg(defaultLog, defaultLogCfg)

	// Set the daemon object as the global variable
	// which will be used for puller and builder
	utils.SetDaemon(d)
	if err := d.DockerCli.Setup(); err != nil {
		glog.Error(err.Error())
		return
	}

	stopAll := make(chan os.Signal, 1)
	signal.Notify(stopAll, syscall.SIGINT, syscall.SIGTERM)
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGHUP)

	// Install the accepted jobs
	if err := d.Install(eng); err != nil {
		glog.Errorf("The hyperd install failed, %s", err.Error())
		return
	}

	glog.V(0).Infof("Hyper daemon: %s %s",
		utils.VERSION,
		utils.GITCOMMIT,
	)

	// after the daemon is done setting up we can tell the api to start
	// accepting connections
	if err := eng.Job("acceptconnections").Run(); err != nil {
		glog.Error("the acceptconnections job run failed!")
		return
	}
	defaultHost := []string{}
	if host != "" {
		defaultHost = append(defaultHost, host)
	}
	defaultHost = append(defaultHost, "unix:///var/run/hyper.sock")
	if d.Host != "" {
		defaultHost = append(defaultHost, d.Host)
	}

	job := eng.Job("serveapi", defaultHost...)

	// The serve API job never exits unless an error occurs
	// We need to start it as a goroutine and wait on it so
	// daemon doesn't exit
	serveAPIWait := make(chan error)
	go func() {
		if err := job.Run(); err != nil {
			glog.Errorf("ServeAPI error: %v", err)
			serveAPIWait <- err
			return
		}
		serveAPIWait <- nil
	}()

	glog.V(0).Info("Daemon has completed initialization")

	if err := d.Restore(); err != nil {
		glog.Warningf("Fail to restore the previous VM")
		return
	}

	// Daemon is fully initialized and handling API traffic
	// Wait for serve API job to complete
	select {
	case errAPI := <-serveAPIWait:
		// If we have an error here it is unique to API (as daemonErr would have
		// exited the daemon process above)
		eng.Shutdown()
		if errAPI != nil {
			glog.Warningf("Shutting down due to ServeAPI error: %v", errAPI)
		}
		break
	case <-stop:
		d.DestroyAndKeepVm()
		eng.Shutdown()
		break
	case <-stopAll:
		d.DestroyAllVm()
		eng.Shutdown()
		break
	}
}
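The tail of mainDaemon runs the blocking serveapi job in a goroutine and then selects between its error channel and the signal channels. A self-contained sketch of that wait pattern (the server function here is a placeholder, not the real job):

package main

import (
	"errors"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// serveAPI stands in for the blocking "serveapi" job; it pretends to fail
// after a couple of seconds so the select below has something to report.
func serveAPI() error {
	time.Sleep(2 * time.Second)
	return errors.New("listener closed")
}

func main() {
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM)

	serveAPIWait := make(chan error, 1)
	go func() { serveAPIWait <- serveAPI() }()

	select {
	case err := <-serveAPIWait:
		if err != nil {
			fmt.Println("shutting down due to ServeAPI error:", err)
		}
	case <-stop:
		fmt.Println("shutting down on signal")
	}
}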
Example #27
func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter) error {
	endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
	if err != nil {
		if repoInfo.Index.Official {
			glog.V(1).Infof("Unable to push to V2 registry, falling back to v1: %s", err)
			return ErrV2RegistryUnavailable
		}
		return fmt.Errorf("error getting registry endpoint: %s", err)
	}

	tags, err := s.getImageTags(localRepo, tag)
	if err != nil {
		return err
	}
	if len(tags) == 0 {
		return fmt.Errorf("No tags to push for %s", repoInfo.LocalName)
	}

	auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, false)
	if err != nil {
		return fmt.Errorf("error getting authorization: %s", err)
	}

	for _, tag := range tags {
		glog.V(1).Infof("Pushing repository: %s:%s", repoInfo.CanonicalName, tag)

		layerId, exists := localRepo[tag]
		if !exists {
			return fmt.Errorf("tag does not exist: %s", tag)
		}

		layer, err := s.graph.Get(layerId)
		if err != nil {
			return err
		}

		m := &registry.ManifestData{
			SchemaVersion: 1,
			Name:          repoInfo.RemoteName,
			Tag:           tag,
			Architecture:  layer.Architecture,
		}
		var metadata runconfig.Config
		if layer.Config != nil {
			metadata = *layer.Config
		}

		layersSeen := make(map[string]bool)
		layers := []*image.Image{layer}
		for ; layer != nil; layer, err = layer.GetParent() {
			if err != nil {
				return err
			}

			if layersSeen[layer.ID] {
				break
			}
			layers = append(layers, layer)
			layersSeen[layer.ID] = true
		}
		m.FSLayers = make([]*registry.FSLayer, len(layers))
		m.History = make([]*registry.ManifestHistory, len(layers))

		// Schema version 1 requires layer ordering from top to root
		for i, layer := range layers {
			glog.V(1).Infof("Pushing layer: %s", layer.ID)

			if layer.Config != nil && metadata.Image != layer.ID {
				if err := runconfig.Merge(&metadata, layer.Config); err != nil {
					return err
				}
			}
			jsonData, err := layer.RawJson()
			if err != nil {
				return fmt.Errorf("cannot retrieve the path for %s: %s", layer.ID, err)
			}

			checksum, err := layer.GetCheckSum(s.graph.ImageRoot(layer.ID))
			if err != nil {
				return fmt.Errorf("error getting image checksum: %s", err)
			}

			var exists bool
			if len(checksum) > 0 {
				dgst, err := digest.ParseDigest(checksum)
				if err != nil {
					return fmt.Errorf("Invalid checksum %s: %s", checksum, err)
				}

				// Call mount blob
				exists, err = r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, dgst, auth)
				if err != nil {
					out.Write(sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil))
					return err
				}
			}
			if !exists {
				if cs, err := s.pushV2Image(r, layer, endpoint, repoInfo.RemoteName, sf, out, auth); err != nil {
					return err
				} else if cs != checksum {
					// Cache new checksum
					if err := layer.SaveCheckSum(s.graph.ImageRoot(layer.ID), cs); err != nil {
						return err
					}
					checksum = cs
				}
			} else {
				out.Write(sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil))
			}
			m.FSLayers[i] = &registry.FSLayer{BlobSum: checksum}
			m.History[i] = &registry.ManifestHistory{V1Compatibility: string(jsonData)}
		}

		if err := validateManifest(m); err != nil {
			return fmt.Errorf("invalid manifest: %s", err)
		}

		glog.V(1).Infof("Pushing %s:%s to v2 repository", repoInfo.LocalName, tag)
		mBytes, err := json.MarshalIndent(m, "", "   ")
		if err != nil {
			return err
		}
		js, err := libtrust.NewJSONSignature(mBytes)
		if err != nil {
			return err
		}

		if err = js.Sign(s.trustKey); err != nil {
			return err
		}

		signedBody, err := js.PrettySignature("signatures")
		if err != nil {
			return err
		}
		glog.Infof("Signed manifest for %s:%s using daemon's key: %s", repoInfo.LocalName, tag, s.trustKey.KeyID())

		// push the manifest
		digest, err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, signedBody, mBytes, auth)
		if err != nil {
			return err
		}

		out.Write(sf.FormatStatus("", "Digest: %s", digest))
	}
	return nil
}
Example #28
func (d *Driver) Diff(id, parent string) (diff archive.Archive, err error) {
	if d.daemon == nil {
		if err := d.Setup(); err != nil {
			return nil, err
		}
	}
	var (
		podData string
		tgtDisk string = ""
		code    int
		cause   string
	)
	srcDisk := fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), id)
	if parent != "" {
		tgtDisk = fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), parent)
	}
	outDir := path.Join(utils.HYPER_ROOT, "tar")
	if err := os.MkdirAll(outDir, 0755); err != nil {
		return nil, err
	}
	uuid, err := virtualbox.GetMediumUUID(srcDisk)
	if err == nil {
		srcDisk = uuid
	}
	// create pod
	podId := "diff-" + id[:10]
	podData, err = MakeDiffPod(podId, "puller:latest", id, srcDisk, tgtDisk, outDir)
	if err != nil {
		return nil, err
	}

	// start or replace pod
	vm, ok := d.daemon.VmList[d.pullVm]
	if !ok {
		return nil, fmt.Errorf("can not find VM(%s)", d.pullVm)
	}
	if vm.Status == types.S_VM_IDLE {
		code, cause, err = d.daemon.StartPod(podId, podData, d.pullVm, nil, false, true, types.VM_KEEP_AFTER_SHUTDOWN)
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			d.daemon.KillVm(d.pullVm)
			return nil, err
		}
		vm := d.daemon.VmList[d.pullVm]
		// wait for cmd finish
		_, _, ret3, err := vm.GetVmChan()
		if err != nil {
			glog.Error(err.Error())
			return nil, err
		}
		subVmStatus := ret3.(chan *types.VmResponse)
		var vmResponse *types.VmResponse
		for {
			vmResponse = <-subVmStatus
			if vmResponse.VmId == d.pullVm {
				if vmResponse.Code == types.E_POD_FINISHED {
					glog.Infof("Got E_POD_FINISHED code response")
					break
				}
			}
		}

		d.daemon.PodList[podId].Vm = d.pullVm
		// release pod from VM
		code, cause, err = d.daemon.StopPod(podId, "no")
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			d.daemon.KillVm(d.pullVm)
			return nil, err
		}
		d.daemon.CleanPod(podId)
	} else {
		glog.Errorf("pull vm should not be associated")
		return nil, fmt.Errorf("pull vm is not idle")
	}

	tarFile := outDir + "/" + id + ".tar"
	if _, err := os.Stat(tarFile); err != nil {
		// If the parent is empty, the first layer is also empty,
		// so we may not get a tar file.
		if parent == "" {
			layerFs := fmt.Sprintf("%s/diff/%s", d.RootPath(), id)
			archive, err := archive.Tar(layerFs, archive.Uncompressed)
			if err != nil {
				return nil, err
			}
			return ioutils.NewReadCloserWrapper(archive, func() error {
				err := archive.Close()
				return err
			}), nil
		} else {
			return nil, fmt.Errorf("the out tar file is not exist")
		}
	}
	f, err := os.Open(tarFile)
	if err != nil {
		return nil, err
	}
	var archive io.ReadCloser
	archive = ioutil.NopCloser(f)
	glog.Infof("Diff between %s and %s", id, parent)
	return ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		return err
	}), nil
}
Example #29
func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, auth *registry.RequestAuthorization) (bool, error) {
	glog.V(1).Infof("Pulling tag from V2 registry: %q", tag)

	remoteDigest, manifestBytes, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
	if err != nil {
		return false, err
	}

	// loadManifest ensures that the manifest payload has the expected digest
	// if the tag is a digest reference.
	localDigest, manifest, verified, err := s.loadManifest(manifestBytes, tag, remoteDigest)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest: %s", err)
	}

	if verified {
		glog.Infof("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
	}
	out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))

	// downloadInfo is used to pass information from download to extractor
	type downloadInfo struct {
		imgJSON    []byte
		img        *image.Image
		digest     digest.Digest
		tmpFile    *os.File
		length     int64
		downloaded bool
		err        chan error
	}

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		var (
			sumStr  = manifest.FSLayers[i].BlobSum
			imgJSON = []byte(manifest.History[i].V1Compatibility)
		)

		img, err := image.NewImgJSON(imgJSON)
		if err != nil {
			return false, fmt.Errorf("failed to parse json: %s", err)
		}
		downloads[i].img = img

		// Check if exists
		if s.graph.Exists(img.ID) {
			glog.V(1).Infof("Image already exists: %s", img.ID)
			continue
		}

		dgst, err := digest.ParseDigest(sumStr)
		if err != nil {
			return false, err
		}
		downloads[i].digest = dgst

		out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

		downloadFunc := func(di *downloadInfo) error {
			glog.V(1).Infof("pulling blob %q to V1 img %s", sumStr, img.ID)

			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
				} else {
					glog.V(1).Infof("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
			} else {
				defer s.poolRemove("pull", "img:"+img.ID)
				tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
				if err != nil {
					return err
				}

				r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest, auth)
				if err != nil {
					return err
				}
				defer r.Close()

				verifier, err := digest.NewDigestVerifier(di.digest)
				if err != nil {
					return err
				}

				if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
					In:        ioutil.NopCloser(io.TeeReader(r, verifier)),
					Out:       out,
					Formatter: sf,
					Size:      int(l),
					NewLines:  false,
					ID:        stringid.TruncateID(img.ID),
					Action:    "Downloading",
				})); err != nil {
					return fmt.Errorf("unable to copy v2 image blob data: %s", err)
				}

				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Verifying Checksum", nil))

				if !verifier.Verified() {
					return fmt.Errorf("image layer digest verification failed for %q", di.digest)
				}

				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))

				glog.V(1).Infof("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
				di.tmpFile = tmpFile
				di.length = l
				di.downloaded = true
			}
			di.imgJSON = imgJSON

			return nil
		}

		downloads[i].err = make(chan error)
		go func(di *downloadInfo) {
			di.err <- downloadFunc(di)
		}(&downloads[i])
	}

	var tagUpdated bool
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			if err := <-d.err; err != nil {
				return false, err
			}
		}
		if d.downloaded {
			// if tmpFile is empty assume download and extracted elsewhere
			defer os.Remove(d.tmpFile.Name())
			defer d.tmpFile.Close()
			d.tmpFile.Seek(0, 0)
			if d.tmpFile != nil {
				err = s.graph.Register(d.img,
					progressreader.New(progressreader.Config{
						In:        d.tmpFile,
						Out:       out,
						Formatter: sf,
						Size:      int(d.length),
						ID:        stringid.TruncateID(d.img.ID),
						Action:    "Extracting",
					}))
				if err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
			tagUpdated = true
		} else {
			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
		}

	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := s.Get(repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	if verified && tagUpdated {
		out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	if localDigest != remoteDigest { // this is not a verification check.
		// NOTE(stevvooe): This is a very defensive branch and should never
		// happen, since all manifest digest implementations use the same
		// algorithm.
		out.Write(sf.FormatStatus("", "Remote Digest: %s", remoteDigest))
	}

	out.Write(sf.FormatStatus("", "Digest: %s", localDigest))

	if tag == localDigest.String() {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would seem to be dangling by a user.

		if err = s.SetDigest(repoInfo.LocalName, localDigest.String(), downloads[0].img.ID); err != nil {
			return false, err
		}
	}

	if !utils.DigestReference(tag) {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = s.Tag(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
			return false, err
		}
	}

	return tagUpdated, nil
}
Example #30
func (b *Builder) run(c *daemon.Container) error {
	if b.Verbose {
	}

	//start the pod
	var (
		mycontainer *hypervisor.Container
		code        int
		cause       string
		err         error
	)
	b.Hyperdaemon.PodList.Find(func(p *hyperdaemon.Pod) bool {
		ps := p.Status()
		if ps == nil {
			return false
		}
		for _, con := range ps.Containers {
			if con.Id == c.ID {
				mycontainer = con
				return true
			}
		}
		return false
	})

	if mycontainer == nil {
		return fmt.Errorf("can not find that container(%s)", c.ID)
	}
	podId := mycontainer.PodId
	// start or replace pod
	vm, ok := b.Hyperdaemon.VmList[b.Name]
	if !ok {
		glog.Warningf("can not find VM(%s)", b.Name)

		bo := &hypervisor.BootConfig{
			CPU:    1,
			Memory: 512,
			Kernel: b.Hyperdaemon.Kernel,
			Initrd: b.Hyperdaemon.Initrd,
			Bios:   b.Hyperdaemon.Bios,
			Cbfs:   b.Hyperdaemon.Cbfs,
			Vbox:   b.Hyperdaemon.VboxImage,
		}

		vm = b.Hyperdaemon.NewVm(b.Name, 1, 512, false, types.VM_KEEP_AFTER_FINISH)

		err = vm.Launch(bo)
		if err != nil {
			return err
		}

		b.Hyperdaemon.AddVm(vm)
	}
	if vm.Status == types.S_VM_IDLE {
		code, cause, err = b.Hyperdaemon.StartPod(podId, "", b.Name, nil, false, false, types.VM_KEEP_AFTER_FINISH, []*hypervisor.TtyIO{})
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			b.Hyperdaemon.KillVm(b.Name)
			return err
		}
		vm = b.Hyperdaemon.VmList[b.Name]
		// wait for cmd finish
		Status, err := vm.GetResponseChan()
		if err != nil {
			glog.Error(err.Error())
			return err
		}
		defer vm.ReleaseResponseChan(Status)
		var vmResponse *types.VmResponse
		for {
			vmResponse = <-Status
			if vmResponse.VmId == b.Name {
				if vmResponse.Code == types.E_POD_FINISHED {
					glog.Infof("Got E_POD_FINISHED code response")
					break
				}
			}
		}

		pod, ok := b.Hyperdaemon.PodList.Get(podId)
		if !ok {
			return fmt.Errorf("Cannot find pod %s", podId)
		}
		pod.SetVM(b.Name, vm)
		// release pod from VM
		code, cause, err = b.Hyperdaemon.StopPod(podId, "no")
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			b.Hyperdaemon.KillVm(b.Name)
			return err
		}
	} else {
		glog.Errorf("Vm is not IDLE")
		return fmt.Errorf("Vm is not IDLE")
	}

	return nil
}