Example #1
func (daemon *Daemon) AddPod(pod *Pod, podArgs string) (err error) {
	// store the UserPod into the db
	if err = daemon.WritePodToDB(pod.id, []byte(podArgs)); err != nil {
		glog.V(1).Info("Found an error while saveing the POD file")
		return
	}
	defer func() {
		if err != nil {
			daemon.DeletePodFromDB(pod.id)
		}
	}()

	daemon.PodList.Put(pod)
	defer func() {
		if err != nil {
			daemon.RemovePod(pod.id)
		}
	}()

	if err = daemon.WritePodAndContainers(pod.id); err != nil {
		glog.V(1).Info("Found an error while saveing the Containers info")
		return
	}

	pod.status.Handler.Data = daemon
	return nil
}
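
The pattern to note here is the named error return combined with deferred rollbacks: each defer inspects err and undoes the step it guards, so a failure in a later step unwinds the earlier writes in reverse order. A minimal self-contained sketch of the idiom (names are illustrative, not from the project):

package main

import (
	"errors"
	"fmt"
)

var store = map[string]string{}

func addRecord(id, payload string) (err error) {
	store[id] = payload // step 1: persist
	defer func() {
		// Runs last and sees the final value of the named return err,
		// so it can roll back step 1 if any later step failed.
		if err != nil {
			delete(store, id)
		}
	}()

	// step 2: a later step that can fail after the write
	if payload == "" {
		return errors.New("empty payload")
	}
	return nil
}

func main() {
	fmt.Println(addRecord("a", ""), store)  // error; rollback leaves store empty
	fmt.Println(addRecord("b", "x"), store) // success; record kept
}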
Example #2
// loginV2 tries to login to the v2 registry server. The given registry endpoint has been
// pinged or set up with a list of authorization challenges. Each of these challenges is
// tried until one of them succeeds. Currently supported challenge schemes are:
// 		HTTP Basic Authorization
// 		Token Authorization with a separate token issuing server
// NOTE: the v2 logic does not attempt to create a user account if one doesn't exist. For
// now, users should create their account through other means like directly from a web page
// served by the v2 registry service provider. Whether this will be supported in the future
// is to be determined.
func loginV2(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) {
	glog.V(1).Infof("attempting v2 login to registry endpoint %s", registryEndpoint)
	var (
		err       error
		allErrors []error
	)

	for _, challenge := range registryEndpoint.AuthChallenges {
		glog.V(1).Infof("trying %q auth challenge with params %s", challenge.Scheme, challenge.Parameters)

		switch strings.ToLower(challenge.Scheme) {
		case "basic":
			err = tryV2BasicAuthLogin(authConfig, challenge.Parameters, registryEndpoint)
		case "bearer":
			err = tryV2TokenAuthLogin(authConfig, challenge.Parameters, registryEndpoint)
		default:
			// Unsupported challenge types are explicitly skipped.
			err = fmt.Errorf("unsupported auth scheme: %q", challenge.Scheme)
		}

		if err == nil {
			return "Login Succeeded", nil
		}

		glog.V(1).Infof("error trying auth challenge %q: %s", challenge.Scheme, err)

		allErrors = append(allErrors, err)
	}

	return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors)
}
Example #3
func (daemon *Daemon) GetPodByContainerIdOrName(name string) (pod *Pod, idx int, err error) {
	daemon.PodList.RLock()
	glog.V(2).Infof("lock read of PodList")
	defer glog.V(2).Infof("unlock read of PodList")
	defer daemon.PodList.RUnlock()

	wslash := name
	if name[0] != '/' {
		wslash = "/" + name
	}

	var c *hypervisor.Container
	pod = daemon.PodList.Find(func(p *Pod) bool {
		for idx, c = range p.status.Containers {
			if c.Name == wslash || c.Id == name {
				return true
			}
		}
		return false
	})

	if pod == nil {
		err = fmt.Errorf("cannot found container %s", name)
		return
	}

	return
}
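
The lookup works because the predicate passed to Find assigns to the outer variables idx and c, so when Find stops on a match those values are already populated. A hypothetical sketch of the Find contract this relies on (the real PodList internals are not shown in these examples):

package main

import "fmt"

type pod struct{ id string }

// find walks the collection and returns the first element the predicate
// accepts, or nil. Side effects in the predicate are visible afterwards,
// which is what GetPodByContainerIdOrName uses to capture idx and c.
func find(pods []*pod, pred func(*pod) bool) *pod {
	for _, p := range pods {
		if pred(p) {
			return p
		}
	}
	return nil
}

func main() {
	var hit string
	pods := []*pod{{"pod-a"}, {"pod-b"}}
	p := find(pods, func(p *pod) bool {
		hit = p.id // captured by the closure, set on every probe
		return p.id == "pod-b"
	})
	fmt.Println(p != nil, hit)
}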
Example #4
File: vm.go Project: sulochan/hyper
// This function will only be invoked during daemon start
func (daemon *Daemon) AssociateAllVms() error {
	for _, mypod := range daemon.PodList {
		if mypod.Vm == "" {
			continue
		}
		podData, err := daemon.GetPodByName(mypod.Id)
		if err != nil {
			continue
		}
		userPod, err := pod.ProcessPodBytes(podData)
		if err != nil {
			continue
		}
		glog.V(1).Infof("Associate the POD(%s) with VM(%s)", mypod.Id, mypod.Vm)

		vmData, err := daemon.GetVmData(mypod.Vm)
		if err != nil {
			continue
		}
		glog.V(1).Infof("The data for vm(%s) is %v", mypod.Vm, vmData)

		vm := daemon.NewVm(mypod.Vm, userPod.Resource.Vcpu, userPod.Resource.Memory, false, types.VM_KEEP_NONE)

		err = vm.AssociateVm(mypod, vmData)
		if err != nil {
			continue
		}

		daemon.AddVm(vm)
	}
	return nil
}
Example #5
// verifyTrustedKeys checks the keys provided against the trust store,
// ensuring that the provided keys are trusted for the namespace. The keys
// provided from this method must come from the signatures provided as part of
// the manifest JWS package, obtained from unpackSignedManifest or libtrust.
func (s *TagStore) verifyTrustedKeys(namespace string, keys []libtrust.PublicKey) (verified bool, err error) {
	if namespace[0] != '/' {
		namespace = "/" + namespace
	}

	for _, key := range keys {
		b, err := key.MarshalJSON()
		if err != nil {
			return false, fmt.Errorf("error marshalling public key: %s", err)
		}
		// Check key has read/write permission (0x03)
		v, err := s.trustService.CheckKey(namespace, b, 0x03)
		if err != nil {
			vErr, ok := err.(trust.NotVerifiedError)
			if !ok {
				return false, fmt.Errorf("error running key check: %s", err)
			}
			glog.V(1).Infof("Key check result: %v", vErr)
		}
		verified = v
	}

	if verified {
		glog.V(1).Infof("Key check result: verified")
	}

	return
}
Example #6
func validateEndpoint(endpoint *Endpoint) error {
	glog.V(1).Infof("pinging registry endpoint %s", endpoint)

	// Try HTTPS ping to registry
	endpoint.URL.Scheme = "https"
	if _, err := endpoint.Ping(); err != nil {
		if endpoint.IsSecure {
			// If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry`
			// in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP.
			return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host)
		}

		// If registry is insecure and HTTPS failed, fallback to HTTP.
		glog.V(1).Infof("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err)
		endpoint.URL.Scheme = "http"

		var err2 error
		if _, err2 = endpoint.Ping(); err2 == nil {
			return nil
		}

		return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2)
	}

	return nil
}
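
The shape here is try-HTTPS-first, fall back to HTTP only for endpoints explicitly marked insecure, and report both errors if the fallback also fails. A self-contained sketch of that shape using only the standard library (pingScheme stands in for endpoint.Ping; names are illustrative):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func pingScheme(scheme, host string) error {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(scheme + "://" + host + "/v1/_ping")
	if err != nil {
		return err
	}
	resp.Body.Close()
	return nil
}

func validate(host string, secure bool) error {
	httpsErr := pingScheme("https", host)
	if httpsErr == nil {
		return nil
	}
	if secure {
		// A secure registry must never silently fall back to HTTP.
		return fmt.Errorf("invalid registry endpoint %s: %v", host, httpsErr)
	}
	if httpErr := pingScheme("http", host); httpErr != nil {
		return fmt.Errorf("invalid registry endpoint %s. HTTPS attempt: %v. HTTP attempt: %v",
			host, httpsErr, httpErr)
	}
	return nil
}

func main() {
	fmt.Println(validate("localhost:5000", false))
}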
Example #7
func (daemon *Daemon) CmdPodRm(job *engine.Job) (err error) {
	var (
		podId = job.Args[0]
		code  = 0
		cause = ""
	)

	daemon.PodsMutex.Lock()
	glog.V(2).Infof("lock PodList")
	defer glog.V(2).Infof("unlock PodList")
	defer daemon.PodsMutex.Unlock()
	code, cause, err = daemon.CleanPod(podId)
	if err != nil {
		return err
	}

	// Prepare the vm status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err = v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #8
func (daemon *Daemon) CmdPodStop(job *engine.Job) error {
	if len(job.Args) == 0 {
		return fmt.Errorf("Can not execute 'stop' command without any pod name!")
	}
	podId := job.Args[0]
	stopVm := job.Args[1]
	daemon.PodsMutex.Lock()
	glog.V(2).Infof("lock PodList")
	defer glog.V(2).Infof("unlock PodList")
	defer daemon.PodsMutex.Unlock()
	code, cause, err := daemon.StopPod(podId, stopVm)
	if err != nil {
		return err
	}

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #9
func (cli Docker) SendCmdPull(image string, imagePullConfig *graph.ImagePullConfig) ([]byte, int, error) {
	// We need to create a container via an image object. If the image
	// is not stored locally, we need to pull it from the Docker Hub.

	// Get a Repository name and tag name from the argument, but be careful
	// with the Repository name with a port number.  For example:
	//      localdomain:5000/samba/hipache:latest
	repository, tag := parsers.ParseRepositoryTag(image)
	if err := registry.ValidateRepositoryName(repository); err != nil {
		return nil, -1, err
	}
	if tag == "" {
		tag = "latest"
	}
	if len(tag) > 0 {
		if err := tags.ValidateTagName(tag); err != nil {
			return nil, -1, err
		}
	}

	glog.V(3).Infof("The Repository is %s, and the tag is %s\n", repository, tag)
	glog.V(3).Info("pull the image from the repository!\n")
	err := cli.daemon.Repositories().Pull(repository, tag, imagePullConfig)
	if err != nil {
		return nil, -1, err
	}
	return nil, 200, nil
}
Example #10
func (daemon *Daemon) CmdPodCreate(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum Pod is 1024!")
	}
	podArgs := job.Args[0]
	autoRemove := false
	if job.Args[1] == "yes" || job.Args[1] == "true" {
		autoRemove = true
	}

	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	daemon.PodList.Lock()
	glog.V(2).Infof("lock PodList")
	defer glog.V(2).Infof("unlock PodList")
	defer daemon.PodList.Unlock()
	err := daemon.CreatePod(podId, podArgs, autoRemove)
	if err != nil {
		return err
	}

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", 0)
	v.Set("Cause", "")
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #11
func watchDog(qc *QemuContext, hub chan hypervisor.VmEvent) {
	wdt := qc.wdt
	for {
		msg, ok := <-wdt
		if ok {
			switch msg {
			case "quit":
				glog.V(1).Info("quit watch dog.")
				return
			case "kill":
				success := false
				if qc.process != nil {
					glog.V(0).Infof("kill Qemu... %d", qc.process.Pid)
					if err := qc.process.Kill(); err == nil {
						success = true
					}
				} else {
					glog.Warning("no process to be killed")
				}
				hub <- &hypervisor.VmKilledEvent{Success: success}
				return
			}
		} else {
			glog.V(1).Info("chan closed, quit watch dog.")
			break
		}
	}
}
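
watchDog is the common command-channel worker: it blocks on a channel, dispatches on the message, and treats channel closure as a request to exit. A runnable toy version of the same pattern (names illustrative, not the QemuContext API):

package main

import "fmt"

func watch(cmds <-chan string, done chan<- bool) {
	// range exits when cmds is closed, mirroring the ok == false branch.
	for msg := range cmds {
		switch msg {
		case "quit":
			fmt.Println("quit watchdog")
			done <- true
			return
		case "kill":
			fmt.Println("killing supervised process")
			done <- true
			return
		}
	}
	fmt.Println("chan closed, quit watchdog")
	done <- false
}

func main() {
	cmds := make(chan string, 1)
	done := make(chan bool, 1)
	go watch(cmds, done)
	cmds <- "quit"
	fmt.Println("graceful =", <-done)
}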
Example #12
func waitConsoleOutput(ctx *VmContext) {

	conn, err := UnixSocketConnect(ctx.ConsoleSockName)
	if err != nil {
		glog.Error("failed to connected to ", ctx.ConsoleSockName, " ", err.Error())
		return
	}

	glog.V(1).Info("connected to ", ctx.ConsoleSockName)

	tc, err := telnet.NewConn(conn)
	if err != nil {
		glog.Error("fail to init telnet connection to ", ctx.ConsoleSockName, ": ", err.Error())
		return
	}
	glog.V(1).Infof("connected %s as telnet mode.", ctx.ConsoleSockName)

	cout := make(chan string, 128)
	go TtyLiner(tc, cout)

	for {
		line, ok := <-cout
		if ok {
			glog.V(1).Info("[console] ", line)
		} else {
			glog.Info("console output end")
			break
		}
	}
}
Example #13
func (d *Driver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) {
	// Mount the root filesystem so we can apply the diff/layer.
	layerFs, err := d.Get(id, "")
	if err != nil {
		return 0, err
	}
	defer d.Put(id)

	start := time.Now().UTC()
	glog.V(1).Info("Start untar layer")
	if size, err = chrootarchive.ApplyLayer(layerFs, diff); err != nil {
		return 0, err
	}
	glog.V(1).Infof("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
	root := path.Join(utils.HYPER_ROOT, "vbox")
	idDisk := fmt.Sprintf("%s/images/%s.vdi", root, id)
	if _, err = os.Stat(idDisk); err != nil {
		return 0, err
	}
	if err = d.VmMountLayer(id); err != nil {
		return 0, err
	}
	// XXX should remove the image/container's directory
	return size, err
}
Example #14
// This function will only be invoked during daemon start
func (vm *Vm) AssociateVm(mypod *Pod, data []byte) error {
	glog.V(1).Infof("Associate the POD(%s) with VM(%s)", mypod.Id, mypod.Vm)
	var (
		PodEvent  = make(chan VmEvent, 128)
		Status    = make(chan *types.VmResponse, 128)
		subStatus = make(chan *types.VmResponse, 128)
	)

	go VmAssociate(mypod.Vm, PodEvent, Status, mypod.Wg, data)

	go vm.handlePodEvent(mypod)

	ass := <-Status
	if ass.Code != types.E_OK {
		glog.Errorf("cannot associate with vm: %s, error status %d (%s)", mypod.Vm, ass.Code, ass.Cause)
		return errors.New("load vm status failed")
	}

	if err := vm.SetVmChan(PodEvent, Status, subStatus); err != nil {
		glog.V(1).Infof("SetVmChan error: %s", err.Error())
		return err
	}

	mypod.Status = types.S_POD_RUNNING
	mypod.SetContainerStatus(types.S_POD_RUNNING)

	vm.Status = types.S_VM_ASSOCIATED
	vm.Pod = mypod

	return nil
}
Example #15
func (vm *Vm) Kill() (int, string, error) {
	PodEvent, Status, subStatus, err := vm.GetVmChan()
	if err != nil {
		return -1, "", err
	}
	var Response *types.VmResponse
	shutdownPodEvent := &ShutdownCommand{Wait: false}
	PodEvent.(chan VmEvent) <- shutdownPodEvent
	// wait for the VM response
	for {
		stop := 0
		select {
		case Response = <-Status.(chan *types.VmResponse):
			glog.V(1).Infof("Got response: %d: %s", Response.Code, Response.Cause)
			if Response.Code == types.E_VM_SHUTDOWN {
				stop = 1
			}
		case Response = <-subStatus.(chan *types.VmResponse):
			glog.V(1).Infof("Got response: %d: %s", Response.Code, Response.Cause)
			if Response.Code == types.E_VM_SHUTDOWN {
				stop = 1
			}
		}
		if stop == 1 {
			break
		}
	}
	close(Status.(chan *types.VmResponse))
	close(subStatus.(chan *types.VmResponse))

	return Response.Code, Response.Cause, nil
}
Example #16
func (vm *Vm) handlePodEvent(mypod *Pod) {
	glog.V(1).Infof("hyperHandlePodEvent pod %s, vm %s", mypod.Id, vm.Id)

	_, ret2, ret3, err := vm.GetVmChan()
	if err != nil {
		return
	}

	glog.V(1).Infof("hyperHandlePodEvent pod %s, vm %s", mypod.Id, vm.Id)
	Status := ret2.(chan *types.VmResponse)
	subStatus := ret3.(chan *types.VmResponse)

	// Recover if subStatus is closed while we forward a response. The
	// defer must sit outside the loop: deferred calls only run when the
	// function returns, so a defer inside the loop just accumulates one
	// handler per iteration.
	defer func() {
		if err := recover(); err != nil {
			glog.Warning("panic during send shutdown message to channel")
		}
	}()

	for {
		Response := <-Status
		subStatus <- Response

		exit := mypod.Handler.Handle(Response, mypod.Handler.Data, mypod, vm)
		if exit {
			break
		}
	}
}
Example #17
func deviceRemoveHandler(ctx *VmContext, ev VmEvent) (bool, bool) {
	processed := true
	success := true
	switch ev.Event() {
	case EVENT_CONTAINER_DELETE:
		success = ctx.onContainerRemoved(ev.(*ContainerUnmounted))
		glog.V(1).Info("Unplug container return with ", success)
	case EVENT_INTERFACE_DELETE:
		success = ctx.onInterfaceRemoved(ev.(*InterfaceReleased))
		glog.V(1).Info("Unplug interface return with ", success)
	case EVENT_BLOCK_EJECTED:
		success = ctx.onVolumeRemoved(ev.(*VolumeUnmounted))
		glog.V(1).Info("Unplug block device return with ", success)
	case EVENT_VOLUME_DELETE:
		success = ctx.onBlockReleased(ev.(*BlockdevRemovedEvent))
		glog.V(1).Info("release volume return with ", success)
	case EVENT_INTERFACE_EJECTED:
		n := ev.(*NetDevRemovedEvent)
		nic := ctx.devices.networkMap[n.Index]
		var maps []pod.UserContainerPort

		for _, c := range ctx.userSpec.Containers {
			for _, m := range c.Ports {
				maps = append(maps, m)
			}
		}

		glog.V(1).Infof("release %d interface: %s", n.Index, nic.IpAddr)
		go ctx.ReleaseInterface(n.Index, nic.IpAddr, nic.Fd, maps)
	default:
		processed = false
	}
	return processed, success
}
Example #18
func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
	glog.V(1).Infof("[deviceset] AddDevice(hash=%s basehash=%s)", hash, baseHash)
	defer glog.V(1).Infof("[deviceset] AddDevice(hash=%s basehash=%s) END", hash, baseHash)

	baseInfo, err := devices.lookupDevice(baseHash)
	if err != nil {
		return err
	}

	baseInfo.lock.Lock()
	defer baseInfo.lock.Unlock()

	devices.Lock()
	defer devices.Unlock()

	if info, _ := devices.lookupDevice(hash); info != nil {
		return fmt.Errorf("device %s already exists", hash)
	}

	if err := devices.createRegisterSnapDevice(hash, baseInfo); err != nil {
		return err
	}

	return nil
}
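
AddDevice takes the per-device lock before the set-wide lock. As long as every code path acquires them in that same order, the two levels cannot deadlock. A toy sketch of the convention (types illustrative):

package main

import (
	"fmt"
	"sync"
)

type devInfo struct {
	lock sync.Mutex
	name string
}

type deviceSet struct {
	sync.Mutex // guards the devices map
	devices    map[string]*devInfo
}

// addFrom follows the same lock order as AddDevice: per-device first,
// then the whole set, so concurrent callers can never hold the two
// locks in opposite orders.
func (s *deviceSet) addFrom(base *devInfo, hash string) error {
	base.lock.Lock()
	defer base.lock.Unlock()

	s.Lock()
	defer s.Unlock()

	if _, ok := s.devices[hash]; ok {
		return fmt.Errorf("device %s already exists", hash)
	}
	s.devices[hash] = &devInfo{name: hash}
	return nil
}

func main() {
	s := &deviceSet{devices: map[string]*devInfo{"base": {name: "base"}}}
	fmt.Println(s.addFrom(s.devices["base"], "child"))
}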
Example #19
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		glog.V(0).Infof("Calling %s %s", localMethod, localRoute)

		if logging {
			glog.V(1).Infof("%s %s", r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
				glog.Warningf("client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		version := version.Version(mux.Vars(r)["version"])
		if version == "" {
			version = utils.APIVERSION
		}
		if corsHeaders != "" {
			writeCorsHeaders(w, r, corsHeaders)
		}

		if version.GreaterThan(utils.APIVERSION) {
			http.Error(w, fmt.Errorf("client and server don't have same version (client API version: %s, server API version: %s)", version, utils.APIVERSION).Error(), http.StatusNotFound)
			return
		}

		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
			glog.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
			httpError(w, err)
		}
	}
}
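
makeHttpHandler is a handler factory: the route metadata is baked into a closure so every route shares the same logging, version, and error plumbing. A minimal standard-library sketch of the same factory shape (no engine or mux types; names illustrative):

package main

import (
	"fmt"
	"log"
	"net/http"
)

type apiFunc func(w http.ResponseWriter, r *http.Request) error

func makeHandler(method, route string, fn apiFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		log.Printf("Calling %s %s", method, route)
		if err := fn(w, r); err != nil {
			log.Printf("Handler for %s %s returned error: %s", method, route, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}

func main() {
	http.Handle("/ping", makeHandler("GET", "/ping",
		func(w http.ResponseWriter, r *http.Request) error {
			_, err := fmt.Fprintln(w, "pong")
			return err
		}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}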
Example #20
// The caller must make sure that the restart policy and the status are right for a restart
func (daemon *Daemon) RestartPod(mypod *hypervisor.PodStatus) error {
	// Remove the pod
	// The pod is stopped, the vm is gone
	for _, c := range mypod.Containers {
		glog.V(1).Infof("Ready to rm container: %s", c.Id)
		if _, _, err := daemon.DockerCli.SendCmdDelete(c.Id); err != nil {
			glog.V(1).Infof("Error to rm container: %s", err.Error())
		}
	}
	daemon.RemovePod(mypod.Id)
	daemon.DeletePodContainerFromDB(mypod.Id)
	daemon.DeleteVolumeId(mypod.Id)

	podData, err := daemon.GetPodByName(mypod.Id)
	if err != nil {
		return err
	}
	lazy := hypervisor.HDriver.SupportLazyMode()

	// Start the pod
	_, _, err = daemon.StartPod(mypod.Id, string(podData), "", nil, lazy, false, types.VM_KEEP_NONE, []*hypervisor.TtyIO{})
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	if err := daemon.WritePodAndContainers(mypod.Id); err != nil {
		glog.Error("Found an error while saving the Containers info")
		return err
	}

	return nil
}
Example #21
func (qmp *QmpResponse) UnmarshalJSON(raw []byte) error {
	var tmp map[string]interface{}
	var err error
	if err = json.Unmarshal(raw, &tmp); err != nil {
		return err
	}
	glog.V(2).Info("got a message ", string(raw))
	if _, ok := tmp["event"]; ok {
		msg := &QmpEvent{}
		err = json.Unmarshal(raw, msg)
		glog.V(2).Info("got event: ", msg.Type)
		qmp.msg = msg
	} else if r, ok := tmp["return"]; ok {
		msg := &QmpResult{}
		switch r.(type) {
		case string:
			msg.Return = map[string]interface{}{
				"return": r.(string),
			}
		default:
			err = json.Unmarshal(raw, msg)
		}
		qmp.msg = msg
	} else if _, ok := tmp["error"]; ok {
		msg := &QmpError{}
		err = json.Unmarshal(raw, msg)
		qmp.msg = msg
	}
	return err
}
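
UnmarshalJSON uses a peek-then-decode scheme: decode once into a generic map to see which discriminating key is present, then decode again into the matching concrete type. A self-contained sketch of the technique (toy types, not the QMP structs):

package main

import (
	"encoding/json"
	"fmt"
)

type event struct {
	Event string `json:"event"`
}

type result struct {
	Return json.RawMessage `json:"return"`
}

func decode(raw []byte) (interface{}, error) {
	// First pass: only discover which key is present.
	var probe map[string]json.RawMessage
	if err := json.Unmarshal(raw, &probe); err != nil {
		return nil, err
	}
	// Second pass: decode into the concrete type for that key.
	switch {
	case probe["event"] != nil:
		e := &event{}
		return e, json.Unmarshal(raw, e)
	case probe["return"] != nil:
		r := &result{}
		return r, json.Unmarshal(raw, r)
	}
	return nil, fmt.Errorf("unrecognized message: %s", raw)
}

func main() {
	v, err := decode([]byte(`{"event":"SHUTDOWN"}`))
	fmt.Printf("%#v %v\n", v, err)
}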
Example #22
func (d *Driver) Put(id string) error {
	// Protect the d.active from concurrent access
	d.Lock()
	defer d.Unlock()

	mount := d.active[id]
	if mount == nil {
		glog.V(1).Infof("Put on a non-mounted device %s", id)
		// but it might be still here
		if d.Exists(id) {
			mergedDir := path.Join(d.dir(id), "merged")
			err := syscall.Unmount(mergedDir, 0)
			if err != nil {
				glog.V(1).Infof("Failed to unmount %s overlay: %v", id, err)
			}
		}
		return nil
	}

	mount.count--
	if mount.count > 0 {
		return nil
	}

	defer delete(d.active, id)
	if mount.mounted {
		err := syscall.Unmount(mount.path, 0)
		if err != nil {
			glog.V(1).Infof("Failed to unmount %s overlay: %v", id, err)
		}
		return err
	}
	return nil
}
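
Put is one half of a Get/Put refcount: the mount is only torn down when the last reference is released. A toy sketch of the counting logic in isolation (no syscalls; names illustrative):

package main

import "fmt"

type mount struct{ count int }

type driver struct{ active map[string]*mount }

func (d *driver) Get(id string) {
	m := d.active[id]
	if m == nil {
		m = &mount{}
		d.active[id] = m
		fmt.Println("mount", id) // first reference: mount for real
	}
	m.count++
}

func (d *driver) Put(id string) {
	m := d.active[id]
	if m == nil {
		fmt.Println("Put on a non-mounted device", id)
		return
	}
	m.count--
	if m.count > 0 {
		return // still referenced elsewhere
	}
	delete(d.active, id)
	fmt.Println("unmount", id) // last reference: unmount for real
}

func main() {
	d := &driver{active: map[string]*mount{}}
	d.Get("a")
	d.Get("a")
	d.Put("a") // still held once
	d.Put("a") // unmounts
}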
Example #23
// Run executes the job and blocks until the job completes.
// If the job fails it returns an error
func (job *Job) Run() (err error) {
	defer func() {
		// Wait for all background tasks to complete
		if job.closeIO {
			if err := job.Stdout.Close(); err != nil {
				glog.Errorf("%s\n", err)
			}
			if err := job.Stderr.Close(); err != nil {
				glog.Errorf("%s\n", err)
			}
			if err := job.Stdin.Close(); err != nil {
				glog.Errorf("%s\n", err)
			}
		}
	}()

	if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") {
		return fmt.Errorf("engine is shutdown")
	}
	// FIXME: this is a temporary workaround to avoid Engine.Shutdown
	// waiting 5 seconds for server/api.ServeApi to complete (which it never will)
	// every time the daemon is cleanly restarted.
	// The permanent fix is to implement Job.Stop and Job.OnStop so that
	// ServeApi can cooperate and terminate cleanly.
	if job.Name != "serveapi" {
		job.Eng.l.Lock()
		job.Eng.tasks.Add(1)
		job.Eng.l.Unlock()
		defer job.Eng.tasks.Done()
	}
	// FIXME: make this thread-safe
	// FIXME: implement wait
	if !job.end.IsZero() {
		return fmt.Errorf("%s: job has already completed", job.Name)
	}
	// Log beginning and end of the job
	if job.Eng.Logging {
		glog.V(0).Infof("+job %s\n", job.CallString())
		defer func() {
			okerr := "OK"
			if err != nil {
				okerr = fmt.Sprintf("ERR: %s", err)
			}
			glog.V(0).Infof("-job %s %s\n", job.CallString(), okerr)
		}()
	}

	if job.handler == nil {
		return fmt.Errorf("%s: command not found\n", job.Name)
	}

	var errorMessage = bytes.NewBuffer(nil)
	job.Stderr.Add(errorMessage)

	err = job.handler(job)
	job.end = time.Now()

	return
}
Example #24
func (vm *Vm) StopPod(mypod *Pod, stopVm string) *types.VmResponse {
	var Response *types.VmResponse

	PodEvent, _, Status, err := vm.GetVmChan()
	if err != nil {
		Response = &types.VmResponse{
			Code:  -1,
			Cause: err.Error(),
			Data:  nil,
		}
		return Response
	}

	if mypod.Status != types.S_POD_RUNNING {
		Response = &types.VmResponse{
			Code:  -1,
			Cause: "The POD has already stoppod",
			Data:  nil,
		}
		return Response
	}

	if stopVm == "yes" {
		mypod.Wg.Add(1)
		shutdownPodEvent := &ShutdownCommand{Wait: true}
		PodEvent.(chan VmEvent) <- shutdownPodEvent
		// wait for the VM response
		for {
			Response = <-Status.(chan *types.VmResponse)
			glog.V(1).Infof("Got response: %d: %s", Response.Code, Response.Cause)
			if Response.Code == types.E_VM_SHUTDOWN {
				mypod.Vm = ""
				break
			}
		}
		close(Status.(chan *types.VmResponse))
		// wait for goroutines exit
		mypod.Wg.Wait()
	} else {
		stopPodEvent := &StopPodCommand{}
		PodEvent.(chan VmEvent) <- stopPodEvent
		// wait for the VM response
		for {
			Response = <-Status.(chan *types.VmResponse)
			glog.V(1).Infof("Got response: %d: %s", Response.Code, Response.Cause)
			if Response.Code == types.E_POD_STOPPED || Response.Code == types.E_BAD_REQUEST || Response.Code == types.E_FAILED {
				mypod.Vm = ""
				vm.Status = types.S_VM_IDLE
				break
			}
		}
	}

	mypod.Status = types.S_POD_FAILED
	mypod.SetContainerStatus(types.S_POD_FAILED)

	return Response
}
Example #25
func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) {
	repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), remote)

	glog.V(1).Infof("[registry] Calling GET %s", repositoryTarget)

	req, err := http.NewRequest("GET", repositoryTarget, nil)
	if err != nil {
		return nil, err
	}
	// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
	req.Header.Set("X-Docker-Token", "true")
	res, err := r.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.StatusCode == 401 {
		return nil, errLoginRequired
	}
	// TODO: Right now we're ignoring checksums in the response body.
	// In the future, we need to use them to check image validity.
	if res.StatusCode == 404 {
		return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res)
	} else if res.StatusCode != 200 {
		errBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			glog.V(1).Infof("Error reading response body: %s", err)
		}
		return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, remote, errBody), res)
	}

	var endpoints []string
	if res.Header.Get("X-Docker-Endpoints") != "" {
		endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1))
		if err != nil {
			return nil, err
		}
	} else {
		// Assume the endpoint is on the same host
		endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host))
	}

	remoteChecksums := []*ImgData{}
	if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil {
		return nil, err
	}

	// Forge a better object from the retrieved data
	imgsData := make(map[string]*ImgData)
	for _, elem := range remoteChecksums {
		imgsData[elem.ID] = elem
	}

	return &RepositoryData{
		ImgList:   imgsData,
		Endpoints: endpoints,
	}, nil
}
Example #26
func (daemon *Daemon) shutdown() error {
	glog.V(0).Info("The daemon will be shutdown")
	glog.V(0).Info("Shutdown all VMs")
	for vm := range daemon.VmList {
		daemon.KillVm(vm)
	}
	daemon.db.Close()
	glog.Flush()
	return nil
}
Example #27
func InitDriver() *XenDriver {
	if probeXend() {
		glog.Info("xend is running, can not start with xl.")
		return nil
	}

	ctx, res := HyperxlInitializeDriver()
	if res != 0 {
		glog.Info("failed to initialize xen context")
		return nil
	} else if ctx.Version < REQUIRED_VERSION {
		glog.Info("Xen version is not new enough (%d), need 4.5 or higher", ctx.Version)
		return nil
	} else {
		glog.V(1).Info("Xen capabilities: ", ctx.Capabilities)
		hvm := false
		caps := strings.Split(ctx.Capabilities, " ")
		for _, cap := range caps {
			if strings.HasPrefix(cap, "hvm-") {
				hvm = true
				break
			}
		}
		if !hvm {
			glog.Info("Xen installation does not support HVM, current capabilities: %s", ctx.Capabilities)
			return nil
		}
	}

	sigchan := make(chan os.Signal, 1)
	go func() {
		for {
			_, ok := <-sigchan
			if !ok {
				break
			}
			glog.V(1).Info("got SIGCHLD, send msg to libxl")
			HyperxlSigchldHandler(ctx.Ctx)
		}
	}()
	signal.Notify(sigchan, syscall.SIGCHLD)

	xd := &XenDriver{
		Ctx:          ctx.Ctx,
		Logger:       ctx.Logger,
		Version:      ctx.Version,
		Capabilities: ctx.Capabilities,
	}

	xd.domains = make(map[uint32]*hypervisor.VmContext)

	globalDriver = xd
	return globalDriver
}
Example #28
func (daemon *Daemon) GetPodVmByName(podName string) (string, error) {
	daemon.PodsMutex.RLock()
	glog.V(2).Infof("lock read of PodList")
	defer glog.V(2).Infof("unlock read of PodList")
	defer daemon.PodsMutex.RUnlock()
	pod := daemon.PodList[podName]
	if pod == nil {
		return "", fmt.Errorf("Not found VM for pod(%s)", podName)
	}
	return pod.Vm, nil
}
Example #29
func (devices *DeviceSet) setupBaseImage() error {
	oldInfo, _ := devices.lookupDevice("")
	if oldInfo != nil && oldInfo.Initialized {
		return nil
	}

	if oldInfo != nil && !oldInfo.Initialized {
		glog.V(1).Infof("Removing uninitialized base image")
		if err := devices.DeleteDevice(""); err != nil {
			return err
		}
	}

	if devices.thinPoolDevice != "" && oldInfo == nil {
		_, transactionId, dataUsed, _, _, _, err := devices.poolStatus()
		if err != nil {
			return err
		}
		if dataUsed != 0 {
			return fmt.Errorf("Unable to take ownership of thin-pool (%s) that already has used data blocks",
				devices.thinPoolDevice)
		}
		if transactionId != 0 {
			return fmt.Errorf("Unable to take ownership of thin-pool (%s) with non-zero transaction Id",
				devices.thinPoolDevice)
		}
	}

	glog.V(1).Infof("Initializing base device-mapper thin volume")

	// Create initial device
	info, err := devices.createRegisterDevice("")
	if err != nil {
		return err
	}

	glog.V(1).Infof("Creating filesystem on base device-mapper thin volume")

	if err := devices.activateDeviceIfNeeded(info); err != nil {
		return err
	}

	if err := devices.createFilesystem(info); err != nil {
		return err
	}

	info.Initialized = true
	if err := devices.saveMetadata(info); err != nil {
		info.Initialized = false
		return err
	}

	return nil
}
Example #30
func (daemon *Daemon) GetVmByPodId(podId string) (string, error) {
	daemon.PodList.RLock()
	glog.V(2).Infof("lock read of PodList")
	defer glog.V(2).Infof("unlock read of PodList")
	defer daemon.PodList.RUnlock()
	pod, ok := daemon.PodList.Get(podId)
	if !ok {
		return "", fmt.Errorf("Not found Pod %s", podId)
	}
	return pod.status.Vm, nil
}