// parseContainerJsons builds the per-container start info for the pod from
// the Docker inspect results of its containers.
func (p *Pod) parseContainerJsons(daemon *Daemon, jsons []*dockertypes.ContainerJSON) (err error) {
	p.ctnStartInfo = []*hypervisor.ContainerInfo{}

	for i, c := range p.spec.Containers {
		if jsons[i] == nil {
			estr := fmt.Sprintf("container %s of pod %s does not have inspect json", c.Name, p.id)
			glog.Error(estr)
			return errors.New(estr)
		}

		info := jsons[i]
		ci := &hypervisor.ContainerInfo{}

		// Backfill name and image from the inspect data when the spec left
		// them empty. Write through to p.spec.Containers[i]: the loop
		// variable c is only a copy.
		if c.Name == "" {
			c.Name = strings.TrimLeft(info.Name, "/")
			p.spec.Containers[i].Name = c.Name
		}
		if c.Image == "" {
			c.Image = info.Config.Image
			p.spec.Containers[i].Image = c.Image
		}
		glog.Infof("container name %s, image %s", c.Name, c.Image)

		mountId, err := GetMountIdByContainer(daemon.Storage.Type(), info.ID)
		if err != nil {
			estr := fmt.Sprintf("cannot find mountID for container %s: %s", info.ID, err)
			glog.Error(estr)
			return errors.New(estr)
		}

		ci.Id = mountId
		ci.Workdir = info.Config.WorkingDir
		ci.Cmd = append([]string{info.Path}, info.Args...)

		// runv should ignore these two instead of us clearing them here;
		// this is a workaround for now.
		p.spec.Containers[i].Entrypoint = []string{}
		p.spec.Containers[i].Command = []string{}

		glog.Infof("container info config %v, Cmd %v, Args %v", info.Config, info.Config.Cmd.Slice(), info.Args)

		// Merge the image environment with the pod spec; spec entries win.
		env := make(map[string]string)
		for _, v := range info.Config.Env {
			if kv := strings.SplitN(v, "=", 2); len(kv) == 2 {
				env[kv[0]] = kv[1]
			}
		}
		for _, e := range p.spec.Containers[i].Envs {
			env[e.Env] = e.Value
		}
		ci.Envs = env

		processImageVolumes(info, info.ID, p.spec, &p.spec.Containers[i])

		p.ctnStartInfo = append(p.ctnStartInfo, ci)
		glog.V(1).Infof("Container Info is \n%v", ci)
	}
	return nil
}
// PrepareContainers prepares the root filesystem of every container in the
// pod under the VM's shared directory and collects their runtime info.
func (p *Pod) PrepareContainers(sd Storage, dclient DockerInterface) (err error) {
	p.containers = []*hypervisor.ContainerInfo{}

	sharedDir := path.Join(hypervisor.BaseDir, p.vm.Id, hypervisor.ShareDirTag)

	// Index the pod-level files by name so they can be injected per container.
	files := make(map[string]pod.UserFile)
	for _, f := range p.spec.Files {
		files[f.Name] = f
	}

	for i, c := range p.status.Containers {
		var (
			info *dockertypes.ContainerJSONRaw
			ci   *hypervisor.ContainerInfo
		)

		info, err = getContinerInfo(dclient, c)
		if err != nil {
			return err
		}

		ci, err = sd.PrepareContainer(c.Id, sharedDir)
		if err != nil {
			return err
		}
		ci.Workdir = info.Config.WorkingDir
		ci.Entrypoint = info.Config.Entrypoint.Slice()
		ci.Cmd = info.Config.Cmd.Slice()

		// Merge the image environment with the pod spec; spec entries win.
		env := make(map[string]string)
		for _, v := range info.Config.Env {
			if kv := strings.SplitN(v, "=", 2); len(kv) == 2 {
				env[kv[0]] = kv[1]
			}
		}
		for _, e := range p.spec.Containers[i].Envs {
			env[e.Env] = e.Value
		}
		ci.Envs = env

		processImageVolumes(info, c.Id, p.spec, &p.spec.Containers[i])

		err = processInjectFiles(&p.spec.Containers[i], files, sd, c.Id, sd.RootPath(), sharedDir)
		if err != nil {
			return err
		}

		p.containers = append(p.containers, ci)
		glog.V(1).Infof("Container Info is \n%v", ci)
	}

	return nil
}
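// The KEY=VALUE parsing above is duplicated between parseContainerJsons and
// PrepareContainers. A minimal sketch of a shared helper; parseImageEnv is a
// hypothetical name, not part of the original source:
func parseImageEnv(imageEnv []string) map[string]string {
	env := make(map[string]string)
	for _, v := range imageEnv {
		// Docker stores env entries as KEY=VALUE; skip malformed entries
		// rather than panicking on a missing separator.
		if kv := strings.SplitN(v, "=", 2); len(kv) == 2 {
			env[kv[0]] = kv[1]
		}
	}
	return env
}

// Both call sites would then reduce to env := parseImageEnv(info.Config.Env)
// before applying the spec-level overrides.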
// setupMountsAndFiles prepares each container's filesystem from its mount ID
// (collected by parseContainerJsons) and injects the pod's user files,
// replacing the stashed start info with the fully prepared ContainerInfo.
func (p *Pod) setupMountsAndFiles(sd Storage) (err error) {
	if len(p.ctnStartInfo) != len(p.spec.Containers) {
		estr := fmt.Sprintf("Prepare error, pod %s does not get container infos well", p.id)
		glog.Error(estr)
		return errors.New(estr)
	}

	var (
		sharedDir = path.Join(hypervisor.BaseDir, p.vm.Id, hypervisor.ShareDirTag)
		files     = make(map[string]pod.UserFile)
	)
	for _, f := range p.spec.Files {
		files[f.Name] = f
	}

	for i, c := range p.status.Containers {
		var ci *hypervisor.ContainerInfo

		mountId := p.ctnStartInfo[i].Id
		glog.Infof("container ID: %s, mountId %s\n", c.Id, mountId)

		ci, err = sd.PrepareContainer(mountId, sharedDir)
		if err != nil {
			return err
		}

		err = processInjectFiles(&p.spec.Containers[i], files, sd, mountId, sd.RootPath(), sharedDir)
		if err != nil {
			return err
		}

		// Carry the start info gathered earlier over onto the prepared container.
		ci.Id = c.Id
		ci.Cmd = p.ctnStartInfo[i].Cmd
		ci.Envs = p.ctnStartInfo[i].Envs
		ci.Entrypoint = p.ctnStartInfo[i].Entrypoint
		ci.Workdir = p.ctnStartInfo[i].Workdir

		p.ctnStartInfo[i] = ci
	}

	return nil
}
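// A minimal sketch of how the pieces above fit together when starting a pod:
// parseContainerJsons stashes per-container start info from the Docker
// inspect results, and setupMountsAndFiles then prepares each container's
// filesystem in the VM's shared directory. The wrapper name prepareFromJsons
// is hypothetical, and this assumes daemon.Storage satisfies the Storage
// interface used above; actual callers may wire these steps differently.
func (p *Pod) prepareFromJsons(daemon *Daemon, jsons []*dockertypes.ContainerJSON) error {
	if err := p.parseContainerJsons(daemon, jsons); err != nil {
		return err
	}
	// setupMountsAndFiles requires p.ctnStartInfo, populated just above.
	return p.setupMountsAndFiles(daemon.Storage)
}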