Example #1
func (r *responseStream) Error(ctx context.Context, num uint64, code [2]int, msg string) error {
	r.Lock()
	defer r.Unlock()
	if r.closed {
		apexctx.GetLogger(r.ctx).WithError(errStreamIsClosed).Error("responseStream.Error")
		return errStreamIsClosed
	}
	defer r.close(ctx)

	p := msgpackBytePool.Get().([]byte)[:0]
	defer msgpackBytePool.Put(p)

	// NOTE: `3` without headers!
	p = msgp.AppendArrayHeader(p, 3)
	p = msgp.AppendUint64(p, r.channel)
	p = msgp.AppendUint64(p, num)

	// code_category + error message
	p = msgp.AppendArrayHeader(p, 2)

	// code & category
	p = msgp.AppendArrayHeader(p, 2)
	p = msgp.AppendInt(p, code[0])
	p = msgp.AppendInt(p, code[1])

	// error message
	p = msgp.AppendString(p, msg)

	if _, err := r.wr.Write(p); err != nil {
		apexctx.GetLogger(r.ctx).WithError(err).Error("responseStream.Error")
		return err
	}
	return nil
}
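For reference, a minimal, self-contained sketch of the frame Error builds above, assuming msgp is github.com/tinylib/msgp/msgp; the channel, message number, code, and category values are illustrative only:

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	p := make([]byte, 0, 64)
	// [channel, num, [[code, category], message]]
	p = msgp.AppendArrayHeader(p, 3)
	p = msgp.AppendUint64(p, 1)  // channel (illustrative)
	p = msgp.AppendUint64(p, 42) // message number (illustrative)
	p = msgp.AppendArrayHeader(p, 2)
	p = msgp.AppendArrayHeader(p, 2)
	p = msgp.AppendInt(p, 10) // code (illustrative)
	p = msgp.AppendInt(p, 2)  // category (illustrative)
	p = msgp.AppendString(p, "spooling failed")
	fmt.Printf("% x\n", p)
}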
Example #2
func (r *responseStream) Write(ctx context.Context, num uint64, data []byte) error {
	r.Lock()
	defer r.Unlock()

	if r.closed {
		apexctx.GetLogger(r.ctx).WithError(errStreamIsClosed).Error("responseStream.Write")
		return errStreamIsClosed
	}

	p := msgpackBytePool.Get().([]byte)[:0]
	defer msgpackBytePool.Put(p)

	// NOTE: `3` without headers!
	p = msgp.AppendArrayHeader(p, 3)
	p = msgp.AppendUint64(p, r.channel)
	p = msgp.AppendUint64(p, num)

	p = msgp.AppendArrayHeader(p, 1)
	p = msgp.AppendStringFromBytes(p, data)

	if _, err := r.wr.Write(p); err != nil {
		apexctx.GetLogger(r.ctx).WithError(err).Error("responseStream.Write")
		return err
	}
	return nil
}
Example #3
func newProcess(ctx context.Context, executable string, args, env []string, workDir string, output io.Writer) (*process, error) {
	pr := process{
		ctx: ctx,
	}

	pr.cmd = &exec.Cmd{
		Env:         env,
		Args:        args,
		Dir:         workDir,
		Path:        executable,
		SysProcAttr: getSysProctAttr(),
	}
	// It's impossible to set an io.Writer directly on Cmd because of
	// https://github.com/golang/go/issues/13155
	stdErrRd, err := pr.cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	stdOutRd, err := pr.cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	go io.Copy(output, stdErrRd)
	go io.Copy(output, stdOutRd)

	if err = pr.cmd.Start(); err != nil {
		apexctx.GetLogger(ctx).WithError(err).Errorf("unable to start executable %s", pr.cmd.Path)
		return nil, err
	}

	apexctx.GetLogger(ctx).WithField("pid", pr.cmd.Process.Pid).Info("executable has been launched")
	return &pr, nil
}
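The two io.Copy goroutines above are fire-and-forget, so output written just before the child exits can race with process teardown. A hedged sketch of the same pipe-copy workaround with a sync.WaitGroup that drains both pipes before Wait reclaims them; runAndCopy is an illustrative name, not part of the original code:

package main

import (
	"io"
	"os"
	"os/exec"
	"sync"
)

func runAndCopy(cmd *exec.Cmd, output io.Writer) error {
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); io.Copy(output, stderr) }()
	go func() { defer wg.Done(); io.Copy(output, stdout) }()
	wg.Wait() // drain both pipes before Wait closes them
	return cmd.Wait()
}

func main() {
	runAndCopy(exec.Command("echo", "hello"), os.Stdout)
}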
Example #4
func (d *initialDispatch) onSpool(opts Profile, name string) (Dispatcher, error) {
	isolateType := opts.Type()
	if isolateType == "" {
		err := fmt.Errorf("corrupted profile: %v", opts)
		apexctx.GetLogger(d.ctx).Error("unable to detect isolate type from a profile")
		d.stream.Error(d.ctx, replySpoolError, errBadProfile, err.Error())
		return nil, err
	}

	box, ok := getBoxes(d.ctx)[isolateType]
	if !ok {
		apexctx.GetLogger(d.ctx).WithField("isolatetype", isolateType).Error("requested isolate type is not available")
		err := fmt.Errorf("isolate type %s is not available", isolateType)
		d.stream.Error(d.ctx, replySpoolError, errUnknownIsolate, err.Error())
		return nil, err
	}

	ctx, cancel := context.WithCancel(d.ctx)

	go func() {
		if err := box.Spool(ctx, name, opts); err != nil {
			d.stream.Error(ctx, replySpoolError, errSpoolingFailed, err.Error())
			return
		}
		// NOTE: make sure that nil is packed as []interface{}
		d.stream.Close(ctx, replySpoolOk)
	}()

	return newSpoolCancelationDispatch(ctx, cancel, d.stream), nil
}
Example #5
func checkLimits(ctx context.Context) {
	var l syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l); err != nil {
		apexctx.GetLogger(ctx).WithError(err).Error("get RLIMIT_NOFILE")
		return
	}

	if l.Cur < desiredRlimit {
		apexctx.GetLogger(ctx).Warnf("RLIMIT_NOFILE %d is less than the desired %d", l.Cur, desiredRlimit)
	}
}
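checkLimits only warns. A hedged sketch of actually raising the soft limit toward the hard cap with syscall.Setrlimit, which the original does not do:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var l syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l); err != nil {
		fmt.Println("getrlimit:", err)
		return
	}
	l.Cur = l.Max // raise the soft limit to the hard limit
	if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l); err != nil {
		fmt.Println("setrlimit:", err)
		return
	}
	fmt.Printf("RLIMIT_NOFILE is now %d/%d\n", l.Cur, l.Max)
}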
Example #6
func (r *blobRepo) Get(ctx context.Context, repository distribution.Repository, dgst digest.Digest) (string, error) {
	apexctx.GetLogger(ctx).WithField("digest", dgst).Info("get a blob from Repository")
	path := filepath.Join(r.BlobRepositoryConfig.SpoolPath, dgst.String())
	_, err := os.Lstat(path)
	if err == nil {
		apexctx.GetLogger(ctx).WithField("digest", dgst).Info("the blob has already been downloaded")
		return path, nil
	}
	if !os.IsNotExist(err) {
		return "", err
	}

	return r.download(ctx, repository, dgst)
}
Example #7
func (c *container) Cleanup(portoConn porto.API) {
	if !c.cleanupEnabled {
		return
	}

	var err error
	if err = portoConn.UnlinkVolume(c.volumePath, c.containerID); err != nil {
		apexctx.GetLogger(c.ctx).WithField("id", c.containerID).WithError(err).Warnf("Unlink volume %s", c.volumePath)
	} else {
		apexctx.GetLogger(c.ctx).WithField("id", c.containerID).Debugf("Unlinked volume %s successfully", c.volumePath)
	}
	if err = portoConn.UnlinkVolume(c.volumePath, "self"); err != nil {
		apexctx.GetLogger(c.ctx).WithField("id", "self").WithError(err).Warnf("Unlink volume %s", c.volumePath)
	} else {
		apexctx.GetLogger(c.ctx).WithField("id", "self").Debugf("Unlinked volume %s successfully", c.volumePath)
	}
	if err = portoConn.Destroy(c.containerID); err != nil {
		apexctx.GetLogger(c.ctx).WithField("id", c.containerID).WithError(err).Warn("Destroy error")
	} else {
		apexctx.GetLogger(c.ctx).WithField("id", c.containerID).Debug("Destroyed")
	}
	if err = os.RemoveAll(c.rootDir); err != nil {
		apexctx.GetLogger(c.ctx).WithField("id", c.containerID).WithError(err).Warnf("Remove dirs %s", c.rootDir)
	} else {
		apexctx.GetLogger(c.ctx).WithField("id", c.containerID).Debugf("Removed dirs %s successfully", c.rootDir)
	}
}
Example #8
func (r *blobRepo) download(ctx context.Context, repository distribution.Repository, dgst digest.Digest) (string, error) {
	ch := make(chan asyncSpoolResult, 1)
	r.mu.Lock()
	downloading, ok := r.inProgress[dgst]
	r.inProgress[dgst] = append(downloading, ch)
	if !ok {
		go func() {
			path, err := r.fetch(ctx, repository, dgst)
			res := asyncSpoolResult{path: path, err: err}
			r.mu.Lock()
			for _, ch := range r.inProgress[dgst] {
				ch <- res
			}
			// drop the entry so a later Get starts a fresh download
			// instead of waiting on a channel nobody will ever fill
			delete(r.inProgress, dgst)
			r.mu.Unlock()
		}()
	}
	r.mu.Unlock()

	apexctx.GetLogger(ctx).WithField("digest", dgst).Info("the blob download is in progress; waiting")
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	case res := <-ch:
		return res.path, res.err
	}
}
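The inProgress map hand-rolls request coalescing: the first caller for a digest starts the fetch, and every concurrent caller waits on its own channel. As an aside, golang.org/x/sync/singleflight expresses the same idea; a hedged sketch of that alternative, not what this code uses:

package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// concurrent callers with the same key share one fetch
			path, err, shared := g.Do("sha256:abc", func() (interface{}, error) {
				time.Sleep(10 * time.Millisecond) // stand-in for the real blob fetch
				return "/spool/sha256:abc", nil
			})
			fmt.Println(path, err, shared)
		}()
	}
	wg.Wait()
}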
Example #9
func containerRemove(client client.APIClient, ctx context.Context, id string) {
	var err error
	defer apexctx.GetLogger(ctx).WithField("id", id).Trace("removing").Stop(&err)

	removeOpts := types.ContainerRemoveOptions{}
	err = client.ContainerRemove(ctx, id, removeOpts)
}
Example #10
// decodeImagePull detects an error during an image pull
// by decoding the reply from Docker.
// Although Docker should reply with JSON-encoded items,
// one per line, this can vary between versions.
// This decoder can detect an error even in mixed replies:
// {"Status": "OK"}\n{"Status": "OK"}
// {"Status": "OK"}{"Error": "error"}
func decodeImagePull(ctx context.Context, r io.Reader) error {
	logger := apexctx.GetLogger(ctx)
	more := true

	rd := bufio.NewReader(r)
	for more {
		line, err := rd.ReadBytes('\n')
		switch err {
		case nil:
			// pass
		case io.EOF:
			if len(line) == 0 {
				return nil
			}
			more = false
		default:
			return err
		}

		if len(line) == 0 {
			return fmt.Errorf("Empty response line")
		}

		if line[len(line)-1] == '\n' {
			line = line[:len(line)-1]
		}

		if err = decodePullLine(line); err != nil {
			logger.WithError(err).Error("unable to decode JSON docker reply")
			return err
		}
	}
	return nil
}
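decodePullLine is not shown here. A plausible sketch, assuming it unmarshals Docker pull-progress JSON and surfaces the error field; a json.Decoder loop also handles objects concatenated on one line, as in the mixed-reply example above (encoding/json matches field names case-insensitively, so both "Error" and "error" work):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type pullLine struct {
	Status string `json:"status"`
	Error  string `json:"error"`
}

func decodePullLine(line []byte) error {
	dec := json.NewDecoder(bytes.NewReader(line))
	for dec.More() {
		var pl pullLine
		if err := dec.Decode(&pl); err != nil {
			return err
		}
		if pl.Error != "" {
			return fmt.Errorf("docker pull error: %s", pl.Error)
		}
	}
	return nil
}

func main() {
	fmt.Println(decodePullLine([]byte(`{"Status": "OK"}{"Error": "manifest not found"}`)))
}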
Example #11
func (p *process) remove() {
	if !atomic.CompareAndSwapUint32(&p.removed, 0, 1) {
		apexctx.GetLogger(p.ctx).WithField("id", p.containerID).Info("already removed")
		return
	}
	containerRemove(p.client, p.ctx, p.containerID)
}
Example #12
// fetch downloads the blob to a tempfile, renames it to the expected name
func (r *blobRepo) fetch(ctx context.Context, repository distribution.Repository, dgst digest.Digest) (path string, err error) {
	defer apexctx.GetLogger(ctx).WithField("digest", dgst).Trace("fetch the blob").Stop(&err)
	tempFilePath := filepath.Join(r.SpoolPath, fmt.Sprintf("%s-%d", dgst.String(), rand.Int63()))
	f, err := os.Create(tempFilePath)
	if err != nil {
		return "", err
	}
	defer f.Close()
	defer os.Remove(tempFilePath)

	blob, err := repository.Blobs(ctx).Open(ctx, dgst)
	if err != nil {
		return "", err
	}
	defer blob.Close()

	if _, err = io.Copy(f, blob); err != nil {
		return "", err
	}
	f.Close()
	blob.Close()

	resultFilePath := filepath.Join(r.SpoolPath, dgst.String())
	if err = os.Rename(tempFilePath, resultFilePath); err != nil {
		return "", err
	}

	return resultFilePath, nil
}
Example #13
func (p *process) Kill() (err error) {
	defer apexctx.GetLogger(p.ctx).WithField("id", p.containerID).Trace("Sending SIGKILL").Stop(&err)
	// release HTTP connections
	defer p.cancellation()
	defer p.remove()

	return p.client.ContainerKill(p.ctx, p.containerID, "SIGKILL")
}
Example #14
func (b *Box) loadJournal(ctx context.Context) error {
	f, err := os.Open(b.config.Journal)
	if err != nil {
		apexctx.GetLogger(ctx).Warnf("unable to open Journal file: %v", err)
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	defer f.Close()

	if err = b.journal.Load(f); err != nil {
		apexctx.GetLogger(ctx).WithError(err).Error("unable to load Journal")
		return err
	}

	return nil
}
Example #15
func (g *GraphiteExporter) Send(ctx context.Context, r metrics.Registry) error {
	d := net.Dialer{
		DualStack: true,
		Cancel:    ctx.Done(),
	}

	conn, err := d.Dial("tcp", g.addr)
	if err != nil {
		return err
	}
	defer conn.Close()

	if deadline, ok := ctx.Deadline(); ok {
		conn.SetWriteDeadline(deadline)
	}

	w := bufio.NewWriter(conn)
	now := time.Now().Unix()
	r.Each(func(name string, value interface{}) {
		switch metric := value.(type) {
		case metrics.Counter:
			fmt.Fprintf(w, "%s.%s %d %d\n", g.prefix, name, metric.Count(), now)
		case metrics.Gauge:
			fmt.Fprintf(w, "%s.%s %d %d\n", g.prefix, name, metric.Value(), now)
		case metrics.Meter:
			m := metric.Snapshot()
			fmt.Fprintf(w, "%s.%s.count %d %d\n", g.prefix, name, m.Count(), now)
			fmt.Fprintf(w, "%s.%s.rate1m %.2f %d\n", g.prefix, name, m.Rate1(), now)
			fmt.Fprintf(w, "%s.%s.rat5m %.2f %d\n", g.prefix, name, m.Rate5(), now)
			fmt.Fprintf(w, "%s.%s.rate15m %.2f %d\n", g.prefix, name, m.Rate15(), now)
			fmt.Fprintf(w, "%s.%s.ratemean %.2f %d\n", g.prefix, name, m.RateMean(), now)
		case metrics.Timer:
			t := metric.Snapshot()
			ps := t.Percentiles(g.percentiles)
			fmt.Fprintf(w, "%s.%s.count %d %d\n", g.prefix, name, t.Count(), now)
			fmt.Fprintf(w, "%s.%s.min_%s %d %d\n", g.prefix, name, g.duStr, t.Min()/int64(g.du), now)
			fmt.Fprintf(w, "%s.%s.max_%s %d %d\n", g.prefix, name, g.duStr, t.Max()/int64(g.du), now)
			fmt.Fprintf(w, "%s.%s.mean_%s %.2f %d\n", g.prefix, name, g.duStr, t.Mean()/float64(g.du), now)
			// fmt.Fprintf(w, "%s.%s.std-dev_%s %.2f %d\n", g.prefix, name, g.duStr, t.StdDev()/float64(g.du), now)
			for psIdx, psKey := range g.percentiles {
				key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
				fmt.Fprintf(w, "%s.%s.%s_%s %.2f %d\n", g.prefix, name, key, g.duStr, ps[psIdx]/float64(g.du), now)
			}
			fmt.Fprintf(w, "%s.%s.rate1m %.2f %d\n", g.prefix, name, t.Rate1(), now)
			fmt.Fprintf(w, "%s.%s.rate5m %.2f %d\n", g.prefix, name, t.Rate5(), now)
			fmt.Fprintf(w, "%s.%s.rate15m %.2f %d\n", g.prefix, name, t.Rate15(), now)
			fmt.Fprintf(w, "%s.%s.ratemean %.2f %d\n", g.prefix, name, t.RateMean(), now)
		case metrics.Healthcheck:
			// pass
		default:
			apexctx.GetLogger(ctx).Warnf("Graphite: skip metric `%s` of unknown type %T", name, value)
		}
		w.Flush()
	})
	return nil
}
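Each line written above follows the Graphite plaintext protocol: "<prefix>.<metric> <value> <unix-timestamp>\n". A minimal sketch producing the same line shape, assuming the metrics package is github.com/rcrowley/go-metrics; the "isolate" prefix and "requests" name are illustrative:

package main

import (
	"bytes"
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	c := metrics.NewCounter()
	r.Register("requests", c) // returns an error for duplicate names; ignored here
	c.Inc(3)

	var buf bytes.Buffer
	now := time.Now().Unix()
	r.Each(func(name string, value interface{}) {
		if m, ok := value.(metrics.Counter); ok {
			fmt.Fprintf(&buf, "isolate.%s %d %d\n", name, m.Count(), now)
		}
	})
	fmt.Print(buf.String()) // e.g. "isolate.requests 3 1712345678"
}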
Example #16
func (st *cocaineCodeStorage) Spool(ctx context.Context, appname string) (data []byte, err error) {
	storage, err := st.createStorage(ctx)
	if err != nil {
		return nil, err
	}
	defer storage.Close()
	defer apexctx.GetLogger(ctx).WithField("app", appname).Trace("read code from storage").Stop(&err)

	channel, err := storage.Call(ctx, "read", "apps", appname)
	if err != nil {
		return nil, err
	}

	res, err := channel.Get(ctx)
	if err != nil {
		return nil, err
	}

	num, val, err := res.Result()
	if err != nil || num != 0 || len(val) != 1 {
		return nil, fmt.Errorf("invalid Storage service reply err: %v, num %d, len(val): %d", err, num, len(val))
	}

	var raw, rest []byte
	raw, ok := val[0].([]byte)
	if !ok {
		return nil, fmt.Errorf("invalid Storage.Read value type %T", val[0])
	}

	switch tp := msgp.NextType(raw); tp {
	case msgp.BinType:
		data, rest, err = msgp.ReadBytesZC(raw)
	case msgp.StrType:
		data, rest, err = msgp.ReadStringZC(raw)
	default:
		return nil, fmt.Errorf("invalid msgpack type for an archive: %s", tp)
	}

	if len(rest) != 0 {
		apexctx.GetLogger(ctx).WithField("app", appname).Warnf("%d trailing bytes left unpacked", len(rest))
	}
	return data, err
}
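The ReadBytesZC/ReadStringZC calls above are zero-copy: they return sub-slices of the raw buffer rather than allocating, which is why any leftover bytes show up in rest. A minimal sketch, again assuming msgp is github.com/tinylib/msgp/msgp:

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	buf := msgp.AppendString(nil, "archive-bytes")
	data, rest, err := msgp.ReadStringZC(buf) // data aliases buf, no copy
	fmt.Printf("data=%q rest=%d err=%v\n", data, len(rest), err)
}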
Example #17
func (c *container) Kill() (err error) {
	defer apexctx.GetLogger(c.ctx).WithField("id", c.containerID).Trace("Kill container").Stop(&err)
	containersKilledCounter.Inc(1)
	portoConn, err := portoConnect()
	if err != nil {
		return err
	}
	defer portoConn.Close()
	defer c.Cleanup(portoConn)

	// After Kill the container must be in `dead` state
	// Wait seems redundant as we sent SIGKILL
	value, err := portoConn.GetData(c.containerID, "stdout")
	if err != nil {
		apexctx.GetLogger(c.ctx).WithField("id", c.containerID).WithError(err).Warn("unable to get stdout")
	}
	// TODO: add StringWriter interface to an output
	c.output.Write([]byte(value))
	apexctx.GetLogger(c.ctx).WithField("id", c.containerID).Infof("%d bytes of stdout have been sent", len(value))

	value, err = portoConn.GetData(c.containerID, "stderr")
	if err != nil {
		apexctx.GetLogger(c.ctx).WithField("id", c.containerID).WithError(err).Warn("unable to get stderr")
	}
	c.output.Write([]byte(value))
	apexctx.GetLogger(c.ctx).WithField("id", c.containerID).Infof("%d bytes of stderr have been sent", len(value))

	if err = portoConn.Kill(c.containerID, syscall.SIGKILL); err != nil {
		if !isEqualPortoError(err, portorpc.EError_InvalidState) {
			return err
		}
		return nil
	}

	if _, err = portoConn.Wait([]string{c.containerID}, 5*time.Second); err != nil {
		return err
	}

	return nil
}
Example #18
func collect(ctx context.Context) {
	goroutines.Update(int64(runtime.NumGoroutine()))
	count, err := fds.GetOpenFds()
	if err != nil {
		apexctx.GetLogger(ctx).WithError(err).Error("get open fd count")
		return
	}

	openFDs.Update(int64(count))
	threads.Update(int64(pprof.Lookup("threadcreate").Count()))

	metrics.DefaultRegistry.RunHealthchecks()
}
Example #19
func (p *process) collectOutput(started chan struct{}, writer io.Writer) {
	attachOpts := types.ContainerAttachOptions{
		Stream: true,
		Stdin:  false,
		Stdout: true,
		Stderr: true,
	}

	hjResp, err := p.client.ContainerAttach(p.ctx, p.containerID, attachOpts)
	if err != nil {
		apexctx.GetLogger(p.ctx).WithError(err).Errorf("unable to attach to stdout/err of %s", p.containerID)
		return
	}
	defer hjResp.Close()

	var header = make([]byte, headerSize)
	for {
		// https://docs.docker.com/engine/reference/api/docker_remote_api_v1.22/#attach-a-container
		// NOTE: some logs can be lost because of EOF
		_, err := hjResp.Reader.Read(header)
		if err != nil {
			if err == io.EOF {
				return
			}
			apexctx.GetLogger(p.ctx).WithError(err).Errorf("unable to read header for hjResp of %s", p.containerID)
			return
		}

		var size uint32
		if err = binary.Read(bytes.NewReader(header[4:]), binary.BigEndian, &size); err != nil {
			apexctx.GetLogger(p.ctx).WithError(err).Errorf("unable to decode size from header %s", p.containerID)
			return
		}

		if _, err = io.CopyN(writer, hjResp.Reader, int64(size)); err != nil {
			return
		}
	}
}
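The header parsed above is Docker's attach-stream multiplexing frame: byte 0 carries the stream id (0 stdin, 1 stdout, 2 stderr) and bytes 4 through 7 the big-endian payload size. A hedged sketch of decoding one frame without the bytes.Reader/binary.Read allocation; the frame bytes are fabricated for illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// stream id 1 (stdout), payload "hi" (size 2)
	frame := []byte{1, 0, 0, 0, 0, 0, 0, 2, 'h', 'i'}
	size := binary.BigEndian.Uint32(frame[4:8]) // no bytes.Reader needed
	fmt.Printf("stream=%d size=%d payload=%q\n", frame[0], size, frame[8:8+size])
}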
Example #20
func newProcess(ctx context.Context, executable string, args, env []string, workDir string, output io.Writer) (*process, error) {
	pr := process{
		ctx: ctx,
	}

	pr.cmd = &exec.Cmd{
		Env:         env,
		Args:        args,
		Dir:         workDir,
		Path:        executable,
		Stdout:      output,
		Stderr:      output,
		SysProcAttr: getSysProctAttr(),
	}

	if err := pr.cmd.Start(); err != nil {
		apexctx.GetLogger(ctx).WithError(err).Errorf("unable to start executable %s", pr.cmd.Path)
		return nil, err
	}
	apexctx.GetLogger(ctx).WithField("pid", pr.cmd.Process.Pid).Info("executable has been launched")
	return &pr, nil
}
Example #21
// Spool spools an image with the "latest" tag
func (b *Box) Spool(ctx context.Context, name string, opts isolate.Profile) (err error) {
	profile, err := convertProfile(opts)
	if err != nil {
		apexctx.GetLogger(ctx).WithError(err).WithFields(log.Fields{"name": name}).Info("unable to convert raw profile to Docker specific profile")
		return err
	}

	if profile.Registry == "" {
		apexctx.GetLogger(ctx).WithFields(log.Fields{"name": name}).Info("local image will be used")
		return nil
	}

	defer apexctx.GetLogger(ctx).WithField("name", name).Trace("spooling an image").Stop(&err)

	pullOpts := types.ImagePullOptions{
		All: false,
	}

	if registryAuth, ok := b.config.RegistryAuth[profile.Registry]; ok {
		pullOpts.RegistryAuth = registryAuth
	}

	ref := fmt.Sprintf("%s:%s", filepath.Join(profile.Registry, profile.Repository, name), "latest")

	body, err := b.client.ImagePull(ctx, ref, pullOpts)
	if err != nil {
		apexctx.GetLogger(ctx).WithError(err).WithFields(
			log.Fields{"name": name, "ref": ref}).Error("unable to pull an image")
		return err
	}
	defer body.Close()

	if err := decodeImagePull(ctx, body); err != nil {
		return err
	}

	return nil
}
Example #22
func (p portoProfile) applyContainerLimits(ctx context.Context, portoConn porto.API, id string) error {
	limits, ok := p.Profile["container"]
	if !ok {
		apexctx.GetLogger(ctx).WithField("container", id).Info("no container limits")
		return nil
	}

	switch limits := limits.(type) {
	case map[string]interface{}:
		log := apexctx.GetLogger(ctx).WithField("container", id)
		for limit, value := range limits {
			strvalue := fmt.Sprintf("%v", value) // %v renders non-string values correctly
			log.Debugf("apply %s %s", limit, strvalue)
			if err := portoConn.SetProperty(id, limit, strvalue); err != nil {
				return err
			}
		}

		return nil
	default:
		return fmt.Errorf("invalid resources type %T", limits)
	}
}
Example #23
func newConnectionHandler(ctx context.Context, newDisp dispatcherInit) (*ConnectionHandler, error) {
	connID := getID(ctx)
	ctx = apexctx.WithLogger(ctx, apexctx.GetLogger(ctx).WithField("conn.id", connID))

	return &ConnectionHandler{
		ctx:            ctx,
		sessions:       newSessions(),
		highestChannel: 0,

		newDispatcher: newDisp,

		connID: connID,
	}, nil
}
Example #24
func (p portoProfile) applyVolumeLimits(ctx context.Context, id string, vp map[string]string) error {
	limits, ok := p.Profile["volume"]
	if !ok {
		apexctx.GetLogger(ctx).WithField("container", id).Info("no volume limits")
		return nil
	}

	switch limits := limits.(type) {
	case map[string]interface{}:
		log := apexctx.GetLogger(ctx).WithField("container", id)
		for limit, value := range limits {
			strvalue := fmt.Sprintf("%v", value) // %v renders non-string values correctly
			log.Debugf("apply volume limit %s %s", limit, strvalue)
			vp[limit] = strvalue
		}

		return nil
	default:
		return fmt.Errorf("invalid resources type %T", limits)
	}
}
Example #25
func (s *spoolCancelationDispatch) Handle(id uint64, r *msgp.Reader) (Dispatcher, error) {
	switch id {
	case spoolCancel:
		// Skip empty array
		apexctx.GetLogger(s.ctx).Debug("Spool.Cancel()")
		r.Skip()
		// TODO: cancel only if I'm spooling
		s.cancel()
		// NOTE: do not return an error on purpose
		s.stream.Close(s.ctx, replySpoolOk)
		return nil, nil
	default:
		return nil, fmt.Errorf("unknown transition id: %d", id)
	}
}
Example #26
// Spool spools the code of an app from the Cocaine Storage service
func (b *Box) Spool(ctx context.Context, name string, opts isolate.Profile) (err error) {
	spoolPath := b.spoolPath
	if val, ok := opts["spool"]; ok {
		spoolPath = fmt.Sprintf("%v", val)
	}
	defer apexctx.GetLogger(ctx).WithField("name", name).WithField("spoolpath", spoolPath).Trace("processBox.Spool").Stop(&err)
	data, err := b.fetch(ctx, name)
	if err != nil {
		return err
	}

	if isolate.IsCancelled(ctx) {
		return nil
	}

	return unpackArchive(ctx, data, filepath.Join(spoolPath, name))
}
Example #27
func (b *Box) wait() {
	var (
		ws  syscall.WaitStatus
		pid int
		err error
	)

	for {
		// NOTE: there is possible logic race here
		// Wait -> new fork/exec replaces old one in the map -> locked.Wait
		pid, err = syscall.Wait4(-1, &ws, syscall.WNOHANG, nil)
		switch {
		case pid > 0:
			// NOTE: I fully understand that handling signals from library is a bad idea,
			// but there's nothing better in this case
			// If `pid` is not in the map, it means that it's not our worker
			// NOTE: the lock is locked in the outer scope
			pr, ok := b.children[pid]
			if ok {
				delete(b.children, pid)
				// Send SIGKILL to a process group associated with the child
				killPg(pid)
				// There is no point to check error here,
				// as it always returns "Wait error", because Wait4 has been already called.
				// But we have to call Wait to close all associated fds and to release other resources
				pr.Wait()
				procsWaitedCounter.Inc(1)
			}
		case err == syscall.EINTR:
			// NOTE: although man says that EINTR is not possible in this case, let's be on the safe side
			// EINTR
			// WNOHANG was not set and an unblocked signal or a SIGCHLD was caught; see signal(7).
		case err == syscall.ECHILD:
			// exec.Cmd failed to start, but SIGCHLD arrived.
			// Actually, the `non-born` child has already been waited on by exec.Cmd,
			// so do nothing and return
			return
		default:
			if err != nil {
				apexctx.GetLogger(b.ctx).WithError(err).Error("Wait4 error")
			}
			return
		}
	}
}
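killPg is referenced above but not shown. A plausible sketch, assuming it signals the whole process group: on POSIX systems, passing a negative pid to kill(2) addresses the process group with that id:

package main

import (
	"fmt"
	"syscall"
)

// killPg sends SIGKILL to the process group led by pgid; the negative
// pid addresses the whole group rather than a single process.
func killPg(pgid int) error {
	return syscall.Kill(-pgid, syscall.SIGKILL)
}

func main() {
	fmt.Println(killPg(999999)) // expect ESRCH for a non-existent group
}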
Example #28
func (b *Box) dumpJournal(ctx context.Context) (err error) {
	defer apexctx.GetLogger(ctx).Trace("dump journal").Stop(&err)
	tempfile, err := ioutil.TempFile(filepath.Dir(b.config.Journal), "portojournalbak")
	if err != nil {
		return err
	}
	defer os.Remove(tempfile.Name())
	defer tempfile.Close()

	if err = b.journal.Dump(tempfile); err != nil {
		return err
	}

	if err = os.Rename(tempfile.Name(), b.config.Journal); err != nil {
		return err
	}

	return nil
}
Example #29
// Spawn spawns a process using a container
func (b *Box) Spawn(ctx context.Context, config isolate.SpawnConfig, output io.Writer) (isolate.Process, error) {
	profile, err := convertProfile(config.Opts)
	if err != nil {
		apexctx.GetLogger(ctx).WithError(err).WithFields(log.Fields{"name": config.Name}).Info("unable to convert raw profile to Docker specific profile")
		return nil, err
	}
	start := time.Now()

	spawningQueueSize.Inc(1)
	if spawningQueueSize.Count() > 10 {
		spawningQueueSize.Dec(1)
		return nil, syscall.EAGAIN
	}
	err = b.spawnSM.Acquire(ctx)
	spawningQueueSize.Dec(1)
	if err != nil {
		return nil, isolate.ErrSpawningCancelled
	}
	defer b.spawnSM.Release()

	containersCreatedCounter.Inc(1)
	pr, err := newContainer(ctx, b.client, profile, config.Name, config.Executable, config.Args, config.Env)
	if err != nil {
		containersErroredCounter.Inc(1)
		return nil, err
	}

	b.muContainers.Lock()
	b.containers[pr.containerID] = pr
	b.muContainers.Unlock()

	if err = pr.startContainer(output); err != nil {
		containersErroredCounter.Inc(1)
		return nil, err
	}

	totalSpawnTimer.UpdateSince(start)
	return pr, nil
}
Example #30
// HandleConn decodes commands from Cocaine runtime and calls dispatchers
func (h *ConnectionHandler) HandleConn(conn io.ReadWriteCloser) {
	defer func() {
		conn.Close()
		apexctx.GetLogger(h.ctx).Error("Connection has been closed")
	}()

	ctx, cancel := context.WithCancel(h.ctx)
	defer cancel()
	logger := apexctx.GetLogger(h.ctx)

	r := msgp.NewReader(conn)
LOOP:
	for {
		hasHeaders, channel, c, err := h.next(r)
		if err != nil {
			if err == io.EOF {
				return
			}
			apexctx.GetLogger(h.ctx).WithError(err).Error("next(): unable to read message")
			return
		}
		logger.Infof("channel %d, number %d", channel, c)

		dispatcher, ok := h.sessions.Get(channel)
		if !ok {
			if channel <= h.highestChannel {
				// dispatcher was detached from ResponseStream.OnClose
				// This message must be `close` message.
				// `channel`, `number` are parsed, skip `args` and probably `headers`
				logger.Infof("dispatcher for channel %d was detached", channel)
				r.Skip()
				if hasHeaders {
					r.Skip()
				}
				continue LOOP
			}

			h.highestChannel = channel

			ctx = apexctx.WithLogger(ctx, logger.WithField("channel", fmt.Sprintf("%s.%d", h.connID, channel)))
			rs := newResponseStream(ctx, conn, channel)
			rs.OnClose(func(ctx context.Context) {
				h.sessions.Detach(channel)
			})
			dispatcher = h.newDispatcher(ctx, rs)
		}

		dispatcher, err = dispatcher.Handle(c, r)
		// NOTE: remove this once headers are handled properly
		if hasHeaders {
			r.Skip()
		}

		if err != nil {
			if err == ErrInvalidArgsNum {
				logger.WithError(err).Errorf("channel %d, number %d", channel, c)
				return
			}

			logger.WithError(err).Errorf("Handle returned an error")
			h.sessions.Detach(channel)
			continue LOOP
		}
		if dispatcher == nil {
			h.sessions.Detach(channel)
			continue LOOP
		}

		h.sessions.Attach(channel, dispatcher)
	}
}
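h.next is not shown; judging by the handling above, each message is an array [channel, number, args] with an optional trailing headers element (hasHeaders would then mean a fourth element is present). A minimal sketch of decoding that shape, assuming msgp is github.com/tinylib/msgp/msgp and using illustrative values:

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Encode a frame: [channel, number, args]
	p := msgp.AppendArrayHeader(nil, 3)
	p = msgp.AppendUint64(p, 1)      // channel (illustrative)
	p = msgp.AppendUint64(p, 0)      // message number (illustrative)
	p = msgp.AppendArrayHeader(p, 0) // empty args

	r := msgp.NewReader(bytes.NewReader(p))
	sz, _ := r.ReadArrayHeader()
	channel, _ := r.ReadUint64()
	number, _ := r.ReadUint64()
	r.Skip() // args; a 4th element, if present, would be headers
	fmt.Printf("elements=%d channel=%d number=%d\n", sz, channel, number)
}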