Example #1
0
// Info returns the current state and metadata of the machine referenced by
// the request. Machines that were never initialized are answered with a
// canned response without querying the provider.
func (k *Kloud) Info(r *kite.Request) (interface{}, error) {
	machine, err := k.GetMachine(r)
	if err != nil {
		return nil, err
	}

	// Short-circuit: an uninitialized machine has nothing to query.
	if state := machine.State(); state == machinestate.NotInitialized {
		return &InfoResponse{
			State: state,
			Name:  "not-initialized-instance",
		}, nil
	}

	resp, err := machine.HandleInfo(request.NewContext(context.Background(), r))
	if err != nil {
		return nil, err
	}

	// Fall back to the stored state when the provider cannot tell.
	if resp.State == machinestate.Unknown {
		resp.State = machine.State()
	}

	return resp, nil
}
Example #2
0
// newStack builds a Stacker for the requested provider together with a fully
// populated context (team request, public keys, optional session, eventer and
// trace id). When a custom k.NewStack factory is installed it takes
// precedence over the built-in construction.
func (k *Kloud) newStack(r *kite.Request, req *TeamRequest) (Stacker, context.Context, error) {
	if k.NewStack != nil {
		return k.NewStack(r, req)
	}

	p, ok := k.providers[req.Provider]
	if !ok {
		return nil, nil, NewError(ErrProviderNotFound)
	}
	// Build context value.
	ctx := request.NewContext(context.Background(), r)
	ctx = context.WithValue(ctx, TeamRequestKey, req)
	if k.PublicKeys != nil {
		ctx = publickeys.NewContext(ctx, k.PublicKeys)
	}

	if k.ContextCreator != nil {
		ctx = k.ContextCreator(ctx)
	}

	// Attach an eventer keyed either by stack id or by group/identifier;
	// the two branches previously duplicated the creation and logging.
	var evID string
	switch {
	case req.StackID != "":
		evID = r.Method + "-" + req.StackID
	case req.Identifier != "":
		evID = r.Method + "-" + req.GroupName + "-" + req.Identifier
	}

	if evID != "" {
		ctx = eventer.NewContext(ctx, k.NewEventer(evID))

		k.Log.Debug("Eventer created %q", evID)
	}

	if req.Debug {
		ctx = k.setTraceID(r.Username, r.Method, ctx)
	}

	// Create stack handler.
	v, err := p.Stack(ctx)
	if err != nil {
		// Wrap with %w so callers can unwrap the provider error.
		return nil, nil, fmt.Errorf("error creating stack: %w", err)
	}

	s, ok := v.(Stacker)
	if !ok {
		return nil, nil, NewError(ErrStackNotImplemented)
	}

	return s, ctx, nil
}
Example #3
0
// Check fetches one machine for the given stacker's provider and verifies its
// klient usage. A missing machine document is not an error — it simply means
// there is nothing running for that provider. Unreachable-klient conditions
// (no kites, empty query, dial failure) are treated as transient and ignored.
func (q *Queue) Check(s *provider.Stacker) error {
	var m models.Machine

	err := q.FetchProvider(s.Provider.Name, &m)
	if err != nil {
		// The query finding nothing is not a failure: there is no such
		// document, i.e. no running machines for this provider. The debug
		// line was previously logged on the generic-error path even though
		// it describes this case.
		if err == mgo.ErrNotFound {
			q.Log.Debug("no running machines for %q provider found", s.Provider.Name)

			return nil
		}

		return fmt.Errorf("check %q provider error: %s", s.Provider.Name, err)
	}

	// Internal request on behalf of the machine's owner (if any).
	req := &kite.Request{
		Method: "internal",
	}

	if u := m.Owner(); u != nil {
		req.Username = u.Username
	}

	ctx := request.NewContext(context.Background(), req)

	bm, err := s.BuildBaseMachine(ctx, &m)
	if err != nil {
		return err
	}

	machine, err := s.BuildMachine(ctx, bm)
	if err != nil {
		return err
	}

	switch err := q.CheckUsage(s.Provider.Name, machine, bm, ctx); err {
	case nil:
		return nil
	case kite.ErrNoKitesAvailable, kontrol.ErrQueryFieldsEmpty, klient.ErrDialingFailed:
		// Transient connectivity problems — skip this round.
		return nil
	default:
		return fmt.Errorf("[%s] check usage of AWS klient kite [%s] err: %s", m.ObjectId.Hex(), m.IpAddress, err)
	}
}
Example #4
0
// authorizedKlient authorizes the requesting user against the given group and
// machine, then dials the machine's klient kite with a 10s timeout.
//
// The caller must be an admin of args.GroupName, and the machine must belong
// to that group; any failed check returns a descriptive error.
func (k *Kloud) authorizedKlient(r *kite.Request) (*klient.Klient, error) {
	if r.Args == nil {
		return nil, NewError(ErrNoArguments)
	}

	var args *AdminRequest
	if err := r.Args.One().Unmarshal(&args); err != nil {
		return nil, err
	}

	if args.MachineId == "" {
		return nil, errors.New("machineId is not passed")
	}

	if args.GroupName == "" {
		return nil, errors.New("groupName is not passed")
	}

	k.Log.Debug("Got arguments %+v for method: %s", args, r.Method)

	isAdmin, err := modelhelper.IsAdmin(r.Username, args.GroupName)
	if err != nil {
		return nil, err
	}

	if !isAdmin {
		return nil, fmt.Errorf("User '%s' is not an admin of group '%s'", r.Username, args.GroupName)
	}

	k.Log.Debug("User '%s' is an admin. Checking for machine permission", r.Username)

	machine, err := modelhelper.GetMachine(args.MachineId)
	if err != nil {
		return nil, fmt.Errorf("getMachine(%s) err: %s", args.MachineId, err)
	}

	g, err := modelhelper.GetGroup(args.GroupName)
	if err != nil {
		return nil, err
	}

	// Ensure the machine actually belongs to the requested group; stop
	// scanning as soon as a match is found (the original loop kept iterating).
	isGroupMember := false
	for _, group := range machine.Groups {
		if group.Id.Hex() == g.Id.Hex() {
			isGroupMember = true
			break
		}
	}
	if !isGroupMember {
		return nil, fmt.Errorf("'%s' machine does not belong to '%s' group",
			args.MachineId, args.GroupName)
	}

	k.Log.Debug("Incoming user is authorized, setting up DB and Klient connection")

	// Now we are ready to go.
	ctx := request.NewContext(context.Background(), r)
	ctx = k.ContextCreator(ctx)
	sess, ok := session.FromContext(ctx)
	if !ok {
		return nil, errors.New("internal server error (err: session context is not available)")
	}

	k.Log.Debug("Calling Klient method: %s", r.Method)
	return klient.NewWithTimeout(sess.Kite, machine.QueryString, time.Second*10)
}
Example #5
0
// coreMethods runs the given machineFunc for the machine referenced by the
// request and returns a ControlResult carrying an event id the client can
// poll for progress. It is the shared driver behind the state-changing
// handlers: it validates the arguments, takes the distributed machine lock,
// resolves the provider and machine, checks the requested method against the
// machine's current state, and finally executes fn in a goroutine while
// streaming status via an eventer.
//
// The named reqErr result is inspected by the deferred closure below: any
// non-nil error returned from this function releases the machine lock again.
// On success the lock is released by the background goroutine once fn ends.
func (k *Kloud) coreMethods(r *kite.Request, fn machineFunc) (result interface{}, reqErr error) {
	// calls with zero arguments causes args to be nil. Check it that we
	// don't get a beloved panic
	if r.Args == nil {
		return nil, NewError(ErrNoArguments)
	}

	// NOTE(review): the arguments look swapped relative to the message
	// ("calling <method> by <user>") — confirm the intended order.
	k.Log.Debug("solo: calling %q by %q with %q", r.Username, r.Method, r.Args.Raw)

	var args struct {
		MachineId string
		Provider  string
		Debug     bool
	}

	if err := r.Args.One().Unmarshal(&args); err != nil {
		return nil, err
	}

	if args.MachineId == "" {
		return nil, NewError(ErrMachineIdMissing)
	}

	if args.Provider == "" {
		return nil, NewError(ErrProviderIsMissing)
	}

	// Lock the machine id so no one else can access it. It means this
	// kloud instance is now responsible for this machine id. Its basically
	// a distributed lock. It's unlocked when there is an error or if the
	// method call is finished (unlocking is done inside the responsible
	// method calls).
	if r.Method != "info" {
		if err := k.Locker.Lock(args.MachineId); err != nil {
			return nil, err
		}

		// if something goes wrong after step reset the document which is was
		// set in the by previous step by Locker.Lock(). If there is no error,
		// the lock will be unlocked in the respective method  function.
		defer func() {
			if reqErr != nil {
				// otherwise that means Locker.Lock or something else in
				// ControlFunc failed. Reset the lock again so it can be acquired by
				// others.
				k.Locker.Unlock(args.MachineId)
			}
		}()
	}

	// Resolve the registered provider and make sure it implements the full
	// Provider interface (not just Stacker).
	provider, ok := k.providers[args.Provider]
	if !ok {
		return nil, NewError(ErrProviderNotFound)
	}

	p, ok := provider.(Provider)
	if !ok {
		return nil, NewError(ErrProviderNotImplemented)
	}

	ctx := request.NewContext(context.Background(), r)
	// add publicKeys to be deployed to the machine, the machine provider is
	// responsible of deploying it to the machine while building it.
	if k.PublicKeys != nil {
		ctx = publickeys.NewContext(ctx, k.PublicKeys)
	}

	if k.ContextCreator != nil {
		ctx = k.ContextCreator(ctx)
	}

	// if debug is enabled, generate TraceID and pass it with the context
	if args.Debug {
		ctx = k.setTraceID(r.Username, r.Method, ctx)
	}

	// old events are not needed anymore, so we're just going to remove them.
	k.cleanupEventers(args.MachineId)

	// each method has his own unique eventer
	eventId := r.Method + "-" + args.MachineId
	ev := k.NewEventer(eventId)
	ctx = eventer.NewContext(ctx, ev)

	machine, err := p.Machine(ctx, args.MachineId)
	if err != nil {
		return nil, err
	}

	m, ok := machine.(Machiner)
	if !ok {
		return nil, NewError(ErrMachineNotImplemented)
	}

	// Reject requests whose declared provider does not match the machine's
	// actual provider.
	if m.ProviderName() != args.Provider {
		k.Log.Debug("want provider %q, got %q", m.ProviderName(), args.Provider)

		return nil, NewError(ErrProviderIsWrong)
	}

	// Check if the given method is in valid methods of that current state. For
	// example if the method is "build", and the state is "stopped" than this
	// will return an error.
	if !methodIn(r.Method, m.State().ValidMethods()...) {
		return nil, fmt.Errorf("%s not allowed for current state '%s'. Allowed methods are: %v",
			r.Method, strings.ToLower(m.State().String()), m.State().ValidMethods())
	}

	// states maps each method to its start/final machinestate pair.
	pair, ok := states[r.Method]
	if !ok {
		return nil, fmt.Errorf("no state pair available for %s", r.Method)
	}

	tags := []string{
		"instanceId:" + args.MachineId,
		"provider:" + args.Provider,
	}

	ctx = k.traceRequest(ctx, tags)

	// Announce the transition start before kicking off the work.
	ev.Push(&eventer.Event{
		Message: r.Method + " started",
		Status:  pair.start,
	})

	// Start our core method in a goroutine to not block it for the client
	// side. However we do return an event id which is an unique for tracking
	// the current status of the running method.
	go func() {
		finalEvent := &eventer.Event{
			Message:    r.Method + " finished",
			Status:     pair.final,
			Percentage: 100,
		}

		k.Log.Info("[%s] ======> %s started (requester: %s, provider: %s)<======",
			args.MachineId, strings.ToUpper(r.Method), r.Username, args.Provider)
		start := time.Now()
		err := fn(ctx, m)
		if err != nil {
			// don't pass the error directly to the eventer, mask it to avoid
			// error leaking to the client. We just log it here.
			k.Log.Error("[%s] ======> %s finished with error: '%s' (requester: %s, provider: %s) <======",
				args.MachineId, strings.ToUpper(r.Method), err, r.Username, args.Provider)

			finalEvent.Error = strings.ToTitle(r.Method) + " failed. Please contact support."

			// however, eventerErr is an error we want to pass explicitly to
			// the client side
			if eventerErr, ok := err.(*EventerError); ok {
				finalEvent.Error = eventerErr.Error()
			}

			finalEvent.Status = m.State() // fallback to to old state
		} else {
			k.Log.Info("[%s] ======> %s finished (time: %s, requester: %s, provider: %s) <======",
				args.MachineId, strings.ToUpper(r.Method), time.Since(start), r.Username, args.Provider)
		}

		// Publish the terminal event, release the distributed lock taken
		// above, and flush any trace data attached to the context.
		ev.Push(finalEvent)
		k.Locker.Unlock(args.MachineId)
		k.send(ctx)
	}()

	return &ControlResult{
		EventId: eventId,
	}, nil
}
Example #6
0
// GetMachine resolves and returns the Machiner implementation for the machine
// referenced by the request arguments.
//
// For every method except "info" the machine id is locked via k.Locker; the
// named reqErr result lets the deferred closure release that lock again when
// this function fails after acquiring it. On success the lock stays held and
// must be released by the calling method handler.
func (k *Kloud) GetMachine(r *kite.Request) (machine Machiner, reqErr error) {
	// calls with zero arguments causes args to be nil. Check it that we
	// don't get a beloved panic
	if r.Args == nil {
		return nil, NewError(ErrNoArguments)
	}

	var args struct {
		MachineId string
		Provider  string
		Debug     bool
	}

	if err := r.Args.One().Unmarshal(&args); err != nil {
		return nil, err
	}

	if args.MachineId == "" {
		return nil, NewError(ErrMachineIdMissing)
	}

	// Validate the provider BEFORE taking the distributed lock, so a request
	// that is going to fail anyway never locks (and then has to unlock) the
	// machine. This also matches the validation order used by coreMethods;
	// previously this check ran after Locker.Lock.
	if args.Provider == "" {
		return nil, NewError(ErrProviderIsMissing)
	}

	// Lock the machine id so no one else can access it. It means this
	// kloud instance is now responsible for this machine id. Its basically
	// a distributed lock. It's unlocked when there is an error or if the
	// method call is finished (unlocking is done inside the responsible
	// method calls).
	if r.Method != "info" {
		if err := k.Locker.Lock(args.MachineId); err != nil {
			return nil, err
		}

		// If something goes wrong after this point, release the lock taken
		// by Locker.Lock above. If there is no error, the lock is released
		// in the respective method handler instead.
		defer func() {
			if reqErr != nil {
				k.Locker.Unlock(args.MachineId)
			}
		}()
	}

	provider, ok := k.providers[args.Provider]
	if !ok {
		// NOTE(review): coreMethods returns ErrProviderNotFound for this
		// same condition — confirm whether ErrProviderAvailable here is
		// intentional; kept unchanged to preserve the caller-visible error.
		return nil, NewError(ErrProviderAvailable)
	}

	p, ok := provider.(Provider)
	if !ok {
		return nil, NewError(ErrProviderNotImplemented)
	}

	ctx := request.NewContext(context.Background(), r)

	// if debug is enabled, generate TraceID and pass it with the context
	if args.Debug {
		ctx = k.setTraceID(r.Username, r.Method, ctx)
	}

	v, err := p.Machine(ctx, args.MachineId)
	if err != nil {
		return nil, err
	}

	m, ok := v.(Machiner)
	if !ok {
		return nil, NewError(ErrMachineNotImplemented)
	}

	return m, nil
}