Example #1
func addContainersWithHost(args *changeUnitsPipelineArgs) ([]container, error) {
	a := args.app
	w := args.writer
	var units int
	processMsg := make([]string, 0, len(args.toAdd))
	for processName, v := range args.toAdd {
		units += v.Quantity
		processMsg = append(processMsg, fmt.Sprintf("[%s: %d]", processName, v.Quantity))
	}
	imageId := args.imageId
	var destinationHost []string
	if args.toHost != "" {
		destinationHost = []string{args.toHost}
	}
	if w == nil {
		w = ioutil.Discard
	}
	var plural string
	if units > 1 {
		plural = "s"
	}
	fmt.Fprintf(w, "\n---- Starting new unit%s %s ----\n", plural, strings.Join(processMsg, " "))
	oldContainers := make([]container, 0, units)
	for processName, cont := range args.toAdd {
		for i := 0; i < cont.Quantity; i++ {
			oldContainers = append(oldContainers, container{ProcessName: processName, Status: cont.Status.String()})
		}
	}
	rollbackCallback := func(c *container) {
		log.Errorf("Removing container %q due failed add units.", c.ID)
		errRem := args.provisioner.removeContainer(c, a)
		if errRem != nil {
			log.Errorf("Unable to destroy container %q: %s", c.ID, errRem)
		}
	}
	var (
		createdContainers []*container
		m                 sync.Mutex
	)
	err := runInContainers(oldContainers, func(c *container, _ chan *container) error {
		c, err := args.provisioner.start(c, a, imageId, w, destinationHost...)
		if err != nil {
			return err
		}
		m.Lock()
		createdContainers = append(createdContainers, c)
		m.Unlock()
		return nil
	}, rollbackCallback, true)
	if err != nil {
		return nil, err
	}
	result := make([]container, len(createdContainers))
	for i, c := range createdContainers {
		result[i] = *c
	}
	return result, nil
}
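A side note on the snippet above: runInContainers invokes the callback from multiple goroutines, so every append to createdContainers happens under the mutex. A minimal, self-contained sketch of that mutex-guarded append pattern (the names here are illustrative, not from tsuru):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		mu      sync.Mutex
		results []int
		wg      sync.WaitGroup
	)
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			mu.Lock() // append on a shared slice is not safe without this
			results = append(results, n*n)
			mu.Unlock()
		}(i)
	}
	wg.Wait()
	fmt.Println(results)
}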
Example #2
func deploy(app provision.App, version string, w io.Writer) (string, error) {
	commands, err := deployCmds(app, version)
	if err != nil {
		return "", err
	}
	imageId := getImage(app)
	actions := []*action.Action{&insertEmptyContainerInDB, &createContainer, &startContainer, &updateContainerInDB}
	pipeline := action.NewPipeline(actions...)
	err = pipeline.Execute(app, imageId, commands)
	if err != nil {
		log.Errorf("error on execute deploy pipeline for app %s - %s", app.GetName(), err)
		return "", err
	}
	c := pipeline.Result().(container)
	err = c.logs(w)
	if err != nil {
		log.Errorf("error on get logs for container %s - %s", c.ID, err)
		return "", err
	}
	_, err = dockerCluster().WaitContainer(c.ID)
	if err != nil {
		log.Errorf("Process failed for container %q: %s", c.ID, err)
		return "", err
	}
	imageId, err = c.commit()
	if err != nil {
		log.Errorf("error on commit container %s - %s", c.ID, err)
		return "", err
	}
	c.remove()
	return imageId, nil
}
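The deploy flow above delegates ordering and rollback to tsuru's action package. As a rough sketch of the underlying idea only — this is not the real action API — a pipeline runs steps forward and unwinds the already-completed ones when a later step fails:

package main

import (
	"errors"
	"fmt"
)

// step is a hypothetical stand-in for tsuru's action.Action.
type step struct {
	forward  func() error
	backward func()
}

// runPipeline executes steps in order; on failure it rolls back the
// completed steps in reverse order.
func runPipeline(steps []step) error {
	for i, s := range steps {
		if err := s.forward(); err != nil {
			for j := i - 1; j >= 0; j-- {
				if steps[j].backward != nil {
					steps[j].backward()
				}
			}
			return err
		}
	}
	return nil
}

func main() {
	err := runPipeline([]step{
		{forward: func() error { fmt.Println("create container"); return nil },
			backward: func() { fmt.Println("remove container") }},
		{forward: func() error { return errors.New("start failed") }},
	})
	fmt.Println("pipeline error:", err)
}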
Example #3
func (r *hipacheRouter) AddRoute(name string, address *url.URL) error {
	backendName, err := router.Retrieve(name)
	if err != nil {
		return err
	}
	domain, err := config.GetString(r.prefix + ":domain")
	if err != nil {
		log.Errorf("error on getting hipache domain in add route for %s - %s", backendName, address)
		return &routeError{"add", err}
	}
	frontend := "frontend:" + backendName + "." + domain
	if err := r.addRoute(frontend, address.String()); err != nil {
		log.Errorf("error on add route for %s - %s", backendName, address)
		return &routeError{"add", err}
	}
	cnames, err := r.getCNames(backendName)
	if err != nil {
		log.Errorf("error on get cname in add route for %s - %s", backendName, address)
		return err
	}
	if cnames == nil {
		return nil
	}
	for _, cname := range cnames {
		err = r.addRoute("frontend:"+cname, address.String())
		if err != nil {
			return err
		}
	}
	return nil
}
Example #4
func (p *dockerProvisioner) Destroy(app provision.App) error {
	containers, err := listContainersByApp(app.GetName())
	if err != nil {
		log.Errorf("Failed to list app containers: %s", err.Error())
		return err
	}
	var containersGroup sync.WaitGroup
	containersGroup.Add(len(containers))
	for _, c := range containers {
		go func(c container) {
			defer containersGroup.Done()
			err := removeContainer(&c)
			if err != nil {
				log.Errorf("Unable to destroy container %s: %s", c.ID, err.Error())
			}
		}(c)
	}
	containersGroup.Wait()
	err = removeImage(assembleImageName(app.GetName()))
	if err != nil {
		log.Errorf("Failed to remove image: %s", err.Error())
	}
	r, err := getRouter()
	if err != nil {
		log.Errorf("Failed to get router: %s", err.Error())
		return err
	}
	err = r.RemoveBackend(app.GetName())
	if err != nil {
		log.Errorf("Failed to remove route backend: %s", err.Error())
		return err
	}
	return nil
}
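Destroy fans container removal out to one goroutine per container and waits on a sync.WaitGroup. Note that it passes c as a parameter, which gives each goroutine its own copy instead of the shared loop variable (required before Go 1.22). A stripped-down sketch of the same shape, with made-up names:

package main

import (
	"fmt"
	"sync"
)

func main() {
	containers := []string{"c1", "c2", "c3"}
	var wg sync.WaitGroup
	wg.Add(len(containers))
	for _, c := range containers {
		go func(c string) { // the parameter copy keeps each goroutine on its own value
			defer wg.Done()
			fmt.Println("removing", c)
		}(c)
	}
	wg.Wait()
}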
Example #5
func notify(appName string, messages []interface{}) {
	factory, err := queue.Factory()
	if err != nil {
		log.Errorf("Error on logs notify: %s", err.Error())
		return
	}
	q, err := factory.Get(logQueueName(appName))
	if err != nil {
		log.Errorf("Error on logs notify: %s", err.Error())
		return
	}
	pubSubQ, ok := q.(queue.PubSubQ)
	if !ok {
		log.Debugf("Queue does not support pubsub, logs only in database.")
		return
	}
	for _, msg := range messages {
		bytes, err := json.Marshal(msg)
		if err != nil {
			log.Errorf("Error on logs notify: %s", err.Error())
			continue
		}
		err = pubSubQ.Pub(bytes)
		if err != nil {
			log.Errorf("Error on logs notify: %s", err.Error())
		}
	}
}
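The q.(queue.PubSubQ) assertion above is the optional-capability idiom: probe for an extension interface and degrade gracefully when the backend lacks it. A self-contained sketch with hypothetical interfaces (not tsuru's queue package):

package main

import "fmt"

type Queue interface{ Put(msg string) }

type PubSub interface{ Pub(msg string) }

type memQueue struct{}

func (memQueue) Put(msg string) { fmt.Println("stored:", msg) }

func main() {
	var q Queue = memQueue{}
	if ps, ok := q.(PubSub); ok {
		ps.Pub("new log line")
	} else {
		fmt.Println("queue has no pubsub support; logs stay in the database")
	}
}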
Example #6
func (l *lockUpdater) spin() {
	set := map[Target]struct{}{}
	for {
		select {
		case added := <-l.addCh:
			set[*added] = struct{}{}
		case removed := <-l.removeCh:
			delete(set, *removed)
		case <-l.stopCh:
			return
		case <-time.After(lockUpdateInterval):
		}
		conn, err := db.Conn()
		if err != nil {
			log.Errorf("[events] [lock update] error getting db conn: %s", err)
			continue
		}
		coll := conn.Events()
		slice := make([]interface{}, len(set))
		i := 0
		for id := range set {
			slice[i], _ = id.GetBSON()
			i++
		}
		err = coll.Update(bson.M{"_id": bson.M{"$in": slice}}, bson.M{"$set": bson.M{"lockupdatetime": time.Now().UTC()}})
		if err != nil && err != mgo.ErrNotFound {
			log.Errorf("[events] [lock update] error updating: %s", err)
		}
		conn.Close()
	}
}
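The select at the top of the loop reacts to add/remove/stop events and, through time.After, falls through once per interval so the update query runs periodically even when no events arrive. A stripped-down sketch of that loop shape (channel and interval names are illustrative):

package main

import (
	"fmt"
	"time"
)

func spin(addCh, stopCh chan string, interval time.Duration) {
	set := map[string]struct{}{}
	for {
		select {
		case added := <-addCh:
			set[added] = struct{}{}
		case <-stopCh:
			return
		case <-time.After(interval): // fall through once per interval
		}
		fmt.Println("periodic pass over", len(set), "targets")
	}
}

func main() {
	addCh, stopCh := make(chan string), make(chan string)
	go spin(addCh, stopCh, 50*time.Millisecond)
	addCh <- "target-1"
	time.Sleep(120 * time.Millisecond)
	close(stopCh)
}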
Example #7
func (r hipacheRouter) AddRoute(name, address string) error {
	backendName, err := router.Retrieve(name)
	if err != nil {
		return err
	}
	domain, err := config.GetString("hipache:domain")
	if err != nil {
		log.Errorf("error on getting hipache domain in add route for %s - %s", backendName, address)
		return &routeError{"add", err}
	}
	frontend := "frontend:" + backendName + "." + domain
	if err := r.addRoute(frontend, address); err != nil {
		log.Errorf("error on add route for %s - %s", backendName, address)
		return &routeError{"add", err}
	}
	cname, err := r.getCName(backendName)
	if err != nil {
		log.Errorf("error on get cname in add route for %s - %s", backendName, address)
		return err
	}
	if cname == "" {
		return nil
	}
	return r.addRoute("frontend:"+cname, address)
}
Example #8
func runRoutesRebuildOnce(appName string, lock bool) bool {
	if appFinder == nil {
		return false
	}
	a, err := appFinder(appName)
	if err != nil {
		log.Errorf("[routes-rebuild-task] error getting app %q: %s", appName, err)
		return false
	}
	if a == nil {
		return true
	}
	if lock {
		var locked bool
		locked, err = a.InternalLock("rebuild-routes-task")
		if err != nil || !locked {
			return false
		}
		defer a.Unlock()
	}
	_, err = RebuildRoutes(a)
	if err != nil {
		log.Errorf("[routes-rebuild-task] error rebuilding app %q: %s", appName, err)
		return false
	}
	return true
}
Example #9
// handle is the function called by the queue handler on each message.
func handle(msg *queue.Message) {
	switch msg.Action {
	case regenerateApprc:
		if len(msg.Args) < 1 {
			log.Errorf("Error handling %q: this action requires at least 1 argument.", msg.Action)
			return
		}
		app, err := ensureAppIsStarted(msg)
		if err != nil {
			log.Error(err.Error())
			return
		}
		err = app.SerializeEnvVars()
		if err != nil {
			log.Error(err.Error())
		}
	case BindService:
		err := bindUnit(msg)
		if err != nil {
			log.Error(err.Error())
			return
		}
	default:
		log.Errorf("Error handling %q: invalid action.", msg.Action)
	}
}
Example #10
// Rename renames a repository.
func Rename(oldName, newName string) error {
	log.Debugf("Renaming repository %q to %q", oldName, newName)
	repo, err := Get(oldName)
	if err != nil {
		log.Errorf("repository.Rename: Repository %q not found: %s", oldName, err)
		return err
	}
	newRepo := repo
	newRepo.Name = newName
	conn, err := db.Conn()
	if err != nil {
		return err
	}
	defer conn.Close()
	err = conn.Repository().Insert(newRepo)
	if err != nil {
		log.Errorf("repository.Rename: Error adding new repository %q: %s", newName, err)
		return err
	}
	err = conn.Repository().RemoveId(oldName)
	if err != nil {
		log.Errorf("repository.Rename: Error removing old repository %q: %s", oldName, err)
		return err
	}
	return fs.Filesystem().Rename(barePath(oldName), barePath(newName))
}
Example #11
// New creates a representation of a git repository. It creates a Git
// repository using the "bare-dir" setting and saves repository's meta data in
// the database.
func New(name string, users []string, isPublic bool) (*Repository, error) {
	log.Debugf("Creating repository %q", name)
	r := &Repository{Name: name, Users: users, IsPublic: isPublic}
	if v, err := r.isValid(); !v {
		log.Errorf("repository.New: Invalid repository %q: %s", name, err)
		return r, err
	}
	if err := newBare(name); err != nil {
		log.Errorf("repository.New: Error creating bare repository for %q: %s", name, err)
		return r, err
	}
	barePath := barePath(name)
	if barePath != "" && isPublic {
		ioutil.WriteFile(barePath+"/git-daemon-export-ok", []byte(""), 0644)
		if f, err := fs.Filesystem().Create(barePath + "/git-daemon-export-ok"); err == nil {
			f.Close()
		}
	}
	conn, err := db.Conn()
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	err = conn.Repository().Insert(&r)
	if mgo.IsDup(err) {
		log.Errorf("repository.New: Duplicate repository %q", name)
		return r, fmt.Errorf("A repository with this name already exists.")
	}
	return r, err
}
Example #12
// RemoveUnits removes n units from the app. It's a process composed of
// multiple steps:
//
//     1. Remove units from the provisioner
//     2. Remove units from the app list
//     3. Update quota
func (app *App) RemoveUnits(n uint) error {
	if n == 0 {
		ReleaseApplicationLock(app.Name)
		return stderr.New("Cannot remove zero units.")
	} else if l := uint(len(app.Units())); l == n {
		ReleaseApplicationLock(app.Name)
		return stderr.New("Cannot remove all units from an app.")
	} else if n > l {
		ReleaseApplicationLock(app.Name)
		return fmt.Errorf("Cannot remove %d units from this app, it has only %d units.", n, l)
	}
	go func() {
		defer ReleaseApplicationLock(app.Name)
		Provisioner.RemoveUnits(app, n)
		conn, err := db.Conn()
		if err != nil {
			log.Errorf("Error: %s", err)
			return
		}
		defer conn.Close()
		dbErr := conn.Apps().Update(
			bson.M{"name": app.Name},
			bson.M{
				"$set": bson.M{
					"quota.inuse": len(app.Units()),
				},
			},
		)
		if dbErr != nil {
			log.Errorf("Error: %s", dbErr)
		}
	}()
	return nil
}
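One Go scoping detail the guard chain above relies on: l, declared in the init statement of the second branch, remains in scope for the following else-if branches, so n > l is legal. A tiny compilable illustration:

package main

import "fmt"

func main() {
	n := uint(4)
	if n == 0 {
		fmt.Println("cannot remove zero units")
	} else if l := uint(3); l == n {
		fmt.Println("cannot remove all units")
	} else if n > l { // l, declared in the previous branch's init, is still in scope
		fmt.Println("cannot remove more units than the app has")
	} else {
		fmt.Println("ok to remove")
	}
}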
Example #13
// Deploy runs a deployment of an application. It will first try to run an
// archive based deploy (if opts.ArchiveURL is not empty), and then fallback to
// the Git based deployment.
func Deploy(opts DeployOptions) error {
	var outBuffer bytes.Buffer
	start := time.Now()
	logWriter := LogWriter{App: opts.App}
	logWriter.Async()
	defer logWriter.Close()
	writer := io.MultiWriter(&tsuruIo.NoErrorWriter{Writer: opts.OutputStream}, &outBuffer, &logWriter)
	imageId, err := deployToProvisioner(&opts, writer)
	elapsed := time.Since(start)
	saveErr := saveDeployData(&opts, imageId, outBuffer.String(), elapsed, err)
	if saveErr != nil {
		log.Errorf("WARNING: couldn't save deploy data, deploy opts: %#v", opts)
	}
	if err != nil {
		return err
	}
	err = incrementDeploy(opts.App)
	if err != nil {
		log.Errorf("WARNING: couldn't increment deploy count, deploy opts: %#v", opts)
	}
	if opts.App.UpdatePlatform {
		opts.App.SetUpdatePlatform(false)
	}
	return nil
}
Example #14
// New creates a representation of a git repository. It creates a Git
// repository using the "bare-dir" setting and saves repository's meta data in
// the database.
func New(name string, users, readOnlyUsers []string, isPublic bool) (*Repository, error) {
	log.Debugf("Creating repository %q", name)
	r := &Repository{Name: name, Users: users, ReadOnlyUsers: readOnlyUsers, IsPublic: isPublic}
	if v, err := r.isValid(); !v {
		log.Errorf("repository.New: Invalid repository %q: %s", name, err)
		return r, err
	}
	conn, err := db.Conn()
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	err = conn.Repository().Insert(r)
	if err != nil {
		if mgo.IsDup(err) {
			return nil, ErrRepositoryAlreadyExists
		}
		return nil, err
	}
	if err = newBare(name); err != nil {
		log.Errorf("repository.New: Error creating bare repository for %q: %s", name, err)
		conn.Repository().Remove(bson.M{"_id": r.Name})
		return r, err
	}
	barePath := barePath(name)
	if barePath != "" && isPublic {
		if f, createErr := fs.Filesystem().Create(barePath + "/git-daemon-export-ok"); createErr == nil {
			f.Close()
		}
	}
	return r, err
}
Example #15
func syncRepositoryApps(user *auth.User, beforeApps []string, roleCache map[string]*permission.Role) error {
	err := user.Reload()
	if err != nil {
		return err
	}
	afterApps, err := deployableApps(user, roleCache)
	if err != nil {
		return err
	}
	afterMap := map[string]struct{}{}
	for _, a := range afterApps {
		afterMap[a] = struct{}{}
	}
	manager := repository.Manager()
	for _, a := range beforeApps {
		var err error
		if _, ok := afterMap[a]; !ok {
			err = manager.RevokeAccess(a, user.Email)
		}
		if err != nil {
			log.Errorf("error revoking gandalf access for app %s, user %s: %s", a, user.Email, err)
		}
	}
	for _, a := range afterApps {
		err := manager.GrantAccess(a, user.Email)
		if err != nil {
			log.Errorf("error granting gandalf access for app %s, user %s: %s", a, user.Email, err)
		}
	}
	return nil
}
Example #16
File: app.go Project: tsuru/tsuru
func (app *App) BindUnit(unit *provision.Unit) error {
	instances, err := app.serviceInstances()
	if err != nil {
		return err
	}
	var i int
	var instance service.ServiceInstance
	for i, instance = range instances {
		err = instance.BindUnit(app, unit)
		if err != nil {
			log.Errorf("Error binding the unit %s with the service instance %s: %s", unit.ID, instance.Name, err)
			break
		}
	}
	if err != nil {
		for j := i - 1; j >= 0; j-- {
			instance = instances[j]
			rollbackErr := instance.UnbindUnit(app, unit)
			if rollbackErr != nil {
				log.Errorf("Error unbinding unit %s with the service instance %s during rollback: %s", unit.ID, instance.Name, rollbackErr)
			}
		}
	}
	return err
}
Example #17
func (h *NodeHealer) tryHealingNode(node provision.Node, reason string, lastCheck *NodeChecks) error {
	_, hasIaas := node.Metadata()["iaas"]
	if !hasIaas {
		log.Debugf("node %q doesn't have IaaS information, healing (%s) won't run on it.", node.Address(), reason)
		return nil
	}
	poolName := node.Metadata()[poolMetadataName]
	evt, err := event.NewInternal(&event.Opts{
		Target:       event.Target{Type: event.TargetTypeNode, Value: node.Address()},
		InternalKind: "healer",
		CustomData: NodeHealerCustomData{
			Node:      provision.NodeToSpec(node),
			Reason:    reason,
			LastCheck: lastCheck,
		},
		Allowed: event.Allowed(permission.PermPoolReadEvents, permission.Context(permission.CtxPool, poolName)),
	})
	if err != nil {
		if _, ok := err.(event.ErrEventLocked); ok {
			// Healing in progress.
			return nil
		}
		return errors.Wrap(err, "Error trying to insert node healing event, healing aborted")
	}
	var createdNode *provision.NodeSpec
	var evtErr error
	defer func() {
		var updateErr error
		if evtErr == nil && createdNode == nil {
			updateErr = evt.Abort()
		} else {
			updateErr = evt.DoneCustomData(evtErr, createdNode)
		}
		if updateErr != nil {
			log.Errorf("error trying to update healing event: %s", updateErr)
		}
	}()
	_, err = node.Provisioner().GetNode(node.Address())
	if err != nil {
		if err == provision.ErrNodeNotFound {
			return nil
		}
		evtErr = errors.Wrap(err, "unable to check if node still exists")
		return evtErr
	}
	shouldHeal, err := h.shouldHealNode(node)
	if err != nil {
		evtErr = errors.Wrap(err, "unable to check if node should be healed")
		return evtErr
	}
	if !shouldHeal {
		return nil
	}
	log.Errorf("initiating healing process for node %q due to: %s", node.Address(), reason)
	createdNode, evtErr = h.healNode(node)
	return evtErr
}
Example #18
func handle(msg *queue.Message) {
	if msg.Action == addUnitToLoadBalancer {
		if len(msg.Args) < 1 {
			log.Errorf("Failed to handle %q: it requires at least one argument.", msg.Action)
			return
		}
		a := qApp{name: msg.Args[0]}
		unitNames := msg.Args[1:]
		sort.Strings(unitNames)
		status, err := (&JujuProvisioner{}).collectStatus()
		if err != nil {
			log.Errorf("Failed to handle %q: juju status failed.\n%s.", msg.Action, err)
			return
		}
		var units []provision.Unit
		for _, u := range status {
			if u.AppName != a.name {
				continue
			}
			n := sort.SearchStrings(unitNames, u.Name)
			if len(unitNames) == 0 ||
				n < len(unitNames) && unitNames[n] == u.Name {
				units = append(units, u)
			}
		}
		if len(units) == 0 {
			log.Errorf("Failed to handle %q: units not found.", msg.Action)
			return
		}
		var noID []string
		var ok []provision.Unit
		for _, u := range units {
			if u.InstanceId == "pending" || u.InstanceId == "" {
				noID = append(noID, u.Name)
			} else {
				ok = append(ok, u)
			}
		}
		if len(noID) == len(units) {
			getQueue(queueName).Put(msg, 0)
		} else {
			router, err := Router()
			if err != nil {
				log.Errorf("Failed to handle %q: could not get router: %s", msg.Action, err)
				return
			}
			for _, u := range units {
				router.AddRoute(a.GetName(), u.InstanceId)
			}
			if len(noID) > 0 {
				args := []string{a.name}
				args = append(args, noID...)
				msg := queue.Message{
					Action: msg.Action,
					Args:   args,
				}
				getQueue(queueName).Put(&msg, 1e9)
			}
		}
	}
}
Example #19
func (s *segregatedScheduler) filterByMemoryUsage(a *app.App, nodes []cluster.Node, maxMemoryRatio float32, TotalMemoryMetadata string) ([]cluster.Node, error) {
	if maxMemoryRatio == 0 || TotalMemoryMetadata == "" {
		return nodes, nil
	}
	hosts := make([]string, len(nodes))
	for i := range nodes {
		hosts[i] = urlToHost(nodes[i].Address)
	}
	containers, err := s.provisioner.ListContainers(bson.M{"hostaddr": bson.M{"$in": hosts}, "id": bson.M{"$nin": s.ignoredContainers}})
	if err != nil {
		return nil, err
	}
	hostReserved := make(map[string]int64)
	for _, cont := range containers {
		a, err := app.GetByName(cont.AppName)
		if err != nil {
			return nil, err
		}
		hostReserved[cont.HostAddr] += a.Plan.Memory
	}
	megabyte := float64(1024 * 1024)
	nodeList := make([]cluster.Node, 0, len(nodes))
	for _, node := range nodes {
		totalMemory, _ := strconv.ParseFloat(node.Metadata[TotalMemoryMetadata], 64)
		shouldAdd := true
		if totalMemory != 0 {
			maxMemory := totalMemory * float64(maxMemoryRatio)
			host := urlToHost(node.Address)
			nodeReserved := hostReserved[host] + a.Plan.Memory
			if nodeReserved > int64(maxMemory) {
				shouldAdd = false
				tryingToReserveMB := float64(a.Plan.Memory) / megabyte
				reservedMB := float64(hostReserved[host]) / megabyte
				limitMB := maxMemory / megabyte
				log.Errorf("Node %q has reached its memory limit. "+
					"Limit %0.4fMB. Reserved: %0.4fMB. Needed additional %0.4fMB",
					host, limitMB, reservedMB, tryingToReserveMB)
			}
		}
		if shouldAdd {
			nodeList = append(nodeList, node)
		}
	}
	if len(nodeList) == 0 {
		autoScaleEnabled, _ := config.GetBool("docker:auto-scale:enabled")
		errMsg := fmt.Sprintf("no nodes found with enough memory for container of %q: %0.4fMB",
			a.Name, float64(a.Plan.Memory)/megabyte)
		if autoScaleEnabled {
			// Allow going over quota temporarily because auto-scale will be
			// able to detect this and automatically add new nodes.
			log.Errorf("WARNING: %s. Will ignore memory restrictions.", errMsg)
			return nodes, nil
		}
		return nil, errors.New(errMsg)
	}
	return nodeList, nil
}
Example #20
// ReleaseApplicationLock releases a lock hold on an app, currently it's called
// by a middleware, however, ideally, it should be called individually by each
// handler since they might be doing operations in background.
func ReleaseApplicationLock(appName string) {
	conn, err := db.Conn()
	if err != nil {
		log.Errorf("Error getting DB, couldn't unlock %s: %s", appName, err.Error())
		return
	}
	defer conn.Close()
	err = conn.Apps().Update(bson.M{"name": appName, "lock.locked": true}, bson.M{"$set": bson.M{"lock": AppLock{}}})
	if err != nil {
		log.Errorf("Error updating entry, couldn't unlock %s: %s", appName, err.Error())
	}
}
Example #21
func removeContainer(c *container) error {
	err := c.stop()
	if err != nil {
		log.Errorf("error on stop unit %s - %s", c.ID, err)
	}
	err = c.remove()
	if err != nil {
		log.Errorf("error on remove container %s - %s", c.ID, err)
	}
	return err
}
Example #22
func (h *ContainerHealer) healContainerIfNeeded(cont container.Container) error {
	if cont.LastSuccessStatusUpdate.IsZero() {
		if !cont.MongoID.Time().Before(time.Now().Add(-h.maxUnresponsiveTime)) {
			return nil
		}
	}
	isAsExpected, err := h.isAsExpected(cont)
	if err != nil {
		log.Errorf("Containers healing: couldn't verify running processes in container %q: %s", cont.ID, err)
	}
	if isAsExpected {
		cont.SetStatus(h.provisioner, cont.ExpectedStatus(), true)
		return nil
	}
	locked := h.locker.Lock(cont.AppName)
	if !locked {
		return errors.Errorf("Containers healing: unable to heal %q couldn't lock app %s", cont.ID, cont.AppName)
	}
	defer h.locker.Unlock(cont.AppName)
	// Sanity check, now we have a lock, let's find out if the container still exists
	_, err = h.provisioner.GetContainer(cont.ID)
	if err != nil {
		if _, isNotFound := err.(*provision.UnitNotFoundError); isNotFound {
			return nil
		}
		return errors.Wrapf(err, "Containers healing: unable to heal %q couldn't verify it still exists", cont.ID)
	}
	a, err := app.GetByName(cont.AppName)
	if err != nil {
		return errors.Wrapf(err, "Containers healing: unable to heal %q couldn't get app %q", cont.ID, cont.AppName)
	}
	log.Errorf("Initiating healing process for container %q, unresponsive since %s.", cont.ID, cont.LastSuccessStatusUpdate)
	evt, err := event.NewInternal(&event.Opts{
		Target:       event.Target{Type: event.TargetTypeContainer, Value: cont.ID},
		InternalKind: "healer",
		CustomData:   cont,
		Allowed: event.Allowed(permission.PermAppReadEvents, append(permission.Contexts(permission.CtxTeam, a.Teams),
			permission.Context(permission.CtxApp, a.Name),
			permission.Context(permission.CtxPool, a.Pool),
		)...),
	})
	if err != nil {
		return errors.Wrap(err, "Error trying to insert container healing event, healing aborted")
	}
	newCont, healErr := h.healContainer(cont)
	if healErr != nil {
		healErr = errors.Errorf("Error healing container %q: %s", cont.ID, healErr.Error())
	}
	err = evt.DoneCustomData(healErr, newCont)
	if err != nil {
		log.Errorf("Error trying to update containers healing event: %s", err)
	}
	return healErr
}
Example #23
func (h *ContainerHealer) runContainerHealerOnce() {
	containers, err := listUnresponsiveContainers(h.provisioner, h.maxUnresponsiveTime)
	if err != nil {
		log.Errorf("Containers Healing: couldn't list unresponsive containers: %s", err.Error())
	}
	for _, cont := range containers {
		err := h.healContainerIfNeeded(cont)
		if err != nil {
			log.Error(err.Error())
		}
	}
}
Example #24
func sendResetPassword(u *auth.User, t *passwordToken) {
	var body bytes.Buffer
	err := resetEmailData.Execute(&body, t)
	if err != nil {
		log.Errorf("Failed to send password token to user %q: %s", u.Email, err)
		return
	}
	err = sendEmail(u.Email, body.Bytes())
	if err != nil {
		log.Errorf("Failed to send password token for user %q: %s", u.Email, err)
	}
}
Example #25
func (c *Container) Create(args *CreateArgs) error {
	port, err := getPort()
	if err != nil {
		log.Errorf("error on getting port for container %s - %s", c.AppName, port)
		return err
	}
	securityOpts, _ := config.GetList("docker:security-opts")
	var exposedPorts map[docker.Port]struct{}
	if !args.Deploy {
		exposedPorts = map[docker.Port]struct{}{
			docker.Port(port + "/tcp"): {},
		}
	}
	var user string
	if args.Building {
		user = c.user()
	}
	config := docker.Config{
		Image:        args.ImageID,
		Cmd:          args.Commands,
		Entrypoint:   []string{},
		ExposedPorts: exposedPorts,
		AttachStdin:  false,
		AttachStdout: false,
		AttachStderr: false,
		Memory:       args.App.GetMemory(),
		MemorySwap:   args.App.GetMemory() + args.App.GetSwap(),
		CPUShares:    int64(args.App.GetCpuShare()),
		SecurityOpts: securityOpts,
		User:         user,
	}
	c.addEnvsToConfig(args, port, &config)
	opts := docker.CreateContainerOptions{Name: c.Name, Config: &config}
	var nodeList []string
	if len(args.DestinationHosts) > 0 {
		var nodeName string
		nodeName, err = c.hostToNodeAddress(args.Provisioner, args.DestinationHosts[0])
		if err != nil {
			return err
		}
		nodeList = []string{nodeName}
	}
	schedulerOpts := []string{args.App.GetName(), args.ProcessName}
	addr, cont, err := args.Provisioner.Cluster().CreateContainerSchedulerOpts(opts, schedulerOpts, nodeList...)
	if err != nil {
		log.Errorf("error on creating container in docker %s - %s", c.AppName, err)
		return err
	}
	c.ID = cont.ID
	c.HostAddr = net.URLToHost(addr)
	return nil
}
Example #26
func (s *SAMLAuthScheme) callback(params map[string]string) error {
	xml, ok := params["xml"]
	if !ok {
		return ErrMissingFormValueError
	}
	log.Debugf("Data received from identity provider: %s", xml)
	response, err := s.Parser.Parse(xml)
	if err != nil {
		log.Errorf("Got error while parsing IDP data: %s", err)
		return ErrParseResponseError
	}
	sp, err := s.createSP()
	if err != nil {
		return err
	}
	err = validateResponse(response, sp)
	if err != nil {
		log.Errorf("Got error while validing IDP data: %s", err)
		if strings.Contains(err.Error(), "assertion has expired") {
			return ErrRequestNotFound
		}
		return ErrParseResponseError
	}
	requestId, err := getRequestIdFromResponse(response)
	if requestId == "" && err == ErrRequestIdNotFound {
		log.Debugf("Request ID %s not found: %s", requestId, err.Error())
		return err
	}
	req := request{}
	err = req.getById(requestId)
	if err != nil {
		return err
	}
	email, err := getUserIdentity(response)
	if err != nil {
		return err
	}
	if !validation.ValidateEmail(email) {
		if strings.Contains(email, "@") {
			return &tsuruErrors.ValidationError{Message: "attribute user identity contains invalid character"}
		}
		// we need create a unique email for the user
		email = strings.Join([]string{email, "@", s.idpHost()}, "")
		if !validation.ValidateEmail(email) {
			return &tsuruErrors.ValidationError{Message: "could not create valid email with auth:saml:idp-attribute-user-identity"}
		}
	}
	req.Authed = true
	req.Email = email
	req.Update()
	return nil
}
Example #27
// creates a new container in Docker.
func (c *container) create(app provision.App, imageId string, cmds []string, destinationHosts ...string) error {
	port, err := getPort()
	if err != nil {
		log.Errorf("error on getting port for container %s - %s", c.AppName, port)
		return err
	}
	user, _ := config.GetString("docker:ssh:user")
	gitUnitRepo, _ := config.GetString("git:unit-repo")
	sharedMount, _ := config.GetString("docker:sharedfs:mountpoint")
	sharedBasedir, _ := config.GetString("docker:sharedfs:hostdir")
	exposedPorts := map[docker.Port]struct{}{
		docker.Port(port + "/tcp"): {},
		docker.Port("22/tcp"):      {},
	}
	config := docker.Config{
		Image:        imageId,
		Cmd:          cmds,
		User:         user,
		ExposedPorts: exposedPorts,
		AttachStdin:  false,
		AttachStdout: false,
		AttachStderr: false,
		Memory:       int64(app.GetMemory() * 1024 * 1024),
		MemorySwap:   int64(app.GetSwap() * 1024 * 1024),
	}
	config.Env = append(config.Env, fmt.Sprintf("TSURU_APP_DIR=%s", gitUnitRepo))
	if sharedMount != "" && sharedBasedir != "" {
		config.Volumes = map[string]struct{}{
			sharedMount: {},
		}

		config.Env = append(config.Env, fmt.Sprintf("TSURU_SHAREDFS_MOUNTPOINT=%s", sharedMount))
	}
	opts := docker.CreateContainerOptions{Name: c.Name, Config: &config}
	var nodeList []string
	if len(destinationHosts) > 0 {
		nodeName, err := hostToNodeAddress(destinationHosts[0])
		if err != nil {
			return err
		}
		nodeList = []string{nodeName}
	}
	addr, cont, err := dockerCluster().CreateContainerSchedulerOpts(opts, app.GetName(), nodeList...)
	if err != nil {
		log.Errorf("error on creating container in docker %s - %s", c.AppName, err)
		return err
	}
	c.ID = cont.ID
	c.HostAddr = urlToHost(addr)
	c.User = user
	return nil
}
Example #28
func (*dockerProvisioner) Addr(app provision.App) (string, error) {
	r, err := getRouterForApp(app)
	if err != nil {
		log.Errorf("Failed to get router: %s", err)
		return "", err
	}
	addr, err := r.Addr(app.GetName())
	if err != nil {
		log.Errorf("Failed to obtain app %s address: %s", app.GetName(), err)
		return "", err
	}
	return addr, nil
}
Example #29
func injectEnvsAndRestart(a provision.App) {
	time.Sleep(5 * time.Second)
	err := a.SerializeEnvVars()
	if err != nil {
		log.Errorf("Failed to serialize env vars: %s.", err)
	}
	var buf bytes.Buffer
	w := app.LogWriter{App: a, Writer: &buf}
	err = a.Restart(&w)
	if err != nil {
		log.Errorf("Failed to restart app %q (%s): %s.", a.GetName(), err, buf.String())
	}
}
Example #30
// creates a new container in Docker.
func (c *container) create(args runContainerActionsArgs) error {
	port, err := getPort()
	if err != nil {
		log.Errorf("error on getting port for container %s - %s", c.AppName, port)
		return err
	}
	user, err := config.GetString("docker:user")
	if err != nil {
		user, _ = config.GetString("docker:ssh:user")
	}
	securityOpts, _ := config.GetList("docker:security-opts")
	var exposedPorts map[docker.Port]struct{}
	if !args.isDeploy {
		exposedPorts = map[docker.Port]struct{}{
			docker.Port(port + "/tcp"): {},
		}
	}
	config := docker.Config{
		Image:        args.imageID,
		Cmd:          args.commands,
		User:         user,
		ExposedPorts: exposedPorts,
		AttachStdin:  false,
		AttachStdout: false,
		AttachStderr: false,
		Memory:       args.app.GetMemory(),
		MemorySwap:   args.app.GetMemory() + args.app.GetSwap(),
		CPUShares:    int64(args.app.GetCpuShare()),
		SecurityOpts: securityOpts,
	}
	c.addEnvsToConfig(args.app, &config)
	opts := docker.CreateContainerOptions{Name: c.Name, Config: &config}
	var nodeList []string
	if len(args.destinationHosts) > 0 {
		nodeName, err := args.provisioner.hostToNodeAddress(args.destinationHosts[0])
		if err != nil {
			return err
		}
		nodeList = []string{nodeName}
	}
	schedulerOpts := []string{args.app.GetName(), args.processName}
	addr, cont, err := args.provisioner.getCluster().CreateContainerSchedulerOpts(opts, schedulerOpts, nodeList...)
	if err != nil {
		log.Errorf("error on creating container in docker %s - %s", c.AppName, err)
		return err
	}
	c.ID = cont.ID
	c.HostAddr = urlToHost(addr)
	c.User = user
	return nil
}