Example #1
// RecreateContainers relaunches all bs containers in the cluster for the given
// DockerProvisioner, logging progress to the given writer.
//
// It assumes that the given writer is thread safe.
func RecreateContainers(p DockerProvisioner, w io.Writer) error {
	cluster := p.Cluster()
	nodes, err := cluster.UnfilteredNodes()
	if err != nil {
		return err
	}
	errChan := make(chan error, len(nodes))
	wg := sync.WaitGroup{}
	log.Debugf("[bs containers] recreating %d containers", len(nodes))
	for i := range nodes {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			node := &nodes[i]
			pool := node.Metadata["pool"]
			log.Debugf("[bs containers] recreating container in %s [%s]", node.Address, pool)
			fmt.Fprintf(w, "relaunching bs container in the node %s [%s]\n", node.Address, pool)
			err := createContainer(node.Address, pool, p, true)
			if err != nil {
				msg := fmt.Sprintf("[bs containers] failed to create container in %s [%s]: %s", node.Address, pool, err)
				log.Error(msg)
				err = errors.New(msg)
				errChan <- err
			}
		}(i)
	}
	wg.Wait()
	close(errChan)
	return <-errChan
}
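A minimal, self-contained sketch of the same fan-out shape (the work function is hypothetical): the error channel is buffered to the number of goroutines so no send can block, and a single receive from the closed, drained channel yields either the first error or nil.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// fanOut runs work on every item concurrently and returns the first
// collected error, or nil if every call succeeded.
func fanOut(items []string) error {
	errChan := make(chan error, len(items)) // buffered: sends never block
	wg := sync.WaitGroup{}
	for _, item := range items {
		wg.Add(1)
		go func(item string) {
			defer wg.Done()
			if item == "bad" { // stands in for real work
				errChan <- errors.New("failed on " + item)
			}
		}(item)
	}
	wg.Wait()
	close(errChan)
	// Receiving from a closed channel with no buffered values yields nil.
	return <-errChan
}

func main() {
	fmt.Println(fanOut([]string{"a", "bad", "c"})) // failed on bad
}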
Example #2
// chooseNode finds the node with the minimum number
// of containers and returns it
func (segregatedScheduler) chooseNode(nodes []node, contName string) (node, error) {
	var chosenNode node
	hosts := make([]string, len(nodes))
	hostsMap := make(map[string]node)
	// Only the hostname is saved in the docker containers collection,
	// so we need to extract it and map it back to the original node.
	for i, node := range nodes {
		host := urlToHost(node.Address)
		hosts[i] = host
		hostsMap[host] = node
	}
	log.Debugf("[scheduler] Possible nodes for container %s: %#v", contName, hosts)
	hostMutex.Lock()
	defer hostMutex.Unlock()
	countMap, err := aggregateNodesByHost(hosts)
	if err != nil {
		return chosenNode, err
	}
	// Finally finding the host with the minimum number of containers.
	var minHost string
	minCount := math.MaxInt32
	for _, host := range hosts {
		count := countMap[host]
		if count < minCount {
			minCount = count
			minHost = host
		}
	}
	chosenNode = hostsMap[minHost]
	log.Debugf("[scheduler] Chosen node for container %s: %#v Count: %d", contName, chosenNode, minCount)
	coll := collection()
	defer coll.Close()
	err = coll.Update(bson.M{"name": contName}, bson.M{"$set": bson.M{"hostaddr": minHost}})
	return chosenNode, err
}
Example #3
// chooseNode finds the node with the minimum number
// of containers and returns it
func (s *segregatedScheduler) chooseNode(nodes []cluster.Node, contName string, appName, process string) (string, error) {
	var chosenNode string
	hosts, hostsMap := s.nodesToHosts(nodes)
	log.Debugf("[scheduler] Possible nodes for container %s: %#v", contName, hosts)
	s.hostMutex.Lock()
	defer s.hostMutex.Unlock()
	hostCountMap, err := s.aggregateContainersByHost(hosts)
	if err != nil {
		return chosenNode, err
	}
	appCountMap, err := s.aggregateContainersByHostAppProcess(hosts, appName, process)
	if err != nil {
		return chosenNode, err
	}
	// Finally finding the host with the minimum value for
	// the pair [appCount, hostCount]
	var minHost string
	minCount := math.MaxInt32
	for _, host := range hosts {
		adjCount := appCountMap[host]*10000 + hostCountMap[host]
		if adjCount < minCount {
			minCount = adjCount
			minHost = host
		}
	}
	chosenNode = hostsMap[minHost]
	log.Debugf("[scheduler] Chosen node for container %s: %#v Count: %d", contName, chosenNode, minCount)
	if contName != "" {
		coll := s.provisioner.collection()
		defer coll.Close()
		err = coll.Update(bson.M{"name": contName}, bson.M{"$set": bson.M{"hostaddr": minHost}})
	}
	return chosenNode, err
}
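The adjCount expression above is a compact lexicographic comparison of the pair [appCount, hostCount]: as long as no host holds 10000 or more containers, any difference in the app's own count dominates and the total host count only breaks ties. A quick check of that property (hypothetical counts):

package main

import "fmt"

func main() {
	adj := func(appCount, hostCount int) int { return appCount*10000 + hostCount }
	fmt.Println(adj(1, 9999) < adj(2, 0)) // true: appCount always wins first
	fmt.Println(adj(1, 3) < adj(1, 7))    // true: hostCount only breaks ties
}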
Example #4
// Execute executes the pipeline.
//
// The execution starts in the forward phase, calling the Forward function of
// all actions. If none of the Forward calls returns an error, the pipeline
// execution ends in the forward phase and is "committed".
//
// If any of the Forward calls fails, the executor switches to the backward phase
// (roll back) and calls the Backward function for each completed action. It
// does not call the Backward function of the action that failed.
//
// After rolling back all completed actions, it returns the original error
// returned by the action that failed.
func (p *Pipeline) Execute(params ...interface{}) error {
	var (
		r   Result
		err error
	)
	if len(p.actions) == 0 {
		return ErrPipelineNoActions
	}
	fwCtx := FWContext{Params: params}
	for i, a := range p.actions {
		log.Debugf("[pipeline] running the Forward for the %s action", a.Name)
		if a.Forward == nil {
			err = ErrPipelineForwardMissing
		} else if len(fwCtx.Params) < a.MinParams {
			err = ErrPipelineFewParameters
		} else {
			r, err = a.Forward(fwCtx)
			a.rMutex.Lock()
			a.result = r
			a.rMutex.Unlock()
			fwCtx.Previous = r
		}
		if err != nil {
			log.Debugf("[pipeline] error running the Forward for the %s action - %s", a.Name, err)
			if a.OnError != nil {
				a.OnError(fwCtx, err)
			}
			p.rollback(i-1, params)
			return err
		}
	}
	return nil
}
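A hypothetical usage sketch, assuming the action package's NewPipeline constructor and an Action type with the Name, Forward, Backward, and MinParams fields that Execute reads above; the Backward signature and the BWContext.FWResult field are assumptions here, not confirmed by the snippet.

// Assumed imports: errors, fmt, io/ioutil, os, and the action package.
createFile := action.Action{
	Name: "create-file",
	Forward: func(ctx action.FWContext) (action.Result, error) {
		name := ctx.Params[0].(string)
		return name, ioutil.WriteFile(name, []byte{}, 0644)
	},
	Backward: func(ctx action.BWContext) {
		os.Remove(ctx.FWResult.(string)) // undo the forward step on rollback
	},
	MinParams: 1,
}
failAlways := action.Action{
	Name: "fail-always",
	Forward: func(ctx action.FWContext) (action.Result, error) {
		return nil, errors.New("boom") // forces the backward phase
	},
}
pipeline := action.NewPipeline(&createFile, &failAlways)
err := pipeline.Execute("/tmp/example")
fmt.Println(err) // boom; create-file's Backward already removed the file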
Example #5
// chooseContainerFromMaxContainersCountInNode finds the node with the maximum
// number of containers and returns the ID of a container running on it
func (s *segregatedScheduler) chooseContainerFromMaxContainersCountInNode(nodes []cluster.Node, appName, process string) (string, error) {
	hosts, hostsMap := s.nodesToHosts(nodes)
	log.Debugf("[scheduler] Possible nodes for removing a container: %#v", hosts)
	s.hostMutex.Lock()
	defer s.hostMutex.Unlock()
	hostCountMap, err := s.aggregateContainersByHost(hosts)
	if err != nil {
		return "", err
	}
	appCountMap, err := s.aggregateContainersByHostAppProcess(hosts, appName, process)
	if err != nil {
		return "", err
	}
	// Finally finding the host with the maximum value for
	// the pair [appCount, hostCount]
	var maxHost string
	maxCount := 0
	for _, host := range hosts {
		adjCount := appCountMap[host]*10000 + hostCountMap[host]
		if adjCount > maxCount {
			maxCount = adjCount
			maxHost = host
		}
	}
	chosenNode := hostsMap[maxHost]
	log.Debugf("[scheduler] Chosen node for removing a container: %#v Count: %d", chosenNode, hostCountMap[maxHost])
	containerID, err := s.getContainerFromHost(maxHost, appName, process)
	if err != nil {
		return "", err
	}
	return containerID, nil
}
Example #6
func (b beanstalkdFactory) Handler(f func(*Message), names ...string) (Handler, error) {
	name := "default"
	if len(names) > 0 {
		name = names[0]
	}
	return &executor{
		inner: func() {
			if message, err := get(5e9, names...); err == nil {
				log.Debugf("Dispatching %q message to handler function.", message.Action)
				go func(m *Message) {
					f(m)
					if m.fail {
						q := beanstalkdQ{name: name}
						q.Put(m, 0)
					}
				}(message)
			} else {
				log.Debugf("Failed to get message from the queue: %s. Trying again...", err)
				if e, ok := err.(*net.OpError); ok && e.Op == "dial" {
					time.Sleep(5e9)
				}
			}
		},
	}, nil
}
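The bare 5e9 above leans on Go's untyped constants: in a time.Duration context it means 5e9 nanoseconds, i.e. five seconds. Writing 5*time.Second is clearer; a quick check that they agree:

package main

import (
	"fmt"
	"time"
)

func main() {
	// An untyped constant used as a time.Duration counts nanoseconds.
	fmt.Println(time.Duration(5e9) == 5*time.Second) // true
}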
Example #7
func ensureContainersStarted(p DockerProvisioner, w io.Writer, relaunch bool, names []string, nodes ...cluster.Node) error {
	if w == nil {
		w = ioutil.Discard
	}
	var err error
	if len(names) == 0 {
		names, err = nodecontainer.AllNodeContainersNames()
		if err != nil {
			return err
		}
	}
	if len(nodes) == 0 {
		nodes, err = p.Cluster().UnfilteredNodes()
		if err != nil {
			return err
		}
	}
	errChan := make(chan error, len(nodes)*len(names))
	wg := sync.WaitGroup{}
	log.Debugf("[node containers] recreating %d containers", len(nodes)*len(names))
	recreateContainer := func(node *cluster.Node, confName string) {
		defer wg.Done()
		pool := node.Metadata["pool"]
		containerConfig, confErr := nodecontainer.LoadNodeContainer(pool, confName)
		if confErr != nil {
			errChan <- confErr
			return
		}
		if !containerConfig.Valid() {
			return
		}
		log.Debugf("[node containers] recreating container %q in %s [%s]", confName, node.Address, pool)
		fmt.Fprintf(w, "relaunching node container %q in the node %s [%s]\n", confName, node.Address, pool)
		confErr = create(containerConfig, node, pool, p, relaunch)
		if confErr != nil {
			confErr = errors.Wrapf(confErr, "[node containers] failed to create container in %s [%s]", node.Address, pool)
			errChan <- log.WrapError(confErr)
		}
	}
	for i := range nodes {
		wg.Add(1)
		go func(node *cluster.Node) {
			defer wg.Done()
			for j := range names {
				wg.Add(1)
				go recreateContainer(node, names[j])
			}
		}(&nodes[i])
	}
	wg.Wait()
	close(errChan)
	var allErrors []error
	for err = range errChan {
		allErrors = append(allErrors, err)
	}
	if len(allErrors) == 0 {
		return nil
	}
	return tsuruErrors.NewMultiError(allErrors...)
}
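The nested WaitGroup above is safe because every wg.Add executes before the goroutine that will call the matching wg.Done is spawned: the outer goroutine registers all inner goroutines before its own deferred Done runs, so the counter can never hit zero while work remains. A stripped-down sketch of the same shape with hypothetical data:

package main

import (
	"fmt"
	"sync"
)

func main() {
	nodes := []string{"n1", "n2"}
	names := []string{"a", "b"}
	results := make(chan string, len(nodes)*len(names))
	wg := sync.WaitGroup{}
	for _, node := range nodes {
		wg.Add(1) // registered before the goroutine starts
		go func(node string) {
			defer wg.Done()
			for _, name := range names {
				wg.Add(1) // runs before this goroutine's deferred Done
				go func(name string) {
					defer wg.Done()
					results <- node + "/" + name
				}(name)
			}
		}(node)
	}
	wg.Wait()
	close(results)
	for r := range results {
		fmt.Println(r)
	}
}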
Example #8
func (factory redismqQFactory) Handler(f func(*Message), names ...string) (Handler, error) {
	name := "default"
	if len(names) > 0 {
		name = names[0]
	}
	consumerName := fmt.Sprintf("handler-%d", time.Now().UnixNano())
	queue, err := factory.get(name, consumerName)
	if err != nil {
		return nil, err
	}
	return &executor{
		inner: func() {
			if message, err := queue.Get(5e9); err == nil {
				log.Debugf("Dispatching %q message to handler function.", message.Action)
				go func(m *Message) {
					f(m)
					if m.fail {
						queue.Put(m, 0)
					}
				}(message)
			} else {
				log.Debugf("Failed to get message from the queue: %s. Trying again...", err)
				if e, ok := err.(*net.OpError); ok && e.Op == "dial" {
					time.Sleep(5e9)
				}
			}
		},
	}, nil
}
Example #9
// Execute executes the pipeline.
//
// The execution starts in the forward phase, calling the Forward function of
// all actions. If none of the Forward calls returns an error, the pipeline
// execution ends in the forward phase and is "committed".
//
// If any of the Forward calls fails, the executor switches to the backward phase
// (roll back) and calls the Backward function for each completed action. It
// does not call the Backward function of the action that failed.
//
// After rolling back all completed actions, it returns the original error
// returned by the action that failed.
func (p *Pipeline) Execute(params ...interface{}) error {
	var (
		r   Result
		err error
	)
	if len(p.actions) == 0 {
		return errors.New("No actions to execute.")
	}
	fwCtx := FWContext{Params: params}
	for i, a := range p.actions {
		log.Debugf("[pipeline] running the Forward for the %s action", a.Name)
		if a.Forward == nil {
			err = errors.New("All actions must define the forward function.")
		} else if len(fwCtx.Params) < a.MinParams {
			err = errors.New("Not enough parameters to call Action.Forward.")
		} else {
			r, err = a.Forward(fwCtx)
			a.rMutex.Lock()
			a.result = r
			a.rMutex.Unlock()
			fwCtx.Previous = r
		}
		if err != nil {
			log.Debugf("[pipeline] error running the Forward for the %s action - %s", a.Name, err)
			if a.OnError != nil {
				a.OnError(fwCtx, err)
			}
			p.rollback(i-1, params)
			return err
		}
	}
	return nil
}
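Example #4 above is the later revision of this same function: the inline errors.New calls were lifted into package-level sentinel errors (ErrPipelineNoActions, ErrPipelineForwardMissing, ErrPipelineFewParameters), letting callers test for a specific failure instead of matching message strings. A minimal sketch of that refactor:

package main

import (
	"errors"
	"fmt"
)

// A sentinel error: created once at package level, compared by identity.
var ErrPipelineNoActions = errors.New("no actions to execute")

func execute(actions []string) error {
	if len(actions) == 0 {
		return ErrPipelineNoActions
	}
	return nil
}

func main() {
	err := execute(nil)
	fmt.Println(errors.Is(err, ErrPipelineNoActions)) // true: callers can branch on it
}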
Example #10
func RemoveNamedContainers(p DockerProvisioner, w io.Writer, name string, pool string) error {
	var nodes []cluster.Node
	var err error
	if pool == "" {
		nodes, err = p.Cluster().UnfilteredNodes()
	} else {
		nodes, err = p.Cluster().UnfilteredNodesForMetadata(map[string]string{"pool": pool})
	}
	if err != nil {
		return errors.WithStack(err)
	}
	errChan := make(chan error, len(nodes))
	wg := sync.WaitGroup{}
	removeContainer := func(node *cluster.Node) {
		pool := node.Metadata["pool"]
		client, err := node.Client()
		if err != nil {
			errChan <- err
			return
		}
		err = client.StopContainer(name, 10)
		if err != nil {
			if _, ok := err.(*docker.NoSuchContainer); ok {
				log.Debugf("[node containers] no such container %q in %s [%s]", name, node.Address, pool)
				fmt.Fprintf(w, "no such node container %q in the node %s [%s]\n", name, node.Address, pool)
				return
			}
			if _, ok := err.(*docker.ContainerNotRunning); !ok {
				err = errors.Wrapf(err, "[node containers] failed to stop container in %s [%s]", node.Address, pool)
				errChan <- err
				return
			}
		}
		log.Debugf("[node containers] removing container %q in %s [%s]", name, node.Address, pool)
		fmt.Fprintf(w, "removing node container %q in the node %s [%s]\n", name, node.Address, pool)
		err = client.RemoveContainer(docker.RemoveContainerOptions{ID: name, Force: true})
		if err != nil {
			err = errors.Wrapf(err, "[node containers] failed to remove container in %s [%s]", node.Address, pool)
			errChan <- err
		}
	}
	for i := range nodes {
		wg.Add(1)
		go func(node *cluster.Node) {
			removeContainer(node)
			wg.Done()
		}(&nodes[i])
	}
	wg.Wait()
	close(errChan)
	var allErrors []error
	for err := range errChan {
		allErrors = append(allErrors, err)
	}
	if len(allErrors) == 0 {
		return nil
	}
	return tsuruErrors.NewMultiError(allErrors...)
}
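The err.(*docker.NoSuchContainer) and err.(*docker.ContainerNotRunning) assertions above separate expected Docker conditions from real failures. A self-contained sketch of the pattern, with a hypothetical error type standing in for the go-dockerclient ones:

package main

import "fmt"

// NoSuchContainer is a hypothetical stand-in for docker.NoSuchContainer.
type NoSuchContainer struct{ ID string }

func (e *NoSuchContainer) Error() string { return "no such container: " + e.ID }

func stopContainer(id string) error { return &NoSuchContainer{ID: id} }

func main() {
	err := stopContainer("abc123")
	// A type assertion singles out the benign case; anything else is fatal.
	if _, ok := err.(*NoSuchContainer); ok {
		fmt.Println("already gone, nothing to stop")
		return
	}
	if err != nil {
		fmt.Println("real failure:", err)
	}
}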
Example #11
func ensureContainersStarted(p DockerProvisioner, w io.Writer, relaunch bool, nodes ...cluster.Node) error {
	if w == nil {
		w = ioutil.Discard
	}
	confNames, err := scopedconfig.FindAllScopedConfigNames(nodeContainerCollection)
	if err != nil {
		return err
	}
	if len(nodes) == 0 {
		nodes, err = p.Cluster().UnfilteredNodes()
		if err != nil {
			return err
		}
	}
	errChan := make(chan error, len(nodes))
	wg := sync.WaitGroup{}
	log.Debugf("[node containers] recreating %d containers", len(nodes))
	recreateContainer := func(node *cluster.Node, confName string) {
		defer wg.Done()
		pool := node.Metadata["pool"]
		containerConfig, confErr := LoadNodeContainer(pool, confName)
		if confErr != nil {
			errChan <- confErr
			return
		}
		log.Debugf("[node containers] recreating container %q in %s [%s]", confName, node.Address, pool)
		fmt.Fprintf(w, "relaunching node container %q in the node %s [%s]\n", confName, node.Address, pool)
		confErr = containerConfig.create(node.Address, pool, p, relaunch)
		if confErr != nil {
			msg := fmt.Sprintf("[node containers] failed to create container in %s [%s]: %s", node.Address, pool, confErr)
			log.Error(msg)
			errChan <- errors.New(msg)
		}
	}
	for i := range nodes {
		wg.Add(1)
		go func(node *cluster.Node) {
			defer wg.Done()
			for j := range confNames {
				wg.Add(1)
				go recreateContainer(node, confNames[j])
			}
		}(&nodes[i])
	}
	wg.Wait()
	close(errChan)
	var allErrors []string
	for err = range errChan {
		allErrors = append(allErrors, err.Error())
	}
	if len(allErrors) == 0 {
		return nil
	}
	return fmt.Errorf("multiple errors: %s", strings.Join(allErrors, ", "))
}
Example #12
func (s *SAMLAuthScheme) callback(params map[string]string) error {
	xml, ok := params["xml"]
	if !ok {
		return ErrMissingFormValueError
	}
	log.Debugf("Data received from identity provider: %s", xml)
	response, err := s.Parser.Parse(xml)
	if err != nil {
		log.Errorf("Got error while parsing IDP data: %s", err)
		return ErrParseResponseError
	}
	sp, err := s.createSP()
	if err != nil {
		return err
	}
	err = validateResponse(response, sp)
	if err != nil {
		log.Errorf("Got error while validating IDP data: %s", err)
		if strings.Contains(err.Error(), "assertion has expired") {
			return ErrRequestNotFound
		}
		return ErrParseResponseError
	}
	requestId, err := getRequestIdFromResponse(response)
	if requestId == "" && err == ErrRequestIdNotFound {
		log.Debugf("Request ID %s not found: %s", requestId, err.Error())
		return err
	}
	req := request{}
	err = req.getById(requestId)
	if err != nil {
		return err
	}
	email, err := getUserIdentity(response)
	if err != nil {
		return err
	}
	if !validation.ValidateEmail(email) {
		if strings.Contains(email, "@") {
			return &tsuruErrors.ValidationError{Message: "attribute user identity contains invalid character"}
		}
		// we need to create a unique email for the user
		email = strings.Join([]string{email, "@", s.idpHost()}, "")
		if !validation.ValidateEmail(email) {
			return &tsuruErrors.ValidationError{Message: "could not create valid email with auth:saml:idp-attribute-user-identity"}
		}
	}
	req.Authed = true
	req.Email = email
	req.Update()
	return nil
}
Example #13
// commit commits an image in docker based on the container
// and returns the image repository.
func (c *container) commit() (string, error) {
	log.Debugf("committing container %s", c.ID)
	repository := assembleImageName(c.AppName)
	opts := docker.CommitContainerOptions{Container: c.ID, Repository: repository}
	image, err := dockerCluster().CommitContainer(opts)
	if err != nil {
		log.Errorf("Could not commit docker image: %s", err)
		return "", err
	}
	log.Debugf("image %s generated from container %s", image.ID, c.ID)
	pushImage(repository)
	return repository, nil
}
Example #14
func (h *NodeHealer) HandleError(node *cluster.Node) time.Duration {
	h.Lock()
	if h.locks[node.Address] == nil {
		h.locks[node.Address] = &sync.Mutex{}
	}
	h.Unlock()
	h.locks[node.Address].Lock()
	defer h.locks[node.Address].Unlock()
	failures := node.FailureCount()
	if failures < h.failuresBeforeHealing {
		log.Debugf("%d failures detected in node %q, waiting for more failures before healing.", failures, node.Address)
		return h.disabledTime
	}
	if !node.HasSuccess() {
		log.Debugf("Node %q has never been successfully reached, healing won't run on it.", node.Address)
		return h.disabledTime
	}
	_, hasIaas := node.Metadata["iaas"]
	if !hasIaas {
		log.Debugf("Node %q doesn't have IaaS information, healing won't run on it.", node.Address)
		return h.disabledTime
	}
	healingCounter, err := healingCountFor("node", node.Address, consecutiveHealingsTimeframe)
	if err != nil {
		log.Errorf("Node healing: couldn't verify number of previous healings for %s: %s", node.Address, err.Error())
		return h.disabledTime
	}
	if healingCounter > consecutiveHealingsLimitInTimeframe {
		log.Errorf("Node healing: number of healings for node %s in the last %d minutes exceeds limit of %d: %d",
			node.Address, consecutiveHealingsTimeframe/time.Minute, consecutiveHealingsLimitInTimeframe, healingCounter)
		return h.disabledTime
	}
	log.Errorf("Initiating healing process for node %q after %d failures.", node.Address, failures)
	evt, err := NewHealingEvent(*node)
	if err != nil {
		log.Errorf("Error trying to insert healing event: %s", err.Error())
		return h.disabledTime
	}
	createdNode, err := h.healNode(node)
	if err != nil {
		log.Errorf("Error healing: %s", err.Error())
	}
	err = evt.Update(createdNode, err)
	if err != nil {
		log.Errorf("Error trying to update healing event: %s", err.Error())
	}
	if createdNode.Address != "" {
		return 0
	}
	return h.disabledTime
}
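HandleError serializes healing per node through a lazily created mutex per address, while the healer's own lock only guards the short map lookup. A condensed sketch of this per-key locking pattern:

package main

import (
	"fmt"
	"sync"
)

type keyedLocker struct {
	sync.Mutex                        // guards the locks map itself
	locks map[string]*sync.Mutex // one mutex per key, created lazily
}

// lockFor returns the per-key mutex, creating it on first use. Only the
// map access runs under the outer lock, so work on one key never blocks
// work on another.
func (k *keyedLocker) lockFor(key string) *sync.Mutex {
	k.Lock()
	defer k.Unlock()
	if k.locks[key] == nil {
		k.locks[key] = &sync.Mutex{}
	}
	return k.locks[key]
}

func main() {
	kl := &keyedLocker{locks: map[string]*sync.Mutex{}}
	m := kl.lockFor("10.0.0.1:2375")
	m.Lock() // serializes healing for this node only
	fmt.Println("healing 10.0.0.1 exclusively")
	m.Unlock()
}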
Example #15
// Heal iterates through all juju machines, checking whether the
// juju-machine-agent is down and healing those machines.
func (h instanceMachineHealer) Heal() error {
	p := JujuProvisioner{}
	output, _ := p.getOutput()
	for _, machine := range output.Machines {
		if machine.AgentState == "down" {
			log.Debugf("Healing juju-machine-agent in machine %s", machine.InstanceID)
			upStartCmd("stop", "juju-machine-agent", machine.IPAddress)
			upStartCmd("start", "juju-machine-agent", machine.IPAddress)
		} else {
			log.Debugf("juju-machine-agent for machine %s needs no cure, skipping...", machine.InstanceID)
		}
	}
	return nil
}
Example #16
func (h *NodeHealer) tryHealingNode(node *cluster.Node, reason string, extra interface{}) error {
	_, hasIaas := node.Metadata["iaas"]
	if !hasIaas {
		log.Debugf("node %q doesn't have IaaS information, healing (%s) won't run on it.", node.Address, reason)
		return nil
	}
	evt, err := NewHealingEventWithReason(*node, reason, extra)
	if err != nil {
		if mgo.IsDup(err) {
			// Healing in progress.
			return nil
		}
		return fmt.Errorf("error trying to insert healing event: %s", err.Error())
	}
	var createdNode cluster.Node
	var evtErr error
	defer func() {
		var created interface{}
		if createdNode.Address != "" {
			created = createdNode
		}
		updateErr := evt.Update(created, evtErr)
		if updateErr != nil {
			log.Errorf("error trying to update healing event: %s", updateErr.Error())
		}
	}()
	_, err = h.provisioner.Cluster().GetNode(node.Address)
	if err != nil {
		if err == clusterStorage.ErrNoSuchNode {
			return nil
		}
		evtErr = fmt.Errorf("unable to check if node still exists: %s", err)
		return evtErr
	}
	healingCounter, err := healingCountFor("node", node.Address, consecutiveHealingsTimeframe)
	if err != nil {
		evtErr = fmt.Errorf("couldn't verify number of previous healings for %s: %s", node.Address, err)
		return evtErr
	}
	if healingCounter > consecutiveHealingsLimitInTimeframe {
		log.Debugf("number of healings for node %s in the last %d minutes exceeds limit of %d: %d",
			node.Address, consecutiveHealingsTimeframe/time.Minute, consecutiveHealingsLimitInTimeframe, healingCounter)
		return nil
	}
	log.Errorf("initiating healing process for node %q due to: %s", node.Address, reason)
	createdNode, evtErr = h.healNode(node)
	return evtErr
}
Example #17
func (c *Client) BindUnit(instance *ServiceInstance, app bind.App, unit bind.Unit) error {
	log.Debugf("Calling bind of instance %q and %q unit at %q API",
		instance.Name, unit.GetIp(), instance.ServiceName)
	var resp *http.Response
	params := map[string][]string{
		"app-host":  {app.GetIp()},
		"unit-host": {unit.GetIp()},
	}
	resp, err := c.issueRequest("/resources/"+instance.GetIdentifier()+"/bind", "POST", params)
	if err != nil {
		// Note: the empty pattern matches any error string, so every request
		// error here is reported as the service API being down.
		if m, _ := regexp.MatchString("", err.Error()); m {
			return fmt.Errorf("%s api is down.", instance.Name)
		}
		return err
	}
	if resp.StatusCode == http.StatusPreconditionFailed {
		return &errors.HTTP{Code: resp.StatusCode, Message: "You cannot bind any app to this service instance because it is not ready yet."}
	}
	if resp.StatusCode > 299 {
		msg := fmt.Sprintf("Failed to bind the instance %q to the unit %q: %s", instance.Name, unit.GetIp(), c.buildErrorMessage(err, resp))
		log.Error(msg)
		return &errors.HTTP{Code: http.StatusInternalServerError, Message: msg}
	}
	return nil
}
Example #18
func collectUnit(container container, units chan<- provision.Unit, wg *sync.WaitGroup) {
	defer wg.Done()
	unit := provision.Unit{
		Name:    container.ID,
		AppName: container.AppName,
		Type:    container.Type,
	}
	if container.Status == "error" {
		unit.Status = provision.StatusDown
		units <- unit
		return
	}
	if container.Status == "running" {
		unit.Ip = container.HostAddr
		ip, hostPort, err := container.networkInfo()
		if err == nil &&
			(hostPort != container.HostPort || ip != container.IP) {
			err = fixContainer(&container, ip, hostPort)
			if err != nil {
				log.Errorf("error fixing host/port for [container %s]", container.ID)
				return
			}
		}
		addr := strings.Replace(container.getAddress(), "http://", "", 1)
		conn, err := net.Dial("tcp", addr)
		if err != nil {
			unit.Status = provision.StatusUnreachable
		} else {
			conn.Close()
			unit.Status = provision.StatusStarted
		}
		log.Debugf("collected data for [container %s] - [app %s]", container.ID, container.AppName)
		units <- unit
	}
}
Example #19
func (s *SAMLAuthScheme) Parse(xml string) (*saml.Response, error) {
	if xml == "" {
		return nil, ErrMissingFormValueError
	}
	var response *saml.Response
	var err error
	if !s.BaseConfig.DeflatEncodedResponse {
		response, err = saml.ParseEncodedResponse(xml)
	} else {
		response, err = saml.ParseCompressedEncodedResponse(xml)
	}
	if err != nil || response == nil {
		return nil, errors.Wrapf(err, "unable to parse identity provider data: %s", xml)
	}
	sp, err := s.createSP()
	if err != nil {
		return nil, errors.Wrap(err, "unable to create service provider object")
	}
	if response.IsEncrypted() {
		if err = response.Decrypt(sp.PrivateKeyPath); err != nil {
			respData, _ := response.String()
			return nil, errors.Wrapf(err, "unable to decrypt identity provider data: %s", respData)
		}
	}
	resp, _ := response.String()
	log.Debugf("Data received from identity provider decoded: %s", resp)
	return response, nil
}
Example #20
// New creates a representation of a git repository. It creates a Git
// repository using the "bare-dir" setting and saves the repository's metadata in
// the database.
func New(name string, users []string, isPublic bool) (*Repository, error) {
	log.Debugf("Creating repository %q", name)
	r := &Repository{Name: name, Users: users, IsPublic: isPublic}
	if v, err := r.isValid(); !v {
		log.Errorf("repository.New: Invalid repository %q: %s", name, err)
		return r, err
	}
	if err := newBare(name); err != nil {
		log.Errorf("repository.New: Error creating bare repository for %q: %s", name, err)
		return r, err
	}
	barePath := barePath(name)
	if barePath != "" && isPublic {
		// Mark the repository as exported so git-daemon will serve it.
		if f, err := fs.Filesystem().Create(barePath + "/git-daemon-export-ok"); err == nil {
			f.Close()
		}
	}
	conn, err := db.Conn()
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	err = conn.Repository().Insert(r)
	if mgo.IsDup(err) {
		log.Errorf("repository.New: Duplicate repository %q", name)
		return r, fmt.Errorf("A repository with this name already exists.")
	}
	return r, err
}
Example #21
func (c *Client) Create(instance *ServiceInstance, user, requestID string) error {
	var err error
	var resp *http.Response
	params := map[string][]string{
		"name":      {instance.Name},
		"user":      {user},
		"team":      {instance.TeamOwner},
		"requestID": {requestID},
	}
	if instance.PlanName != "" {
		params["plan"] = []string{instance.PlanName}
	}
	if instance.Description != "" {
		params["description"] = []string{instance.Description}
	}
	log.Debugf("Attempting to call creation of service instance for %q, params: %#v", instance.ServiceName, params)
	resp, err = c.issueRequest("/resources", "POST", params)
	if err == nil {
		defer resp.Body.Close()
		if resp.StatusCode < 300 {
			return nil
		}
		if resp.StatusCode == http.StatusConflict {
			return ErrInstanceAlreadyExistsInAPI
		}
	}
	err = errors.Wrapf(c.buildErrorMessage(err, resp), "Failed to create the instance %s", instance.Name)
	return log.WrapError(err)
}
Example #22
// chooseNodeToAdd finds the node with the minimum number of containers
// and returns it
func (s *segregatedScheduler) chooseNodeToAdd(nodes []cluster.Node, contName string, appName, process string) (string, error) {
	log.Debugf("[scheduler] Possible nodes for container %s: %#v", contName, nodes)
	s.hostMutex.Lock()
	defer s.hostMutex.Unlock()
	chosenNode, _, err := s.minMaxNodes(nodes, appName, process)
	if err != nil {
		return "", err
	}
	log.Debugf("[scheduler] Chosen node for container %s: %#v", contName, chosenNode)
	if contName != "" {
		coll := s.provisioner.Collection()
		defer coll.Close()
		err = coll.Update(bson.M{"name": contName}, bson.M{"$set": bson.M{"hostaddr": net.URLToHost(chosenNode)}})
	}
	return chosenNode, err
}
Example #23
// New creates a representation of a git repository. It creates a Git
// repository using the "bare-dir" setting and saves the repository's metadata in
// the database.
func New(name string, users, readOnlyUsers []string, isPublic bool) (*Repository, error) {
	log.Debugf("Creating repository %q", name)
	r := &Repository{Name: name, Users: users, ReadOnlyUsers: readOnlyUsers, IsPublic: isPublic}
	if v, err := r.isValid(); !v {
		log.Errorf("repository.New: Invalid repository %q: %s", name, err)
		return r, err
	}
	conn, err := db.Conn()
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	err = conn.Repository().Insert(r)
	if err != nil {
		if mgo.IsDup(err) {
			return nil, ErrRepositoryAlreadyExists
		}
		return nil, err
	}
	if err = newBare(name); err != nil {
		log.Errorf("repository.New: Error creating bare repository for %q: %s", name, err)
		conn.Repository().Remove(bson.M{"_id": r.Name})
		return r, err
	}
	barePath := barePath(name)
	if barePath != "" && isPublic {
		if f, createErr := fs.Filesystem().Create(barePath + "/git-daemon-export-ok"); createErr == nil {
			f.Close()
		}
	}
	return r, nil
}
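Compared with Example #20, this revision inserts the database record first and compensates when newBare fails, removing the record before returning so no half-created entry survives. A minimal sketch of that create-then-compensate shape (the store and build step are hypothetical):

package main

import (
	"errors"
	"fmt"
)

var store = map[string]bool{} // stands in for the repositories collection

func insertRecord(name string) error { store[name] = true; return nil }
func removeRecord(name string)       { delete(store, name) }
func buildBareDir(name string) error { return errors.New("disk full") } // always fails here

// create registers the record first and undoes the registration when the
// follow-up step fails.
func create(name string) error {
	if err := insertRecord(name); err != nil {
		return err
	}
	if err := buildBareDir(name); err != nil {
		removeRecord(name) // compensating action, like conn.Repository().Remove above
		return err
	}
	return nil
}

func main() {
	fmt.Println(create("myrepo"), store["myrepo"]) // disk full false
}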
Example #24
func (h elbInstanceHealer) checkInstances(names []string) ([]elbInstance, error) {
	if elbSupport, _ := config.GetBool("juju:use-elb"); !elbSupport {
		return nil, nil
	}
	lbs, err := h.describeLoadBalancers(names)
	if err != nil {
		return nil, err
	}
	var unhealthy []elbInstance
	description := "Instance has failed at least the UnhealthyThreshold number of health checks consecutively."
	state := "OutOfService"
	reasonCode := "Instance"
	for _, lb := range lbs {
		instances, err := h.describeInstancesHealth(lb)
		if err != nil {
			return nil, err
		}
		for _, instance := range instances {
			if instance.description == description &&
				instance.state == state &&
				instance.reasonCode == reasonCode {
				unhealthy = append(unhealthy, instance)
			}
		}
	}
	log.Debugf("Found %d unhealthy instances.", len(unhealthy))
	return unhealthy, nil
}
Example #25
func (c *Client) BindApp(instance *ServiceInstance, app bind.App) (map[string]string, error) {
	log.Debugf("Calling bind of instance %q and %q app at %q API",
		instance.Name, app.GetName(), instance.ServiceName)
	var resp *http.Response
	params := map[string][]string{
		"app-host": {app.GetIp()},
	}
	resp, err := c.issueRequest("/resources/"+instance.GetIdentifier()+"/bind-app", "POST", params)
	if resp != nil && resp.StatusCode == http.StatusNotFound {
		resp, err = c.issueRequest("/resources/"+instance.GetIdentifier()+"/bind", "POST", params)
	}
	if err != nil {
		// Note: the empty pattern matches any error string, so every request
		// error here is reported as the service API being down.
		if m, _ := regexp.MatchString("", err.Error()); m {
			return nil, fmt.Errorf("%s api is down.", instance.Name)
		}
		return nil, err
	}
	if err == nil && resp.StatusCode < 300 {
		var result map[string]string
		err = c.jsonFromResponse(resp, &result)
		if err != nil {
			return nil, err
		}
		return result, nil
	}
	if resp.StatusCode == http.StatusPreconditionFailed {
		return nil, &errors.HTTP{Code: resp.StatusCode, Message: "You cannot bind any app to this service instance because it is not ready yet."}
	}
	msg := fmt.Sprintf("Failed to bind the instance %q to the app %q: %s", instance.Name, app.GetName(), c.buildErrorMessage(err, resp))
	log.Error(msg)
	return nil, &errors.HTTP{Code: http.StatusInternalServerError, Message: msg}
}
Example #26
func notify(appName string, messages []interface{}) {
	factory, err := queue.Factory()
	if err != nil {
		log.Errorf("Error on logs notify: %s", err.Error())
		return
	}
	q, err := factory.Get(logQueueName(appName))
	if err != nil {
		log.Errorf("Error on logs notify: %s", err.Error())
		return
	}
	pubSubQ, ok := q.(queue.PubSubQ)
	if !ok {
		log.Debugf("Queue does not support pubsub, logs only in database.")
		return
	}
	for _, msg := range messages {
		bytes, err := json.Marshal(msg)
		if err != nil {
			log.Errorf("Error on logs notify: %s", err.Error())
			continue
		}
		err = pubSubQ.Pub(bytes)
		if err != nil {
			log.Errorf("Error on logs notify: %s", err.Error())
		}
	}
}
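The q.(queue.PubSubQ) assertion above is Go's optional-interface pattern: the base queue interface stays small and callers probe at runtime for extra capabilities. A self-contained sketch:

package main

import "fmt"

// Queue is the small base interface every backend implements.
type Queue interface {
	Put(msg string)
}

// PubSubQ is an optional capability that only some backends provide.
type PubSubQ interface {
	Pub(msg string) error
}

type plainQ struct{}

func (plainQ) Put(string) {}

type pubsubQ struct{}

func (pubsubQ) Put(string) {}
func (pubsubQ) Pub(m string) error {
	fmt.Println("published:", m)
	return nil
}

func notify(q Queue, msg string) {
	ps, ok := q.(PubSubQ) // probe for the extra capability at runtime
	if !ok {
		fmt.Println("queue does not support pubsub, skipping")
		return
	}
	ps.Pub(msg)
}

func main() {
	notify(plainQ{}, "hello")  // skipped
	notify(pubsubQ{}, "hello") // published: hello
}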
Example #27
func (c *Client) BindUnit(instance *ServiceInstance, app bind.App, unit bind.Unit) error {
	log.Debugf("Calling bind of instance %q and %q unit at %q API",
		instance.Name, unit.GetIp(), instance.ServiceName)
	var resp *http.Response
	params := map[string][]string{
		"app-host":  {app.GetIp()},
		"unit-host": {unit.GetIp()},
	}
	resp, err := c.issueRequest("/resources/"+instance.GetIdentifier()+"/bind", "POST", params)
	if err != nil {
		return log.WrapError(errors.Wrapf(err, `Failed to bind the instance "%s/%s" to the unit %q`, instance.ServiceName, instance.Name, unit.GetIp()))
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusPreconditionFailed:
		return ErrInstanceNotReady
	case http.StatusNotFound:
		return ErrInstanceNotFoundInAPI
	}
	if resp.StatusCode > 299 {
		err = errors.Wrapf(c.buildErrorMessage(err, resp), `Failed to bind the instance "%s/%s" to the unit %q`, instance.ServiceName, instance.Name, unit.GetIp())
		return log.WrapError(err)
	}
	return nil
}
Example #28
// Rename renames a repository.
func Rename(oldName, newName string) error {
	log.Debugf("Renaming repository %q to %q", oldName, newName)
	repo, err := Get(oldName)
	if err != nil {
		log.Errorf("repository.Rename: Repository %q not found: %s", oldName, err)
		return err
	}
	newRepo := repo
	newRepo.Name = newName
	conn, err := db.Conn()
	if err != nil {
		return err
	}
	defer conn.Close()
	err = conn.Repository().Insert(newRepo)
	if err != nil {
		log.Errorf("repository.Rename: Error adding new repository %q: %s", newName, err)
		return err
	}
	err = conn.Repository().RemoveId(oldName)
	if err != nil {
		log.Errorf("repository.Rename: Error removing old repository %q: %s", oldName, err)
		return err
	}
	return fs.Filesystem().Rename(barePath(oldName), barePath(newName))
}
Example #29
func (a *autoScaleConfig) logDebug(msg string, params ...interface{}) {
	msg = fmt.Sprintf("[node autoscale] %s", msg)
	if a.writer != nil {
		fmt.Fprintf(a.writer, msg+"\n", params...)
	}
	log.Debugf(msg, params...)
}
Example #30
func (c *Client) Status(instance *ServiceInstance, requestID string) (string, error) {
	log.Debugf("Attempting to call status of service instance %q at %q api", instance.Name, instance.ServiceName)
	var (
		resp *http.Response
		err  error
	)
	url := "/resources/" + instance.GetIdentifier() + "/status"
	params := map[string][]string{
		"requestID": {requestID},
	}
	if resp, err = c.issueRequest(url, "GET", params); err == nil {
		defer resp.Body.Close()
		switch resp.StatusCode {
		case http.StatusOK:
			var data []byte
			data, err = ioutil.ReadAll(resp.Body)
			return string(data), err
		case http.StatusAccepted:
			return "pending", nil
		case http.StatusNoContent:
			return "up", nil
		case http.StatusNotFound:
			return "not implemented for this service", nil
		case http.StatusInternalServerError:
			return "down", nil
		}
	}
	err = errors.Wrapf(c.buildErrorMessage(err, resp), "Failed to get status of instance %s", instance.Name)
	return "", log.WrapError(err)
}