Example #1
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *ConsulCatalog) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints types.Constraints) error {
	config := api.DefaultConfig()
	config.Address = provider.Endpoint
	client, err := api.NewClient(config)
	if err != nil {
		return err
	}
	provider.client = client
	provider.Constraints = append(provider.Constraints, constraints...)

	pool.Go(func(stop chan bool) {
		notify := func(err error, time time.Duration) {
			log.Errorf("Consul connection error %+v, retrying in %s", err, time)
		}
		operation := func() error {
			return provider.watch(configurationChan, stop)
		}
		err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
		if err != nil {
			log.Errorf("Cannot connect to consul server %+v", err)
		}
	})

	return err
}
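Nearly every provider above repeats the same retry skeleton: the work goes in an operation closure, each failure is logged through a notify callback, and backoff.RetryNotify drives the retries with an exponential backoff (Examples #7 and #26 additionally bound it by setting MaxElapsedTime). A minimal, self-contained sketch of that skeleton follows; the public github.com/cenkalti/backoff import and the standard-library log calls are stand-ins for traefik's internal job and log wrappers, not the exact packages used above.

package main

import (
	"errors"
	"log"
	"time"

	"github.com/cenkalti/backoff"
)

func main() {
	attempts := 0
	// operation is retried until it returns nil or the backoff policy gives up.
	operation := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient connection error")
		}
		return nil
	}
	// notify runs after every failed attempt with the delay before the next retry.
	notify := func(err error, wait time.Duration) {
		log.Printf("connection error %+v, retrying in %s", err, wait)
	}
	if err := backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), notify); err != nil {
		log.Printf("cannot connect: %+v", err)
	}
}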
Example #2
func (provider *Marathon) loadMarathonConfig() *types.Configuration {
	var MarathonFuncMap = template.FuncMap{
		"getBackend":                  provider.getBackend,
		"getPort":                     provider.getPort,
		"getWeight":                   provider.getWeight,
		"getDomain":                   provider.getDomain,
		"getProtocol":                 provider.getProtocol,
		"getPassHostHeader":           provider.getPassHostHeader,
		"getPriority":                 provider.getPriority,
		"getEntryPoints":              provider.getEntryPoints,
		"getFrontendRule":             provider.getFrontendRule,
		"getFrontendBackend":          provider.getFrontendBackend,
		"hasCircuitBreakerLabels":     provider.hasCircuitBreakerLabels,
		"hasLoadBalancerLabels":       provider.hasLoadBalancerLabels,
		"hasMaxConnLabels":            provider.hasMaxConnLabels,
		"getMaxConnExtractorFunc":     provider.getMaxConnExtractorFunc,
		"getMaxConnAmount":            provider.getMaxConnAmount,
		"getLoadBalancerMethod":       provider.getLoadBalancerMethod,
		"getCircuitBreakerExpression": provider.getCircuitBreakerExpression,
		"getSticky":                   provider.getSticky,
	}

	applications, err := provider.marathonClient.Applications(nil)
	if err != nil {
		log.Errorf("Failed to create a client for marathon, error: %s", err)
		return nil
	}

	tasks, err := provider.marathonClient.AllTasks(&marathon.AllTasksOpts{Status: "running"})
	if err != nil {
		log.Errorf("Failed to create a client for marathon, error: %s", err)
		return nil
	}

	//filter tasks
	filteredTasks := fun.Filter(func(task marathon.Task) bool {
		return provider.taskFilter(task, applications, provider.ExposedByDefault)
	}, tasks.Tasks).([]marathon.Task)

	//filter apps
	filteredApps := fun.Filter(func(app marathon.Application) bool {
		return provider.applicationFilter(app, filteredTasks)
	}, applications.Apps).([]marathon.Application)

	templateObjects := struct {
		Applications []marathon.Application
		Tasks        []marathon.Task
		Domain       string
	}{
		filteredApps,
		filteredTasks,
		provider.Domain,
	}

	configuration, err := provider.getConfiguration("templates/marathon.tmpl", MarathonFuncMap, templateObjects)
	if err != nil {
		log.Error(err)
	}
	return configuration
}
Example #3
func (server *Server) prepareServer(entryPointName string, router *middlewares.HandlerSwitcher, entryPoint *EntryPoint, oldServer *manners.GracefulServer, middlewares ...negroni.Handler) (*manners.GracefulServer, error) {
	log.Infof("Preparing server %s %+v", entryPointName, entryPoint)
	// middlewares
	var negroni = negroni.New()
	for _, middleware := range middlewares {
		negroni.Use(middleware)
	}
	negroni.UseHandler(router)
	tlsConfig, err := server.createTLSConfig(entryPointName, entryPoint.TLS, router)
	if err != nil {
		log.Errorf("Error creating TLS config %s", err)
		return nil, err
	}

	if oldServer == nil {
		return manners.NewWithServer(
			&http.Server{
				Addr:      entryPoint.Address,
				Handler:   negroni,
				TLSConfig: tlsConfig,
			}), nil
	}
	gracefulServer, err := oldServer.HijackListener(&http.Server{
		Addr:      entryPoint.Address,
		Handler:   negroni,
		TLSConfig: tlsConfig,
	}, tlsConfig)
	if err != nil {
		log.Errorf("Error hijacking server %s", err)
		return nil, err
	}
	return gracefulServer, nil
}
Example #4
File: acme.go Project: vdemeester/traefik
func (a *ACME) retrieveCertificates() {
	log.Infof("Retrieving ACME certificates...")
	for _, domain := range a.Domains {
		// check if cert isn't already loaded
		account := a.store.Get().(*Account)
		if _, exists := account.DomainsCertificate.exists(domain); !exists {
			domains := []string{}
			domains = append(domains, domain.Main)
			domains = append(domains, domain.SANs...)
			certificateResource, err := a.getDomainsCertificates(domains)
			if err != nil {
				log.Errorf("Error getting ACME certificate for domain %s: %s", domains, err.Error())
				continue
			}
			transaction, object, err := a.store.Begin()
			if err != nil {
				log.Errorf("Error creating ACME store transaction from domain %s: %s", domain, err.Error())
				continue
			}
			account = object.(*Account)
			_, err = account.DomainsCertificate.addCertificateForDomains(certificateResource, domain)
			if err != nil {
				log.Errorf("Error adding ACME certificate for domain %s: %s", domains, err.Error())
				continue
			}

			if err = transaction.Commit(account); err != nil {
				log.Errorf("Error Saving ACME account %+v: %s", account, err.Error())
				continue
			}
		}
	}
	log.Infof("Retrieved ACME certificates")
}
Example #5
File: kv.go Project: vdemeester/traefik
func (provider *Kv) provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints types.Constraints) error {
	provider.Constraints = append(provider.Constraints, constraints...)
	operation := func() error {
		// Read a key that is very unlikely to exist, purely to check that the KV store is reachable
		if _, err := provider.kvclient.Exists("qmslkjdfmqlskdjfmqlksjazçueznbvbwzlkajzebvkwjdcqmlsfj"); err != nil {
			return fmt.Errorf("Failed to test KV store connection: %v", err)
		}
		if provider.Watch {
			pool.Go(func(stop chan bool) {
				err := provider.watchKv(configurationChan, provider.Prefix, stop)
				if err != nil {
					log.Errorf("Cannot watch KV store: %v", err)
				}
			})
		}
		configuration := provider.loadConfig()
		configurationChan <- types.ConfigMessage{
			ProviderName:  string(provider.storeType),
			Configuration: configuration,
		}
		return nil
	}
	notify := func(err error, time time.Duration) {
		log.Errorf("KV connection error: %+v, retrying in %s", err, time)
	}
	err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
	if err != nil {
		return fmt.Errorf("Cannot connect to KV server: %v", err)
	}
	return nil
}
Example #6
// Provide allows the provider to provide configurations to traefik
// using the given configuration channel.
func (provider *Kubernetes) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints types.Constraints) error {
	k8sClient, err := provider.newK8sClient()
	if err != nil {
		return err
	}
	provider.Constraints = append(provider.Constraints, constraints...)

	pool.Go(func(stop chan bool) {
		operation := func() error {
			for {
				stopWatch := make(chan struct{}, 1)
				defer close(stopWatch)
				log.Debugf("Using label selector: '%s'", provider.LabelSelector)
				eventsChan, err := k8sClient.WatchAll(provider.LabelSelector, stopWatch)
				if err != nil {
					log.Errorf("Error watching kubernetes events: %v", err)
					timer := time.NewTimer(1 * time.Second)
					select {
					case <-timer.C:
						return err
					case <-stop:
						return nil
					}
				}
				for {
					select {
					case <-stop:
						return nil
					case event := <-eventsChan:
						log.Debugf("Received event from kubernetes %+v", event)
						templateObjects, err := provider.loadIngresses(k8sClient)
						if err != nil {
							return err
						}
						if reflect.DeepEqual(provider.lastConfiguration.Get(), templateObjects) {
							log.Debugf("Skipping event from kubernetes %+v", event)
						} else {
							provider.lastConfiguration.Set(templateObjects)
							configurationChan <- types.ConfigMessage{
								ProviderName:  "kubernetes",
								Configuration: provider.loadConfig(*templateObjects),
							}
						}
					}
				}
			}
		}

		notify := func(err error, time time.Duration) {
			log.Errorf("Kubernetes connection error %+v, retrying in %s", err, time)
		}
		err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
		if err != nil {
			log.Errorf("Cannot connect to Kubernetes server %+v", err)
		}
	})

	return nil
}
Example #7
// Begin creates a transaction with the KV store.
func (d *Datastore) Begin() (Transaction, Object, error) {
	id := uuid.NewV4().String()
	log.Debugf("Transaction %s begins", id)
	remoteLock, err := d.kv.NewLock(d.lockKey, &store.LockOptions{TTL: 20 * time.Second, Value: []byte(id)})
	if err != nil {
		return nil, nil, err
	}
	stopCh := make(chan struct{})
	ctx, cancel := context.WithCancel(d.ctx)
	var errLock error
	go func() {
		_, errLock = remoteLock.Lock(stopCh)
		cancel()
	}()
	select {
	case <-ctx.Done():
		if errLock != nil {
			return nil, nil, errLock
		}
	case <-d.ctx.Done():
		stopCh <- struct{}{}
		return nil, nil, d.ctx.Err()
	}

	// we got the lock! Now make sure we are synced with KV store
	operation := func() error {
		meta := d.get()
		if meta.Lock != id {
			return fmt.Errorf("Object lock value: expected %s, got %s", id, meta.Lock)
		}
		return nil
	}
	notify := func(err error, time time.Duration) {
		log.Errorf("Datastore sync error: %v, retrying in %s", err, time)
		err = d.reload()
		if err != nil {
			log.Errorf("Error reloading: %+v", err)
		}
	}
	ebo := backoff.NewExponentialBackOff()
	ebo.MaxElapsedTime = 60 * time.Second
	err = backoff.RetryNotify(operation, ebo, notify)
	if err != nil {
		return nil, nil, fmt.Errorf("Datastore cannot sync: %v", err)
	}

	// we synced with KV store, we can now return Setter
	return &datastoreTransaction{
		Datastore:  d,
		remoteLock: remoteLock,
		id:         id,
	}, d.meta.object, nil
}
Example #8
File: mesos.go Project: containous/traefik
func detectMasters(zk string, masters []string) <-chan []string {
	changed := make(chan []string, 1)
	if zk != "" {
		log.Debugf("Starting master detector for ZK ", zk)
		if md, err := detector.New(zk); err != nil {
			log.Errorf("failed to create master detector: %v", err)
		} else if err := md.Detect(detect.NewMasters(masters, changed)); err != nil {
			log.Errorf("failed to initialize master detector: %v", err)
		}
	} else {
		changed <- masters
	}
	return changed
}
Example #9
func setLabels(kvs ...string) taskOpt {
	return func(t *state.Task) {
		if len(kvs)%2 != 0 {
			panic("odd number")
		}

		for i := 0; i < len(kvs); i += 2 {
			var label = state.Label{Key: kvs[i], Value: kvs[i+1]}
			log.Errorf("Label1.1 : %v", label)
			t.Labels = append(t.Labels, label)
			log.Errorf("Label1.2 : %v", t.Labels)
		}

	}
}
Example #10
func (server *Server) postLoadConfig() {
	if server.globalConfiguration.ACME == nil {
		return
	}
	if server.leadership != nil && !server.leadership.IsLeader() {
		return
	}
	if server.globalConfiguration.ACME.OnHostRule {
		currentConfigurations := server.currentConfigurations.Get().(configs)
		for _, configuration := range currentConfigurations {
			for _, frontend := range configuration.Frontends {
				for _, route := range frontend.Routes {
					rules := Rules{}
					domains, err := rules.ParseDomains(route.Rule)
					if err != nil {
						log.Errorf("Error parsing domains: %v", err)
					} else {
						server.globalConfiguration.ACME.LoadCertificateForDomains(domains)
					}
				}

			}
		}
	}
}
Example #11
File: kv.go Project: vdemeester/traefik
func (provider *Kv) watchKv(configurationChan chan<- types.ConfigMessage, prefix string, stop chan bool) error {
	operation := func() error {
		events, err := provider.kvclient.WatchTree(provider.Prefix, make(chan struct{}))
		if err != nil {
			return fmt.Errorf("Failed to KV WatchTree: %v", err)
		}
		for {
			select {
			case <-stop:
				return nil
			case _, ok := <-events:
				if !ok {
					return errors.New("watchtree channel closed")
				}
				configuration := provider.loadConfig()
				if configuration != nil {
					configurationChan <- types.ConfigMessage{
						ProviderName:  string(provider.storeType),
						Configuration: configuration,
					}
				}
			}
		}
	}

	notify := func(err error, time time.Duration) {
		log.Errorf("KV connection error: %+v, retrying in %s", err, time)
	}
	err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
	if err != nil {
		return fmt.Errorf("Cannot connect to KV server: %v", err)
	}
	return nil
}
Example #12
// GetPrivateKey returns the account's private key
func (a *Account) GetPrivateKey() crypto.PrivateKey {
	if privateKey, err := x509.ParsePKCS1PrivateKey(a.PrivateKey); err == nil {
		return privateKey
	}
	log.Errorf("Cannot unmarshall private key %+v", a.PrivateKey)
	return nil
}
Example #13
func (provider *Marathon) getBackend(task marathon.Task, applications []marathon.Application) string {
	application, errApp := getApplication(task, applications)
	if errApp != nil {
		log.Errorf("Unable to get marathon application from task %s", task.AppID)
		return ""
	}
	return provider.getFrontendBackend(application)
}
Example #14
File: mesos.go Project: containous/traefik
func (provider *Mesos) getBackend(task state.Task, applications []state.Task) string {
	application, errApp := getMesos(task, applications)
	if errApp != nil {
		log.Errorf("Unable to get mesos application from task %s", task.DiscoveryInfo.Name)
		return ""
	}
	return provider.getFrontendBackend(application)
}
Example #15
// Participate tries to be a leader
func (l *Leadership) Participate(pool *safe.Pool) {
	pool.GoCtx(func(ctx context.Context) {
		log.Debugf("Node %s running for election", l.Cluster.Node)
		defer log.Debugf("Node %s no more running for election", l.Cluster.Node)
		backOff := backoff.NewExponentialBackOff()
		operation := func() error {
			return l.run(ctx, l.candidate)
		}

		notify := func(err error, time time.Duration) {
			log.Errorf("Leadership election error %+v, retrying in %s", err, time)
		}
		err := backoff.RetryNotify(operation, backOff, notify)
		if err != nil {
			log.Errorf("Cannot elect leadership %+v", err)
		}
	})
}
Example #16
File: acme.go Project: vdemeester/traefik
func (a *ACME) renewCertificates() error {
	log.Debugf("Testing certificate renew...")
	account := a.store.Get().(*Account)
	for _, certificateResource := range account.DomainsCertificate.Certs {
		if certificateResource.needRenew() {
			log.Debugf("Renewing certificate %+v", certificateResource.Domains)
			renewedCert, err := a.client.RenewCertificate(acme.CertificateResource{
				Domain:        certificateResource.Certificate.Domain,
				CertURL:       certificateResource.Certificate.CertURL,
				CertStableURL: certificateResource.Certificate.CertStableURL,
				PrivateKey:    certificateResource.Certificate.PrivateKey,
				Certificate:   certificateResource.Certificate.Certificate,
			}, true, OSCPMustStaple)
			if err != nil {
				log.Errorf("Error renewing certificate: %v", err)
				continue
			}
			log.Debugf("Renewed certificate %+v", certificateResource.Domains)
			renewedACMECert := &Certificate{
				Domain:        renewedCert.Domain,
				CertURL:       renewedCert.CertURL,
				CertStableURL: renewedCert.CertStableURL,
				PrivateKey:    renewedCert.PrivateKey,
				Certificate:   renewedCert.Certificate,
			}
			transaction, object, err := a.store.Begin()
			if err != nil {
				return err
			}
			account = object.(*Account)
			err = account.DomainsCertificate.renewCertificates(renewedACMECert, certificateResource.Domains)
			if err != nil {
				log.Errorf("Error renewing certificate: %v", err)
				continue
			}

			if err = transaction.Commit(account); err != nil {
				log.Errorf("Error Saving ACME account %+v: %s", account, err.Error())
				continue
			}
		}
	}
	return nil
}
Example #17
func (d *Datastore) watchChanges() error {
	stopCh := make(chan struct{})
	kvCh, err := d.kv.Watch(d.lockKey, stopCh)
	if err != nil {
		return err
	}
	go func() {
		ctx, cancel := context.WithCancel(d.ctx)
		operation := func() error {
			for {
				select {
				case <-ctx.Done():
					stopCh <- struct{}{}
					return nil
				case _, ok := <-kvCh:
					if !ok {
						cancel()
						return err
					}
					err = d.reload()
					if err != nil {
						return err
					}
					// log.Debugf("Datastore object change received: %+v", d.meta)
					if d.listener != nil {
						err := d.listener(d.meta.object)
						if err != nil {
							log.Errorf("Error calling datastore listener: %s", err)
						}
					}
				}
			}
		}
		notify := func(err error, time time.Duration) {
			log.Errorf("Error in watch datastore: %+v, retrying in %s", err, time)
		}
		err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify)
		if err != nil {
			log.Errorf("Error in watch datastore: %v", err)
		}
	}()
	return nil
}
Example #18
func (provider *Marathon) getProtocol(task marathon.Task, applications []marathon.Application) string {
	application, errApp := getApplication(task, applications)
	if errApp != nil {
		log.Errorf("Unable to get marathon application from task %s", task.AppID)
		return "http"
	}
	if label, err := provider.getLabel(application, "traefik.protocol"); err == nil {
		return label
	}
	return "http"
}
Example #19
func (provider *Docker) getMaxConnAmount(container dockerData) int64 {
	if label, err := getLabel(container, "traefik.backend.maxconn.amount"); err == nil {
		i, errConv := strconv.ParseInt(label, 10, 64)
		if errConv != nil {
			log.Errorf("Unable to parse traefik.backend.maxconn.amount %s", label)
			return math.MaxInt64
		}
		return i
	}
	return math.MaxInt64
}
Example #20
func (provider *Marathon) getMaxConnAmount(application marathon.Application) int64 {
	if label, err := provider.getLabel(application, "traefik.backend.maxconn.amount"); err == nil {
		i, errConv := strconv.ParseInt(label, 10, 64)
		if errConv != nil {
			log.Errorf("Unable to parse traefik.backend.maxconn.amount %s", label)
			return math.MaxInt64
		}
		return i
	}
	return math.MaxInt64
}
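Examples #19 and #20 parse the traefik.backend.maxconn.amount label identically: strconv.ParseInt in base 10 with a 64-bit size, falling back to math.MaxInt64 (effectively no limit) when the value cannot be parsed. A self-contained sketch of just that parsing step, with an illustrative label value:

package main

import (
	"fmt"
	"math"
	"strconv"
)

// parseMaxConnAmount mirrors the fallback behaviour above: any parse error
// yields math.MaxInt64, i.e. the backend is left effectively unlimited.
func parseMaxConnAmount(label string) int64 {
	i, err := strconv.ParseInt(label, 10, 64)
	if err != nil {
		return math.MaxInt64
	}
	return i
}

func main() {
	fmt.Println(parseMaxConnAmount("10"))           // 10
	fmt.Println(parseMaxConnAmount("not-a-number")) // 9223372036854775807
}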
Example #21
File: mesos.go Project: vdemeester/traefik
func (provider *Mesos) loadMesosConfig() *types.Configuration {
	var mesosFuncMap = template.FuncMap{
		"getBackend":         provider.getBackend,
		"getPort":            provider.getPort,
		"getHost":            provider.getHost,
		"getWeight":          provider.getWeight,
		"getDomain":          provider.getDomain,
		"getProtocol":        provider.getProtocol,
		"getPassHostHeader":  provider.getPassHostHeader,
		"getPriority":        provider.getPriority,
		"getEntryPoints":     provider.getEntryPoints,
		"getFrontendRule":    provider.getFrontendRule,
		"getFrontendBackend": provider.getFrontendBackend,
		"getID":              provider.getID,
		"getFrontEndName":    provider.getFrontEndName,
		"replace":            replace,
	}

	t := records.NewRecordGenerator(time.Duration(provider.StateTimeoutSecond) * time.Second)
	sj, err := t.FindMaster(provider.Masters...)
	if err != nil {
		log.Errorf("Failed to create a client for mesos, error: %s", err)
		return nil
	}
	tasks := provider.taskRecords(sj)

	//filter tasks
	filteredTasks := fun.Filter(func(task state.Task) bool {
		return mesosTaskFilter(task, provider.ExposedByDefault)
	}, tasks).([]state.Task)

	filteredApps := []state.Task{}
	for _, value := range filteredTasks {
		if !taskInSlice(value, filteredApps) {
			filteredApps = append(filteredApps, value)
		}
	}

	templateObjects := struct {
		Applications []state.Task
		Tasks        []state.Task
		Domain       string
	}{
		filteredApps,
		filteredTasks,
		provider.Domain,
	}

	configuration, err := provider.getConfiguration("templates/mesos.tmpl", mesosFuncMap, templateObjects)
	if err != nil {
		log.Error(err)
	}
	return configuration
}
Example #22
File: mesos.go Project: containous/traefik
func (provider *Mesos) getProtocol(task state.Task, applications []state.Task) string {
	application, errApp := getMesos(task, applications)
	if errApp != nil {
		log.Errorf("Unable to get mesos application from task %s", task.DiscoveryInfo.Name)
		return "http"
	}
	if label, err := provider.getLabel(application, "traefik.protocol"); err == nil {
		return label
	}
	return "http"
}
Example #23
File: retry.go Project: vdemeester/traefik
// Flush sends any buffered data to the client.
func (rw *ResponseRecorder) Flush() {
	_, err := rw.responseWriter.Write(rw.Body.Bytes())
	if err != nil {
		log.Errorf("Error writing response in ResponseRecorder: %s", err)
		rw.err = err
	}
	rw.Body.Reset()
	flusher, ok := rw.responseWriter.(http.Flusher)
	if ok {
		flusher.Flush()
	}
}
Example #24
File: acme.go Project: vdemeester/traefik
// LoadCertificateForDomains loads certificates from ACME for given domains
func (a *ACME) LoadCertificateForDomains(domains []string) {
	domains = fun.Map(types.CanonicalDomain, domains).([]string)
	safe.Go(func() {
		operation := func() error {
			if a.client == nil {
				return fmt.Errorf("ACME client still not built")
			}
			return nil
		}
		notify := func(err error, time time.Duration) {
			log.Errorf("Error getting ACME client: %v, retrying in %s", err, time)
		}
		ebo := backoff.NewExponentialBackOff()
		ebo.MaxElapsedTime = 30 * time.Second
		err := backoff.RetryNotify(operation, ebo, notify)
		if err != nil {
			log.Errorf("Error getting ACME client: %v", err)
			return
		}
		account := a.store.Get().(*Account)
		var domain Domain
		if len(domains) == 0 {
			// no domain
			return

		} else if len(domains) > 1 {
			domain = Domain{Main: domains[0], SANs: domains[1:]}
		} else {
			domain = Domain{Main: domains[0]}
		}
		if _, exists := account.DomainsCertificate.exists(domain); exists {
			// domain already exists
			return
		}
		certificate, err := a.getDomainsCertificates(domains)
		if err != nil {
			log.Errorf("Error getting ACME certificates %+v : %v", domains, err)
			return
		}
		log.Debugf("Got certificate for domains %+v", domains)
		transaction, object, err := a.store.Begin()

		if err != nil {
			log.Errorf("Error creating transaction %+v : %v", domains, err)
			return
		}
		account = object.(*Account)
		_, err = account.DomainsCertificate.addCertificateForDomains(certificate, domain)
		if err != nil {
			log.Errorf("Error adding ACME certificates %+v : %v", domains, err)
			return
		}
		if err = transaction.Commit(account); err != nil {
			log.Errorf("Error Saving ACME account %+v: %v", account, err)
			return
		}
	})
}
Example #25
func (server *Server) startProviders() {
	// start providers
	for _, provider := range server.providers {
		jsonConf, _ := json.Marshal(provider)
		log.Infof("Starting provider %v %s", reflect.TypeOf(provider), jsonConf)
		currentProvider := provider
		safe.Go(func() {
			err := currentProvider.Provide(server.configurationChan, server.routinesPool, server.globalConfiguration.Constraints)
			if err != nil {
				log.Errorf("Error starting provider %s", err)
			}
		})
	}
}
Example #26
func (c *challengeProvider) getCertificate(domain string) (cert *tls.Certificate, exists bool) {
	log.Debugf("Challenge GetCertificate %s", domain)
	if !strings.HasSuffix(domain, ".acme.invalid") {
		return nil, false
	}
	c.lock.RLock()
	defer c.lock.RUnlock()
	account := c.store.Get().(*Account)
	if account.ChallengeCerts == nil {
		return nil, false
	}
	account.Init()
	var result *tls.Certificate
	operation := func() error {
		for _, cert := range account.ChallengeCerts {
			for _, dns := range cert.certificate.Leaf.DNSNames {
				if domain == dns {
					result = cert.certificate
					return nil
				}
			}
		}
		return fmt.Errorf("Cannot find challenge cert for domain %s", domain)
	}
	notify := func(err error, time time.Duration) {
		log.Errorf("Error getting cert: %v, retrying in %s", err, time)
	}
	ebo := backoff.NewExponentialBackOff()
	ebo.MaxElapsedTime = 60 * time.Second
	err := backoff.RetryNotify(operation, ebo, notify)
	if err != nil {
		log.Errorf("Error getting cert: %v", err)
		return nil, false
	}
	return result, true
}
Example #27
func (l *Leadership) onElection(elected bool) {
	if elected {
		log.Infof("Node %s elected leader ♚", l.Cluster.Node)
		l.leader.Set(true)
		l.Start()
	} else {
		log.Infof("Node %s elected slave ♝", l.Cluster.Node)
		l.leader.Set(false)
		l.Stop()
	}
	for _, listener := range l.listeners {
		err := listener(elected)
		if err != nil {
			log.Errorf("Error calling Leadership listener: %s", err)
		}
	}
}
Example #28
File: mesos.go Project: containous/traefik
func (provider *Mesos) getPort(task state.Task, applications []state.Task) string {
	application, err := getMesos(task, applications)
	if err != nil {
		log.Errorf("Unable to get mesos application from task %s", task.DiscoveryInfo.Name)
		return ""
	}

	if portIndexLabel, err := provider.getLabel(application, "traefik.portIndex"); err == nil {
		if index, err := strconv.Atoi(portIndexLabel); err == nil {
			return strconv.Itoa(task.DiscoveryInfo.Ports.DiscoveryPorts[index].Number)
		}
	}
	if portValueLabel, err := provider.getLabel(application, "traefik.port"); err == nil {
		return portValueLabel
	}

	for _, port := range task.DiscoveryInfo.Ports.DiscoveryPorts {
		return strconv.Itoa(port.Number)
	}
	return ""
}
Example #29
func (provider *Marathon) getPort(task marathon.Task, applications []marathon.Application) string {
	application, err := getApplication(task, applications)
	if err != nil {
		log.Errorf("Unable to get marathon application from task %s", task.AppID)
		return ""
	}

	if portIndexLabel, err := provider.getLabel(application, "traefik.portIndex"); err == nil {
		if index, err := strconv.Atoi(portIndexLabel); err == nil {
			return strconv.Itoa(task.Ports[index])
		}
	}
	if portValueLabel, err := provider.getLabel(application, "traefik.port"); err == nil {
		return portValueLabel
	}

	for _, port := range task.Ports {
		return strconv.Itoa(port)
	}
	return ""
}
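The Mesos and Marathon getPort implementations in Examples #28 and #29 resolve the exposed port with the same priority: a traefik.portIndex label first, then a traefik.port label, then the first port the task publishes. A self-contained sketch of that selection order; the plain map and int slice are illustrative stand-ins for the Marathon/Mesos task types, and the bounds check on the index is an extra safeguard not present above:

package main

import (
	"fmt"
	"strconv"
)

// selectPort mirrors the priority used by getPort: portIndex label, then
// port label, then the first exposed port, then "" when nothing matches.
func selectPort(labels map[string]string, ports []int) string {
	if idx, ok := labels["traefik.portIndex"]; ok {
		if i, err := strconv.Atoi(idx); err == nil && i >= 0 && i < len(ports) {
			return strconv.Itoa(ports[i])
		}
	}
	if p, ok := labels["traefik.port"]; ok {
		return p
	}
	for _, p := range ports {
		return strconv.Itoa(p)
	}
	return ""
}

func main() {
	fmt.Println(selectPort(map[string]string{"traefik.portIndex": "1"}, []int{8080, 9090})) // 9090
	fmt.Println(selectPort(map[string]string{"traefik.port": "80"}, []int{8080}))           // 80
	fmt.Println(selectPort(nil, []int{8080}))                                               // 8080
}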
Example #30
func (provider *Kubernetes) loadIngresses(k8sClient k8s.Client) (*types.Configuration, error) {
	ingresses := k8sClient.GetIngresses(provider.Namespaces)

	templateObjects := types.Configuration{
		map[string]*types.Backend{},
		map[string]*types.Frontend{},
	}
	PassHostHeader := provider.getPassHostHeader()
	for _, i := range ingresses {
		for _, r := range i.Spec.Rules {
			for _, pa := range r.HTTP.Paths {
				if _, exists := templateObjects.Backends[r.Host+pa.Path]; !exists {
					templateObjects.Backends[r.Host+pa.Path] = &types.Backend{
						Servers: make(map[string]types.Server),
					}
				}
				if _, exists := templateObjects.Frontends[r.Host+pa.Path]; !exists {
					templateObjects.Frontends[r.Host+pa.Path] = &types.Frontend{
						Backend:        r.Host + pa.Path,
						PassHostHeader: PassHostHeader,
						Routes:         make(map[string]types.Route),
						Priority:       len(pa.Path),
					}
				}
				if len(r.Host) > 0 {
					if _, exists := templateObjects.Frontends[r.Host+pa.Path].Routes[r.Host]; !exists {
						templateObjects.Frontends[r.Host+pa.Path].Routes[r.Host] = types.Route{
							Rule: "Host:" + r.Host,
						}
					}
				}
				if len(pa.Path) > 0 {
					ruleType := i.Annotations["traefik.frontend.rule.type"]

					switch strings.ToLower(ruleType) {
					case "pathprefixstrip":
						ruleType = "PathPrefixStrip"
					case "pathstrip":
						ruleType = "PathStrip"
					case "path":
						ruleType = "Path"
					case "pathprefix":
						ruleType = "PathPrefix"
					case "":
						ruleType = "PathPrefix"
					default:
						log.Warnf("Unknown RuleType %s for %s/%s, falling back to PathPrefix", ruleType, i.ObjectMeta.Namespace, i.ObjectMeta.Name)
						ruleType = "PathPrefix"
					}

					templateObjects.Frontends[r.Host+pa.Path].Routes[pa.Path] = types.Route{
						Rule: ruleType + ":" + pa.Path,
					}
				}
				service, exists, err := k8sClient.GetService(i.ObjectMeta.Namespace, pa.Backend.ServiceName)
				if err != nil || !exists {
					log.Warnf("Error retrieving service %s/%s: %v", i.ObjectMeta.Namespace, pa.Backend.ServiceName, err)
					delete(templateObjects.Frontends, r.Host+pa.Path)
					continue
				}

				protocol := "http"
				for _, port := range service.Spec.Ports {
					if equalPorts(port, pa.Backend.ServicePort) {
						if port.Port == 443 {
							protocol = "https"
						}
						endpoints, exists, err := k8sClient.GetEndpoints(service.ObjectMeta.Namespace, service.ObjectMeta.Name)
						if err != nil || !exists {
							log.Errorf("Error retrieving endpoints %s/%s: %v", service.ObjectMeta.Namespace, service.ObjectMeta.Name, err)
							continue
						}
						if len(endpoints.Subsets) == 0 {
							log.Warnf("Endpoints not found for %s/%s, falling back to Service ClusterIP", service.ObjectMeta.Namespace, service.ObjectMeta.Name)
							templateObjects.Backends[r.Host+pa.Path].Servers[string(service.UID)] = types.Server{
								URL:    protocol + "://" + service.Spec.ClusterIP + ":" + strconv.Itoa(int(port.Port)),
								Weight: 1,
							}
						} else {
							for _, subset := range endpoints.Subsets {
								for _, address := range subset.Addresses {
									url := protocol + "://" + address.IP + ":" + strconv.Itoa(endpointPortNumber(port, subset.Ports))
									name := url
									if address.TargetRef != nil && address.TargetRef.Name != "" {
										name = address.TargetRef.Name
									}
									templateObjects.Backends[r.Host+pa.Path].Servers[name] = types.Server{
										URL:    url,
										Weight: 1,
									}
								}
							}
						}
						break
					}
				}
			}
		}
	}
	return &templateObjects, nil
}