func init() { bin := path.Base(os.Args[0]) flag.Usage = func() { fmt.Fprintf(os.Stderr, ` Usage of %s ================ Example: %% %s `, bin, bin) flag.PrintDefaults() } serverConfig = conf.New() serverConfig.VersionBuildStamp = Buildstamp serverConfig.VersionGitHash = Githash //config from file is loaded. //the values will be overwritten by command line flags flag.BoolVar(&serverConfig.DebugEnabled, "debug", serverConfig.DebugEnabled, "Enable debug output") flag.BoolVar(&serverConfig.Oauth2Enabled, "oauth", serverConfig.Oauth2Enabled, "Enable OAuth2") flag.IntVar(&serverConfig.AuthorizationType, "team-auth", serverConfig.AuthorizationType, "Enable team based authorization") flag.StringVar(&serverConfig.AuthURL, "oauth-authurl", "", "OAuth2 Auth URL") flag.StringVar(&serverConfig.TokenURL, "oauth-tokeninfourl", "", "OAuth2 Auth URL") flag.StringVar(&serverConfig.TLSCertfilePath, "tls-cert", serverConfig.TLSCertfilePath, "TLS Certfile") flag.StringVar(&serverConfig.TLSKeyfilePath, "tls-key", serverConfig.TLSKeyfilePath, "TLS Keyfile") flag.IntVar(&serverConfig.Port, "port", serverConfig.Port, "Listening TCP Port of the service.") if serverConfig.Port == 0 { serverConfig.Port = 8082 //default port when no option is provided } flag.DurationVar(&serverConfig.LogFlushInterval, "flush-interval", time.Second*5, "Interval to flush Logs to disk.") }
// Deploy deploys a new application // takes CreateRequest from backend as argument func (mb *MarathonBackend) Deploy(cr *CreateRequest) (string, error) { glog.Infof("Deploying a new application with name %s", cr.Name) app := marathon.NewDockerApplication() id := cr.Name ports := cr.Ports cpu := float64(cr.CPULimit) storage := 0.0 //TODO: setup limit for storage memory := float64(cr.MemoryLimit) labels := cr.Labels imageurl := cr.ImageURL env := cr.Env replicas := cr.Replicas app.Name(id) uris := strings.Fields(DOCKERCFG) app.Uris = &uris app.CPU(cpu).Memory(memory).Storage(storage).Count(replicas) app.Env = &env portmappings := make([]marathon.PortMapping, 0, len(ports)) for _, port := range ports { portmappings = append(portmappings, marathon.PortMapping{ContainerPort: port, HostPort: 0, Protocol: "tcp"}) //TODO: change to protocol params, we probably want to have UDP too. } app.Container.Docker.PortMappings = &portmappings //fluentd implementation if conf.New().FluentdEnabled { app.Container.Docker.AddParameter("log-driver", "fluentd") app.Container.Docker.AddParameter("log-opt", "\"fluentd-address=localhost:24224\"") if DOCKERVERSION == "1.9" { //this is unsupported if docker < 1.9 app.Container.Docker.AddParameter("log-opt", "\"tag={{.ImageName}}/{{.Name}}\"") } else { app.Container.Docker.AddParameter("log-opt", "\"fluentd-tag={{.Name}}\"") } } app.Labels = &labels forcepull := true app.Container.Docker.Container(imageurl).ForcePullImage = &forcepull volumes := make([]marathon.Volume, 0, len(cr.Volumes)) for _, volume := range cr.Volumes { volumes = append(volumes, marathon.Volume{ContainerPath: volume.ContainerPath, HostPath: volume.HostPath, Mode: volume.Mode}) } app.Container.Volumes = &volumes //TODO must implement configurable health checks application, err := mb.Client.CreateApplication(app) glog.Info(application) //TODO do we want to get some more information? Container IDs? 
I guess they can be not stable if err != nil { glog.Errorf("Could not create application %s, error %s", app.ID, err) return "", err } glog.Infof("Application was created, %s", app.ID) return app.ID, nil }
// getMarathonClient connects to mesos cluster // returns marathon interface like tasks, applications // groups, deployment, subscriptions, ... func initMarathonClient() marathon.Marathon { config := marathon.NewDefaultConfig() chimpConfig := conf.New() config.URL = chimpConfig.Endpoint if chimpConfig.MarathonAuth.Enabled { config.HTTPBasicAuthUser = chimpConfig.MarathonAuth.MarathonHttpUser config.HTTPBasicPassword = chimpConfig.MarathonAuth.MarathonHttpPassword } client, err := marathon.NewClient(config) if err != nil { glog.Fatalf("Failed to create a client for marathon, error: %s", err) } return client }
func rootHandler(ginCtx *gin.Context) { config := conf.New() ginCtx.JSON(http.StatusOK, gin.H{"chimp-server": fmt.Sprintf("Build Time: %s - Git Commit Hash: %s", config.VersionBuildStamp, config.VersionGitHash)}) }
"github.com/golang/glog"
backend "github.com/zalando/chimp/backend"
"github.com/zalando/chimp/conf"
. "github.com/zalando/chimp/types"
"github.com/zalando/chimp/validators"
)

//Backend contains the current backend: its configured type name and
//the implementation that serves requests.
type Backend struct {
	BackendType string
	Backend     backend.Backend
}

// Bootstrap backend: the backend type is read from the configuration
// at package initialization time; the implementation itself is only
// attached later by Start.
var se = Backend{
	BackendType: conf.New().BackendType,
}

//Start initializes the current backend implementation.
func Start() {
	se.Backend = backend.New()
}

//BackendError is the error representation that should be consumed by the "frontend" serving layer.
//It resembles (but does not need to be 1:1) what the frontend gives to the user, which is based on our RESTful API Guidelines doc.
type BackendError struct {
	Status int
	Title  string //error message coming from THIS layer
	Detail string //error message coming from backends
}
func getKubernetesClient() *k8s.Client { client := k8s.NewOrDie(&k8s.Config{Host: conf.New().Endpoint, Version: "v1"}) //PANIC if config not correct return client }
// GetApp returns a specific application from marathon // marathon.Application is a struct with a lot of details // about the application itself func (mb *MarathonBackend) GetApp(req *ArtifactRequest) (*Artifact, error) { application, err := mb.Client.Application(req.Name) if err != nil { glog.Errorf("Could not get application %s, error: %s", req.Name, err) return nil, err } var status = "RUNNING" //this is just our base case. we then check the status below var message string if !application.AllTaskRunning() { //deploying or waiting or failed, we just don't know! //TODO: this is due to marathon API. This must be discussed and improved in marathon! status = "DEPLOYING/WAITING" //also in case of errors the app is never "FAILED" when the policy is to accept deployments //and try to retry till more resources are available if application.LastTaskFailure != nil { message = fmt.Sprintf("%s, %s AT %s", application.LastTaskFailure.State, application.LastTaskFailure.Message, application.LastTaskFailure.Timestamp) } } endpoints := make([]string, 0, len(application.Tasks)) //transforming the data coming from kubernetes into chimp structure replicas := make([]*Replica, 0, len(application.Tasks)) for _, replica := range application.Tasks { //copying container data structure containers := make([]*Container, 0, 1) status := true statString := "OK" for _, hc := range replica.HealthCheckResults { if hc.Alive == false { status = false } } if !status { statString = "NOT ALIVE" } containerName, err := buildContainerName(replica.Host, replica.ID) logInfo := map[string]string{} if err == nil { //If the URL cannot be build I don't want to encounter any other problem remoteURL := "https://www.scalyr.com/events?mode=log&filter=$logfile%3D%27%2Ffluentd%2F%2F" + containerName + "%27%20$serverHost%3D%27" + strings.Split(replica.Host, ".")[0] + "%27" logInfo = map[string]string{"containerName": containerName, "remoteURL": remoteURL} } container := Container{ ImageURL: 
application.Container.Docker.Image, Status: statString, LogInfo: logInfo, } containers = append(containers, &container) ports := make([]*PortType, 0, len(replica.Ports)) for _, port := range replica.Ports { ports = append(ports, &PortType{Port: port, Protocol: ""}) } endpoints = append(endpoints, fmt.Sprintf("http://%s:%s/", replica.Host, intslice2str(replica.Ports, ""))) replica := Replica{Status: statString, Containers: containers, Endpoints: endpoints, Ports: ports} //HACK, this shouldn't be added only one time endpoints = nil replicas = append(replicas, &replica) } var ep string if strings.HasPrefix(application.ID, "/") { ep = application.ID[1:len(application.ID)] } else { ep = application.ID } endpoint := fmt.Sprintf(conf.New().EndpointPattern, ep) artifact := Artifact{ Name: application.ID, Message: message, Status: status, Labels: application.Labels, Env: application.Env, RunningReplicas: replicas, RequestedReplicas: *application.Instances, CPUS: application.CPUs, Memory: *application.Mem, Endpoint: endpoint, } return &artifact, nil }