// Checkout project from repository func (b *builder) updateCode() { p, err := b.scm.ImportPathFromRepo(b.BuildConfig.AppRepo) b.projectPath = path.Join(b.BuildConfig.Jail, "src", p) if err != nil { log.Fatal(err.Error()) } out, err := b.term.Exec("ls " + b.projectPath) if err != nil { fmt.Println("Creating project directories") out, err = b.term.Exec("mkdir -p " + b.projectPath) if err != nil { log.Fatal("Could not create project directories") } fmt.Println(string(out)) } // Fetch code base b.scm.SetTerminal(b.term) b.scm.Checkout(b.BuildConfig.AppRepo, b.BuildConfig.RepoBranch, b.projectPath) }
func (b *builder) validatePackage() { // Validate this package is a command var err error b.pack, err = context.ImportDir(".", 0) if err != nil { log.Fatal("Could not import package for validation") } if !b.pack.IsCommand() { log.Fatal("Package is not a command") } }
func (c *SSHConn) ExecPath(cmd, path string) (out []byte, err error) { var session *ssh.Session session, err = c.client.NewSession() if err != nil { log.Fatal("Failed to create session: " + err.Error()) } defer session.Close() envVars := "" if c.env != nil { for name, value := range c.env { envVars = envVars + name + "=\"" + value + "\" " /* TODO: This should be the proper way to set the environment, but fails for some reason * Investigate why and possibly send pull-request to maintainer err = session.Setenv(name, value) if err != nil { log.Fatal("Failed to set environment: " + err.Error()) } */ } } cmd = envVars + cmd if path != "" { cmd = "cd " + path + " && " + cmd } return session.CombinedOutput(cmd) }
func SetLogLevel(criteria *skynet.Criteria, level string) { instances, err := skynet.GetServiceManager().ListInstances(criteria) if err != nil { log.Fatal(err) } var wait sync.WaitGroup for _, instance := range filterDaemon(instances) { wait.Add(1) go func(instance skynet.ServiceInfo) { fmt.Println("Setting LogLevel to " + level + " for: " + instance.UUID) d := daemon.GetDaemonForService(&instance) in := daemon.SubServiceLogLevelRequest{ UUID: instance.UUID, Level: level, } out, err := d.SubServiceLogLevel(in) if err != nil { fmt.Println("Returned Error: " + err.Error()) wait.Done() return } logLevelTemplate.Execute(os.Stdout, out) wait.Done() }(instance) } wait.Wait() }
func GetServiceManager() ServiceManager { if manager == nil { log.Fatal("No ServiceManager provided") } return manager }
func Stop(criteria *skynet.Criteria) { instances, err := skynet.GetServiceManager().ListInstances(criteria) if err != nil { log.Fatal(err) } var wait sync.WaitGroup for _, instance := range filterDaemon(instances) { wait.Add(1) go func(instance skynet.ServiceInfo) { fmt.Println("Stopping: " + instance.UUID) d := daemon.GetDaemonForService(&instance) in := daemon.StopSubServiceRequest{ UUID: instance.UUID, } out, err := d.StopSubService(in) if err != nil { fmt.Println("Returned Error: " + err.Error()) wait.Done() return } stopTemplate.Execute(os.Stdout, out) wait.Done() }(instance) } wait.Wait() }
// Daemon will run and maintain skynet services. // // Daemon will run the "SkynetDeployment" service, which can be used // to remotely spawn new services on the host. func main() { si := skynet.NewServiceInfo("SkynetDaemon", "2.0.0") deployment := NewSkynetDaemon() s := service.CreateService(deployment, si) deployment.Service = s // handle panic so that we remove ourselves from the pool in case of catastrophic failure defer func() { s.Shutdown() deployment.closeStateFile() if err := recover(); err != nil { e := err.(error) log.Fatal("Unrecovered error occured: " + e.Error()) } }() // Collect Host metrics statTicker := time.Tick((5 * time.Second)) go func() { for _ = range statTicker { deployment.updateHostStats(si.ServiceAddr.IPAddress) } }() // If we pass false here service will not be Registered // we could do other work/tasks by implementing the Started method and calling Register() when we're ready s.Start().Wait() }
func StopDaemon(criteria *skynet.Criteria) { hosts, err := skynet.GetServiceManager().ListHosts(criteria) if err != nil { log.Fatal(err) } var wait sync.WaitGroup for _, host := range hosts { wait.Add(1) go func(host string) { d := daemon.GetDaemonForHost(host) in := daemon.StopRequest{} out, err := d.Stop(in) if err != nil { fmt.Println("Returned Error: " + err.Error()) wait.Done() return } if out.Ok { fmt.Printf("Daemon stopped on host: %v\n", host) } else { fmt.Printf("Failed to stop daemon on host: %v\n", host) } wait.Done() }(host) } wait.Wait() }
func getInstances(c *skynet.Criteria) []skynet.ServiceInfo { instances, err := skynet.GetServiceManager().ListInstances(c) if err != nil { log.Fatal(err) } return instances }
func getHosts(c *skynet.Criteria) []string { hosts, err := skynet.GetServiceManager().ListHosts(c) if err != nil { log.Fatal(err) } return hosts }
func getVersions(c *skynet.Criteria) []string { versions, err := skynet.GetServiceManager().ListVersions(c) if err != nil { log.Fatal(err) } return versions }
func getServices(c *skynet.Criteria) []string { services, err := skynet.GetServiceManager().ListServices(c) if err != nil { log.Fatal(err) } return services }
func (b *builder) setupScm() { switch b.BuildConfig.RepoType { case "git": b.scm = new(GitScm) default: log.Fatal("unkown RepoType") } }
func (b *builder) runCommands(cmds []string) { for _, cmd := range cmds { out, err := b.term.Exec(cmd) fmt.Println(string(out)) if err != nil { log.Fatal("Failed to execute dependent command: " + cmd + "\n" + err.Error()) } } }
// NewUUID returns a random (version 4) UUID string in the canonical
// 8-4-4-4-12 hex form, e.g. "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".
func NewUUID() string {
	buf := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, buf); err != nil {
		log.Fatal(err)
	}
	// Stamp the RFC 4122 version (4 = random) and variant bits.
	buf[6] = (buf[6] & 0x0F) | 0x40
	buf[8] = (buf[8] &^ 0x40) | 0x80
	return fmt.Sprintf("%x-%x-%x-%x-%x", buf[:4], buf[4:6], buf[6:8], buf[8:10], buf[10:])
}
func NewClient(config *skynet.ClientConfig) *Client { if config.MaxConnectionsToInstance == 0 { log.Fatal("Must allow at least one instance connection") } client := &Client{ Config: config, servicePools: map[string]*servicePool{}, } return client }
func (b *builder) testSkynet() { fmt.Println("Testing Skynet") p := path.Join(b.BuildConfig.Jail, "src/github.com/skynetservices/skynet2") b.getPackageDependencies(p) out, err := b.term.ExecPath(b.BuildConfig.GoRoot+"/bin/go test ./...", p) fmt.Println(string(out)) if err != nil { log.Fatal("Failed tests: " + err.Error()) } }
func newBuilder(config string) *builder { if config == "" { config = "./build.cfg" } f, err := ioutil.ReadFile(config) if err != nil { log.Fatal("Failed to read: " + config) } b := new(builder) err = json.Unmarshal(f, b) if err != nil { log.Fatal("Failed to parse " + config + ": " + err.Error()) } if isHostLocal(b.BuildConfig.Host) { fmt.Println("Connecting to build machine: " + b.BuildConfig.Host) b.term = new(LocalTerminal) } else { sshClient := new(SSHConn) b.term = sshClient fmt.Println("Connecting to build machine: " + b.BuildConfig.Host) err = sshClient.Connect(b.BuildConfig.Host, b.BuildConfig.User) if err != nil { log.Fatal("Failed to connect to build machine: " + err.Error()) } } b.validatePackage() return b }
func (b *builder) getPackageDependencies(p string) { flags := []string{"-d"} if b.BuildConfig.UpdatePackages { flags = append(flags, "-u") } fmt.Println("Fetching dependencies") out, err := b.term.ExecPath(b.BuildConfig.GoRoot+"/bin/go get "+strings.Join(flags, " ")+" ./...", p) fmt.Println(string(out)) if err != nil { log.Fatal("Failed to fetch dependencies\n" + err.Error()) } }
func (b *builder) runTests() { p := path.Join(b.projectPath, b.BuildConfig.AppPath) fmt.Println("Testing packages") out, err := b.term.ExecPath(b.BuildConfig.GoRoot+"/bin/go test", p) fmt.Println(string(out)) if err != nil { log.Fatal("Failed tests: " + err.Error()) } if b.BuildConfig.TestSkynet { b.testSkynet() } }
func criteriaFromArgs(args []string) (*skynet.Criteria, []string) { flagset := flag.NewFlagSet("deploy", flag.ExitOnError) services := flagset.String("services", "", "services") regions := flagset.String("regions", "", "regions") instances := flagset.String("instances", "", "instances") hosts := flagset.String("hosts", "", "hosts") registered := flagset.String("registered", "", "registered") flagsetArgs, args := config.SplitFlagsetFromArgs(flagset, args) err := flagset.Parse(flagsetArgs) if err != nil { log.Fatal(err) } regionCriteria := make([]string, 0, 0) if len(*regions) > 0 { regionCriteria = strings.Split(*regions, ",") } hostCriteria := make([]string, 0, 0) if len(*hosts) > 0 { hostCriteria = strings.Split(*hosts, ",") } instanceCriteria := make([]string, 0, 0) if len(*instances) > 0 { instanceCriteria = strings.Split(*instances, ",") } var reg *bool if *registered == "true" { *reg = true } else if *registered == "false" { *reg = false } return &skynet.Criteria{ Regions: regionCriteria, Hosts: hostCriteria, Instances: instanceCriteria, Registered: reg, Services: serviceCriteriaFromCsv(*services), }, args }
func (b *builder) buildProject() { p := path.Join(b.projectPath, b.BuildConfig.AppPath) flags := "-v" if b.BuildConfig.BuildAllPackages { flags = flags + " -a" } fmt.Println("Building packages") out, err := b.term.ExecPath(b.BuildConfig.GoRoot+"/bin/go install "+flags, p) fmt.Println(string(out)) if err != nil { log.Fatal("Failed build: " + err.Error()) } }
// init parses the -config and -uuid flags before any service code
// runs, locates a config file (explicit flag, then the first existing
// entry of defaultConfigFiles), and loads it into the package-level
// conf, falling back to built-in defaults when no file is usable.
func init() {
	flagset := flag.NewFlagSet("config", flag.ContinueOnError)
	flagset.StringVar(&configFile, "config", "", "Config File")
	flagset.StringVar(&uuid, "uuid", "", "uuid")

	// Only the flags this set knows about are parsed; the rest are
	// split off for the service's own flag sets.
	// NOTE(review): the Parse error is silently dropped here.
	args, _ := SplitFlagsetFromArgs(flagset, os.Args[1:])
	flagset.Parse(args)

	// Ensure we have a UUID
	if uuid == "" {
		uuid = NewUUID()
	}

	// No explicit -config: take the first default candidate that exists.
	if configFile == "" {
		for _, f := range defaultConfigFiles {
			if _, err := os.Stat(f); err == nil {
				configFile = f
				break
			}
		}
	}

	// Still no file found anywhere: run on built-in defaults.
	if configFile == "" {
		log.Println(log.ERROR, "Failed to find config file")
		conf = config.NewDefault()
		return
	}

	// An explicitly named file that doesn't exist also falls back to
	// defaults rather than aborting.
	if _, err := os.Stat(configFile); os.IsNotExist(err) {
		log.Println(log.ERROR, "Config file does not exist", err)
		conf = config.NewDefault()
		return
	}

	// A file that exists but fails to parse is fatal (after setting a
	// default conf so later code has something to read).
	var err error
	if conf, err = config.ReadDefault(configFile); err != nil {
		conf = config.NewDefault()
		log.Fatal(err)
	}

	// Set default log level from config, this can be overriden at the service level when the service is created
	if l, err := conf.RawStringDefault("log.level"); err == nil {
		log.SetLogLevel(log.LevelFromString(l))
	}
}
func Start(criteria *skynet.Criteria, args []string) { if len(args) < 1 { fmt.Println("Please provide a service name 'sky start binaryName'") return } hosts, err := skynet.GetServiceManager().ListHosts(criteria) if err != nil { log.Fatal(err) } var wait sync.WaitGroup for _, host := range hosts { wait.Add(1) go func(host string) { fmt.Println("Starting on host: " + host) d := daemon.GetDaemonForHost(host) in := daemon.StartSubServiceRequest{ BinaryName: args[0], Args: shellquote.Join(args[1:]...), // TODO: maybe an optional flag to change this? Registered: true, } out, err := d.StartSubService(in) if err != nil { fmt.Println("Returned Error: " + err.Error()) wait.Done() return } startTemplate.Execute(os.Stdout, out) wait.Done() }(host) } wait.Wait() }
// Daemon will run and maintain skynet services. // // Daemon will run the "SkynetDeployment" service, which can be used // to remotely spawn new services on the host. func main() { config, _ := skynet.GetServiceConfig() log.Println(log.INFO, "Connecting to ZooKeeper: ", os.Getenv("SKYNET_ZOOKEEPER")) skynet.SetServiceManager(zkmanager.NewZookeeperServiceManager(os.Getenv("SKYNET_ZOOKEEPER"), 1*time.Second)) config.Name = "SkynetDaemon" config.Version = "2" deployment := NewSkynetDaemon() s := service.CreateService(deployment, config) deployment.Service = s // handle panic so that we remove ourselves from the pool in case of catastrophic failure defer func() { s.Shutdown() deployment.closeStateFile() if err := recover(); err != nil { e := err.(error) log.Fatal("Unrecovered error occured: " + e.Error()) } }() // Collect Host metrics statTicker := time.Tick((5 * time.Second)) go func() { for _ = range statTicker { deployment.updateHostStats(config.ServiceAddr.IPAddress) } }() // If we pass false here service will not be Registered // we could do other work/tasks by implementing the Started method and calling Register() when we're ready s.Start().Wait() }
func SetDaemonLogLevel(criteria *skynet.Criteria, level string) { hosts, err := skynet.GetServiceManager().ListHosts(criteria) if err != nil { log.Fatal(err) } var wait sync.WaitGroup for _, host := range hosts { wait.Add(1) go func(host string) { d := daemon.GetDaemonForHost(host) in := daemon.LogLevelRequest{ Level: level, } out, err := d.LogLevel(in) if err != nil { fmt.Println("Returned Error: " + err.Error()) wait.Done() return } if out.Ok { fmt.Printf("Set daemon log level to %v on host: %v\n", level, host) } else { fmt.Printf("Failed to set daemon log level to %v on host: %v\n", level, host) } wait.Done() }(host) } wait.Wait() }
func (b *builder) deploy(hosts []string) { for _, host := range hosts { var out []byte var err error if isHostLocal(host) && isHostLocal(b.BuildConfig.Host) { // Built locally, deploying locally fmt.Println("Copying local binary") // First move binary to .old in case it's currently running command := exec.Command("mv", path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName), path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName+".old")) out, err = command.CombinedOutput() if err == nil { fmt.Println(string(out)) command = exec.Command("cp", path.Join(b.BuildConfig.Jail, "bin", path.Base(b.BuildConfig.AppPath)), path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName)) out, err = command.CombinedOutput() } } else if isHostLocal(host) && !isHostLocal(b.BuildConfig.Host) { // Built remotely, deploying locally fmt.Println("Copying binary from build machine") h, p := splitHostPort(b.BuildConfig.Host) // First move binary to .old in case it's currently running command := exec.Command("mv", path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName), path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName+".old")) out, _ = command.CombinedOutput() fmt.Println(string(out)) command = exec.Command("scp", "-P", p, b.BuildConfig.User+"@"+h+":"+path.Join(b.BuildConfig.Jail, "bin", path.Base(b.BuildConfig.AppPath)), path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName)) out, err = command.CombinedOutput() } else if !isHostLocal(host) && isHostLocal(b.BuildConfig.Host) { // Built locally, deploying remotely fmt.Println("Pushing binary to host: " + host) h, p := splitHostPort(host) // First move binary to .old in case it's currently running command := exec.Command("ssh", "-p", p, b.DeployConfig.User+"@"+h, "mv", path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName), path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName+".old")) out, _ = command.CombinedOutput() fmt.Println(string(out)) command = exec.Command("scp", 
"-P", p, path.Join(b.BuildConfig.Jail, "bin", path.Base(b.BuildConfig.AppPath)), b.DeployConfig.User+"@"+h+":"+path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName)) out, err = command.CombinedOutput() } else if !isHostLocal(host) && !isHostLocal(b.BuildConfig.Host) { // Built remotely, deployed remotely fmt.Println("Pushing binary from build box to host: " + host) h, p := splitHostPort(host) // First move binary to .old in case it's currently running out, _ := b.term.Exec("ssh -p " + p + " " + b.DeployConfig.User + "@" + h + " mv " + path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName) + " " + path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName+".old")) fmt.Println(string(out)) out, err = b.term.Exec("scp -P " + p + " " + path.Join(b.BuildConfig.Jail, "bin", path.Base(b.BuildConfig.AppPath)) + " " + b.DeployConfig.User + "@" + h + ":" + path.Join(b.DeployConfig.DeployPath, b.DeployConfig.BinaryName)) } fmt.Println(string(out)) if err != nil { log.Fatal("Failed to deploy: " + err.Error()) } } }
func main() { log.SetLogLevel(log.ERROR) var args []string criteria, args := criteriaFromArgs(os.Args[1:]) if len(args) == 0 { CommandLineHelp() return } switch args[0] { case "help", "h": CommandLineHelp() case "build", "b": flagset := flag.NewFlagSet("build", flag.ExitOnError) configFile := flagset.String("build", "./build.cfg", "build config file") flagsetArgs, _ := config.SplitFlagsetFromArgs(flagset, args) err := flagset.Parse(flagsetArgs) if err != nil { log.Fatal(err) return } Build(*configFile) case "deploy", "d": flagset := flag.NewFlagSet("deploy", flag.ExitOnError) configFile := flagset.String("build", "./build.cfg", "build config file") flagsetArgs, _ := config.SplitFlagsetFromArgs(flagset, args) err := flagset.Parse(flagsetArgs) if err != nil { log.Fatal(err) return } Deploy(*configFile, criteria) case "hosts": ListHosts(criteria) case "regions": ListRegions(criteria) case "services": ListServices(criteria) case "versions": ListVersions(criteria) case "instances": ListInstances(criteria) case "start": Start(criteria, args[1:]) case "stop": Stop(criteria) case "restart": Restart(criteria) case "register": Register(criteria) case "unregister": Unregister(criteria) case "log": SetLogLevel(criteria, args[1]) case "daemon": if len(args) >= 2 { switch args[1] { case "log": if len(args) >= 3 { SetDaemonLogLevel(criteria, args[2]) } else { fmt.Println("Must supply a log level") } case "stop": StopDaemon(criteria) } } else { fmt.Println("Supported subcommands for daemon are log, and stop") } case "cli": InteractiveShell() default: fmt.Println("Unknown Command: ", args[0]) CommandLineHelp() } }