// spawnCluster starts a 3-node etcd cluster for the given user and streams
// progress and error messages to that user's buffer channel.
func spawnCluster(userID string) {
	defer func() {
		if err := recover(); err != nil {
			globalCache.perUserID[userID].bufStream <- fmt.Sprintf("[cluster - panic] %+v", err)
			delete(globalCache.perUserID, userID)
			return
		}
	}()

	portPrefix := atomic.LoadInt32(&portStart)
	fs := make([]*etcdproc.Flags, 3)
	for i := range fs {
		df, err := etcdproc.NewFlags(fmt.Sprintf("etcd%d", i+1), globalPorts, int(portPrefix)+i, "etcd-cluster-token", "new", uuid.NewV4().String(), false, false, "", "", "")
		if err != nil {
			globalCache.perUserID[userID].bufStream <- fmt.Sprintf("[cluster - error] %+v", err)
			return
		}
		fs[i] = df
	}
	atomic.AddInt32(&portStart, 7)

	cs, err := etcdproc.CreateCluster(os.Stdout, globalCache.perUserID[userID].bufStream, etcdproc.ToHTML, cmdFlag.EtcdBinary, fs...)
	if err != nil {
		globalCache.perUserID[userID].bufStream <- fmt.Sprintf("[cluster - error] %+v", err)
		return
	}

	globalCache.mu.Lock()
	globalCache.perUserID[userID].cluster = cs
	globalCache.mu.Unlock()

	// This does not run when the program exits with os.Exit(0).
	defer func() {
		cs.RemoveAllDataDirs()
		globalCache.mu.Lock()
		globalCache.perUserID[userID].cluster = nil
		globalCache.mu.Unlock()
	}()

	errChan, done := make(chan error), make(chan struct{})
	go func() {
		globalCache.perUserID[userID].bufStream <- boldHTMLMsg("Starting all of those 3 nodes in default cluster group")
		if err := cs.StartAll(); err != nil {
			errChan <- err
			return
		}
		done <- struct{}{}
	}()

	select {
	case err := <-errChan:
		globalCache.perUserID[userID].bufStream <- fmt.Sprintf("[cluster - error] %+v", err)
	case <-done:
		globalCache.perUserID[userID].bufStream <- boldHTMLMsg("Cluster done!")
	case <-globalCache.perUserID[userID].donec:
		globalCache.perUserID[userID].bufStream <- boldHTMLMsg("Cluster done!")
	case <-time.After(cmdFlag.Timeout):
		globalCache.perUserID[userID].bufStream <- boldHTMLMsg("Cluster time out!")
	}
}
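// NOTE: spawnCluster relies on package-level state defined elsewhere
// (globalCache, globalPorts, portStart, cmdFlag, boldHTMLMsg). The sketch
// below is only a guess at the shape of that state, reconstructed from how it
// is used above; the field names and types here are assumptions, not the
// actual definitions in this package.
//
//	// portStart is the next base port to hand out; it is read and advanced atomically.
//	var portStart int32
//
//	// userData would hold one user's running cluster and its message stream.
//	type userData struct {
//		bufStream chan string       // per-user stream of status/error messages (rendered as HTML)
//		donec     chan struct{}     // signaled when the user's session ends
//		cluster   *etcdproc.Cluster // the user's running cluster; nil when stopped
//	}
//
//	// globalCache would map user IDs to their session state, guarded by mu.
//	var globalCache = struct {
//		mu        sync.Mutex
//		perUserID map[string]*userData
//	}{perUserID: make(map[string]*userData)}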
// CommandFunc runs the demo: it configures a local etcd cluster, starts all
// members, and (unless running in simple mode) exercises the cluster with
// terminate/restart, key-value, stress, watch, stats, and metrics operations.
func CommandFunc(cmd *cobra.Command, args []string) {
	defer func() {
		if err := recover(); err != nil {
			fmt.Fprintln(os.Stdout, "[demo.CommandFunc - panic]", err)
			os.Exit(0)
		}
	}()

	fs := make([]*etcdproc.Flags, cmdFlag.ClusterSize)
	for i := range fs {
		df, err := etcdproc.NewFlags(fmt.Sprintf("etcd%d", i+1), globalPorts, 11+i, "etcd-cluster-token", "new", uuid.NewV4().String(), cmdFlag.IsClientTLS, cmdFlag.IsPeerTLS, cmdFlag.CertPath, cmdFlag.PrivateKeyPath, cmdFlag.CAPath)
		if err != nil {
			fmt.Fprintln(os.Stdout, "exiting with:", err)
			return
		}
		fs[i] = df
	}

	c, err := etcdproc.CreateCluster(os.Stdout, nil, etcdproc.ToTerminal, cmdFlag.EtcdBinary, fs...)
	if err != nil {
		fmt.Fprintln(os.Stdout, "exiting with:", err)
		return
	}

	if cmdFlag.ProcSave {
		f, err := openToOverwrite(cmdFlag.ProcPath)
		if err != nil {
			fmt.Fprintln(os.Stdout, "exiting with:", err)
			return
		}
		c.WriteProc(f)
		f.Close()
	}

	// This does not run when the program exits with os.Exit(0).
	defer c.RemoveAllDataDirs()

	fmt.Fprintf(os.Stdout, "\n")
	fmt.Fprintln(os.Stdout, "####### Starting all of those 3 nodes in default cluster group")
	clusterDone := make(chan struct{})
	go func() {
		defer func() {
			clusterDone <- struct{}{}
		}()
		if err := c.StartAll(); err != nil {
			fmt.Fprintln(os.Stdout, "exiting with:", err)
			return
		}
	}()

	operationDone := make(chan struct{})
	if cmdFlag.IsSimpleSimulation {
		go func() {
			time.Sleep(cmdFlag.Pause)
			fmt.Fprintf(os.Stdout, "\n")
			fmt.Fprintln(os.Stdout, "####### Stress")
			if err := c.Stress(cmdFlag.ConnectionNumber, cmdFlag.ClientNumber, cmdFlag.StressNumber, 15, 15); err != nil {
				fmt.Fprintln(os.Stdout, "exiting with:", err)
				return
			}

			time.Sleep(cmdFlag.Pause)
			fmt.Fprintf(os.Stdout, "\n")
			fmt.Fprintln(os.Stdout, "####### SimpleStress")
			if err := c.SimpleStress(); err != nil {
				fmt.Fprintln(os.Stdout, "exiting with:", err)
				return
			}
		}()
	} else if !cmdFlag.IsSimple {
		go func() {
			defer func() {
				operationDone <- struct{}{}
			}()

			time.Sleep(cmdFlag.Pause)
			fmt.Fprintf(os.Stdout, "\n")
			fmt.Fprintln(os.Stdout, "####### Terminate")
			if err := c.Terminate(nameToTerminate); err != nil {
				fmt.Fprintln(os.Stdout, "exiting with:", err)
				return
			}

			// Stress here to trigger log compaction
			// (make the terminated node fall behind).
			time.Sleep(cmdFlag.Pause)
			fmt.Fprintf(os.Stdout, "\n")
			fmt.Fprintln(os.Stdout, "####### Restart")
			if err := c.Restart(nameToTerminate); err != nil {
				fmt.Fprintln(os.Stdout, "exiting with:", err)
				return
			}

			key, val := []byte("sample_key"), []byte("sample_value")

			time.Sleep(cmdFlag.Pause)
			fmt.Fprintf(os.Stdout, "\n")
			fmt.Fprintln(os.Stdout, "####### Put")
			if err := c.Put(key, val); err != nil {
				fmt.Fprintln(os.Stdout, "exiting with:", err)
				return
			}

			time.Sleep(cmdFlag.Pause)
			fmt.Fprintf(os.Stdout, "\n")
			fmt.Fprintln(os.Stdout, "####### Range")
			if err := c.Range(key); err != nil {
				fmt.Fprintln(os.Stdout, "exiting with:", err)
				return
			}

			time.Sleep(cmdFlag.Pause)
			fmt.Fprintf(os.Stdout, "\n")
			fmt.Fprintln(os.Stdout, "####### Stress")
			if err := c.Stress(cmdFlag.ConnectionNumber, cmdFlag.ClientNumber, cmdFlag.StressNumber, 15, 15); err != nil {
				fmt.Fprintln(os.Stdout, "exiting with:", err)
				return
			}

			time.Sleep(cmdFlag.Pause)
			fmt.Fprintf(os.Stdout, "\n")
			fmt.Fprintln(os.Stdout, "####### SimpleStress")
			if err := c.SimpleStress(); err != nil {
				fmt.Fprintln(os.Stdout, "exiting with:", err)
				return
			}

			time.Sleep(cmdFlag.Pause)
			fmt.Fprintf(os.Stdout, "\n")
			fmt.Fprintln(os.Stdout, "####### WatchAndPut")
			if err := c.WatchAndPut(cmdFlag.ConnectionNumber, cmdFlag.ClientNumber, cmdFlag.StressNumber); err != nil {
				fmt.Fprintln(os.Stdout, "exiting with:", err)
				return
			}

			if !cmdFlag.IsClientTLS { // TODO: not working for now
				time.Sleep(cmdFlag.Pause)
				fmt.Fprintf(os.Stdout, "\n")
				fmt.Fprintln(os.Stdout, "####### GetStats #1")
				vm, ne, err := c.GetStats()
				if err != nil {
					fmt.Fprintln(os.Stdout, "exiting with:", err)
					return
				}
				fmt.Fprintf(os.Stdout, "Endpoint To Stats: %+v\n", vm)
				fmt.Fprintf(os.Stdout, "Name To Endpoint : %+v\n", ne)

				fmt.Fprintf(os.Stdout, "\n")
				fmt.Fprintln(os.Stdout, "####### GetStats #2")
				endpoints := []string{}
				for _, endpoint := range ne {
					endpoints = append(endpoints, endpoint)
				}
				sort.Strings(endpoints)
				vm2, ne2, err := etcdproc.GetStats(endpoints...)
				if err != nil {
					fmt.Fprintln(os.Stdout, "exiting with:", err)
					return
				}
				fmt.Fprintf(os.Stdout, "Endpoint To Stats: %+v\n", vm2)
				fmt.Fprintf(os.Stdout, "Name To Endpoint : %+v\n", ne2)

				time.Sleep(cmdFlag.Pause)
				fmt.Fprintf(os.Stdout, "\n")
				fmt.Fprintln(os.Stdout, "####### GetMetrics #1")
				{
					vm, ne, err := c.GetMetrics()
					if err != nil {
						fmt.Fprintln(os.Stdout, "exiting with:", err)
						return
					}
					for n, mm := range vm {
						var fb uint64
						if fv, ok := mm["etcd_storage_db_total_size_in_bytes"]; ok {
							fb = uint64(fv)
						}
						fmt.Fprintf(os.Stdout, "%s: etcd_storage_keys_total = %f\n", n, mm["etcd_storage_keys_total"])
						fmt.Fprintf(os.Stdout, "%s: etcd_storage_db_total_size_in_bytes = %s\n", n, humanize.Bytes(fb))
					}
					fmt.Fprintf(os.Stdout, "Name To Endpoint: %+v\n", ne)

					fmt.Fprintf(os.Stdout, "\n")
					fmt.Fprintln(os.Stdout, "####### GetMetrics #2")
					endpoints := []string{}
					for _, endpoint := range ne {
						endpoints = append(endpoints, endpoint)
					}
					sort.Strings(endpoints)
					vm2, ne2, err := etcdproc.GetMetrics(endpoints...)
					if err != nil {
						fmt.Fprintln(os.Stdout, "exiting with:", err)
						return
					}
					for n, mm := range vm2 {
						var fb uint64
						if fv, ok := mm["etcd_storage_db_total_size_in_bytes"]; ok {
							fb = uint64(fv)
						}
						fmt.Fprintf(os.Stdout, "%s: etcd_storage_keys_total = %f\n", n, mm["etcd_storage_keys_total"])
						fmt.Fprintf(os.Stdout, "%s: etcd_storage_db_total_size_in_bytes = %s\n", n, humanize.Bytes(fb))
					}
					fmt.Fprintf(os.Stdout, "Name To Endpoint : %+v\n", ne2)
				}
				fmt.Println()
			}
		}()
	}

	select {
	case <-clusterDone:
		fmt.Fprintln(os.Stdout, "[demo.CommandFunc END] etcd cluster terminated!")
		return
	case <-operationDone:
		fmt.Fprintln(os.Stdout, "[demo.CommandFunc END] operation terminated!")
		return
	case <-time.After(cmdFlag.Timeout):
		fmt.Fprintln(os.Stdout, "[demo.CommandFunc END] timed out!")
		return
	}
}