func TestAppendReader(t *testing.T) {
	kv := NewAppend(KV_APPENDFILE)
	kv.Set("a", []byte("foo"))
	if err := <-kv.Sync(); err != nil {
		t.Error(err)
	}
	syscall.Sync()

	r := NewAppend(KV_APPENDFILE)
	if a := r.Get("a"); string(a) != "foo" {
		t.Error(a)
	}
	r.Close()

	kv.Set("a", []byte("bar"))
	if err := <-kv.Sync(); err != nil {
		t.Error(err)
	}
	syscall.Sync()

	r = NewAppend(KV_APPENDFILE)
	if a := r.Get("a"); string(a) != "bar" {
		t.Error(a)
	}
	r.Close()

	kv.Close()
	os.Remove(KV_APPENDFILE)
}
func SysInit() error {
	cfg, err := config.LoadConfig()
	if err != nil {
		return err
	}

	_, err = config.ChainCfgFuncs(cfg,
		loadImages,
		func(cfg *config.CloudConfig) (*config.CloudConfig, error) {
			p, err := compose.GetProject(cfg, false)
			if err != nil {
				return cfg, err
			}
			return cfg, p.Up()
		},
		func(cfg *config.CloudConfig) (*config.CloudConfig, error) {
			syscall.Sync()
			return cfg, nil
		},
		func(cfg *config.CloudConfig) (*config.CloudConfig, error) {
			log.Infof("RancherOS %s started", config.VERSION)
			return cfg, nil
		})
	return err
}
func TestFileParallel(t *testing.T) {
	kv1 := NewFile(KV_FILE)
	kv1.Set("a", []byte("foo"))
	kv1.Set("b", []byte("bar"))
	<-kv1.Sync()

	kv2 := NewFile(KV_FILE)
	if a := kv2.Get("a"); string(a) != "foo" {
		t.Error(a)
	}

	kv1.Set("a", []byte("baz"))
	<-kv1.Sync()
	syscall.Sync()

	// A new reader should see the new value.
	kv3 := NewFile(KV_FILE)
	if a := kv3.Get("a"); string(a) != "baz" {
		t.Error(a)
	}
	kv3.Close()

	// The old reader should still see the previous value.
	if a := kv2.Get("a"); string(a) != "foo" {
		t.Error(a)
	}
	kv2.Close()

	os.Remove(KV_FILE)
}
func reboot() { log.Infof("Rebooting the system") if debugLevel > 0 { log.Info("Squashing reboot for debug init") return } syscall.Sync() syscall.Reboot(syscall.LINUX_REBOOT_CMD_RESTART) }
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s", Help)
		os.Exit(1)
	}
	flag.Parse()

	// Nothing can fail here.
	syscall.Sync()
}
// exit cleanly shuts down the system func halt() { log.Infof("Powering off the system") if strings.HasSuffix(os.Args[0], "-debug") { log.Info("Squashing power off for debug tether") return } syscall.Sync() syscall.Reboot(syscall.LINUX_REBOOT_CMD_POWER_OFF) }
func doRestartApp() {
	time.Sleep(1)
	syscall.Sync()
	out, err := exec.Command("/bin/systemctl", "restart", "stratux").Output()
	if err != nil {
		log.Printf("restart error: %s\n%s", err.Error(), out)
	} else {
		log.Printf("restart: %s\n", out)
	}
}
// exit cleanly shuts down the system func halt() { log.Infof("Powering off the system") if debugLevel > 0 { log.Info("Squashing power off for debug init") return } syscall.Sync() syscall.Reboot(syscall.LINUX_REBOOT_CMD_POWER_OFF) }
func reboot() { log.Infof("Rebooting the system") if strings.HasSuffix(os.Args[0], "-debug") { log.Info("Squashing reboot for debug init") return } syscall.Sync() syscall.Reboot(syscall.LINUX_REBOOT_CMD_RESTART) }
func luksUnmount() (err error) {
	args := []string{conf.mountPoint}
	cmd := "/bin/umount"

	status.Log(syslog.LOG_NOTICE, "unmounting encrypted volume on %s", conf.mountPoint)

	syscall.Sync()
	_, err = execCommand(cmd, args, true, "")

	return
}
func reboot(code int) {
	err := shutDownContainers()
	if err != nil {
		log.Error(err)
	}
	syscall.Sync()
	err = syscall.Reboot(code)
	if err != nil {
		log.Fatal(err)
	}
}
func (store *dirStore) Flush() <-chan error {
	// In the dir store, Flush cannot really report errors: it does not know
	// which write goroutines are currently running, so it cannot collect their
	// errors, and syscall.Sync() does not return an error either. The error
	// channel exists only to satisfy the interface and to let callers wait for
	// Flush() to complete.
	c := make(chan error)
	go func() {
		store.wg.Wait()
		syscall.Sync()
		close(c)
	}()
	return c
}
// tincStatServer serves data pulled from the tinc log file.
// Tinc logs connection and network information after receiving USR1 and USR2.
// The output currently looks like:
//
// {
//   "total_bytes_in": 115324,
//   "total_bytes_out": 67990,
//   "connections": [
//     {
//       "name": "some_random_node",
//       "ip": "192.0.2.15",
//       "port": 2003
//     }
//   ]
// }
func tincStatServer(w http.ResponseWriter, req *http.Request) {
	// Get the tinc pid.
	tincPid, err := findTincPid()
	if err != nil {
		log.Fatalf("findTincPid: %s", err)
	}

	// Get the first list of lines.
	lines1, err := readLines("/var/log/tinc/tinc.log")
	if err != nil {
		log.Fatalf("readLines: %s", err)
	}

	// Send the signals.
	usr12(tincPid)

	// Make sure the data is flushed to the file before re-reading it.
	syscall.Sync()

	// Get the second list of lines.
	lines2, err := readLines("/var/log/tinc/tinc.log")
	if err != nil {
		log.Fatalf("readLines: %s", err)
	}

	// Print and collect the lines that are new in the second set.
	var loglines []string
	for i, line := range lines2 {
		if !list_contains(line, lines1) {
			fmt.Println(i, line)
			loglines = append(loglines, line)
		}
	}

	// Convert the raw log lines into a tincstat object.
	ts, err := parseTincStat(loglines)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	// Create the JSON representation of the tinc status.
	data, err := json.MarshalIndent(ts, " ", "")
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	// Write the HTTP response headers and body.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	io.WriteString(w, string(data))
}
func TestAppendOverwrite(t *testing.T) {
	kv := NewAppend(KV_APPENDFILE)
	kv.Set("a", []byte("foo"))
	kv.Set("a", []byte("bar"))
	kv.Set("a", []byte("baz"))
	kv.Close()
	syscall.Sync()

	kv = NewAppend(KV_APPENDFILE)
	if a := kv.Get("a"); string(a) != "baz" {
		t.Error(a)
	}
	os.Remove(KV_APPENDFILE)
}
func RunWatchdog() {
	log.Println("started watchdog")
	for {
		watchmutex.Lock()
		if watchlock != "" && watchcmd != nil {
			syscall.Sync()
			_, err := os.Stat(watchlock)
			if err != nil {
				log.Println(err)
				log.Println("killing process behind", watchlock)
				errK := watchcmd.Process.Kill()
				if errK != nil {
					log.Println(errK)
				}
				watchlock = ""
				watchcmd = nil
			}
		}
		watchmutex.Unlock()
		time.Sleep(*flag_poll)
	}
}
func TestLarge(t *testing.T) {
	dir, err := ioutil.TempDir("", "large")
	require.NoError(t, err)
	t.Log(dir)

	f, err := OpenLarge(dir, 1<<10)
	require.NoError(t, err)
	defer f.Close()

	data := []byte("hello world")
	n, err := f.WriteAt(data, 0)
	require.NoError(t, err)
	require.Equal(t, len(data), n)

	b := make([]byte, len(data))
	n, err = f.ReadAt(b, 0)
	require.NoError(t, err)
	require.Equal(t, len(data), n)
	require.Equal(t, data, b)

	n, err = f.WriteAt(data, 1020)
	require.NoError(t, err)
	require.Equal(t, len(data), n)

	b = make([]byte, len(data))
	n, err = f.ReadAt(b, 1020)
	require.NoError(t, err)
	require.Equal(t, len(data), n)
	require.Equal(t, data, b)

	syscall.Sync()
	_, err = os.Stat(path.Join(dir, "0"))
	require.NoError(t, err)
	_, err = os.Stat(path.Join(dir, "1"))
	require.NoError(t, err)
}
func SysInit() error {
	cfg, err := config.LoadConfig()
	if err != nil {
		return err
	}

	initFuncs := []config.InitFunc{
		loadImages,
		runContainers,
		func(cfg *config.Config) error {
			syscall.Sync()
			return nil
		},
		func(cfg *config.Config) error {
			log.Infof("RancherOS %s started", config.VERSION)
			return nil
		},
		tailConsole,
	}

	return config.RunInitFuncs(cfg, initFuncs)
}
// BlockDeviceDiscard runs discard for the given path.
// This is used as a workaround for the kernel not discarding blocks
// on the thin pool when we remove a thinp device, so we do it
// manually.
func BlockDeviceDiscard(path string) error {
	file, err := os.OpenFile(path, os.O_RDWR, 0)
	if err != nil {
		return err
	}
	defer file.Close()

	size, err := GetBlockDeviceSize(file)
	if err != nil {
		return err
	}

	if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil {
		return err
	}

	// Without this, the removal of the device that happens after the discard
	// sometimes fails with EBUSY.
	syscall.Sync()

	return nil
}
func main() {
	flag.Usage = usage
	flag.Parse()
	if flag.NArg() != 0 {
		usage()
	}

	syscall.Sync()

	if *pflag && *rflag {
		usage()
	}

	cmd := syscall.LINUX_REBOOT_CMD_HALT
	if *pflag {
		cmd = syscall.LINUX_REBOOT_CMD_POWER_OFF
	}
	if *rflag {
		cmd = syscall.LINUX_REBOOT_CMD_RESTART
	}

	ck(syscall.Reboot(cmd))
}
func SysInit() error {
	cfg, err := config.LoadConfig()
	if err != nil {
		return err
	}

	initFuncs := []config.InitFunc{
		loadImages,
		func(cfg *config.CloudConfig) error {
			return compose.RunServices(cfg)
		},
		func(cfg *config.CloudConfig) error {
			syscall.Sync()
			return nil
		},
		func(cfg *config.CloudConfig) error {
			log.Infof("RancherOS %s started", config.VERSION)
			return nil
		},
	}

	return config.RunInitFuncs(cfg, initFuncs)
}
func handleRebootRequest(w http.ResponseWriter, r *http.Request) {
	syscall.Sync()
	syscall.Reboot(syscall.LINUX_REBOOT_CMD_RESTART)
}
func sync() {
	syscall.Sync()
}
func Sync() {
	if err := syscall.Sync(); err != nil {
		fmt.Fprintf(os.Stderr, "Encountered an error during sync: %v\n", err)
		os.Exit(1)
	}
}
func main() {
	syscall.Sync()
}
func handleShutdownRequest(w http.ResponseWriter, r *http.Request) {
	syscall.Sync()
	syscall.Reboot(syscall.LINUX_REBOOT_CMD_POWER_OFF)
}
func Sync() {
	syscall.Sync()
}
func doReboot() {
	syscall.Sync()
	syscall.Reboot(syscall.LINUX_REBOOT_CMD_RESTART)
}