// TestDiskQueueWriterCorruption verifies that the disk queue reader recovers
// from truncated (corrupted) segment files: valid messages before the
// corruption point are still returned and the reader skips to the next file.
func TestDiskQueueWriterCorruption(t *testing.T) {
	l := newTestLogger(t)
	nsqLog.Logger = l
	dqName := "test_disk_queue_corruption" + strconv.Itoa(int(time.Now().Unix()))
	tmpDir, err := ioutil.TempDir("", fmt.Sprintf("nsq-test-%d", time.Now().UnixNano()))
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpDir)
	// require a non-zero message length for the corrupt (len 0) test below
	dq, _ := newDiskQueueWriter(dqName, tmpDir, 1000, 10, 1<<10, 1)
	dqReader := newDiskQueueReader(dqName, dqName, tmpDir, 1000, 10, 1<<10, 5, 2*time.Second, nil, true)
	defer dqReader.Close()
	defer dq.Close()

	msg := make([]byte, 123) // 127 bytes per message, 8 (1016 bytes) messages per file
	var e BackendQueueEnd
	for i := 0; i < 25; i++ {
		dq.Put(msg)
	}
	dq.Flush()
	// advance the reader's view to everything written so far
	e = dq.GetQueueReadEnd()
	dqReader.UpdateQueueEnd(e, false)
	equal(t, dq.(*diskQueueWriter).diskWriteEnd.TotalMsgCnt(), int64(25))

	// corrupt the 2nd file
	dqFn := dq.(*diskQueueWriter).fileName(1)
	os.Truncate(dqFn, 500) // 3 valid messages, 5 corrupted

	for i := 0; i < 19; i++ { // 1 message leftover in 4th file
		m, _ := dqReader.TryReadOne()
		equal(t, m.Data, msg)
		equal(t, m.Err, nil)
	}

	// corrupt the 4th (current) file
	dqFn = dq.(*diskQueueWriter).fileName(3)
	os.Truncate(dqFn, 100)

	dq.Put(msg) // in 5th file
	dq.Flush()
	e = dq.GetQueueReadEnd()
	dqReader.UpdateQueueEnd(e, true)
	readResult, _ := dqReader.TryReadOne()
	equal(t, readResult.Data, msg)

	// write a corrupt (len 0) message at the 5th (current) file
	dq.(*diskQueueWriter).writeFile.Write([]byte{0, 0, 0, 0})

	// force a new 6th file - put into 5th, then readOne errors, then put into 6th
	dq.Put(msg)
	dq.Put(msg)
	dq.Flush()
	e = dq.GetQueueReadEnd()
	dqReader.UpdateQueueEnd(e, true)
	readResult, _ = dqReader.TryReadOne()
	equal(t, readResult.Data, msg)
}
// TestDiskQueueCorruption exercises diskqueue recovery from truncated
// segment files: messages before the corruption point must still be
// readable and the reader must skip past the corrupt tail.
func TestDiskQueueCorruption(t *testing.T) {
	// silence the queue's logging for the duration of the test
	log.SetOutput(ioutil.Discard)
	defer log.SetOutput(os.Stdout)
	dqName := "test_disk_queue_corruption" + strconv.Itoa(int(time.Now().Unix()))
	dq := NewDiskQueue(dqName, os.TempDir(), 1000, 5)
	msg := make([]byte, 123)
	for i := 0; i < 25; i++ {
		dq.Put(msg)
	}
	assert.Equal(t, dq.Depth(), int64(25))

	// corrupt the 2nd file
	dqFn := dq.(*DiskQueue).fileName(1)
	os.Truncate(dqFn, 500)

	// read the messages that survive the corruption
	for i := 0; i < 19; i++ {
		assert.Equal(t, <-dq.ReadChan(), msg)
	}

	// corrupt the 4th (current) file
	dqFn = dq.(*DiskQueue).fileName(3)
	os.Truncate(dqFn, 100)

	// a fresh put lands past the corruption and must be readable
	dq.Put(msg)
	assert.Equal(t, <-dq.ReadChan(), msg)
}
// TestDiskQueueCorruption (GoConvey style) checks that the disk queue
// recovers from truncated segment files, then deletes the queue files.
func TestDiskQueueCorruption(t *testing.T) {
	Convey("TestDiskQueueCorruption", t, func() {
		l := log.New(os.Stderr, "", log.LstdFlags)
		dqName := "test_disk_queue_corruption" + strconv.Itoa(int(time.Now().Unix()))
		dq := newDiskQueue(dqName, os.TempDir(), 1000, 5, 2*time.Second, l)
		msg := make([]byte, 123)
		for i := 0; i < 25; i++ {
			dq.Put(msg)
		}
		So(dq.Depth(), ShouldEqual, int64(25))

		// corrupt the 2nd file
		dqFn := dq.(*diskQueue).fileName(1)
		os.Truncate(dqFn, 500)

		// the surviving messages are still delivered in order
		for i := 0; i < 19; i++ {
			So(string(<-dq.ReadChan()), ShouldEqual, string(msg))
		}

		// corrupt the 4th (current) file
		dqFn = dq.(*diskQueue).fileName(3)
		os.Truncate(dqFn, 100)

		// a new put after the corruption must still come through
		dq.Put(msg)
		So(string(<-dq.ReadChan()), ShouldEqual, string(msg))

		// remove the queue's files from os.TempDir()
		dq.Delete()
	})
}
// TestDiskQueueCorruption verifies corruption recovery using a dedicated
// temp directory that is removed when the test finishes.
func TestDiskQueueCorruption(t *testing.T) {
	l := newTestLogger(t)
	dqName := "test_disk_queue_corruption" + strconv.Itoa(int(time.Now().Unix()))
	tmpDir, err := ioutil.TempDir("", fmt.Sprintf("nsq-test-%d", time.Now().UnixNano()))
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpDir)
	dq := newDiskQueue(dqName, tmpDir, 1000, 5, 2*time.Second, l)
	msg := make([]byte, 123)
	for i := 0; i < 25; i++ {
		dq.Put(msg)
	}
	equal(t, dq.Depth(), int64(25))

	// corrupt the 2nd file
	dqFn := dq.(*diskQueue).fileName(1)
	os.Truncate(dqFn, 500)

	// messages before the corruption point remain readable
	for i := 0; i < 19; i++ {
		equal(t, <-dq.ReadChan(), msg)
	}

	// corrupt the 4th (current) file
	dqFn = dq.(*diskQueue).fileName(3)
	os.Truncate(dqFn, 100)

	// a put after the corruption must still be delivered
	dq.Put(msg)
	equal(t, <-dq.ReadChan(), msg)
}
// TestDiskQueueCorruption verifies corruption recovery of the disk queue.
// NOTE(review): this variant writes its queue files into os.TempDir() and
// never removes them — the sibling test above uses a per-run temp dir with
// cleanup; consider doing the same here.
func TestDiskQueueCorruption(t *testing.T) {
	l := newTestLogger(t)
	dqName := "test_disk_queue_corruption" + strconv.Itoa(int(time.Now().Unix()))
	dq := newDiskQueue(dqName, os.TempDir(), 1000, 5, 2*time.Second, l)
	msg := make([]byte, 123)
	for i := 0; i < 25; i++ {
		dq.Put(msg)
	}
	equal(t, dq.Depth(), int64(25))

	// corrupt the 2nd file
	dqFn := dq.(*diskQueue).fileName(1)
	os.Truncate(dqFn, 500)

	// messages before the corruption point remain readable
	for i := 0; i < 19; i++ {
		equal(t, <-dq.ReadChan(), msg)
	}

	// corrupt the 4th (current) file
	dqFn = dq.(*diskQueue).fileName(3)
	os.Truncate(dqFn, 100)

	// a put after the corruption must still be delivered
	dq.Put(msg)
	equal(t, <-dq.ReadChan(), msg)
}
// TestDiskQueueCorruption exercises recovery from truncated segment files.
// (Comments translated from Chinese.)
func TestDiskQueueCorruption(t *testing.T) {
	dqName := "test_disk_queue_corruption" + strconv.Itoa(int(time.Now().Unix()))
	tmpDir, err := ioutil.TempDir("", fmt.Sprintf("diskqueue-test-%d", time.Now().UnixNano()))
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpDir)
	dq := newDiskQueue(dqName, tmpDir, 1000, 10, 1<<10, 5, 2*time.Second)
	defer dq.Close()

	// message length 123 + 4-byte header = 127 bytes,
	// so each 1<<10 (1024) byte file holds at most 8 messages
	msg := make([]byte, 123) // 127 bytes per message, 8 (1016 bytes) messages per file
	for i := 0; i < 25; i++ {
		dq.Put(msg)
	}
	// 25 messages occupy 4 files: 8, 8, 8, 1
	Equal(t, dq.Depth(), int64(25))

	// corrupt the 2nd file:
	// truncating it to 500 bytes drops its last 5 messages,
	// leaving 20 readable messages in total
	dqFn := dq.(*diskQueue).fileName(1)
	// shrink the file on disk
	os.Truncate(dqFn, 500) // 3 valid messages, 5 corrupted
	// 8 + 3 + 8:
	// read 19 messages; the single message in the 4th file is left unread
	for i := 0; i < 19; i++ {
		Equal(t, <-dq.ReadChan(), msg) // 1 message leftover in 4th file
	}

	// corrupt the 4th (current) file:
	// truncating it to 100 bytes makes the next read error,
	// so the reader skips ahead to the following file.
	// NOTE(review): per the original comment, inserting a long enough
	// time.Sleep here would let the data be read first without an error.
	dqFn = dq.(*diskQueue).fileName(3)
	os.Truncate(dqFn, 100)
	// the 4th file is corrupt, so reading jumps straight to the 5th file
	dq.Put(msg) // in 5th file
	Equal(t, <-dq.ReadChan(), msg)

	// write a zero-length (corrupt) message into the 5th file
	dq.(*diskQueue).writeFile.Write([]byte{0, 0, 0, 0})
	// this put lands in the 5th file
	dq.Put(msg)
	// reading the corrupt entry errors, forcing a 6th file; this put lands there
	dq.Put(msg)
	Equal(t, <-dq.ReadChan(), msg)
}
// Clear the entire file and resize it to initial size. func (file *DataFile) Clear() (err error) { if err = file.Buf.Unmap(); err != nil { return } else if err = os.Truncate(file.Path, 0); err != nil { return } else if err = os.Truncate(file.Path, int64(file.Growth)); err != nil { return } else if file.Buf, err = gommap.Map(file.Fh, gommap.RDWR, 0); err != nil { return } file.Used, file.Size = 0, file.Growth tdlog.Infof("%s cleared: %d of %d bytes in-use", file.Path, file.Used, file.Size) return }
// saveVolumeIndex save volumes index info to disk. func (s *Store) saveVolumeIndex() (err error) { var ( tn, n int v *volume.Volume ) if _, err = s.vf.Seek(0, os.SEEK_SET); err != nil { log.Errorf("vf.Seek() error(%v)", err) return } for _, v = range s.Volumes { if n, err = s.vf.WriteString(fmt.Sprintf("%s\n", string(v.Meta()))); err != nil { log.Errorf("vf.WriteString() error(%v)", err) return } tn += n } if err = s.vf.Sync(); err != nil { log.Errorf("vf.Sync() error(%v)", err) return } if err = os.Truncate(s.conf.Store.VolumeIndex, int64(tn)); err != nil { log.Errorf("os.Truncate() error(%v)", err) } return }
func (bl *Binlog) Archive(path string) error { bl.wrMutex.Lock() defer bl.wrMutex.Unlock() e := bl.f.Close() if e != nil { return e } srcFile, e := os.OpenFile(bl.f.Name(), os.O_RDONLY, 0777) if e != nil { return e } newFile, e := os.Create(path) if e != nil { return e } _, e = io.Copy(newFile, srcFile) if e != nil { return e } e = srcFile.Close() if e != nil { return e } e = os.Truncate(srcFile.Name(), 0) bl.f, e = os.OpenFile(srcFile.Name(), os.O_WRONLY|os.O_APPEND, 0777) if e != nil { return e } return nil }
// testRepair truncates cutSize bytes off the end of the table file named
// name, reopens it with newTableReader (which is expected to repair the
// damage), and verifies the first 10 logs survive intact and the data
// file is restored to size s.
func testRepair(t *testing.T, name string, index int64, s int64, cutSize int64, useMmap bool) {
	var r *tableReader
	var err error
	// damage the file by cutting its tail
	if err := os.Truncate(name, s-cutSize); err != nil {
		t.Fatal(err)
	}
	// reopening triggers the repair path
	if r, err = newTableReader(path.Dir(name), index, useMmap); err != nil {
		t.Fatal(err)
	}
	defer r.Close()

	var ll Log
	// logs 1..10 must survive with their original size and payload
	for i := 0; i < 10; i++ {
		if err := r.GetLog(uint64(i+1), &ll); err != nil {
			t.Fatal(err, i)
		} else if len(ll.Data) != 4096 {
			t.Fatal(len(ll.Data))
		} else if ll.Data[0] != byte(i+1) {
			t.Fatal(ll.Data[0])
		}
	}

	// a log past the repaired range must not be readable
	if err := r.GetLog(12, &ll); err == nil {
		t.Fatal("must nil")
	}

	if s != int64(r.data.Size()) {
		t.Fatalf("repair error size %d != %d", s, r.data.Size())
	}
}
func (bm *BackingStore) Create(id string, quota int64) (string, error) { log := bm.Logger.Session("create", lager.Data{"id": id, "quota": quota}) path := bm.backingStorePath(id) f, err := os.Create(path) if err != nil { return "", fmt.Errorf("creating the backing store file: %s", err) } f.Close() if quota == 0 { return "", errors.New("cannot have zero sized quota") } if err := os.Truncate(path, quota); err != nil { return "", fmt.Errorf("truncating the file returned error: %s", err) } output, err := exec.Command("mkfs.ext4", "-O", "^has_journal", "-F", path, ).CombinedOutput() if err != nil { log.Error("formatting-file", err, lager.Data{"path": path, "output": string(output)}) return "", fmt.Errorf("formatting filesystem: %s", err) } return path, nil }
// DeleteActivity removes the activity with the given id from the CSV file
// by copying all bytes that follow the activity's line down over it and
// truncating the file to the new, shorter length.
// Returns ErrNotFound when no line with that id exists.
func (db *Csv) DeleteActivity(id int64) (err error) {
	var pos int64
	var line []byte
	// locate the byte offset and raw text of the activity's line
	pos, line, err = db.findActivityLine(id)
	if err == io.EOF {
		return ErrNotFound
	}
	/* read data past the line */
	var data []byte
	data, err = db.readAll(pos + int64(len(line)))
	if err != nil {
		return
	}
	// overwrite the deleted line with the trailing data
	err = db.writeBytes(pos, data)
	if err != nil {
		return
	}
	// NOTE(review): only the truncate is guarded by the mutex; the
	// find/read/write sequence above is not — confirm callers serialize
	// access to the file.
	db.Mutex.Lock()
	err = os.Truncate(db.Filename, pos+int64(len(data)))
	db.Mutex.Unlock()
	return
}
// Clear the entire file and resize it to initial size. func (file *DataFile) Clear() (err error) { if err = file.Close(); err != nil { return } if err = os.Truncate(file.Path, 0); err != nil { return } if err = os.Truncate(file.Path, int64(file.Growth)); err != nil { return } if err = file.Reopen(); err != nil { return } tdlog.Infof("%s cleared: %d of %d bytes in-use", file.Path, file.Used, file.Size) return }
// copyFileSizeSparse copies file source to destination dest. func copyFileSizeSparse(source string, dest string, allocatedSize int64) (err error) { df, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, 0644) if err != nil { return err } err = os.Truncate(dest, allocatedSize) if err != nil { return err } err = df.Close() if err != nil { return err } if err != nil { return err } createWriterTruncator := func(file *os.File) (writer io.Writer) { return NewSparseFilesWriter(NewSeekBufferedWriter(file, ioBufferSize)) } createBufReader := func(file2 *os.File) (writer io.Reader) { return bufio.NewReaderSize(file2, ioReadBufferSize) } return doCopy(source, dest, createBufReader, createWriterTruncator) }
func fileWriter(t *testing.T, file *os.File, logs []string) { filename := file.Name() time.Sleep(1 * time.Second) // wait for start Tail... for _, line := range logs { if strings.Index(line, RotateMarker) != -1 { log.Println("fileWriter: rename file => file.old") os.Rename(filename, filename+".old") file.Close() file, _ = os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644) log.Println("fileWriter: re-opened file") } else if strings.Index(line, TruncateMarker) != -1 { time.Sleep(1 * time.Second) log.Println("fileWriter: truncate(file, 0)") os.Truncate(filename, 0) file.Seek(int64(0), os.SEEK_SET) } _, err := file.WriteString(line) log.Print("fileWriter: wrote ", line) if err != nil { log.Println("write failed", err) } time.Sleep(1 * time.Millisecond) } file.Close() }
// PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { if _, err := d.WriteStream(ctx, subPath, 0, bytes.NewReader(contents)); err != nil { return err } return os.Truncate(d.fullPath(subPath), int64(len(contents))) }
// Setattr handles FUSE attribute-change requests. When the request shrinks
// the file, the local chunk cache is trimmed to match: the new final chunk
// is truncated to its remaining partial length and every cache file for a
// chunk past the new end is removed.
func (f *FILE) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
	log.Debugf("Setattr: Path %v, file %v, valid %v", f.Entry.Prefix, f.Entry.Name, req.Valid)
	entry, err := entrySetAttr(f.RData, f.Entry, req)
	if err != nil {
		log.Errorf("Setattr: error %v", err)
		return err
	}
	// Size truncate, so adjust numChunks
	if entry.Stat.Size < f.Entry.Stat.Size {
		// bytes left in the (new) final chunk; this mask assumes
		// ChunkSize is a power of two — TODO confirm
		lastChunkSize := entry.Stat.Size & int64(f.RData.Config.ChunkSize-1)
		if lastChunkSize > 0 {
			// shrink the cache file for the new last chunk
			os.Truncate(f.cacheName+"."+strconv.FormatInt(entry.NumChunks-1, 10), lastChunkSize)
		}
		// delete cache files for chunks beyond the new chunk count
		for i := entry.NumChunks; i < f.Entry.NumChunks; i++ {
			os.Remove(f.cacheName + "." + strconv.FormatInt(i, 10))
		}
	}
	f.Entry = entry
	// reflect the updated attributes back to the kernel
	resp.Attr.Mode = f.Entry.Stat.Mode
	resp.Attr.Size = uint64(f.Entry.Stat.Size)
	resp.Attr.Uid = f.Entry.Stat.Uid
	resp.Attr.Gid = f.Entry.Stat.Gid
	return nil
}
func setup() { var err error rootDir, err = ioutil.TempDir("/var/tmp", "btrfs-test-") if err != nil { log.Fatalf("Cannot create tmp directory, err=%s", err) } mount = path.Join(rootDir, "btrfs") if err := os.MkdirAll(mount, 0700); err != nil { log.Fatalf("ERROR: MkdirAll %s, err=%s", mount, err) } imageFileName := filepath.Join(rootDir, "btrfs.img") ioutil.WriteFile(imageFileName, []byte("datadatadata"), 0700) os.Truncate(imageFileName, 1024*1024*1024) // 1GB if err := run("mkfs.btrfs", imageFileName); err != nil { log.Fatalf("ERROR: mkfs.btrfs %s, err=%s", imageFileName, err) } if err := run("mount", imageFileName, mount); err != nil { log.Fatalf("ERROR: mount %s %s, err=%s", imageFileName, mount, err) } }
// Opens the log file and reads existing entries. The log can remain open and
// continue to append entries to the end of the log.
// On a decode failure that is not EOF, the file is truncated to the last
// cleanly decoded byte offset so the corrupt tail is discarded.
func (l *Log) open(path string) error {
	// Read all the entries from the log if one exists.
	var readBytes int64 // bytes successfully decoded so far (truncation point)
	var err error

	debugln("log.open.open ", path)
	// open log file
	l.file, err = os.OpenFile(path, os.O_RDWR, 0600)
	l.path = path

	if err != nil {
		// if the log file does not exist before
		// we create the log file and set commitIndex to 0
		if os.IsNotExist(err) {
			l.file, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)
			debugln("log.open.create ", path)
			return err
		}
		return err
	}
	debugln("log.open.exist ", path)

	// Read the file and decode entries.
	for {
		// Instantiate log entry and decode into it.
		entry, _ := newLogEntry(l, 0, 0, nil)
		// remember where this entry starts in the file
		entry.Position, _ = l.file.Seek(0, os.SEEK_CUR)

		n, err := entry.decode(l.file)
		if err != nil {
			if err == io.EOF {
				debugln("open.log.append: finish ")
			} else {
				// corrupt entry: cut the file back to the last good byte
				if err = os.Truncate(path, readBytes); err != nil {
					return fmt.Errorf("raft.Log: Unable to recover: %v", err)
				}
			}
			break
		}
		if entry.Index > l.startIndex {
			// Append entry.
			l.entries = append(l.entries, entry)
			// replay already-committed entries through the state machine
			if entry.Index <= l.commitIndex {
				command, err := newCommand(entry.CommandName, entry.Command)
				if err != nil {
					continue
				}
				l.ApplyFunc(command)
			}
			debugln("open.log.append log index ", entry.Index)
		}
		readBytes += int64(n)
	}
	l.results = make([]*logResult, len(l.entries))
	debugln("open.log.recovery number of log ", len(l.entries))
	return nil
}
// testTruncate mounts a single-file FS, truncates the child file to toSize
// with os.Truncate, and asserts the filesystem received a SetattrRequest
// carrying exactly that size with the SetattrSize flag set.
func testTruncate(t *testing.T, toSize int64) {
	t.Parallel()
	f := &truncate{}
	mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.ChildMap{"child": f}})
	if err != nil {
		t.Fatal(err)
	}
	defer mnt.Close()

	err = os.Truncate(mnt.Dir+"/child", toSize)
	if err != nil {
		t.Fatalf("Truncate: %v", err)
	}

	gotr := f.RecordedSetattr()
	// the zero value means no Setattr request ever reached the FS
	if gotr == (fuse.SetattrRequest{}) {
		t.Fatalf("no recorded SetattrRequest")
	}
	if g, e := gotr.Size, uint64(toSize); g != e {
		t.Errorf("got Size = %q; want %q", g, e)
	}
	// mask out SetattrLockOwner before comparing the Valid flags
	if g, e := gotr.Valid&^fuse.SetattrLockOwner, fuse.SetattrSize; g != e {
		t.Errorf("got Valid = %q; want %q", g, e)
	}
	t.Logf("Got request: %#v", gotr)
}
// TestUnionFsTruncateTimestamp checks that truncating a file through the
// union mount updates its modification time to (close to) the time of the
// truncate call rather than keeping the original write's timestamp.
func TestUnionFsTruncateTimestamp(t *testing.T) {
	wd, clean := setupUfs(t)
	defer clean()

	contents := "hello"
	fn := wd + "/mnt/y"
	err := ioutil.WriteFile(fn, []byte(contents), 0644)
	if err != nil {
		t.Fatalf("WriteFile failed: %v", err)
	}
	// ensure a measurable gap between the write mtime and the truncate mtime
	time.Sleep(200 * time.Millisecond)

	truncTs := time.Now()
	err = os.Truncate(fn, 3)
	if err != nil {
		t.Fatalf("Truncate failed: %v", err)
	}

	fi, err := os.Lstat(fn)
	if err != nil {
		t.Fatalf("Lstat failed: %v", err)
	}
	// mtime must land within 100ms of the truncate call
	if truncTs.Sub(fi.ModTime()) > 100*time.Millisecond {
		t.Error("timestamp drift", truncTs, fi.ModTime())
	}
}
func (r *Registrar) WriteRegistry() error { r.Lock() defer r.Unlock() // can't truncate a file that does not exist: _, err := os.Stat(r.registryFile) if os.IsExist(err) { err := os.Truncate(r.registryFile, 0) if err != nil { logp.Info("WriteRegistry: os.Truncate: err=%v\n", err) return err } } // if "json.Marshal" or "ioutil.WriteFile" fail then most likely // unifiedbeat does not have access to the registry file jsonState, err := json.Marshal(r.State) if err != nil { logp.Info("WriteRegistry: json.Marshal: err=%v\n", err) return err } // https://golang.org/pkg/io/ioutil/#WriteFile // If the file does not exist, WriteFile creates it with // permissions 0644; otherwise it is truncated. err = ioutil.WriteFile(r.registryFile, jsonState, 0644) if err != nil { logp.Info("WriteRegistry: ioutil.WriteFile: err=%v\n", err) return err } return nil }
func (n *fileNode) Truncate(file fuse.File, size uint64, context *fuse.Context) (code fuse.Status) { if file != nil { return file.Truncate(size) } else if n.backing != "" { return fuse.ToStatus(os.Truncate(n.backing, int64(size))) } return fuse.OK }
func Truncate(name string, size int64) error { f, err := os.Create(name) if err != nil { return err } f.Close() err = os.Truncate(name, size) return err }
// Opens the log file and reads existing entries. The log can remain open and
// continue to append entries to the end of the log.
// If an entry fails to decode, the file is truncated at the last cleanly
// decoded byte so the corrupt tail is discarded.
func (l *Log) Open(path string) error {
	l.mutex.Lock()
	defer l.mutex.Unlock()

	// Read all the entries from the log if one exists.
	// NOTE(review): despite its name, lastIndex accumulates the number of
	// BYTES decoded so far and is used as the truncation offset below.
	var lastIndex int = 0
	if _, err := os.Stat(path); !os.IsNotExist(err) {
		// Open the log file.
		file, err := os.Open(path)
		if err != nil {
			return err
		}
		defer file.Close()
		reader := bufio.NewReader(file)

		// Read the file and decode entries.
		for {
			// stop cleanly at end of file
			if _, err := reader.Peek(1); err == io.EOF {
				break
			}

			// Instantiate log entry and decode into it.
			entry := NewLogEntry(l, 0, 0, nil)
			n, err := entry.Decode(reader)
			if err != nil {
				// decode failure: drop the corrupt tail of the log
				file.Close()
				if err = os.Truncate(path, int64(lastIndex)); err != nil {
					return fmt.Errorf("raft.Log: Unable to recover: %v", err)
				}
				break
			}

			// Apply the command.
			if err = l.ApplyFunc(entry.Command); err != nil {
				file.Close()
				return err
			}

			// Append entry.
			l.entries = append(l.entries, entry)
			l.commitIndex = entry.Index
			lastIndex += n
		}

		file.Close()
	}

	// Open the file for appending.
	var err error
	l.file, err = os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		return err
	}
	return nil
}
func erase(files ...string) { for _, f := range files { if strings.HasPrefix(f, "/var/log/") { os.Truncate(f, 0) } else { os.RemoveAll(f) } } }
// TestDiskQueueCorruption verifies that diskqueue skips corrupted segment
// data: truncated files, a truncated current write file, and a zero-length
// message, in each case continuing to deliver subsequent valid messages.
func TestDiskQueueCorruption(t *testing.T) {
	l := test.NewTestLogger(t)
	dqName := "test_disk_queue_corruption" + strconv.Itoa(int(time.Now().Unix()))
	tmpDir, err := ioutil.TempDir("", fmt.Sprintf("nsq-test-%d", time.Now().UnixNano()))
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpDir)
	// require a non-zero message length for the corrupt (len 0) test below
	dq := newDiskQueue(dqName, tmpDir, 1000, 10, 1<<10, 5, 2*time.Second, l)
	defer dq.Close()

	msg := make([]byte, 123) // 127 bytes per message, 8 (1016 bytes) messages per file
	for i := 0; i < 25; i++ {
		dq.Put(msg)
	}
	test.Equal(t, int64(25), dq.Depth())

	// corrupt the 2nd file
	dqFn := dq.(*diskQueue).fileName(1)
	os.Truncate(dqFn, 500) // 3 valid messages, 5 corrupted

	for i := 0; i < 19; i++ { // 1 message leftover in 4th file
		test.Equal(t, msg, <-dq.ReadChan())
	}

	// corrupt the 4th (current) file
	dqFn = dq.(*diskQueue).fileName(3)
	os.Truncate(dqFn, 100)

	dq.Put(msg) // in 5th file
	test.Equal(t, msg, <-dq.ReadChan())

	// write a corrupt (len 0) message at the 5th (current) file
	dq.(*diskQueue).writeFile.Write([]byte{0, 0, 0, 0})

	// force a new 6th file - put into 5th, then readOne errors, then put into 6th
	dq.Put(msg)
	dq.Put(msg)

	test.Equal(t, msg, <-dq.ReadChan())
}
func (list PagedList) truncate() { if !list.exists() { return } if list.listType == listTypeRolling { list.rollingTruncate() return } os.Truncate(list.Filename(0), 0) }
func (me *memNode) Truncate(file File, size uint64, context *Context) (code Status) { if file != nil { return file.Truncate(size) } me.info.Size = int64(size) err := os.Truncate(me.filename(), int64(size)) me.info.Ctime_ns = time.Nanoseconds() return ToStatus(err) }
// TestMemUnionFsTruncate verifies that truncating a file through the union
// mount shortens a file that originates in the read-only layer.
func TestMemUnionFsTruncate(t *testing.T) {
	wd, _, clean := setupMemUfs(t)
	defer clean()

	// seed the read-only layer, then truncate via the union mount
	writeToFile(wd+"/ro/file", "hello")
	os.Truncate(wd+"/mnt/file", 2)
	content := readFromFile(wd + "/mnt/file")
	if content != "he" {
		t.Errorf("unexpected content %v", content)
	}
}