func (s *segment) replay(db *Db) (err os.Error) {
	log.Info("Replaying segment %s", s)

	entrych, errch := s.iter(0)
	end := false
	count := 0
	for !end {
		select {
		case entry, ok := <-entrych:
			if ok {
				count++
				err = entry.mutation.execute(db, true) // execute mutation (for replay)
				if err != nil {
					log.Error("Got an error replaying a mutation: %s", err)
					return
				}
			} else {
				end = true
			}
		case segerr, ok := <-errch:
			if ok {
				return segerr
			} else {
				end = true
			}
		}
	}

	log.Info("Segment %s replayed: %d mutations replayed", s, count)
	return
}
func (fss *FsService) replicationWatcher() {
	for fss.running {
		if fss.replQueue.Len() > 0 {
			fss.replQueueMutex.Lock()
			if fss.replQueue.Len() > 0 {
				next := fss.replQueue.Front()
				fss.replQueue.Remove(next)
				fss.replQueueMutex.Unlock()

				path := next.Value.(*Path)
				localheader := fss.headers.GetFileHeader(path)
				log.Info("%d: FSS: Starting replica download for path %s version %d...", fss.cluster.MyNode.Id, path, localheader.header.Version)

				// TODO: Make sure we don't download the same replica twice...

				// check if the file already exists locally
				file := OpenFile(fss, localheader, 0)
				if !file.Exists() {
					// TODO: Use config to get temp path
					tempfile := fmt.Sprintf("%s/%d.%d.%d.data", os.TempDir(), path.Hash(), time.Nanoseconds(), localheader.header.Version)
					fd, err := os.Create(tempfile)
					if err == nil {
						_, err = fss.Read(path, 0, -1, 0, fd, nil)
						fd.Close()

						if err == nil {
							os.Rename(tempfile, file.datapath)
							log.Info("%d: FSS: Successfully replicated %s version %d locally", fss.cluster.MyNode.Id, path, localheader.header.Version)
						} else {
							log.Error("%d: FSS: Couldn't replicate file %s locally because couldn't read: %s", fss.cluster.MyNode.Id, path, err)
							os.Remove(tempfile) // clean up the partial download
						}
					} else {
						log.Error("%d: FSS: Couldn't open temporary file %s to download replica locally for path %s", fss.cluster.MyNode.Id, tempfile, path)
					}
				} else {
					log.Info("%d: FSS: Local replica for %s version %d already exists", fss.cluster.MyNode.Id, path, localheader.header.Version)
				}
			} else {
				fss.replQueueMutex.Unlock()
			}
		} else {
			// if there are no more replicas in the queue, stop forcing replication
			if fss.replQueue.Len() == 0 {
				fss.replForce = false
			}
		}

		// TODO: Put that in configuration
		if !fss.replForce {
			time.Sleep(100 * 1000 * 1000) // 100ms
		}
	}
}
func (m *segmentManager) replayAll(db *Db) (err os.Error) {
	log.Info("Replaying all segments...")

	for _, seg := range m.segments {
		if seg != nil {
			err = seg.replay(db)
			if err != nil {
				return err
			}
		}
	}

	log.Info("All segments replayed")
	return
}
func openSegment(path string) *segment {
	log.Info("Opening segment file %s", path)

	seg := &segment{
		nextSegments: make([]*segment, 0),
		prevSegments: make([]*segment, 0),
		writable:     false,
		lock:         new(sync.Mutex),
	}

	var err os.Error
	stat, err := os.Stat(path)
	if stat == nil || err != nil {
		log.Fatal("Couldn't stat segment file %s: %s", path, err)
	}

	var from, to Token
	_, err = fmt.Sscanf(stat.Name, "%016X_%04X_%04X.seg", &seg.positionStart, &from, &to)
	if err != nil {
		log.Fatal("Couldn't read segment file name %s: %s", path, err)
	}
	seg.tokens = TokenRange{from, to}

	seg.fd, err = os.Open(path)
	if err != nil {
		log.Fatal("Couldn't open segment %s: %s", path, err)
	}
	seg.typedFd = typedio.NewReadWriter(seg.fd)

	seg.positionEnd = seg.positionStart + uint64(stat.Size)

	return seg
}
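// A minimal sketch of the filename convention parsed above, with hypothetical
// values (this example name and the derived numbers are illustrations, not
// taken from a real data directory):
//
//	seg := openSegment("/data/0000000000001000_0001_00FF.seg")
//	// seg.positionStart == 0x1000 (4096)
//	// seg.tokens == TokenRange{0x0001, 0x00FF}
//	// seg.positionEnd == 4096 + the file's size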
func TestHeaderSize(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestHeaderSize...")

	header, err := tc.nodes[2].Fss.Header(fs.NewPath("/header/should/not/exists"), nil)
	if err != nil {
		t.Errorf("1) Header returned an error: %s\n", err)
	}
	if header.Size > 0 {
		t.Errorf("2) Header Size shouldn't be higher than 0: %d", header.Size)
	}

	byts := []byte("salut!")
	buf := bytes.NewBuffer(byts)
	tc.nodes[3].Fss.Write(fs.NewPath("/header/tests/io/size"), int64(len(byts)), "", buf, nil)

	header, err = tc.nodes[1].Fss.Header(fs.NewPath("/header/tests/io/size"), nil)
	if err != nil {
		t.Errorf("3) Header returned an error: %s\n", err)
	}
	if header.Size != 6 {
		t.Errorf("4) Header Size should be equal to 6: %d", header.Size)
	}

	byts = []byte("ca")
	buf = bytes.NewBuffer(byts)
	tc.nodes[3].Fss.Write(fs.NewPath("/header/tests/io/size"), int64(len(byts)), "", buf, nil)

	header, err = tc.nodes[1].Fss.Header(fs.NewPath("/header/tests/io/size"), nil)
	if err != nil {
		t.Errorf("5) Header returned an error: %s\n", err)
	}
	if header.Size != 2 {
		t.Errorf("6) Header Size should be equal to 2: %d", header.Size)
	}
}
func (m *segmentManager) writeMutation(token Token, mutation *mutation, sync bool) *segmentEntry {
	m.writeMutex.Lock()

	segment := m.getWritableSegment(token)

	// create the entry
	entry := segment.createEntry(token)
	entry.mutation = mutation

	// write the entry
	segment.write(entry, sync)
	mutation.seg = segment
	mutation.segEntry = entry
	mutation.token = entry.token

	// check if the segment can still be written after
	size := segment.positionEnd - segment.positionStart
	if size >= m.segMaxSize {
		segment.sync(true)
		segment.writable = false
		log.Info("Segment %s too big for a new entry. Rotating!", segment)
	}

	m.writeMutex.Unlock()
	return entry
}
func TestHeaderMimeType(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestHeaderMimeType...")

	byts := []byte("salut!")
	buf := bytes.NewBuffer(byts)
	tc.nodes[3].Fss.Write(fs.NewPath("/header/tests/mimetype"), int64(len(byts)), "text/html", buf, nil)

	header, err := tc.nodes[1].Fss.Header(fs.NewPath("/header/tests/mimetype"), nil)
	if err != nil {
		t.Errorf("1) Header returned an error: %s\n", err)
	}
	if header.MimeType != "text/html" {
		t.Errorf("2) Header type should be equal to text/html: %s", header.MimeType)
	}

	byts = []byte("salut!")
	buf = bytes.NewBuffer(byts)
	tc.nodes[3].Fss.Write(fs.NewPath("/header/tests/mimetype"), int64(len(byts)), "text/css", buf, nil)

	header, err = tc.nodes[1].Fss.Header(fs.NewPath("/header/tests/mimetype"), nil)
	if err != nil {
		t.Errorf("3) Header returned an error: %s\n", err)
	}
	if header.MimeType != "text/css" {
		t.Errorf("4) Header type should be equal to text/css: %s", header.MimeType)
	}
}
func TestHeaderExists(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestHeaderExists...")

	//t.Errorf("%s", tc.nodes[1].Cluster.Resolve("/header/should/not/exists").Get(0))
	header, err := tc.nodes[4].Fss.Header(fs.NewPath("/header/should/not/exists"), nil)
	if err != nil {
		t.Errorf("1) Header returned an error: %s\n", err)
	}
	if header.Exists {
		t.Errorf("2) Header Exists shouldn't be true: %v\n", header.Exists)
	}

	header, err = tc.nodes[2].Fss.Header(fs.NewPath("/header/should/not/exists"), nil)
	if err != nil {
		t.Errorf("3) Header returned an error: %s\n", err)
	}
	if header.Exists {
		t.Errorf("4) Header Exists shouldn't be true: %v", header.Exists)
	}

	byts := []byte("salut!")
	buf := bytes.NewBuffer(byts)
	tc.nodes[3].Fss.Write(fs.NewPath("/header/tests/io/exists"), int64(len(byts)), "", buf, nil)

	header, err = tc.nodes[1].Fss.Header(fs.NewPath("/header/tests/io/exists"), nil)
	if err != nil {
		t.Errorf("5) Header returned an error: %s\n", err)
	}
	if !header.Exists {
		t.Errorf("6) Header Exists shouldn't be false: %v", header.Exists)
	}
}
func TestFsChildRemoveDeleteNetworkTimeout(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestFsChildRemoveDeleteNetworkTimeout...")

	resp3, other3 := GetProcessForPath("/test/timeout/delete1", "/test/timeout")
	resp2, other2 := GetProcessForPath("/test/timeout/delete2", "/test/timeout", "/test/timeout/delete2")
	resp1, other1 := GetProcessForPath("/test/timeout")

	buf := buffer.NewFromString("write1")
	resp3.Fss.Write(fs.NewPath("/test/timeout/delete1"), buf.Size, "", buf, nil)
	buf = buffer.NewFromString("write1")
	resp3.Fss.Write(fs.NewPath("/test/timeout/delete2"), buf.Size, "", buf, nil)

	initadr := resp2.Cluster.Nodes.Get(resp1.Cluster.MyNode.Id).Address
	resp2.Cluster.Nodes.Get(resp1.Cluster.MyNode.Id).Address = net.ParseIP("224.0.0.2")
	resp3.Cluster.Nodes.Get(resp1.Cluster.MyNode.Id).Address = net.ParseIP("224.0.0.2")
	time.Sleep(900000000)

	// Delete the child; it should never get removed from the parent
	err := other2.Fss.Delete(fs.NewPath("/test/timeout/delete1"), false, nil)
	if err != nil {
		t.Errorf("1) Received an error: %s", err)
	}
	time.Sleep(900000000)

	children, _ := other1.Fss.Children(fs.NewPath("/test/timeout"), nil)
	found := false
	for _, child := range children {
		if child.Name == "delete1" {
			found = true
		}
	}
	if !found {
		t.Error("2) Child should not have been deleted")
	}

	// Test timeout of the first delete and removal of the second one
	err = other3.Fss.Delete(fs.NewPath("/test/timeout/delete2"), false, nil)
	if err != nil {
		t.Errorf("3) Received an error: %s", err)
	}

	// Set back the address
	resp2.Cluster.Nodes.Get(resp1.Cluster.MyNode.Id).Address = initadr
	resp3.Cluster.Nodes.Get(resp1.Cluster.MyNode.Id).Address = initadr
	time.Sleep(6000000000)

	children, _ = other1.Fss.Children(fs.NewPath("/test/timeout"), nil)
	found1, found2 := false, false
	for _, child := range children {
		if child.Name == "delete2" {
			found2 = true
		} else if child.Name == "delete1" {
			found1 = true
		}
	}
	if found1 || found2 {
		t.Error("4) At least one of the children is still there", found1, found2)
	}
}
func (s *Server) acceptUDP() {
	// Looping for new messages
	for {
		buf := make([]byte, MAX_MSG_SIZE)
		n, adr, err := s.udpsock.ReadFrom(buf)

		if s.comm.running {
			if err != nil {
				log.Error("Error while reading UDP (read %d) from %s: %s\n", n, adr, err)
			} else {
				abcon := net.Conn(s.udpsock)
				connection := NewConnection(s.comm.pool, P_UDP, D_Inbound, abcon)
				read := io.Reader(bytes.NewBuffer(buf))

				msg := s.comm.NewMessage()
				msg.connection = connection
				err := msg.readMessage(read)
				if err != nil {
					log.Error("Couldn't handle message received from UDP because of errors: %s %s\n", msg, err)
				} else {
					go s.comm.handleMessage(msg)
				}
			}
		} else {
			log.Info("Dropping connection because communications have been paused")
		}
	}
}
func TestExists(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestExists...")

	exists, err := tc.nodes[1].Fss.Exists(fs.NewPath("/should/not/exists"), nil)
	if err != nil {
		t.Errorf("1) Exists returned an error: %s\n", err)
	}
	if exists {
		t.Errorf("2) Exists returned bad value: %v", exists)
	}

	exists, err = tc.nodes[2].Fss.Exists(fs.NewPath("/should/not/exists"), nil)
	if err != nil {
		t.Errorf("3) Exists returned an error: %s\n", err)
	}
	if exists {
		t.Errorf("4) Exists returned bad value: %v", exists)
	}

	byts := []byte("salut!")
	buf := bytes.NewBuffer(byts)
	tc.nodes[0].Fss.Write(fs.NewPath("/tests/io/exists"), int64(len(byts)), "", buf, nil)

	exists, err = tc.nodes[2].Fss.Exists(fs.NewPath("/tests/io/exists"), nil)
	if err != nil {
		t.Errorf("5) Exists returned an error: %s\n", err)
	}
	if !exists {
		t.Errorf("6) Exists returned bad value: %v", exists)
	}
}
func TestChildAddReplication(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestChildAddReplication...")

	_, other := GetProcessForPath("/tests/replication/write/childadd")
	resolv := tc.nodes[0].Cluster.Rings.GetGlobalRing().Resolve("/tests/replication/write")
	_, otherparent := GetProcessForPath("/tests/replication/write")

	buf := buffer.NewFromString("write1")
	err := other.Fss.Write(fs.NewPath("/tests/replication/write/childadd"), buf.Size, "application/mytest", buf, nil)
	if err != nil {
		t.Errorf("1) Got an error while write: %s", err)
	}

	// check that it's not on another node
	context := tc.nodes[0].Fss.NewContext()
	context.ForceLocal = true
	children, err := otherparent.Fss.Children(fs.NewPath("/tests/replication/write"), context)
	if err == nil && len(children) != 0 {
		t.Errorf("2) Children should not exist on another node")
	}

	// check on first secondary
	secondaryid := resolv.GetOnline(1).Id
	context = tc.nodes[secondaryid].Fss.NewContext()
	context.ForceLocal = true
	children, err = tc.nodes[secondaryid].Fss.Children(fs.NewPath("/tests/replication/write"), context)
	if err != nil {
		t.Errorf("3) Got an error while fetching children: %s", err)
	}
	found := false
	for _, child := range children {
		if child.Name == "childadd" {
			found = true
		}
	}
	if !found {
		t.Errorf("4) Couldn't find child 'childadd'")
	}

	// check on second secondary
	secondaryid = resolv.GetOnline(2).Id
	context = tc.nodes[secondaryid].Fss.NewContext()
	context.ForceLocal = true
	children, err = tc.nodes[secondaryid].Fss.Children(fs.NewPath("/tests/replication/write"), context)
	if err != nil {
		t.Errorf("5) Got an error while fetching children: %s", err)
	}
	found = false
	for _, child := range children {
		if child.Name == "childadd" {
			found = true
		}
	}
	if !found {
		t.Errorf("6) Couldn't find child 'childadd'")
	}
}
func TestWriteReadReplication(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestWriteReadReplication...")

	_, other := GetProcessForPath("/tests/replication/write/read")
	resolv := tc.nodes[0].Cluster.Rings.GetGlobalRing().Resolve("/tests/replication/write/read")

	buf := buffer.NewFromString("write1")
	err := other.Fss.Write(fs.NewPath("/tests/replication/write/read"), buf.Size, "application/mytest", buf, nil)
	if err != nil {
		t.Errorf("1) Got an error while write: %s", err)
	}

	// test force local on read
	context := tc.nodes[0].Fss.NewContext()
	context.ForceLocal = true
	bufwriter := bytes.NewBuffer(make([]byte, 0))
	writer := io.Writer(bufwriter)
	n, err := other.Fss.Read(fs.NewPath("/tests/replication/write/read"), 0, -1, 0, writer, context)
	if err != fs.ErrorFileNotFound {
		t.Errorf("2) File shouldn't exist on another node: %s", err)
	}

	// force local replication on each secondary
	tc.nodes[resolv.GetOnline(1).Id].Fss.Flush()
	tc.nodes[resolv.GetOnline(2).Id].Fss.Flush()

	// check on first secondary
	secondaryid := resolv.GetOnline(1).Id
	context = tc.nodes[secondaryid].Fss.NewContext()
	context.ForceLocal = true
	bufwriter = bytes.NewBuffer(make([]byte, 0))
	writer = io.Writer(bufwriter)
	n, err = tc.nodes[secondaryid].Fss.Read(fs.NewPath("/tests/replication/write/read"), 0, -1, 0, writer, context)
	if err != nil {
		t.Errorf("3) Got an error from read: %s", err)
	}
	if n != buf.Size || bytes.Compare(bufwriter.Bytes(), buf.Bytes()) != 0 {
		t.Errorf("4) Didn't read what was written: %s!=%s", buf.Bytes(), bufwriter)
	}

	// check on second secondary
	secondaryid = resolv.GetOnline(2).Id
	context = tc.nodes[secondaryid].Fss.NewContext()
	context.ForceLocal = true
	bufwriter = bytes.NewBuffer(make([]byte, 0))
	writer = io.Writer(bufwriter)
	n, err = tc.nodes[secondaryid].Fss.Read(fs.NewPath("/tests/replication/write/read"), 0, -1, 0, writer, context)
	if err != nil {
		t.Errorf("5) Got an error from read: %s", err)
	}
	if n != buf.Size || bytes.Compare(bufwriter.Bytes(), buf.Bytes()) != 0 {
		t.Errorf("6) Didn't read what was written: %s!=%s", buf.Bytes(), bufwriter)
	}
}
func (m *segmentManager) getWritableSegment(token Token) *segment {
	if !m.tokens.IsWithin(token) {
		log.Fatal("Got a token not within range: got %d, range from %d to %d", token, m.tokens.from, m.tokens.to)
	}

	seg := m.timeline.getEndSegment(token)

	// no writable segment found, create one
	if seg == nil || !seg.writable {
		if seg == nil {
			log.Debug("Couldn't find a segment for token %d", token)
		} else {
			log.Debug("Segment for token %d is not writable", token)
		}

		// find the right chunk for this token
		chunkLength := int(math.Ceil(float64(m.tokens.Length()) / SEG_CHUNKING))
		found := false
		chunk := m.tokens
		for !found {
			to := int(chunk.from) + chunkLength
			if to > int(m.tokens.to) { // prevent overflow
				to = int(m.tokens.to)
			}
			chunk = TokenRange{chunk.from, Token(to)}

			if chunk.IsWithin(token) {
				found = true
			} else {
				chunk = TokenRange{chunk.to + 1, m.tokens.to}
			}
		}

		pos := uint64(0)
		if seg != nil {
			pos = seg.positionEnd // TODO: THIS IS NOT GOOD! IT SHOULD TAKE THE BIGGEST END POSITION OF ALL OVERRIDEN SEGMENTS
		}

		log.Info("Creating a new segment for tokens %d to %d @ %d", chunk.from, chunk.to, pos)
		seg = createSegment(m.dataDir, chunk.from, chunk.to, pos)
		m.timeline.addSegment(seg)

		// find an id, assign it to the segment
		for m.segments[m.nextSegId] != nil {
			m.nextSegId++
		}
		seg.id = m.nextSegId
		m.segments[seg.id] = seg
		m.nextSegId++
	}

	return seg
}
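// A worked example of the chunk walk in getWritableSegment, for illustration
// only: the full token range 0..65535, a SEG_CHUNKING value of 16 and a
// Length() of to-from are assumptions, not values confirmed by this package.
//
//	chunkLength := int(math.Ceil(65535.0 / 16.0)) // 4096
//	// the walk visits [0, 4096], [4097, 8193], [8194, 12290], ...
//	// so token 10000 lands in chunk [8194, 12290], and the new segment
//	// created for it covers tokens 8194 to 12290.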
func TestFsChildAddWriteNeedNetworkTimeout(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestFsChildAddWriteNeedNetworkTimeout...")

	resp2, other2 := GetProcessForPath("/tests/timeout2/write")
	resp1, other1 := GetProcessForPath("/tests/timeout2")

	initadr := resp2.Cluster.Nodes.Get(resp1.Cluster.MyNode.Id).Address
	resp2.Cluster.Nodes.Get(resp1.Cluster.MyNode.Id).Address = net.ParseIP("224.0.0.2")

	// Create the child, it should never get added to the parent
	buf := buffer.NewFromString("write1")
	err := other2.Fss.Write(fs.NewPath("/tests/timeout2/write"), buf.Size, "", buf, nil)
	if err != nil && err != comm.ErrorTimeout {
		t.Errorf("1) Received an error: %s", err)
	}
	time.Sleep(900000000)

	children, _ := other1.Fss.Children(fs.NewPath("/tests/timeout2"), nil)
	found := false
	for _, child := range children {
		if child.Name == "write" {
			found = true
		}
	}
	if found {
		t.Error("2) Child should not have been added")
	}

	// Test one timeout + 1 retry, no error
	buf = buffer.NewFromString("write1")
	go func() {
		time.Sleep(1500 * 1000 * 1000)
		// set the address back
		resp2.Cluster.Nodes.Get(resp1.Cluster.MyNode.Id).Address = initadr
	}()
	err = other2.Fss.Write(fs.NewPath("/tests/timeout2/write3"), buf.Size, "", buf, nil)
	if err != nil {
		t.Errorf("3) Received an error: %s", err)
	}

	children, err = other1.Fss.Children(fs.NewPath("/tests/timeout2"), nil)
	found = false
	for _, child := range children {
		if child.Name == "write3" {
			found = true
		}
	}
	if !found {
		t.Error("4) Child should have been added")
	}
}
func (s *Server) acceptTCP() {
	for {
		conn, err := s.tcpsock.Accept()

		if s.comm.running {
			if err != nil {
				log.Error("Couldn't accept TCP connection: %s\n", err)
				continue
			}
			go s.handleTCPConnection(conn)
		} else {
			log.Info("Dropping connection because communications have been paused")
			if conn != nil {
				conn.Close()
			}
		}
	}
}
func TestFsDeleteNetworkTimeout(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestFsDeleteNetworkTimeout...")

	resp, other := GetProcessForPath("/tests/timeouts/delete1")
	buf := buffer.NewFromString("write1")
	other.Fss.Write(fs.NewPath("/tests/timeouts/delete1"), buf.Size, "", buf, nil)

	initadr := other.Cluster.Nodes.Get(resp.Cluster.MyNode.Id).Address
	other.Cluster.Nodes.Get(resp.Cluster.MyNode.Id).Address = net.ParseIP("224.0.0.2")

	// Test full timeout and return of an error
	timeoutTest(1500*4, func() {
		err := other.Fss.Delete(fs.NewPath("/tests/timeouts/delete1"), false, nil)
		if err != comm.ErrorTimeout {
			t.Errorf("1) Didn't receive timeout error: %s", err)
		}
	}, func(returned bool) {
		if !returned {
			t.Error("2) Delete network timeout is endlessly sleeping")
		}
	})

	resp, other = GetProcessForPath("/tests/timeouts/delete2")
	buf = buffer.NewFromString("write1")
	other.Fss.Write(fs.NewPath("/tests/timeouts/delete2"), buf.Size, "", buf, nil)

	// Test one timeout + 1 retry, no error
	timeoutTest(1500*4, func() {
		// wait 500ms, then set the remote node's address back
		go func() {
			time.Sleep(500000000)
			other.Cluster.Nodes.Get(resp.Cluster.MyNode.Id).Address = initadr
		}()

		err := other.Fss.Delete(fs.NewPath("/tests/timeouts/delete2"), false, nil)
		if err == comm.ErrorTimeout {
			t.Errorf("3) Received timeout error: %s", err)
		}
	}, func(returned bool) {
		if !returned {
			t.Error("4) Delete network timeout is endlessly sleeping")
		}
	})
}
func main() {
	// Flags
	configpath := flag.String("config", "gostore.conf", "configuration")
	verbosity := flag.Int("verbosity", 3, "degree of verbosity")
	flag.Parse()

	// Set log verbosity
	log.MaxLevel = *verbosity

	log.Info("Reading config\n")
	config := gostore.LoadConfig(*configpath)

	log.Info("Starting server...\n")
	process.NewProcess(config)
	log.Info("Server started\n")

	http.ListenAndServe(":8080", nil)

	// Wait
	c := make(chan int)
	<-c
}
func TestWriteExistsReplication(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestWriteExistsReplication...")

	_, other := GetProcessForPath("/tests/replication/write/exists")
	resolv := tc.nodes[0].Cluster.Rings.GetGlobalRing().Resolve("/tests/replication/write/exists")

	buf := buffer.NewFromString("write1")
	err := other.Fss.Write(fs.NewPath("/tests/replication/write/exists"), buf.Size, "application/mytest", buf, nil)
	if err != nil {
		t.Errorf("1) Got an error while write: %s", err)
	}

	// test force local on exists
	context := tc.nodes[0].Fss.NewContext()
	context.ForceLocal = true
	exists, err := other.Fss.Exists(fs.NewPath("/tests/replication/write/exists"), context)
	if err != nil {
		t.Errorf("2) Got an error while exists: %s", err)
	}
	if exists {
		t.Errorf("3) Shouldn't exist on another node when forced local")
	}

	// check on first secondary
	secondaryid := resolv.GetOnline(1).Id
	context = tc.nodes[secondaryid].Fss.NewContext()
	context.ForceLocal = true
	exists, err = tc.nodes[secondaryid].Fss.Exists(fs.NewPath("/tests/replication/write/exists"), context)
	if err != nil {
		t.Errorf("4) Got an error while exists: %s", err)
	}
	if !exists {
		t.Errorf("5) Received exists = false, should be true")
	}

	// check on second secondary
	secondaryid = resolv.GetOnline(2).Id
	exists, err = tc.nodes[secondaryid].Fss.Exists(fs.NewPath("/tests/replication/write/exists"), context)
	if err != nil {
		t.Errorf("6) Got an error while exists: %s", err)
	}
	if !exists {
		t.Errorf("7) Received exists = false, should be true")
	}
}
func TestReadWrite(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestReadWrite...")

	// Read write test
	path := fs.NewPath("/tests/io/write1")
	byts := []byte("write1")
	buf := bytes.NewBuffer(byts)
	err := tc.nodes[0].Fss.Write(path, int64(len(byts)), "", buf, nil)
	if err != nil {
		t.Errorf("1) Write returned an error: %s\n", err)
	}

	// Check if read data is the same as written
	bufwriter := bytes.NewBuffer(make([]byte, 0))
	buffer := io.Writer(bufwriter)
	n, err := tc.nodes[2].Fss.Read(path, 0, -1, 0, buffer, nil)
	if n != int64(len(byts)) || bytes.Compare(bufwriter.Bytes(), byts) != 0 {
		t.Errorf("2) Didn't read written data correctly: %s!=%s\n", byts, bufwriter)
	}

	// Read from another node
	bufwriter = bytes.NewBuffer(make([]byte, 0))
	buffer = io.Writer(bufwriter)
	n, err = tc.nodes[6].Fss.Read(path, 0, -1, 0, buffer, nil)
	if n != int64(len(byts)) || bytes.Compare(bufwriter.Bytes(), byts) != 0 {
		t.Errorf("3) Didn't read written data correctly: %s!=%s\n", byts, bufwriter)
	}

	// Rewrite the file with new data
	path = fs.NewPath("/tests/io/write1")
	byts = []byte("this is new data blabla")
	buf = bytes.NewBuffer(byts)
	err = tc.nodes[4].Fss.Write(path, int64(len(byts)), "", buf, nil)
	if err != nil {
		t.Errorf("4) Write returned an error: %s\n", err)
	}

	// Check written data
	bufwriter = bytes.NewBuffer(make([]byte, 0))
	buffer = io.Writer(bufwriter)
	n, err = tc.nodes[9].Fss.Read(path, 0, -1, 0, buffer, nil)
	if n != int64(len(byts)) || bytes.Compare(bufwriter.Bytes(), byts) != 0 {
		t.Errorf("5) Didn't read written data correctly: %s!=%s\n", byts, bufwriter)
	}
}
func TestFsReadNetworkTimeout(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestFsReadNetworkTimeout...")

	bufwriter := bytes.NewBuffer(make([]byte, 0))
	buffer := io.Writer(bufwriter)

	resp, other := GetProcessForPath("/")
	initadr := other.Cluster.Nodes.Get(resp.Cluster.MyNode.Id).Address
	other.Cluster.Nodes.Get(resp.Cluster.MyNode.Id).Address = net.ParseIP("224.0.0.2")

	// Test full timeout and return of an error
	timeoutTest(1500*4, func() {
		_, err := other.Fss.Read(fs.NewPath("/"), 0, -1, 0, buffer, nil)
		if err != comm.ErrorTimeout {
			t.Errorf("1) Didn't receive timeout error: %s", err)
		}
	}, func(returned bool) {
		if !returned {
			t.Error("2) Read network timeout is endlessly sleeping")
		}
	})

	// Test one timeout + 1 retry, no error
	timeoutTest(1500*4, func() {
		// wait 500ms, then set the remote node's address back
		go func() {
			time.Sleep(500000000)
			other.Cluster.Nodes.Get(resp.Cluster.MyNode.Id).Address = initadr
		}()

		_, err := other.Fss.Read(fs.NewPath("/"), 0, -1, 0, buffer, nil)
		if err == comm.ErrorTimeout {
			t.Errorf("3) Received timeout error: %s", err)
		}
	}, func(returned bool) {
		if !returned {
			t.Error("4) Read network timeout is endlessly sleeping")
		}
	})
}
func TestWriteHeaderReplication(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestWriteHeaderReplication...")

	resp, other := GetProcessForPath("/tests/replication/write/header")
	resolv := tc.nodes[0].Cluster.Rings.GetGlobalRing().Resolve("/tests/replication/write/header")

	buf := buffer.NewFromString("write1")
	err := other.Fss.Write(fs.NewPath("/tests/replication/write/header"), buf.Size, "application/mytest", buf, nil)
	if err != nil {
		t.Errorf("1) Got an error while write: %s", err)
	}

	// check header on master
	header, err := resp.Fss.Header(fs.NewPath("/tests/replication/write/header"), nil)
	version := header.Version

	// check on first secondary
	secondaryid := resolv.GetOnline(1).Id
	header, err = tc.nodes[secondaryid].Fss.Header(fs.NewPath("/tests/replication/write/header"), nil)
	if header.Version != version {
		t.Errorf("2) Version on secondary node wasn't the same as on master: %d != %d", version, header.Version)
	}
	if header.MimeType != "application/mytest" {
		t.Errorf("3) Mimetype on secondary node wasn't the same as on master: %s != %s", "application/mytest", header.MimeType)
	}
	if header.Size != buf.Size {
		t.Errorf("4) Size on secondary node wasn't the same as on master: %d != %d", buf.Size, header.Size)
	}

	// check on second secondary
	secondaryid = resolv.GetOnline(2).Id
	header, err = tc.nodes[secondaryid].Fss.Header(fs.NewPath("/tests/replication/write/header"), nil)
	if header.Version != version {
		t.Errorf("5) Version on secondary node wasn't the same as on master: %d != %d", version, header.Version)
	}
	if header.MimeType != "application/mytest" {
		t.Errorf("6) Mimetype on secondary node wasn't the same as on master: %s != %s", "application/mytest", header.MimeType)
	}
	if header.Size != buf.Size {
		t.Errorf("7) Size on secondary node wasn't the same as on master: %d != %d", buf.Size, header.Size)
	}
}
func TestReadNotExists(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestReadNotExists...")

	path := fs.NewPath("/should/not/exists")
	bufwriter := bytes.NewBuffer(make([]byte, 0))
	buffer := io.Writer(bufwriter)

	// Local
	_, err := tc.nodes[1].Fss.Read(path, 0, -1, 0, buffer, nil)
	if err != fs.ErrorFileNotFound {
		t.Errorf("1) Read didn't return that the file doesn't exist: %s\n", err)
	}

	// Remote
	_, err = tc.nodes[5].Fss.Read(path, 0, -1, 0, buffer, nil)
	if err != fs.ErrorFileNotFound {
		t.Errorf("2) Read didn't return that the file doesn't exist: %s\n", err)
	}
}
func TestDeleteNonRecursiveMultipleLevel(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestDeleteNonRecursiveMultipleLevel...")

	byts := []byte("data")
	buf := bytes.NewBuffer(byts)
	tc.nodes[0].Fss.Write(fs.NewPath("/tests/delete/nonrecml/level1/level2/level3"), int64(len(byts)), "", buf, nil)
	time.Sleep(10000000)

	err := tc.nodes[0].Fss.Delete(fs.NewPath("/tests/delete/nonrecml/level1"), false, nil)
	if err != fs.ErrorNotEmpty {
		t.Errorf("1) Should get an error when non-recursively deleting a path that has children")
	}

	exists, _ := tc.nodes[2].Fss.Exists(fs.NewPath("/tests/delete/nonrecml/level1/level2/level3"), nil)
	if !exists {
		t.Errorf("2) Should not have been deleted since the delete was non-recursive on a multi-level path\n")
	}
}
func TestDeleteParentHierarchie(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestDeleteParentHierarchie...")

	path := fs.NewPath("/tests/delete/hierarchie/child1")
	byts := []byte("child1")
	buf := bytes.NewBuffer(byts)
	tc.nodes[2].Fss.Write(path, int64(len(byts)), "", buf, nil)
	time.Sleep(100000000)

	tc.nodes[5].Fss.Delete(path, false, nil)
	time.Sleep(100000000)

	children, _ := tc.nodes[6].Fss.Children(path.ParentPath(), nil)
	for _, child := range children {
		if child.Name == "child1" {
			t.Errorf("1) File hasn't been removed from parent children")
		}
	}
}
func (comm *Comm) handleMessage(message *Message) {
	// TODO: We should make sure we don't handle a message twice (since UDP can duplicate packets)

	if message.FunctionId == FUNC_ERROR {
		log.Info("Comm: Received an error: %s\n", message)
	}

	// Check if the message needed an acknowledgement or if it has an error callback
	handled := false
	if message.SourceNode().Equals(comm.Cluster.MyNode) {
		handled = comm.handleTracker(message)
	}

	// Service #0 is net and there is no implementation yet.
	// Function = RESPONSE should be handled by callback
	if message.ServiceId != 0 && message.FunctionId != FUNC_RESPONSE && !handled {
		serviceWrapper := comm.GetWrapper(message.ServiceId)
		if serviceWrapper.service != nil {
			// if it's an error
			if message.FunctionId == FUNC_ERROR {
				err := ReadErrorPayload(message)
				message.SeekZero()
				serviceWrapper.service.HandleUnmanagedError(message, err)
			} else {
				// call the right function
				handled := serviceWrapper.callFunction(message.FunctionId, message)
				if !handled {
					serviceWrapper.service.HandleUnmanagedMessage(message)
				}
			}
		} else {
			log.Error("Comm: Couldn't find service for message %s\n", message)
		}
	}
}
func (cs *ClusterService) loadCluster() {
	cs.clusterMutex.Lock()

	// Load data
	log.Debug("cls: Loading cluster data...")
	stat, err := os.Stat(cs.clsDataPath)
	if err == nil && stat.IsRegular() {
		file, err := os.Open(cs.clsDataPath)
		if err == nil {
			typedFile := typedio.NewReader(file)

			cs.clusterVersion, _ = typedFile.ReadInt64() // cluster version
			cs.diskVerson = cs.clusterVersion

			nbNodes, _ := typedFile.ReadUint16() // nodes count
			var i uint16
			for i = 0; i < nbNodes; i++ {
				node := cluster.NewEmptyNode()
				node.Unserialize(typedFile)
				node.Status = cluster.Status_Offline
				cs.cluster.MergeNode(node, false) // merge node, doesn't notify
			}
		} else {
			log.Error("cls: Error while opening data file: %s", err)
		}
	}

	// replay commit log
	log.Info("cls: Replaying commit log...")
	cs.commitlog.Replay()

	// TODO: Load cluster data

	cs.clusterMutex.Unlock()
}
func TestDeleteNonRecursive(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestDeleteNonRecursive...")

	path := fs.NewPath("/tests/delete/nonrec/level1")
	byts := []byte("data")
	buf := bytes.NewBuffer(byts)
	tc.nodes[0].Fss.Write(path, int64(len(byts)), "", buf, nil)
	time.Sleep(50000000)

	exists, _ := tc.nodes[2].Fss.Exists(path, nil)
	if !exists {
		t.Errorf("1) Created file should exist\n")
	}

	tc.nodes[0].Fss.Delete(path, false, nil)
	exists, _ = tc.nodes[2].Fss.Exists(path, nil)
	if exists {
		t.Errorf("2) Deleted file shouldn't exist\n")
	}
}
func TestUnicode(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestUnicode...")

	// Read write test
	path := fs.NewPath("/tests/io/writeunicode")
	byts := []byte("Some é unicode à data 世界")
	buf := bytes.NewBuffer(byts)
	err := tc.nodes[0].Fss.Write(path, int64(len(byts)), "", buf, nil)
	if err != nil {
		t.Errorf("1) Write returned an error: %s\n", err)
	}

	// Check if read data is the same as written
	bufwriter := bytes.NewBuffer(make([]byte, 0))
	n, err := tc.nodes[2].Fss.Read(path, 0, -1, 0, io.Writer(bufwriter), nil)
	if n != int64(len(byts)) || bytes.Compare(bufwriter.Bytes(), byts) != 0 {
		t.Errorf("2) Didn't read written data correctly: %s!=%s\n", byts, bufwriter)
	}

	// Test unicode path
	path = fs.NewPath("/école èàaû/世界.txt")
	wrtnData := buffer.NewFromString("some data")
	err = tc.nodes[0].Fss.Write(path, wrtnData.Size, "", wrtnData, nil)
	if err != nil {
		t.Errorf("3) Error while writing: %s\n", err)
	}

	// Read back the written data
	rdData := buffer.NewWithSize(0, false)
	n, err = tc.nodes[1].Fss.Read(path, 0, -1, 0, rdData, nil)
	if n != wrtnData.Size || bytes.Compare(rdData.Bytes(), wrtnData.Bytes()) != 0 {
		t.Errorf("4) Didn't read what had been written: %s!=%s", rdData.Bytes(), wrtnData.Bytes())
	}
}
func TestDeleteRecursive(t *testing.T) {
	SetupCluster()
	log.Info("Testing TestDeleteRecursive...")

	path := fs.NewPath("/tests/delete/rec/level1-a/level2-a")
	byts := []byte("data")
	buf := bytes.NewBuffer(byts)
	tc.nodes[8].Fss.Write(path, int64(len(byts)), "", buf, nil)

	path = fs.NewPath("/tests/delete/rec/level1-a/level2-b")
	byts = []byte("data")
	buf = bytes.NewBuffer(byts)
	tc.nodes[0].Fss.Write(path, int64(len(byts)), "", buf, nil)

	path = fs.NewPath("/tests/delete/rec/level1-b/level2-a")
	byts = []byte("data")
	buf = bytes.NewBuffer(byts)
	tc.nodes[1].Fss.Write(path, int64(len(byts)), "", buf, nil)

	path = fs.NewPath("/tests/delete/rec/level1-b/level2-b")
	byts = []byte("data")
	buf = bytes.NewBuffer(byts)
	tc.nodes[4].Fss.Write(path, int64(len(byts)), "", buf, nil)

	path = fs.NewPath("/tests/delete/rec/level1-c/level2-a")
	byts = []byte("data")
	buf = bytes.NewBuffer(byts)
	tc.nodes[2].Fss.Write(path, int64(len(byts)), "", buf, nil)

	path = fs.NewPath("/tests/delete/rec/level1-c/level2-b")
	byts = []byte("data")
	buf = bytes.NewBuffer(byts)
	tc.nodes[6].Fss.Write(path, int64(len(byts)), "", buf, nil)

	time.Sleep(10000000)

	// 1 level with recursive on
	tc.nodes[5].Fss.Delete(fs.NewPath("/tests/delete/rec/level1-a/level2-a"), true, nil)
	exists, _ := tc.nodes[1].Fss.Exists(fs.NewPath("/tests/delete/rec/level1-a/level2-a"), nil)
	if exists {
		t.Errorf("1) Deleted file shouldn't exist\n")
	}

	// 2 levels with recursive on
	tc.nodes[3].Fss.Delete(fs.NewPath("/tests/delete/rec/level1-a"), true, nil)
	exists, _ = tc.nodes[4].Fss.Exists(fs.NewPath("/tests/delete/rec/level1-a"), nil)
	if exists {
		t.Errorf("2) Deleted file shouldn't exist\n")
	}
	exists, _ = tc.nodes[5].Fss.Exists(fs.NewPath("/tests/delete/rec/level1-a/level2-b"), nil)
	if exists {
		t.Errorf("3) Deleted file shouldn't exist\n")
	}

	// 3 levels with recursive on
	tc.nodes[3].Fss.Delete(fs.NewPath("/tests/delete/rec"), true, nil)
	exists, _ = tc.nodes[4].Fss.Exists(fs.NewPath("/tests/delete/rec"), nil)
	if exists {
		t.Errorf("4) Deleted file shouldn't exist\n")
	}
	exists, _ = tc.nodes[5].Fss.Exists(fs.NewPath("/tests/delete/rec/level1-c"), nil)
	if exists {
		t.Errorf("5) Deleted file shouldn't exist\n")
	}
	exists, _ = tc.nodes[5].Fss.Exists(fs.NewPath("/tests/delete/rec/level1-c/level2-a"), nil)
	if exists {
		t.Errorf("6) Deleted file shouldn't exist\n")
	}
	exists, _ = tc.nodes[5].Fss.Exists(fs.NewPath("/tests/delete/rec/level1-c/level2-b"), nil)
	if exists {
		t.Errorf("7) Deleted file shouldn't exist\n")
	}
}