Example #1
0
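// Open an existing segment file, parsing its start position and token range
// from the file name. The returned segment is not writable.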
func openSegment(path string) *segment {
	log.Info("Opening segment file %s", path)

	seg := &segment{
		nextSegments: make([]*segment, 0),
		prevSegments: make([]*segment, 0),
		writable:     false,
		lock:         new(sync.Mutex),
	}

	var err os.Error

	stat, err := os.Stat(path)
	if stat == nil || err != nil {
		log.Fatal("Couldn't stat segment file %s: %s", path, err)
	}

	var from, to Token
	_, err = fmt.Sscanf(stat.Name, "%016X_%04X_%04X.seg", &seg.positionStart, &from, &to)
	if err != nil {
		log.Fatal("Couldn't read segment file name %s: %s", path, err)
	}
	seg.tokens = TokenRange{from, to}

	seg.fd, err = os.Open(path)
	if err != nil {
		log.Fatal("Couldn't open segment %s: %s", path, err)
	}
	seg.typedFd = typedio.NewReadWriter(seg.fd)
	seg.positionEnd = seg.positionStart + uint64(stat.Size)

	return seg
}
Example #2
0
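// Create the cluster service, reading its data directory and master ring from
// the config and registering the cluster mutation on the commit log.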
func NewClusterService(comm *comm.Comm, config gostore.Config, sconfig gostore.ConfigService) *ClusterService {
	cs := new(ClusterService)

	cs.sconfig = sconfig
	cs.config = config
	cs.comm = comm
	cs.cluster = comm.Cluster
	cs.clusterMutex = new(sync.Mutex)
	cs.serviceId = sconfig.Id

	cs.state = state_booting

	if path, ok := sconfig.CustomConfig["DataDir"].(string); ok {
		cs.dataDir = path
	} else {
		log.Fatal("CS: Config 'DataDir' must be specified")
	}
	cs.clsDataPath = fmt.Sprintf("%s/cluster.db", cs.dataDir)

	if masterRing, ok := sconfig.CustomConfig["MasterRing"].(float64); ok {
		cs.masterRing = byte(masterRing)
	} else {
		log.Fatal("CS: Config 'MasterRing' must be specified")
	}

	// persistence
	cs.commitlog = commitlog.New(cs.dataDir)
	cs.commitlog.RegisterMutation(&clusterMutation{cs, []*cluster.Node{}, 0, false})

	return cs
}
Example #3
0
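// Save the cluster state (version and nodes) to a temporary file, then
// atomically rename it over the cluster data file.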
func (cs *ClusterService) saveCluster() {
	cs.clusterMutex.Lock()

	tempPath := fmt.Sprintf("%s/%d", os.TempDir(), time.Nanoseconds())
	file, err := os.Create(tempPath)
	if err != nil {
		log.Fatal("Couldn't open temp cluster data file", err)
	}

	typedFile := typedio.NewWriter(file)

	typedFile.WriteInt64(cs.clusterVersion) // cluster version
	cs.diskVerson = cs.clusterVersion

	typedFile.WriteUint16(cs.cluster.Nodes.Count()) // nodes count
	for node := range cs.cluster.Nodes.Iter() {
		node.Serialize(typedFile)
	}

	file.Close()

	err = os.Rename(tempPath, cs.clsDataPath)
	if err != nil {
		log.Fatal("Couldn't rename %s to %s", tempPath, cs.clsDataPath)
	}

	cs.clusterMutex.Unlock()
}
Example #4
0
// Merge a node into the cluster, notifying of any modification
func (c *Cluster) MergeNode(nNode *Node, notify bool) {
	oNode := c.Nodes.Get(nNode.Id)
	if oNode != nil {
		oNode.Address = nNode.Address
		oNode.Adhoc = nNode.Adhoc
		oNode.TcpPort = nNode.TcpPort
		oNode.UdpPort = nNode.UdpPort

		if oNode.Status != nNode.Status {
			switch oNode.Status {
			case Status_Offline:
				if nNode.Status == Status_Joining {
					oNode.Status = Status_Joining
					if notify {
						c.Notifier.NotifyNodeJoining(oNode)
					}
				} else {
					log.Fatal("Invalid node status transition")
				}

			case Status_Joining:
				if nNode.Status == Status_Online {
					oNode.Status = Status_Online
					if notify {
						c.Notifier.NotifyNodeOnline(oNode)
					}

				} else {
					log.Fatal("Invalid node status transition")
				}
			default:
				log.Fatal("Unsupported node status transition")
			}
		}

		// TODO: Rings

	} else {
		c.Nodes.Add(nNode)
		if notify {
			if nNode.Status == Status_Online {
				c.Notifier.NotifyNodeOnline(nNode)
			} else {
				c.Notifier.NotifyNodeJoining(nNode)
			}
		}
	}
}
Example #5
0
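// Execute a mutation: append it as a record to the commit log, update the
// write pointer in the header, then apply it.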
func (cl *CommitLog) Execute(mutation Mutation) {
	typ := reflect.TypeOf(mutation)
	mutInfo, found := cl.mutationsType[typ]

	if !found {
		log.Fatal("CommitLog: Tried to execute an unknown mutation: %s of type %s", mutation, typ)
	}

	// write the modification into a buffer
	buf := buffer.New()
	buf.WriteUint8(mutInfo.id)   // mutation id
	mutation.Serialize(buf)      // mutation record
	buf.WriteInt64(buf.Size + 8) // record length

	// write record to disk
	buf.Seek(0, 0)
	cl.fdMutex.Lock()
	cl.fd.Seek(cl.writePtr, 0)
	io.Copyn(cl.fd, buf, buf.Size)

	// update commit log write pointer
	cl.fd.Seek(HEADER_WRITE_POS, 0)
	cl.writePtr += buf.Size
	cl.typedFd.WriteInt64(cl.writePtr)
	cl.fdMutex.Unlock()

	// execute the mutation
	mutation.Execute()
}
Example #6
0
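// Create a new writable segment file for the given token range, starting at the given position.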
func createSegment(dataDir string, token_from, token_to Token, position uint64) *segment {
	seg := &segment{
		nextSegments:  make([]*segment, 0),
		prevSegments:  make([]*segment, 0),
		tokens:        TokenRange{token_from, token_to},
		positionStart: position,
		positionEnd:   position,
		lock:          new(sync.Mutex),
	}

	var err os.Error

	filePath := fmt.Sprintf("%s/%016X_%04X_%04X.seg", dataDir, position, token_from, token_to)
	seg.fd, err = os.Create(filePath)
	if err != nil {
		log.Fatal("Couldn't open segment %s: %s", filePath, err)
	}
	seg.typedFd = typedio.NewReadWriter(seg.fd)
	seg.writable = true

	seg.buf = bufio.NewWriter(seg.fd)
	seg.typedBuf = typedio.NewWriter(seg.buf)

	return seg
}
Example #7
0
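// Load the JSON config at the given path, stripping // comment lines before unmarshalling.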
func LoadConfig(path string) Config {
	config := new(Config)

	fc, err := os.Open(path)
	if err != nil {
		log.Error("Couldn't load config file: %s", err)
	}

	buf := new(bytes.Buffer)
	buf.ReadFrom(fc)

	strOrigConfig := string(buf.Bytes())
	strFinConfig := ""
	lines := strings.Split(strOrigConfig, "\n", -1)
	for _, line := range lines {
		trimmedLine := strings.TrimSpace(line)
		if len(trimmedLine) < 2 || trimmedLine[:2] != "//" {
			strFinConfig += trimmedLine
		}
	}

	err = json.Unmarshal([]byte(strFinConfig), config)

	if err != nil {
		log.Fatal("Couldn't unmarshal config: %s %s", strFinConfig, err)
	}

	return *config
}
Example #8
0
// Resolves the nodes that are managers for a given token. The number of
// nodes returned depends on the replication factor of the ring
// and the number of nodes in the ring.
//
// Ex: 	if only 1 node is in the ring and using a replication factor
// 		of 2, only 1 node will be returned
func (cr *Ring) ResolveToken(token string) *ResolveResult {
	res := new(ResolveResult)
	res.token = token

	// resolve the key in the ring
	ringElem := cr.ring.ResolveString(token)
	if ringElem == nil {
		log.Fatal("Cluster: Got no element in ring %s for resolving of %s", cr, token)
	}

	// add the first node found
	firstNode := (ringElem.Value).(*Node)
	res.Add(firstNode)

	// iterate until we have the replication factor we want
	var curNode *Node
	for res.Count() < cr.repFactor && curNode != firstNode {
		nextElem := ringElem.Next()

		curNode = (nextElem.Value).(*Node)
		res.Add(curNode)

		ringElem = nextElem
	}

	return res
}
Example #9
0
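// Create the file system service: read its config (data directory, API address,
// ring), create the API and start the replication watcher.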
func NewFsService(comm *comm.Comm, sconfig *gostore.ConfigService) *FsService {
	fss := new(FsService)

	fss.running = true
	fss.sconfig = sconfig
	fss.headers = newFileHeaders(fss)
	fss.comm = comm
	fss.cluster = comm.Cluster
	fss.serviceId = sconfig.Id

	datadir, ok := sconfig.CustomConfig["DataDir"]
	if !ok {
		log.Fatal("FSS: DataDir config should be setted!")
	}
	fss.dataDir = datadir.(string)

	// file mutexes
	fss.mutexes = make(map[string]*sync.Mutex)
	fss.mapMutex = new(sync.Mutex)

	apiAddress, ok := sconfig.CustomConfig["ApiAddress"]
	if !ok {
		log.Fatal("FSS: ApiAddress config should be setted!")
	}
	fss.apiAddress = apiAddress.(string)

	ringid, ok := sconfig.CustomConfig["RingId"]
	if ok {
		fss.ring = fss.cluster.Rings.GetRing(uint8(ringid.(float64)))
	} else {
		fss.ring = fss.cluster.Rings.GetGlobalRing()
	}

	// create the api
	fss.api = createApi(fss)

	// TODO: Start a local timeout tracker
	// TODO: Start a garbage collector

	// start replication watcher
	// TODO: We should be able to start as many watchers as we have cores (or as many as we need!)
	fss.replQueue = list.New()
	fss.replQueueMutex = new(sync.Mutex)
	go fss.replicationWatcher()

	return fss
}
Example #10
0
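// Send a message to the first node of a resolve result.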
func (comm *Comm) SendFirst(res *cluster.ResolveResult, message *Message) {
	node := res.GetFirst()
	if node != nil {
		comm.SendNode(node, message)
		return
	}

	log.Fatal("Couldn't send message (%s) to first (%s). No node found.\n", message, node)
}
Example #11
0
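// Map a service function name to its wire id, offset by the reserved function ids.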
func (wrapper *serviceWrapper) functionName2Id(name string) byte {
	id, found := wrapper.name2id[name]

	if !found {
		log.Fatal("Method %s not found in service #%d!!", name, wrapper.id)
		return 0
	}

	return id + RESERVED_FUNCTIONS
}
Example #12
0
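// Send a message to one online node of a resolve result.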
func (comm *Comm) SendOne(res *cluster.ResolveResult, message *Message) {
	// TODO: Round robin
	node := res.GetOnline(0)
	if node != nil {
		comm.SendNode(node, message)
		return
	}

	log.Fatal("Couldn't send the message (%s) to one. No node found.\n", message)
}
Example #13
0
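// Start the service server: listen on the TCP and UDP addresses and accept connections in the background.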
func (s *Server) start() {
	var err os.Error

	log.Debug("ServiceServer: starting listening tcp socket on %s\n", s.tcpaddr)
	s.tcpsock, err = net.ListenTCP("tcp", s.tcpaddr)
	if err != nil {
		log.Fatal("Couldn't create TCP server listener: %s\n", err)
	}

	go s.acceptTCP()

	log.Debug("ServiceServer: starting listening udp socket on %s\n", s.udpaddr)
	s.udpsock, err = net.ListenUDP("udp", s.udpaddr)
	if err != nil {
		log.Fatal("Couldn't create UDP server listener: %s\n", err)
	}

	go s.acceptUDP()
}
Example #14
0
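// Get the writable segment for the given token, creating a new segment covering
// the right token chunk if none exists or the current one isn't writable.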
func (m *segmentManager) getWritableSegment(token Token) *segment {
	if !m.tokens.IsWithin(token) {
		log.Fatal("Got a token not within range: got %d, range from %d to %d", token, m.tokens.from, m.tokens.to)
	}

	seg := m.timeline.getEndSegment(token)

	// no writable segment found, create one
	if seg == nil || !seg.writable {
		if seg == nil {
			log.Debug("Couldn't find a segment for token %d", token)
		} else {
			log.Debug("Segment for token %d is not writable", token)
		}

		// find the right chunk for this token
		chunkLength := int(math.Ceil(float64(m.tokens.Length()) / SEG_CHUNKING))

		found := false
		chunk := m.tokens
		for !found {
			to := int(chunk.from) + chunkLength
			if to > int(m.tokens.to) { // prevent overflow
				to = int(m.tokens.to)
			}
			chunk = TokenRange{chunk.from, Token(to)}

			if chunk.IsWithin(token) {
				found = true
			} else {
				chunk = TokenRange{chunk.to + 1, m.tokens.to}
			}
		}

		pos := uint64(0)
		if seg != nil {
			pos = seg.positionEnd // TODO: THIS IS NOT GOOD! IT SHOULD TAKE THE BIGGEST END POSITION OF ALL OVERRIDEN SEGMENTS
		}

		log.Info("Creating a new segment for tokens %d to %d @ %d", chunk.from, chunk.to, pos)
		seg = createSegment(m.dataDir, chunk.from, chunk.to, pos)
		m.timeline.addSegment(seg)

		// find an id, assign it to the segment
		for m.segments[m.nextSegId] != nil {
			m.nextSegId++
		}
		seg.id = m.nextSegId
		m.segments[seg.id] = seg
		m.nextSegId++
	}

	return seg
}
Example #15
0
// Create rings from config
func newRingsConfig(ringConfigs []gostore.ConfigRing, globalRing byte) *Rings {
	rings := newRings()
	rings.globalRing = globalRing

	for _, confring := range ringConfigs {
		rings.AddRing(confring.Id, NewRing(int(confring.ReplicationFactor)))
	}

	if rings.GetRing(globalRing) == nil {
		log.Fatal("Specified global ring doesn't exist!!")
	}

	return rings
}
Example #16
0
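// Read one mutation record from the reader: its id, its payload and the total record size.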
func (cl *CommitLog) readOne(reader typedio.Reader) (mutInfo mutationInfo, size int64) {
	// read mutation id
	id, err := reader.ReadUint8()
	if err != nil {
		log.Fatal("CommitLog: Couldn't read mutation from commit log: %s", err)
	}

	// get the right mutation for the read id
	mutInfo, found := cl.mutations[id]
	if !found {
		log.Fatal("CommitLog: Cannot find mutation id %d!", id)
	}

	// read the mutation
	mutInfo.mutation.Unserialize(reader)

	// read size of the mutation record
	size, err = reader.ReadInt64()
	if err != nil && err != os.EOF {
		log.Fatal("CommitLog: Couldn't read size of mutation from commit log: %s", err)
	}

	return mutInfo, size
}
Example #17
0
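// Register a service under its configured id, wrapping it so its functions can be resolved by name and id.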
func (services *Services) AddService(service Service, sconfig gostore.ConfigService) {
	wrapper := new(serviceWrapper)
	wrapper.service = service
	wrapper.name2id = make(map[string]byte)
	wrapper.id2method = make([]*reflect.Method, 255)

	if sconfig.Id == 0 {
		log.Fatal("Service id cannot be 0")
	}

	wrapper.id = sconfig.Id
	services.wrappers[sconfig.Id] = wrapper
	services.service2wrapper[service] = wrapper

	wrapper.registerFunctions()
}
Example #18
0
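// Create a commit log in the given directory, opening the existing log file and
// its header if present, or creating and initializing a new one.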
func New(directory string) *CommitLog {
	cl := new(CommitLog)

	cl.mutations = make(map[byte]mutationInfo)
	cl.mutationsType = make(map[reflect.Type]mutationInfo)

	cl.fdMutex = new(sync.Mutex)

	// check if the directory exists, create it otherwise
	dir, err := os.Stat(directory)
	if dir == nil || !dir.IsDirectory() {
		os.Mkdir(directory, 0777)
	}

	// check if the log file exists and is at least the header size
	path := directory + "/000.log"
	dir, err = os.Stat(path)
	existed := false
	if dir != nil && err == nil && dir.IsRegular() && dir.Size > HEADER_END {
		existed = true
	}

	// open the log
	flag := os.O_RDWR | os.O_CREATE
	cl.fd, err = os.OpenFile(path, flag, 0777)
	if err != nil {
		log.Fatal("CommitLog: Cannot open commit log file %s: %s", path, err)
	}
	cl.typedFd = typedio.NewReadWriter(cl.fd)

	// if commit log existed, read header
	if existed {
		cl.writePtr, _ = cl.typedFd.ReadInt64()
		cl.commitPtr, _ = cl.typedFd.ReadInt64()

	} else {
		// else, write header
		cl.writePtr = HEADER_END
		cl.commitPtr = HEADER_END

		cl.typedFd.WriteInt64(cl.writePtr)
		cl.typedFd.WriteInt64(cl.commitPtr)
	}

	return cl
}
Example #19
0
// Returns an API Server
func NewServer(handler Handler, adr string) *Server {
	server := new(Server)
	server.handler = handler
	server.servmux = http.NewServeMux()

	con, err := net.Listen("tcp", adr)
	if err != nil {
		log.Error("API: Couldn't create listener socket: %s\n", err)
	}

	// Add handling function at root, delegating everything to the handler
	server.servmux.HandleFunc("/", func(httpresp http.ResponseWriter, httpreq *http.Request) {
		log.Debug("API: Connection on url %s\n", httpreq.URL)

		resp := &ResponseWriter{httpresp}

		var req *Request
		if req = NewRequest(resp, httpreq); req == nil {
			log.Error("API: Couldn't create request object")
			return
		}

		handler.Handle(resp, req)

		// TODO: Remove that! Shouldn't be here!!
		// Read the rest, so we don't cause Broken Pipe on the other end if we don't read to the end
		ioutil.ReadAll(req.Body)
	})

	// Start serving the API on another thread
	go func() {
		log.Debug("API: Starting API server on adr %s\n", adr)
		err := http.Serve(con, server.servmux)
		con.Close()

		if err != nil {
			log.Fatal("API: Serve error: %s", err.String())
		}
	}()

	return server
}
Example #20
0
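// Contact the master of the master ring, sending it our node so it can be merged into the cluster.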
func (cs *ClusterService) ContactMaster() {
	myNode := cs.cluster.MyNode
	log.Debug("%d: Contacting master...", myNode.Id)

	msg := cs.comm.NewMsgMessage(cs.serviceId)
	msg.Function = "RemoteContactMaster"

	err := cs.cluster.MyNode.Serialize(msg.Message)
	if err != nil {
		log.Fatal("Couldn't marshal my node: %s", err)
	}

	msg.OnResponse = func(msg *comm.Message) {
		// TODO: Merge the cluster we got into our cluster
		myNode.Status = cluster.Status_Online
	}

	// send our node to master
	masters := cs.cluster.Rings.GetRing(cs.masterRing).Resolve(master_token)
	cs.comm.SendFirst(masters, msg)
}
Example #21
0
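// Create a process from its config: build the cluster and its nodes, fill the
// rings, create the communication layer and boot the configured services.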
func NewProcess(config gostore.Config) *Process {
	proc := new(Process)
	proc.Config = config

	// Create the cluster instance
	proc.Cluster = cluster.NewCluster(proc.Config)
	nodes := proc.Cluster.Nodes

	// Validate rings
	if len(config.Rings) == 0 {
		log.Fatal("CONFIG: At least one ring must be configured (at least the global)")
	}

	// Generate active nodes
	for _, confnode := range config.Nodes {
		acnode := cluster.NewNode(confnode.NodeId, net.ParseIP(confnode.NodeIP), confnode.TCPPort, confnode.UDPPort)

		for _, confring := range confnode.Rings {
			acnode.AddRing(confring.RingId, confring.Token)
		}

		nodes.Add(acnode)
	}

	// My node
	mynode := nodes.Get(config.CurrentNode)
	proc.Cluster.SetMyNode(mynode)

	// Fill all rings
	proc.Cluster.FillRings()

	// Create services server
	proc.Sc = comm.NewComm(proc.Cluster)

	var oneCls bool
	for _, sconfig := range config.Services {
		sconfig := sconfig // copy: &sconfig below would otherwise alias the shared loop variable
		switch sconfig.Type {
		case "fs":
			log.Debug("Server: creating file system service\n")
			proc.Fss = fs.NewFsService(proc.Sc, &sconfig)
			proc.Sc.AddService(comm.Service(proc.Fss), sconfig)
		case "cls":
			log.Debug("Server: creating cluster service\n")
			proc.Cls = cls.NewClusterService(proc.Sc, config, sconfig)
			proc.Sc.AddService(comm.Service(proc.Cls), sconfig)
			oneCls = true
		}
	}

	// if no cluster service, make all nodes online
	if !oneCls {
		for node := range nodes.Iter() {
			node.Status = cluster.Status_Online // TODO: Should not be here!
		}
	}

	// boot services
	proc.Sc.BootServices()

	return proc
}