Example #1
func (s *Splicer) splice(direction string, inFD, outFD int, wg *sync.WaitGroup) {
	// Signal to the caller that we're done
	defer func() {
		// If a reliable delivery socket has data associated with it when a close takes place, the system continues to attempt data transfer.
		if err := syscall.Shutdown(inFD, SHUT_RDWR); err != nil {
			log.Printf("Shutdown err %v", err)
		}
		if err := syscall.Shutdown(outFD, SHUT_RDWR); err != nil {
			log.Printf("Shutdown err %v", err)
		}
		if wg != nil {
			wg.Done()
		}
	}()

	pipe := make([]int, 2)
	if err := syscall.Pipe(pipe); err != nil {
		log.Fatal(err)
	}
	defer func() {
		syscall.Close(pipe[0])
		syscall.Close(pipe[1])
	}()

	var netWrittenBytes, netReadBytes int64
	log.Printf("[%v] Splicing pipe %+v, tcpfds %+v", direction, s.pipe, s.tcpFD)
	for {
		// SPLICE_F_NONBLOCK: don't block if TCP buffer is empty
		// SPLICE_F_MOVE: directly move pages into splice buffer in kernel memory
		// SPLICE_F_MORE: just makes mysql connections slow
		log.Printf("[input (%v)] Entering input", direction)
		inBytes, err := syscall.Splice(inFD, nil, pipe[1], nil, s.bufferSize, SPLICE_F_NONBLOCK)
		if err := s.checkSpliceErr(inBytes, err, fmt.Sprintf("input (%v)", direction)); err != nil {
			log.Printf("ERROR [input (%v)] error: %v", direction, err)
			return
		}
		netReadBytes += inBytes
		log.Printf("[input (%v)] %d bytes read", direction, inBytes)

		log.Printf("[input (%v)] Entering output", direction)
		outBytes, err := syscall.Splice(pipe[0], nil, outFD, nil, s.bufferSize, SPLICE_F_NONBLOCK)
		if err := s.checkSpliceErr(inBytes, err, fmt.Sprintf("output (%v)", direction)); err != nil {
			log.Printf("ERROR [output (%v)] error: %v", direction, err)
			return
		}
		log.Printf("[output (%v)] %d bytes written, out of given input %d", direction, outBytes, inBytes)
		netWrittenBytes += outBytes
	}
	log.Printf("[%v] Spliced %d bytes read %d bytes written", direction, netWrittenBytes, netReadBytes)
}
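
A minimal, self-contained sketch of the same splice(2) idea, assuming Linux and two already-connected socket descriptors (spliceCopy, inFD, outFD and bufferSize are hypothetical names, not part of the example above): data is spliced from the source socket into a kernel pipe and from the pipe into the destination socket, so it never has to be copied through user space.

package main

import (
	"io"
	"log"
	"syscall"
)

// spliceCopy forwards bytes from inFD to outFD through a kernel pipe using
// splice(2). Error handling for EAGAIN/partial progress is deliberately
// simplified; this is a sketch, not a drop-in proxy.
func spliceCopy(inFD, outFD, bufferSize int) (int64, error) {
	pipe := make([]int, 2)
	if err := syscall.Pipe(pipe); err != nil {
		return 0, err
	}
	defer syscall.Close(pipe[0])
	defer syscall.Close(pipe[1])

	var total int64
	for {
		// Socket -> pipe. SPLICE_F_MOVE hints that the kernel may move
		// pages instead of copying them.
		n, err := syscall.Splice(inFD, nil, pipe[1], nil, bufferSize, syscall.SPLICE_F_MOVE)
		if err != nil {
			return total, err
		}
		if n == 0 {
			return total, io.EOF // peer closed its write side
		}
		// Pipe -> socket: drain everything that was just buffered.
		for n > 0 {
			w, err := syscall.Splice(pipe[0], nil, outFD, nil, int(n), syscall.SPLICE_F_MOVE)
			if err != nil {
				return total, err
			}
			n -= w
			total += w
		}
	}
}

func main() {
	// inFD and outFD would normally come from accept(2) or from
	// (*net.TCPConn).File(); wiring that up is outside this sketch.
	_ = spliceCopy
	log.Println("spliceCopy shows the splice(2) loop used above")
}
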
Example #2
func StartServer(vshost string, me string) *PBServer {
	pb := new(PBServer)
	pb.me = me
	pb.vs = viewservice.MakeClerk(me, vshost)
	// Your pb.* initializations here.
	pb.view = viewservice.View{}
	pb.content = make(map[string]string)
	pb.client = make(map[string]string)

	rpcs := rpc.NewServer()
	rpcs.Register(pb)

	os.Remove(pb.me)
	l, e := net.Listen("unix", pb.me)
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	pb.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		for pb.isdead() == false {
			conn, err := pb.l.Accept()
			if err == nil && pb.isdead() == false {
				if pb.isunreliable() && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if pb.isunreliable() && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && pb.isdead() == false {
				fmt.Printf("PBServer(%v) accept: %v\n", me, err.Error())
				pb.kill()
			}
		}
	}()

	go func() {
		for pb.isdead() == false {
			pb.tick()
			time.Sleep(viewservice.PingInterval)
		}
	}()

	return pb
}
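
Most of the StartServer/Make examples in this list are MIT 6.824 lab harness code; the "process the request but force discard of reply" branch works by pulling the raw descriptor out of the *net.UnixConn and shutting down only its write half, so the RPC is still served but the reply never reaches the client. A minimal sketch of just that trick (discardReplies is a hypothetical helper, not part of the lab code):

package main

import (
	"fmt"
	"net"
	"syscall"
)

// discardReplies half-closes the write side of a Unix-domain connection.
// The server keeps reading requests from conn, but anything it writes back
// is dropped and the client sees EOF instead of a reply.
func discardReplies(conn net.Conn) error {
	uc, ok := conn.(*net.UnixConn)
	if !ok {
		return fmt.Errorf("not a unix connection: %T", conn)
	}
	f, err := uc.File() // duplicates the fd; shutdown still affects the socket
	if err != nil {
		return err
	}
	defer f.Close()
	return syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
}

func main() {
	// Usage sketch, inside an accept loop like the one above:
	//
	//	if err := discardReplies(conn); err != nil {
	//		fmt.Printf("shutdown: %v\n", err)
	//	}
	//	go rpcs.ServeConn(conn)
	_ = discardReplies
}

At the net-package level, (*net.UnixConn).CloseWrite achieves the same half-close without dropping to raw syscalls.
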
Example #3
//
// the application wants to create a paxos peer.
// the ports of all the paxos peers (including this one)
// are in peers[]. this server's port is peers[me].
//
func Make(peers []string, me int, rpcs *rpc.Server) *Paxos {
	px := &Paxos{}
	px.peers = peers
	px.me = me

	// Your initialization code here.

	if rpcs != nil {
		// caller will create socket &c
		rpcs.Register(px)
	} else {
		rpcs = rpc.NewServer()
		rpcs.Register(px)

		// prepare to receive connections from clients.
		// change "unix" to "tcp" to use over a network.
		os.Remove(peers[me]) // only needed for "unix"
		l, e := net.Listen("unix", peers[me])
		if e != nil {
			log.Fatal("listen error: ", e)
		}
		px.l = l

		// please do not change any of the following code,
		// or do anything to subvert it.

		// create a thread to accept RPC connections
		go func() {
			for px.dead == false {
				conn, err := px.l.Accept()
				if err == nil && px.dead == false {
					if px.unreliable && (rand.Int63()%1000) < 100 {
						// discard the request.
						conn.Close()
					} else if px.unreliable && (rand.Int63()%1000) < 200 {
						// process the request but force discard of reply.
						c1 := conn.(*net.UnixConn)
						f, _ := c1.File()
						err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
						if err != nil {
							fmt.Printf("shutdown: %v\n", err)
						}
						px.rpcCount++
						go rpcs.ServeConn(conn)
					} else {
						px.rpcCount++
						go rpcs.ServeConn(conn)
					}
				} else if err == nil {
					conn.Close()
				}
				if err != nil && px.dead == false {
					fmt.Printf("Paxos(%v) accept: %v\n", me, err.Error())
				}
			}
		}()
	}

	return px
}
Example #4
File: fd_windows.go Project: gmwu/go
func (fd *netFD) shutdown(how int) error {
	if err := fd.incref(); err != nil {
		return err
	}
	defer fd.decref()
	return syscall.Shutdown(fd.sysfd, how)
}
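
From application code this internal shutdown path is normally reached through (*net.TCPConn).CloseWrite / CloseRead rather than by calling syscall.Shutdown directly. A small usage sketch (the address and one-line protocol are made up for illustration): write a request, half-close the write side so the peer sees EOF, then keep reading the reply.

package main

import (
	"bufio"
	"fmt"
	"log"
	"net"
)

// requestAndClose sends one request, half-closes the write side (SHUT_WR
// under the hood), and then reads a single reply line.
func requestAndClose(addr, request string) (string, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return "", err
	}
	defer conn.Close()

	if _, err := fmt.Fprint(conn, request); err != nil {
		return "", err
	}
	// The peer now sees EOF after our request, but we can still read.
	if err := conn.(*net.TCPConn).CloseWrite(); err != nil {
		return "", err
	}
	return bufio.NewReader(conn).ReadString('\n')
}

func main() {
	// "example.invalid:7" is a placeholder; the call is expected to fail
	// unless pointed at a real echo-style service.
	reply, err := requestAndClose("example.invalid:7", "ping\n")
	if err != nil {
		log.Println("request failed:", err)
		return
	}
	log.Println("reply:", reply)
}
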
Example #5
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant key/value service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *KVPaxos {
	// this call is all that's needed to persuade
	// Go's RPC library to marshall/unmarshall
	// struct Op.
	gob.Register(Op{})

	kv := new(KVPaxos)
	kv.me = me

	// Your initialization code here.
	kv.mydb = map[string]string{}
	kv.history = map[int64]string{}
	kv.DoneInst = 0

	rpcs := rpc.NewServer()
	rpcs.Register(kv)

	kv.px = paxos.Make(servers, me, rpcs)

	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	kv.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		for kv.dead == false {
			conn, err := kv.l.Accept()
			if err == nil && kv.dead == false {
				if kv.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if kv.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && kv.dead == false {
				fmt.Printf("KVPaxos(%v) accept: %v\n", me, err.Error())
				kv.kill()
			}
		}
	}()

	return kv
}
Example #6
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant key/value service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *KVPaxos {
	// call gob.Register on structures you want
	// Go's RPC library to marshall/unmarshall.
	gob.Register(Op{})

	kv := new(KVPaxos)
	kv.me = me

	// initialization
	kv.kvData = make(map[string]string)
	kv.preReply = make(map[string]*Op)
	kv.seqChan = make(map[int]chan *Op)
	kv.maxInstanceID = -1

	rpcs := rpc.NewServer()
	rpcs.Register(kv)

	kv.px = paxos.Make(servers, me, rpcs)

	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	kv.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		for kv.dead == false {
			conn, err := kv.l.Accept()
			if err == nil && kv.dead == false {
				if kv.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if kv.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && kv.dead == false {
				fmt.Printf("KVPaxos(%v) accept: %v\n", me, err.Error())
				kv.kill()
			}
		}
	}()
	go kv.updateStatus()
	return kv
}
Example #7
File: fd_unix.go Project: duhaibo0404/go-1
func (fd *netFD) shutdown(how int) error {
	if err := fd.incref(); err != nil {
		return err
	}
	defer fd.decref()
	return os.NewSyscallError("shutdown", syscall.Shutdown(fd.sysfd, how))
}
Example #8
func (p *setnsProcess) start() (err error) {
	defer p.parentPipe.Close()
	if err = p.execSetns(); err != nil {
		return newSystemError(err)
	}
	if len(p.cgroupPaths) > 0 {
		if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil {
			return newSystemError(err)
		}
	}
	if err := json.NewEncoder(p.parentPipe).Encode(p.config); err != nil {
		return newSystemError(err)
	}
	if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil {
		return newSystemError(err)
	}
	// wait for the child process to fully complete and receive an error message
	// if one was encountered
	var ierr *genericError
	if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF {
		return newSystemError(err)
	}
	if ierr != nil {
		return newSystemError(ierr)
	}

	return nil
}
Example #9
func (p *initProcess) sendConfig() error {
	// send the state to the container's init process then shutdown writes for the parent
	if err := json.NewEncoder(p.parentPipe).Encode(p.config); err != nil {
		return err
	}
	// shutdown writes for the parent side of the pipe
	return syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR)
}
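
The pattern in sendConfig (and in the setnsProcess.start examples) is: the parent serializes the container config onto the init pipe, then shuts down its write half so the child's JSON decoder hits EOF and knows the stream is complete, while the same socket stays readable for error reports coming back. A self-contained sketch of that handshake over a socketpair (demoConfig and its field are invented for illustration, not runc types):

package main

import (
	"encoding/json"
	"io"
	"log"
	"os"
	"syscall"
)

type demoConfig struct {
	Hostname string `json:"hostname"`
}

func main() {
	// A socketpair stands in for the runc parent/child init pipe.
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	parent := os.NewFile(uintptr(fds[0]), "parent")
	child := os.NewFile(uintptr(fds[1]), "child")

	// "Child": keep decoding configs until the parent shuts down writes,
	// at which point Decode returns io.EOF.
	done := make(chan struct{})
	go func() {
		defer close(done)
		dec := json.NewDecoder(child)
		for {
			var c demoConfig
			if err := dec.Decode(&c); err != nil {
				if err != io.EOF {
					log.Println("child decode:", err)
				}
				return
			}
			log.Println("child received config:", c.Hostname)
		}
	}()

	// "Parent": send the config, then shut down the write half only, so the
	// child sees EOF while the parent could still read replies on its end.
	if err := json.NewEncoder(parent).Encode(demoConfig{Hostname: "demo"}); err != nil {
		log.Fatal(err)
	}
	if err := syscall.Shutdown(int(parent.Fd()), syscall.SHUT_WR); err != nil {
		log.Fatal(err)
	}
	<-done
}
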
Example #10
File: server.go Project: pokerG/6.824
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *ShardMaster {
	gob.Register(Op{})

	sm := new(ShardMaster)
	sm.me = me

	sm.configs = make([]Config, 1)
	sm.configs[0].Groups = map[int64][]string{}

	//your initialization
	sm.cfgnum = 0

	rpcs := rpc.NewServer()
	rpcs.Register(sm)

	sm.px = paxos.Make(servers, me, rpcs)

	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	sm.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		for sm.dead == false {
			conn, err := sm.l.Accept()
			if err == nil && sm.dead == false {
				if sm.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if sm.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && sm.dead == false {
				fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
				sm.Kill()
			}
		}
	}()

	return sm
}
Example #11
func (fd *netFD) shutdown(how int) error {
	if fd == nil || fd.sysfile == nil {
		return os.EINVAL
	}
	err := syscall.Shutdown(fd.sysfd, how)
	if err != nil {
		return &OpError{"shutdown", fd.net, fd.laddr, err}
	}
	return nil
}
Example #12
File: fd_windows.go Project: Sunmonds/gcc
func (fd *netFD) shutdown(how int) os.Error {
	if fd == nil || fd.sysfd == syscall.InvalidHandle {
		return os.EINVAL
	}
	errno := syscall.Shutdown(fd.sysfd, how)
	if errno != 0 {
		return &OpError{"shutdown", fd.net, fd.laddr, os.Errno(errno)}
	}
	return nil
}
Example #13
func (s *SyncPipe) SendToChild(v interface{}) error {
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}

	s.parent.Write(data)

	return syscall.Shutdown(int(s.parent.Fd()), syscall.SHUT_WR)
}
Example #14
File: sync_pipe.go Project: GloriaH/docker
func (s *SyncPipe) SendToChild(networkState *network.NetworkState) error {
	data, err := json.Marshal(networkState)
	if err != nil {
		return err
	}

	s.parent.Write(data)

	return syscall.Shutdown(int(s.parent.Fd()), syscall.SHUT_WR)
}
Example #15
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(me string) *ObliviousReplica {

	or := new(ObliviousReplica)
	or.me = me

	rpcs := rpc.NewServer()
	rpcs.Register(or)

	or.UID = GetMD5Hash(me)
	os.Mkdir("data", 0700)

	or.dataPath = "data/replica-" + or.UID
	os.Mkdir(or.dataPath, 0700)

	os.Remove(me)
	l, e := net.Listen(Network, me)
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	or.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		for or.dead == false {
			conn, err := or.l.Accept()
			if err == nil && or.dead == false {
				if or.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if or.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && or.dead == false {
				fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
				or.Kill()
			}
		}
	}()

	return or
}
Example #16
func (fd *netFD) shutdown(how int) error {
	if err := fd.incref(false); err != nil {
		return err
	}
	defer fd.decref()
	err := syscall.Shutdown(fd.sysfd, how)
	if err != nil {
		return &OpError{"shutdown", fd.net, fd.laddr, err}
	}
	return nil
}
Example #17
func (fd *netFD) Close() os.Error {
	if fd == nil || fd.sysfd == -1 {
		return os.EINVAL
	}

	fd.incref()
	syscall.Shutdown(fd.sysfd, syscall.SHUT_RDWR)
	fd.closing = true
	fd.decref()
	return nil
}
Example #18
File: client.go Project: jeffchan/pushydb
func MakeClerk(me string, publish chan PublishArgs) *Clerk {
	ck := new(Clerk)
	ck.me = me
	ck.dups = make(map[string]bool)
	ck.publish = publish

	rpcs := rpc.NewServer()
	rpcs.Register(ck)

	os.Remove(ck.me)
	l, e := net.Listen("unix", ck.me)
	if e != nil {
		fmt.Println("listener error: ", e)
	}
	ck.l = l

	gob.Register(PublishArgs{})

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		for ck.dead == false {
			conn, err := ck.l.Accept()
			if err == nil && ck.dead == false {
				if ck.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if ck.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && ck.dead == false {
				fmt.Printf("MBClerk(%v) accept: %v\n", ck.me, err.Error())
				ck.Kill()
			}
		}
	}()

	go ck.process()

	return ck
}
Example #19
func (p *setnsProcess) start() (err error) {
	defer p.parentPipe.Close()
	err = p.cmd.Start()
	p.childPipe.Close()
	p.rootDir.Close()
	if err != nil {
		return newSystemErrorWithCause(err, "starting setns process")
	}
	if p.bootstrapData != nil {
		if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
			return newSystemErrorWithCause(err, "copying bootstrap data to pipe")
		}
	}
	if err = p.execSetns(); err != nil {
		return newSystemErrorWithCause(err, "executing setns process")
	}
	if len(p.cgroupPaths) > 0 {
		if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil {
			return newSystemErrorWithCausef(err, "adding pid %d to cgroups", p.pid())
		}
	}
	// set oom_score_adj
	if err := setOomScoreAdj(p.config.Config.OomScoreAdj, p.pid()); err != nil {
		return newSystemErrorWithCause(err, "setting oom score")
	}
	// set rlimits, this has to be done here because we lose permissions
	// to raise the limits once we enter a user-namespace
	if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil {
		return newSystemErrorWithCause(err, "setting rlimits for process")
	}
	if err := utils.WriteJSON(p.parentPipe, p.config); err != nil {
		return newSystemErrorWithCause(err, "writing config to pipe")
	}

	if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil {
		return newSystemErrorWithCause(err, "calling shutdown on init pipe")
	}
	// wait for the child process to fully complete and receive an error message
	// if one was encountered
	var ierr *genericError
	if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF {
		return newSystemErrorWithCause(err, "decoding init error from pipe")
	}
	// Must be done after Shutdown so the child will exit and we can wait for it.
	if ierr != nil {
		p.wait()
		return ierr
	}
	return nil
}
Example #20
// Gracefully close the connection to a client
func (cxn *TFOServerConn) Close() {

	// Gracefully close the connection
	err := syscall.Shutdown(cxn.fd, syscall.SHUT_RDWR)
	if err != nil {
		log.Println("Failed to shutdown() connection:", err)
	}

	// Close the file descriptor
	err = syscall.Close(cxn.fd)
	if err != nil {
		log.Println("Failed to close() connection:", err)
	}

}
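
A gentler variant of the Close above, as a sketch, is to shut down only the write half first, drain whatever the peer still has in flight, and close the descriptor last, so data the peer sent just before we stopped is not answered with a reset. gracefulClose is a hypothetical helper, not part of the TFO server:

package main

import (
	"log"
	"syscall"
)

// gracefulClose stops sending first (SHUT_WR), reads until the peer closes
// its side, and only then releases the descriptor.
func gracefulClose(fd int) error {
	if err := syscall.Shutdown(fd, syscall.SHUT_WR); err != nil {
		return err
	}
	buf := make([]byte, 4096)
	for {
		n, err := syscall.Read(fd, buf)
		if n == 0 || err != nil { // EOF from the peer, or a real error
			break
		}
	}
	return syscall.Close(fd)
}

func main() {
	// Usage sketch only: fd would be a connected socket descriptor.
	_ = gracefulClose
	log.Println("gracefulClose: shutdown, drain, then close")
}
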
Example #21
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func FinishStartServer(sm *ShardMaster, servers []string, me int, rpcs *rpc.Server) *ShardMaster {
	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	sm.l = l

	go sm.LogWalker()

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		for sm.dead == false {
			conn, err := sm.l.Accept()
			if err == nil && sm.dead == false {
				if sm.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if sm.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && sm.dead == false {
				fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
				sm.Kill()
			}
		}
	}()

	return sm
}
Example #22
func (p *setnsProcess) start() (err error) {
	defer p.parentPipe.Close()
	err = p.cmd.Start()
	p.childPipe.Close()
	if err != nil {
		return newSystemError(err)
	}
	if p.bootstrapData != nil {
		if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
			return newSystemError(err)
		}
	}
	if err = p.execSetns(); err != nil {
		return newSystemError(err)
	}
	if len(p.cgroupPaths) > 0 {
		if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil {
			return newSystemError(err)
		}
	}
	if err := utils.WriteJSON(p.parentPipe, p.config); err != nil {
		return newSystemError(err)
	}
	// set oom_score_adj
	if err := setOomScoreAdj(p.config.Config.OomScoreAdj, p.pid()); err != nil {
		return newSystemError(err)
	}

	if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil {
		return newSystemError(err)
	}
	// wait for the child process to fully complete and receive an error message
	// if one was encountered
	var ierr *genericError
	if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF {
		return newSystemError(err)
	}
	// Must be done after Shutdown so the child will exit and we can wait for it.
	if ierr != nil {
		p.wait()
		return newSystemError(ierr)
	}
	return nil
}
Example #23
// Register a new server.
func Register(v interface{}, addr string) error {
	server := rpc.NewServer()
	if err := server.Register(v); err != nil {
		return err
	}

	os.Remove(addr)
	listener, err := net.Listen(network, addr)
	if err != nil {
		return err
	}
	servers[v] = &entry{listener, atomic.NewAtomicBool(false)}

	// Start the server daemon.
	go func() {
		for {
			conn, err := listener.Accept()
			if err != nil {
				break
			} else {
				if IsUnreliable(v) {
					if rand.Int63()%1000 < 100 {
						// Discard current request.
						conn.Close()
					} else if rand.Int63()%1000 < 200 {
						// Process the current request, but discard the corresponding reply.
						unixConn := conn.(*net.UnixConn)
						file, _ := unixConn.File()
						// Close the write side to discard the reply.
						syscall.Shutdown(int(file.Fd()), syscall.SHUT_WR)
						go server.ServeConn(conn)
					} else {
						go server.ServeConn(conn)
					}
				} else {
					go server.ServeConn(conn)
				}
			}
		}
	}()

	return nil
}
Example #24
File: server.go Project: kennyhlam/6824
//
// Start a shardkv server.
// gid is the ID of the server's replica group.
// shardmasters[] contains the ports of the
//   servers that implement the shardmaster.
// servers[] contains the ports of the servers
//   in this replica group.
// Me is the index of this server in servers[].
//
func StartServer(gid int64, shardmasters []string,
	servers []string, me int) *ShardKV {
	gob.Register(Op{})
	gob.Register(shardmaster.Config{})
	gob.Register(Xaction{})
	gob.Register(GetArgs{})
	gob.Register(PutArgs{})
	gob.Register(GimmeDataArgs{})
	gob.Register(GimmeDataReply{})

	kv := new(ShardKV)
	kv.me = me
	kv.gid = gid
	kv.sm = shardmaster.MakeClerk(shardmasters)

	// Your initialization code here.
	// Don't call Join().
	kv.now = kv.sm.Query(0)
	kv.next = kv.sm.Query(0)
	kv.contiguous = -1
	kv.storage = make(map[string]string)
	kv.xactions = make(map[string]Xaction)
	kv.versions = make(map[int]Snapshot)

	rpcs := rpc.NewServer()
	rpcs.Register(kv)

	kv.px = paxos.Make(servers, me, rpcs)

	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	kv.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		for kv.dead == false {
			conn, err := kv.l.Accept()
			if err == nil && kv.dead == false {
				if kv.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if kv.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && kv.dead == false {
				fmt.Printf("ShardKV(%v) accept: %v\n", me, err.Error())
				kv.kill()
			}
		}
	}()

	go func() {
		for kv.dead == false {
			kv.tick()
			time.Sleep(250 * time.Millisecond)
		}
	}()

	return kv
}
Example #25
func (p *initProcess) start() (err error) {
	defer p.parentPipe.Close()
	err = p.cmd.Start()
	p.process.ops = p
	p.childPipe.Close()
	if err != nil {
		p.process.ops = nil
		return newSystemError(err)
	}
	// Save the standard descriptor names before the container process
	// can potentially move them (e.g., via dup2()).  If we don't do this now,
	// we won't know at checkpoint time which file descriptor to look up.
	fds, err := getPipeFds(p.pid())
	if err != nil {
		return newSystemError(err)
	}
	p.setExternalDescriptors(fds)
	// Do this before syncing with child so that no children
	// can escape the cgroup
	if err := p.manager.Apply(p.pid()); err != nil {
		return newSystemError(err)
	}
	defer func() {
		if err != nil {
			// TODO: should not be the responsibility to call here
			p.manager.Destroy()
		}
	}()
	if p.config.Config.Hooks != nil {
		s := configs.HookState{
			Version: p.container.config.Version,
			ID:      p.container.id,
			Pid:     p.pid(),
			Root:    p.config.Config.Rootfs,
		}
		for _, hook := range p.config.Config.Hooks.Prestart {
			if err := hook.Run(s); err != nil {
				return newSystemError(err)
			}
		}
	}
	if err := p.createNetworkInterfaces(); err != nil {
		return newSystemError(err)
	}
	if err := p.sendConfig(); err != nil {
		return newSystemError(err)
	}
	var (
		procSync syncT
		sentRun  bool
		ierr     *genericError
	)

loop:
	for {
		if err := json.NewDecoder(p.parentPipe).Decode(&procSync); err != nil {
			if err == io.EOF {
				break loop
			}
			return newSystemError(err)
		}
		switch procSync.Type {
		case procStart:
			break loop
		case procReady:
			if err := p.manager.Set(p.config.Config); err != nil {
				return newSystemError(err)
			}
			// Sync with child.
			if err := utils.WriteJSON(p.parentPipe, syncT{procRun}); err != nil {
				return newSystemError(err)
			}
			sentRun = true
		case procError:
			// wait for the child process to fully complete and receive an error message
			// if one was encountered
			if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF {
				return newSystemError(err)
			}
			if ierr != nil {
				break loop
			}
			// Programmer error.
			panic("No error following JSON procError payload.")
		default:
			return newSystemError(fmt.Errorf("invalid JSON synchronisation payload from child"))
		}
	}
	if !sentRun {
		return newSystemError(fmt.Errorf("could not synchronise with container process"))
	}
	if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil {
		return newSystemError(err)
	}
	// Must be done after Shutdown so the child will exit and we can wait for it.
	if ierr != nil {
		p.wait()
		return newSystemError(ierr)
	}
	return nil
}
Example #26
//
// the application wants to create a paxos peer.
// the ports of all the paxos peers (including this one)
// are in peers[]. this server's port is peers[me].
//
func Make(peers []string, me int, rpcs *rpc.Server) *Paxos {
	px := &Paxos{}
	px.peers = peers
	px.me = me

	fmt.Println("Peer Number: ", len(peers))

	px.fates = make(map[int]Fate)
	px.seens = make(map[int]int)
	px.accepts = make(map[int]int)
	px.values = make(map[int]interface{})
	px.dones = make([]int, len(peers))
	px.highest = -1
	px.maxSeen = px.me

	if rpcs != nil {
		// caller will create socket &c
		rpcs.Register(px)
	} else {
		rpcs = rpc.NewServer()
		rpcs.Register(px)

		// prepare to receive connections from clients.
		// change "unix" to "tcp" to use over a network.
		os.Remove(peers[me]) // only needed for "unix"
		l, e := net.Listen("unix", peers[me])
		if e != nil {
			log.Fatal("listen error: ", e)
		}
		px.l = l

		// please do not change any of the following code,
		// or do anything to subvert it.

		// create a thread to accept RPC connections
		go func() {
			for px.isdead() == false {
				conn, err := px.l.Accept()
				if err == nil && px.isdead() == false {
					if px.isunreliable() && (rand.Int63()%1000) < 100 {
						// discard the request.
						conn.Close()
					} else if px.isunreliable() && (rand.Int63()%1000) < 200 {
						// process the request but force discard of reply.
						c1 := conn.(*net.UnixConn)
						f, _ := c1.File()
						err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
						if err != nil {
							fmt.Printf("shutdown: %v\n", err)
						}
						atomic.AddInt32(&px.rpcCount, 1)
						go rpcs.ServeConn(conn)
					} else {
						atomic.AddInt32(&px.rpcCount, 1)
						go rpcs.ServeConn(conn)
					}
				} else if err == nil {
					conn.Close()
				}
				if err != nil && px.isdead() == false {
					fmt.Printf("Paxos(%v) accept: %v\n", me, err.Error())
				}
			}
		}()
	}

	return px
}
Example #27
File: paxos.go Project: salibrandi/mexos
//
// the application wants to create a paxos peer.
// the ports of all the paxos peers (including this one)
// are in peers[]. this server's port is peers[me].
//
func Make(peers []string, me int, rpcs *rpc.Server, network bool, tag string) *Paxos {
	px := &Paxos{}

	// Read memory options
	px.persistent = persistent
	px.recovery = recovery
	px.dbUseCompression = dbUseCompression
	px.dbUseCache = dbUseCache
	px.dbCacheSize = dbCacheSize
	px.writeToMemory = writeToMemory

	// Network stuff
	px.peers = peers
	px.me = me
	px.network = network
	px.reachable = make([]bool, len(px.peers))
	for i := range px.peers {
		px.reachable[i] = true
	}
	px.deaf = false

	// Paxos state
	px.instances = make(map[int]Proposal)
	px.leader = make(map[int]int)
	px.proposed = make(map[int]bool)
	px.maxInstance = -1
	px.done = make(map[int]int)
	for i := 0; i < len(px.peers); i++ {
		px.done[i] = -1
	}
	px.doneChannels = make(map[int]chan bool)

	// Persistence stuff
	waitChan := make(chan int)
	go func() {
		waitChan <- 1
		px.startup(tag)
	}()
	<-waitChan

	if rpcs != nil {
		// caller will create socket &c
		if !printRPCerrors {
			disableLog()
			rpcs.Register(px)
			enableLog()
		} else {
			rpcs.Register(px)
		}
	} else {
		rpcs = rpc.NewServer()
		if !printRPCerrors {
			disableLog()
			rpcs.Register(px)
			enableLog()
		} else {
			rpcs.Register(px)
		}

		// prepare to receive connections from clients.
		// change "unix" to "tcp" to use over a network.
		if px.network {
			l, e := net.Listen("tcp", ":"+strconv.Itoa(startport))
			if e != nil {
				log.Fatal("listen error: ", e)
			}
			px.l = l
		} else {
			os.Remove(peers[me]) // only needed for "unix"
			l, e := net.Listen("unix", peers[me])
			if e != nil {
				log.Fatal("listen error: ", e)
			}
			px.l = l
		}

		// please do not change any of the following code,
		// or do anything to subvert it.

		// create a thread to accept RPC connections
		go func() {
			for px.dead == false {
				conn, err := px.l.Accept()
				if err == nil && px.dead == false {
					if px.deaf || (px.unreliable && (rand.Int63()%1000) < 100) {
						// discard the request.
						conn.Close()
					} else if px.unreliable && (rand.Int63()%1000) < 200 {
						// process the request but force discard of reply.
						if !px.network {
							c1 := conn.(*net.UnixConn)
							f, _ := c1.File()
							err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
							if err != nil {
								fmt.Printf("shutdown: %v\n", err)
							}

						} else {
							//respond to imaginary port?
							c1 := conn.(*net.TCPConn)
							f, _ := c1.File()
							err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
							if err != nil {
								fmt.Printf("shutdown: %v\n", err)
							}
						}
						px.rpcCount++
						go rpcs.ServeConn(conn)
					} else {
						px.rpcCount++
						go rpcs.ServeConn(conn)
					}
				} else if err == nil {
					conn.Close()
				}
				if err != nil && px.dead == false {
					fmt.Printf("Paxos(%v) accept: %v\n", me, err.Error())
				}
			}
		}()
	}

	return px
}
Example #28
File: server.go Project: salibrandi/mexos
//
// Start a shardkv server.
// gid is the ID of the server's replica group.
// shardmasters[] contains the ports of the
//   servers that implement the shardmaster.
// servers[] contains the ports of the servers
//   in this replica group.
// Me is the index of this server in servers[].
//
func StartServer(gid int64, shardmasters []string,
	servers []string, me int) *ShardKV {
	gob.Register(Op{})

	kv := new(ShardKV)
	kv.me = me
	kv.gid = gid
	kv.uid = strconv.Itoa(me) + string("-") + strconv.FormatInt(gid, 10)
	kv.sm = shardmaster.MakeClerk(shardmasters)
	kv.servers = servers

	// Your initialization code here.
	kv.logged = make(map[int64]bool)
	kv.values = make(map[string]string)
	// Don't call Join().

	rpcs := rpc.NewServer()
	rpcs.Register(kv)

	kv.px = paxos.Make(servers, me, rpcs)

	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	kv.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		for kv.dead == false {
			conn, err := kv.l.Accept()
			if err == nil && kv.dead == false {
				if kv.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if kv.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && kv.dead == false {
				fmt.Printf("ShardKV(%v) accept: %v\n", me, err.Error())
				kv.kill()
			}
		}
	}()

	go func() {
		for kv.dead == false {
			kv.tick()
			time.Sleep(250 * time.Millisecond)
		}
	}()

	return kv
}
Example #29
File: server.go Project: ctliu3/yds
func StartServer(vshost string, me string) *PBServer {
	pb := new(PBServer)
	pb.me = me
	pb.vs = viewservice.MakeClerk(me, vshost)
	pb.finish = make(chan interface{})
	// Your pb.* initializations here.
	pb.current = viewservice.View{Viewnum: 0}
	pb.db = make(map[string]string)
	//pb.filter = make(map[string]string)

	rpcs := rpc.NewServer()
	rpcs.Register(pb)

	os.Remove(pb.me)
	l, e := net.Listen("unix", pb.me)
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	pb.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		for pb.dead == false {
			conn, err := pb.l.Accept()
			if err == nil && pb.dead == false {
				if pb.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if pb.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					pb.done.Add(1)
					go func() {
						rpcs.ServeConn(conn)
						pb.done.Done()
					}()
				} else {
					pb.done.Add(1)
					go func() {
						rpcs.ServeConn(conn)
						pb.done.Done()
					}()
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && pb.dead == false {
				fmt.Printf("PBServer(%v) accept: %v\n", me, err.Error())
				pb.kill()
			}
		}
		DPrintf("%s: wait until all request are done\n", pb.me)
		pb.done.Wait()
		// If you have an additional thread in your solution, you could
		// have it read from the finish channel to learn when to terminate.
		close(pb.finish)
	}()

	pb.done.Add(1)
	go func() {
		for pb.dead == false {
			pb.tick()
			time.Sleep(viewservice.PingInterval)
		}
		pb.done.Done()
	}()

	return pb
}
Example #30
func (p *initProcess) start() error {
	defer p.parentPipe.Close()
	err := p.cmd.Start()
	p.process.ops = p
	p.childPipe.Close()
	p.rootDir.Close()
	if err != nil {
		p.process.ops = nil
		return newSystemErrorWithCause(err, "starting init process command")
	}
	if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
		return err
	}
	if err := p.execSetns(); err != nil {
		return newSystemErrorWithCause(err, "running exec setns process for init")
	}
	// Save the standard descriptor names before the container process
	// can potentially move them (e.g., via dup2()).  If we don't do this now,
	// we won't know at checkpoint time which file descriptor to look up.
	fds, err := getPipeFds(p.pid())
	if err != nil {
		return newSystemErrorWithCausef(err, "getting pipe fds for pid %d", p.pid())
	}
	p.setExternalDescriptors(fds)
	// Do this before syncing with child so that no children
	// can escape the cgroup
	if err := p.manager.Apply(p.pid()); err != nil {
		return newSystemErrorWithCause(err, "applying cgroup configuration for process")
	}
	defer func() {
		if err != nil {
			// TODO: should not be the responsibility to call here
			p.manager.Destroy()
		}
	}()
	if err := p.createNetworkInterfaces(); err != nil {
		return newSystemErrorWithCause(err, "creating nework interfaces")
	}
	if err := p.sendConfig(); err != nil {
		return newSystemErrorWithCause(err, "sending config to init process")
	}
	var (
		procSync   syncT
		sentRun    bool
		sentResume bool
		ierr       *genericError
	)

	dec := json.NewDecoder(p.parentPipe)
loop:
	for {
		if err := dec.Decode(&procSync); err != nil {
			if err == io.EOF {
				break loop
			}
			return newSystemErrorWithCause(err, "decoding sync type from init pipe")
		}
		switch procSync.Type {
		case procReady:
			if err := p.manager.Set(p.config.Config); err != nil {
				return newSystemErrorWithCause(err, "setting cgroup config for ready process")
			}
			// set oom_score_adj
			if err := setOomScoreAdj(p.config.Config.OomScoreAdj, p.pid()); err != nil {
				return newSystemErrorWithCause(err, "setting oom score for ready process")
			}
			// set rlimits, this has to be done here because we lose permissions
			// to raise the limits once we enter a user-namespace
			if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil {
				return newSystemErrorWithCause(err, "setting rlimits for ready process")
			}
			// call prestart hooks
			if !p.config.Config.Namespaces.Contains(configs.NEWNS) {
				if p.config.Config.Hooks != nil {
					s := configs.HookState{
						Version: p.container.config.Version,
						ID:      p.container.id,
						Pid:     p.pid(),
						Root:    p.config.Config.Rootfs,
					}
					for i, hook := range p.config.Config.Hooks.Prestart {
						if err := hook.Run(s); err != nil {
							return newSystemErrorWithCausef(err, "running prestart hook %d", i)
						}
					}
				}
			}
			// Sync with child.
			if err := utils.WriteJSON(p.parentPipe, syncT{procRun}); err != nil {
				return newSystemErrorWithCause(err, "reading syncT run type")
			}
			sentRun = true
		case procHooks:
			if p.config.Config.Hooks != nil {
				s := configs.HookState{
					Version:    p.container.config.Version,
					ID:         p.container.id,
					Pid:        p.pid(),
					Root:       p.config.Config.Rootfs,
					BundlePath: utils.SearchLabels(p.config.Config.Labels, "bundle"),
				}
				for i, hook := range p.config.Config.Hooks.Prestart {
					if err := hook.Run(s); err != nil {
						return newSystemErrorWithCausef(err, "running prestart hook %d", i)
					}
				}
			}
			// Sync with child.
			if err := utils.WriteJSON(p.parentPipe, syncT{procResume}); err != nil {
				return newSystemErrorWithCause(err, "reading syncT resume type")
			}
			sentResume = true
		case procError:
			// wait for the child process to fully complete and receive an error message
			// if one was encountered
			if err := dec.Decode(&ierr); err != nil && err != io.EOF {
				return newSystemErrorWithCause(err, "decoding proc error from init")
			}
			if ierr != nil {
				break loop
			}
			// Programmer error.
			panic("No error following JSON procError payload.")
		default:
			return newSystemError(fmt.Errorf("invalid JSON payload from child"))
		}
	}
	if !sentRun {
		return newSystemErrorWithCause(ierr, "container init")
	}
	if p.config.Config.Namespaces.Contains(configs.NEWNS) && !sentResume {
		return newSystemError(fmt.Errorf("could not synchronise after executing prestart hooks with container process"))
	}
	if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil {
		return newSystemErrorWithCause(err, "shutting down init pipe")
	}
	// Must be done after Shutdown so the child will exit and we can wait for it.
	if ierr != nil {
		p.wait()
		return ierr
	}
	return nil
}