Example #1
// Returns a PeerConnection for talking to addr, which can be either
// IP:PORT or HOST:PORT (where HOST is something that DNS can resolve).
func Peer(addr string) *PeerConnection {
	addr, err := util.Resolve(addr, config.IP)
	if err != nil {
		return &PeerConnection{err: err}
	}

	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return &PeerConnection{err: err}
	}

	addr = host + ":" + port

	if addr == config.ServerSourceAddress {
		panic("Peer() called with my own address. This is a bug!")
	}

	connections_mutex.Lock()
	defer connections_mutex.Unlock()

	conn, have_already := connections[addr]
	if !have_already {
		conn = &PeerConnection{is_gosusi: false, addr: addr}
		connections[addr] = conn
		go util.WithPanicHandler(func() { conn.handleConnection() })
	}
	return conn
}
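A short usage sketch follows (a hypothetical caller inside the same package; the peer address and payload are made up, and Tell()/Ask() are the methods shown in Examples #14 and #15 below):

// Hypothetical caller; address and payload are illustrative only.
func examplePeerUsage() {
	conn := Peer("siserver.example.com:20081")

	// A second call with the same address returns the same cached object,
	// so all traffic for this peer funnels through one handleConnection().
	same := conn == Peer("siserver.example.com:20081")
	_ = same

	// Queue a message for the peer (Tell() is shown in Example #14).
	conn.Tell("<xml><header>new_server</header></xml>", "")

	// A failed resolution is not reported here; it is stored in the
	// connection and surfaces later, e.g. as an error reply from Ask()
	// (see Example #15).
}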
Example #2
// Accepts UDP connections for TFTP requests on listen_address, serves read requests
// for path P based on request_re and reply as follows:
//
// request_re and reply have to be lists of
// equal length. Let request_re[i] be the first entry in request_re that
// matches P, then reply[i] specifies the data to return for the request.
// If reply[i] == "", then a file not found error is returned to the requestor.
// If reply[i] starts with the character '|', the remainder is taken as the path
// of a hook to execute and its stdout is returned to the requestor.
// Otherwise reply[i] is taken as the path of the file whose contents to send to
// the requestor.
//
// When executing a hook, an environment variable called "tftp_request"
// is passed containing P. If request_re[i] has a capturing
// group named "macaddress", the captured substring will be converted to
// a MAC address by converting to lowercase, removing all characters
// except 0-9a-f, left-padding to length 12 with 0s or truncating to length 12
// and inserting ":"s. The result will be added to
// the hook environment in a variable named "macaddress" and if there
// is an LDAP object for that macaddress, its attributes will be added
// to the environment, too.
//
// Named subexpressions in request_re[i] other than "macaddress" will be
// exported to the hook verbatim in like-named environment variables.
func ListenAndServe(listen_address string, request_re []*regexp.Regexp, reply []string) {
	for i := range request_re {
		util.Log(1, "INFO! TFTP: %v -> %v", request_re[i], reply[i])
	}

	udp_addr, err := net.ResolveUDPAddr("udp", listen_address)
	if err != nil {
		util.Log(0, "ERROR! Cannot start TFTP server: %v", err)
		return
	}

	udp_conn, err := net.ListenUDP("udp", udp_addr)
	if err != nil {
		util.Log(0, "ERROR! ListenUDP(): %v", err)
		return
	}
	defer udp_conn.Close()

	readbuf := make([]byte, 16384)
	for {
		n, return_addr, err := udp_conn.ReadFromUDP(readbuf)
		if err != nil {
			util.Log(0, "ERROR! ReadFromUDP(): %v", err)
			continue
		}

		// Make a copy of the buffer BEFORE starting the goroutine to prevent subsequent requests from
		// overwriting the buffer.
		payload := string(readbuf[:n])

		go util.WithPanicHandler(func() { handleConnection(return_addr, payload, request_re, reply) })

	}
}
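The MAC address normalization described in the doc comment above (lowercase, keep only 0-9a-f, pad or truncate to 12 digits, insert colons) can be illustrated with a small standalone sketch. It is not code from the TFTP package, and since the comment does not say which end is dropped when truncating, this sketch keeps the first 12 digits:

package main

import (
	"fmt"
	"strings"
)

// macNormalize sketches the rule from the ListenAndServe() doc comment:
// lowercase, strip everything but 0-9a-f, left-pad with '0' or truncate
// to 12 digits, then insert ':' separators.
func macNormalize(raw string) string {
	var hex []byte
	for _, r := range strings.ToLower(raw) {
		if (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') {
			hex = append(hex, byte(r))
		}
	}
	for len(hex) < 12 {
		hex = append([]byte{'0'}, hex...) // left-pad with 0s
	}
	hex = hex[:12] // truncate (assumption: keep the first 12 digits)

	parts := make([]string, 6)
	for i := 0; i < 6; i++ {
		parts[i] = string(hex[2*i : 2*i+2])
	}
	return strings.Join(parts, ":")
}

func main() {
	fmt.Println(macNormalize("00-0C-29-AB-CD-EF")) // 00:0c:29:ab:cd:ef
	fmt.Println(macNormalize("abCDef"))            // 00:00:00:ab:cd:ef
}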
Example #3
// Tries to re-establish communication with a client/server at the given IP,
// by
//   1) sending here_i_am to the server where we are registered. We do this
//      even if config.RunServer (i.e. we are registered at ourselves) because
//      this will trigger new_foreign_client messages sent to peers so that other
//      servers that may believe they own us correct their data.
//   2) sending (if config.RunServer) new_server messages to all known servers
//      we find for the IP in our servers database.
//   3) if config.RunServer and in 2) we did not find a server at that IP,
//      maybe it's a client that thinks we are its server. Send "deregistered" to
//      all ClientPorts in that case to cause re-registration.
func tryToReestablishCommunicationWith(ip string) {
	// Wait a little to limit the rate of spam wars between
	// 2 machines that can't re-establish communication (e.g. because of changed
	// keys in server.conf).
	mapIP2ReestablishDelay_mutex.Lock()
	var delay time.Duration
	var ok bool
	if delay, ok = mapIP2ReestablishDelay[ip]; !ok {
		delay = 1 * time.Minute
	}
	mapIP2ReestablishDelay[ip] = 2 * delay
	mapIP2ReestablishDelay_mutex.Unlock()

	// If the delay exceeds 24h, this means that we got multiple
	// re-establish requests while we're still waiting to begin one.
	// In that case, bail out.
	if delay > 24*time.Hour {
		return
	}

	util.Log(0, "WARNING! Will try to re-establish communication with %v after waiting %v", ip, delay)
	time.Sleep(delay)

	// if we actually completed a 10h wait, reset the timer to 1 minute
	if delay >= 10*time.Hour {
		mapIP2ReestablishDelay_mutex.Lock()
		mapIP2ReestablishDelay[ip] = 1 * time.Minute
		mapIP2ReestablishDelay_mutex.Unlock()
	}

	util.Log(0, "WARNING! Will try to re-establish communication with %v", ip)
	ConfirmRegistration() // 1)

	ip, err := util.Resolve(ip, config.IP)
	if err != nil {
		util.Log(0, "ERROR! Resolve(): %v", err)
	}

	if config.RunServer { // 2)
		sendmuell := true
		for _, server := range db.ServerAddresses() {
			if strings.HasPrefix(server, ip) {
				sendmuell = false
				srv := server
				go util.WithPanicHandler(func() { Send_new_server("new_server", srv) })
			}
		}

		if sendmuell {
			for _, port := range config.ClientPorts {
				addr := ip + ":" + port
				if addr != config.ServerSourceAddress { // never send "deregistered" to our own server
					dereg := "<xml><header>deregistered</header><source>" + config.ServerSourceAddress + "</source><target>" + addr + "</target></xml>"
					go security.SendLnTo(addr, dereg, "", false)
				}
			}
		}
	}
}
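The rate-limiting arithmetic above (start at 1 minute, double per attempt, bail out above 24h, reset after a completed wait of 10h or more) can be seen in isolation in this standalone sketch; the map and function names are made up, and the sleep and messaging are left out:

package main

import (
	"fmt"
	"time"
)

// stored plays the role of mapIP2ReestablishDelay (without the mutex).
var stored = map[string]time.Duration{}

// reestablishDelay models only the delay bookkeeping of the function above.
func reestablishDelay(ip string) (wait time.Duration, bail bool) {
	delay, ok := stored[ip]
	if !ok {
		delay = 1 * time.Minute
	}
	stored[ip] = 2 * delay
	if delay > 24*time.Hour {
		return 0, true // several requests stacked up while waiting => give up
	}
	// The real code sleeps for `delay` here and then talks to the peer.
	if delay >= 10*time.Hour {
		stored[ip] = 1 * time.Minute // reset after a completed long wait
	}
	return delay, false
}

func main() {
	for call := 1; call <= 13; call++ {
		wait, bail := reestablishDelay("10.0.0.1")
		fmt.Printf("call %2d: wait %v  bail=%v\n", call, wait, bail)
	}
}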
Example #4
func (f *LoggingFileStorer) Store(data *xml.Hash) (err error) {
	util.WithPanicHandler(func() {
		err = f.FileStorer.Store(data)
		if err != nil {
			util.Log(0, "ERROR! Cannot store database: %v", err)
		}
	})

	return err
}
Example #5
// Handles the message "new_server".
//  xmlmsg: the decrypted and parsed message
func new_server(xmlmsg *xml.Hash) {
	server, _ := util.Resolve(xmlmsg.Text("source"), config.IP)
	if server == config.ServerSourceAddress {
		return
	} // never accept our own address as peer
	setGoSusi(xmlmsg)
	db.ServerUpdate(xmlmsg)
	handleClients(xmlmsg)
	go util.WithPanicHandler(func() {
		Send_new_server("confirm_new_server", server)
		Peer(server).SyncAll()
	})
	return
}
Example #6
func faimon(listen_address string) {
	listener, err := net.Listen("tcp", listen_address)
	if err != nil {
		util.Log(0, "ERROR! Cannot start FAI monitor: %v", err)
		return
	}

	for {
		conn, err := listener.Accept()
		if err != nil {
			util.Log(0, "ERROR! FAI monitor error: %v", err)
			continue
		}

		go util.WithPanicHandler(func() { faiConnection(conn.(*net.TCPConn)) })
	}
}
Example #7
// Handles "detect_hardware".
//  xmlmsg: the decrypted and parsed message
func detect_hardware(xmlmsg *xml.Hash) {
	server := xmlmsg.Text("source")
	if server == "" {
		util.Log(0, "ERROR! Received detect_hardware from unknown source")
		return
	}

	c := make(chan *xml.Hash, 2)
	go func() {
		time.Sleep(config.DetectHardwareTimeout)
		c <- nil
	}()

	go util.WithPanicHandler(func() { sendDetectedHardwareReply(server, c) })

	start := time.Now()
	env := config.HookEnvironment()
	for _, tag := range xmlmsg.Subtags() {
		env = append(env, tag+"="+strings.Join(xmlmsg.Get(tag), "\n"))
	}
	cmd := exec.Command(config.DetectHardwareHookPath)
	env = append(env, "xml="+xmlmsg.String())
	cmd.Env = append(env, os.Environ()...)
	util.Log(1, "INFO! Running detect-hardware-hook %v with parameters %v", config.DetectHardwareHookPath, env)
	hwlist, err := xml.LdifToHash("detected_hardware", false, cmd) // !!C'n'P WARNING: casefold=false!!
	if err != nil {
		util.Log(0, "ERROR! detect-hardware-hook %v: %v", config.DetectHardwareHookPath, err)
		return
	}
	util.Log(1, "INFO! Finished detect-hardware-hook. Running time: %v", time.Since(start))
	for hwlist.RemoveFirst("dn") != nil {
	} // dn is ignored (see manual)
	util.Log(1, "INFO! Hardware detection result: %v", hwlist)

	c <- hwlist
}
Example #8
// Launches a background job that queries the systemdb for the name of
// the machine with the given macaddress and if/when the answer arrives,
// schedules an update of all entries in the jobdb that match the macaddress.
func JobsUpdateNameForMAC(macaddress string) {
	updatename := func(request *jobDBRequest) {
		plainname := request.Job.Text("plainname")
		found := jobDB.Query(request.Filter).First("job")
		for ; found != nil; found = found.Next() {
			if found.Text("plainname") != plainname {
				found.FirstOrAdd("plainname").SetText(plainname)
				jobDB.Replace(xml.FilterSimple("id", found.Text("id")), true, found)

				// if the job is one of ours, then send out fju to gosa-si peers
				// because they don't look up the name themselves
				if found.Text("siserver") == config.ServerSourceAddress {
					fju := xml.NewHash("xml", "header", "foreign_job_updates")
					clone := found.Clone()
					clone.Rename("answer1")

					fju.AddWithOwnership(clone)

					fju.Add("source", config.ServerSourceAddress)
					fju.Add("target", "gosa-si") // only gosa-si peers
					fju.Add("sync", "ordered")
					ForeignJobUpdates <- fju
				}
			}
		}
	}

	go util.WithPanicHandler(func() {
		filter := xml.FilterSimple("macaddress", macaddress)
		plainname := SystemPlainnameForMAC(macaddress)
		if plainname != "none" {
			job := xml.NewHash("job", "plainname", plainname)
			jobDBRequests <- &jobDBRequest{updatename, filter, job, nil}
		}
	})
}
Example #9
// This function runs in a single goroutine and is responsible for handling
// all actions that affect the jobDB as well as starting local jobs whose time
// has come.
// The general idea behind synchronized job processing is this:
//  * a single goroutine processes all requests that affect the jobDB and pushes
//    the resulting changes into the ForeignJobUpdates queue
//  * a single goroutine processes the items from ForeignJobUpdates and passes them
//    on to the appropriate PeerConnection(s).
//  * each PeerConnection has a single goroutine forwarding the updates over a
//    single TCP connection to its respective peer.
//  * The above ensures that each peer receives foreign_job_updates messages in
//    exactly the same order in which the corresponding edits are made on the jobDB.
//    This makes sure that a peer that applies foreign_job_updates messages in
//    order will always have a consistent jobdb.
//  * Because <sync>all</sync> messages are prepared by the same single goroutine
//    that performs the edits and creates <sync>ordered</sync> messages and because
//    all these messages go over the same channels, they cannot overtake each other
//    and will always fit together.
func handleJobDBRequests() {
	groom_ticker := time.Tick(config.JobDBGroomInterval)
	hour, min, _ := time.Now().Clock()
	next_groom_minutes_since_midnight := hour*60 + min + int(config.JobDBGroomInterval/time.Minute)
	backwardsjump := false
	var request *jobDBRequest
	for {
		select {
		case request = <-jobDBRequests:
			request.Action(request)

		case _ = <-groom_ticker:
			hour, min, _ = time.Now().Clock()
			minutes_since_midnight := hour*60 + min

			// If everything is normal, groomlag should be 0. A negative number
			// can only happen if the clock changes, because (unless there's a bug)
			// groom_ticker will never fire too early.
			// A value of 1 is possible in the pathological case that the timer
			// fires just before the minute wraps around and the minute changes
			// during the few milliseconds between the timer firing and us
			// reading the clock.
			groomlag := minutes_since_midnight - next_groom_minutes_since_midnight
			// normalize groomlag to be in the range (-12*60,12*60]
			if groomlag <= -12*60 {
				groomlag += 24 * 60
			}
			if groomlag > 12*60 {
				groomlag -= 24 * 60
			}

			if groomlag > 3 { // we accept 3 minutes lag as normal (extremely high server load)
				// Do not groom jobDB after a clock jump forward because groomJobDB()
				// would assume that jobs have not started correctly even though they
				// haven't got their chance yet and will probably start up in the next
				// few minutes.
				// NOTE: The message does not include the word "FORWARD" because
				// a backwards adjustment by an amount that is not a multiple of
				// config.JobDBGroomInterval will first trigger the BACKWARD case
				// and then trigger this case at the next grooming time. So we
				// keep this message phrased in a way that it will not create the
				// mistaken impression that the clock jumps around wildly.
				util.Log(1, "INFO! Grooming jobdb SKIPPED because of clock adjustment")

				// The next grooming will take place as usual.
				next_groom_minutes_since_midnight = minutes_since_midnight + int(config.JobDBGroomInterval/time.Minute)

				// Ping processPendingActions because as mentioned further above we can
				// enter this case not only after an actual forwards jump of the clock
				// but also after a backwards jump once our clock has finally caught up.
				// And in that case the comment further below still applies, that we
				// need to ping processPendingActions to make sure that jobs are
				// triggered before the next grooming.
				go func() { processPendingActions <- true }()

				// Clear backwardsjump marker if it is set (which is possible; see
				// previous comments)
				backwardsjump = false

			} else if groomlag < -3 { // we accept groom_ticker to fire up to 3 minutes early (clock adjustment for drift)
				util.Log(1, "INFO! Grooming jobdb SKIPPED because of clock jump BACKWARDS")

				// We keep next_groom_minutes_since_midnight at its current value
				// (which is in the future) until our wall clock has caught up.
				// We do this so that every config.JobDBGroomInterval we run into
				// this case again and can ping processPendingActions, because
				// if the backwards jump happened right between the firing of
				// a job's processPendingActions ping and the time the database
				// is queried for pending jobs, the query will not see the jobs
				// as pending and won't trigger them. The next groomJobDB() would then
				// discover the unlaunched jobs, report a bug and kill them.
				// But by pinging processPendingActions here without grooming until
				// our clock has caught up with the originally scheduled grooming time,
				// we make sure that jobs will be started, if not at the right time,
				// at least before the next grooming.
				go func() { processPendingActions <- true }()

				backwardsjump = true

			} else {
				next_groom_minutes_since_midnight = minutes_since_midnight + int(config.JobDBGroomInterval/time.Minute)
				groom_delay := time.Duration(0)
				// If we get here after a backward jump of the clock, there may
				// still be jobs waiting to be executed, so we ping
				// processPendingActions and delay the grooming a little
				if backwardsjump {
					go func() { processPendingActions <- true }()
					groom_delay = time.Minute
					backwardsjump = false
				}

				go func(dly time.Duration) {
					time.Sleep(dly)
					util.WithPanicHandler(groomJobDB)
				}(groom_delay)
			}

		case _ = <-processPendingActions:
			/*** WARNING! WARNING! ***
			  Using the function JobsQuery() here will cause deadlock!
			  Other functions like JobsModifyLocal() are okay, but remember
			  that they will not be executed until this case ends.
			  *************************/
			localwait := xml.FilterSimple("siserver", config.ServerSourceAddress,
				"status", "waiting")
			beforenow := xml.FilterRel("timestamp", util.MakeTimestamp(time.Now()), -1, 0)
			filter := xml.FilterAnd([]xml.HashFilter{localwait, beforenow})
			JobsModifyLocal(filter, xml.NewHash("job", "status", "launch"))
		}
	}
}
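The ordering guarantee described in the comment at the top of this example can be reduced to a small standalone sketch: one goroutine owns the data and is the only one that edits it, and every edit is pushed onto a single updates channel, so a consumer sees the updates in exactly the order the edits were applied. All names here are made up for illustration:

package main

import "fmt"

// dbRequest plays the role of jobDBRequest: a closure applied to the data
// by the single owner goroutine, returning a description of the edit.
type dbRequest struct {
	action func(db map[string]string) string
}

func main() {
	requests := make(chan dbRequest)
	updates := make(chan string, 16) // plays the role of ForeignJobUpdates

	// The single goroutine that owns the database. Because it both applies
	// the edit and emits the update, the two can never get out of order.
	go func() {
		db := map[string]string{}
		for r := range requests {
			updates <- r.action(db)
		}
		close(updates)
	}()

	// Requests may come from many goroutines; here one sender is enough.
	for i := 0; i < 3; i++ {
		id := fmt.Sprintf("job%d", i)
		requests <- dbRequest{func(db map[string]string) string {
			db[id] = "waiting"
			return "added " + id
		}}
	}
	close(requests)

	// A single consumer forwards the updates; it observes them in exactly
	// the order the edits happened.
	for u := range updates {
		fmt.Println(u)
	}
}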
Example #10
// Infinite loop that consumes *xml.Hash job descriptors from
// db.PendingActions and launches goroutines to perform the appropriate
// action depending on the job's status ("done" or "processing").
// This function is also responsible for adding a new job when a periodic
// job is done.
func Init() { // not init() because we need to call it from go-susi.go
	go func() {
		for {
			job := db.PendingActions.Next().(*xml.Hash)

			if job.Text("status") != "done" {

				util.Log(1, "INFO! Taking action for job: %v", job)

				go util.WithPanicHandler(func() {

					if !Forward(job) {

						// Tell the lucky winner what we're going to do with it.

						macaddress := job.Text("macaddress")
						headertag := job.Text("headertag")
						if headertag != "send_user_msg" && // send_user_msg does not target a machine
							headertag != "set_activated_for_installation" { // set_activated_for_installation is sent when the action is taken
							client := db.ClientWithMAC(macaddress)
							if client == nil {
								util.Log(0, "ERROR! Client with MAC %v not in clientdb. Cannot send %v", macaddress, headertag)
								// Don't abort. Some jobs work even if we can't reach the client.
							} else {
								client_addr := client.Text("client")
								util.Log(1, "INFO! Sending %v to %v", headertag, client_addr)
								trigger_action := "<xml><header>" + headertag + "</header><" + headertag + "></" + headertag + "><source>" + config.ServerSourceAddress + "</source><target>" + client_addr + "</target></xml>"
								message.Client(client_addr).Tell(trigger_action, config.ActionAnnouncementTTL)
							}
						}

						// Now that the client is rightfully excited, give it our best shot.

						done := true
						switch headertag {
						case "send_user_msg":
							SendUserMsg(job)
						case "trigger_action_wake":
							Wake(job) // "Aufwecken"
						case "trigger_action_lock":
							Lock(job) // "Sperre"
						case "trigger_action_localboot":
							Localboot(job) // "Erzwinge lokalen Start"
						case "trigger_action_halt":
							Halt(job) // "Anhalten"
						case "trigger_action_reboot":
							Reboot(job) // "Neustarten"
						case "trigger_action_faireboot":
							FAIReboot(job) // "Job abbrechen"
						case "set_activated_for_installation",
							"trigger_action_activate":
							Activate(job) // "Sperre aufheben"
						case "trigger_action_update":
							Update(job) // "Aktualisieren"
							done = false
						case "trigger_action_reinstall":
							Reinstall(job) // "Neuinstallation"
							done = false
						default:
							util.Log(0, "ERROR! Unknown headertag in PendingActions for job: %v", job)
						}

						if done {
							util.Log(1, "INFO! No further processing required => Removing job: %v", job)
							db.JobsRemoveLocal(xml.FilterSimple("id", job.Text("id")), false)
						}
					}
				})

			} else { // if status == "done"
				util.Log(1, "INFO! Job is done or cancelled: %v", job)

				go util.WithPanicHandler(func() {

					switch job.Text("headertag") {
					case "send_user_msg":
					case "trigger_action_lock": // "Sperre"
					case "trigger_action_halt": // "Anhalten"
					case "trigger_action_localboot": // "Erzwinge lokalen Start"
					case "trigger_action_reboot": // "Neustarten"
					case "trigger_action_faireboot": // "Job abbrechen"
					case "set_activated_for_installation",
						"trigger_action_activate": // "Sperre aufheben"
					case "trigger_action_wake": // "Aufwecken"

					case "trigger_action_update", // "Aktualisieren"
						"trigger_action_reinstall": // "Neuinstallation"
						macaddress := job.Text("macaddress")
						faistate := db.SystemGetState(macaddress, "faiState")
						if faistate == "" || strings.HasPrefix(faistate, "softupdat") || strings.HasPrefix(faistate, "install") {
							if job.Text("progress") == "forward" || job.Text("progress") == "groom" {
								util.Log(1, "INFO! Job removed due to %ving => will NOT set faiState to \"localboot\" for client with MAC %v", job.Text("progress"), macaddress)
							} else {
								util.Log(1, "INFO! Setting faiState \"localboot\" for client with MAC %v", macaddress)
								db.SystemSetState(macaddress, "faiState", "localboot")
							}
						} else if faistate != "localboot" {
							util.Log(1, "INFO! Client with MAC %v has faiState \"%v\" => will NOT overwrite this with \"localboot\"", macaddress, faistate)
						}

					default:
						util.Log(0, "ERROR! Unknown headertag \"%v\" in PendingActions", job.Text("headertag"))
					}

					periodic := job.Text("periodic")
					if periodic != "none" && periodic != "" {
						t := util.ParseTimestamp(job.Text("timestamp"))
						p := strings.Split(periodic, "_")
						if len(p) != 2 {
							util.Log(0, "ERROR! Illegal <periodic>: %v", periodic)
							return
						}
						period, err := strconv.ParseUint(p[0], 10, 64)
						if err != nil || period == 0 {
							util.Log(0, "ERROR! Illegal <periodic>: %v: %v", periodic, err)
							return
						}

						for t.Before(time.Now()) {
							switch p[1] {
							case "seconds":
								t = t.Add(time.Duration(period) * time.Second)
							case "minutes":
								t = t.Add(time.Duration(period) * time.Minute)
							case "hours":
								t = t.Add(time.Duration(period) * time.Hour)
							case "days":
								t = t.AddDate(0, 0, int(period))
							case "weeks":
								t = t.AddDate(0, 0, int(period*7))
							case "months":
								t = t.AddDate(0, int(period), 0)
							case "years":
								t = t.AddDate(int(period), 0, 0)
							default:
								util.Log(0, "ERROR! Unknown periodic unit: %v", p[1])
								return
							}
						}
						job.FirstOrAdd("timestamp").SetText(util.MakeTimestamp(t))
						job.FirstOrAdd("result").SetText("none")
						job.FirstOrAdd("progress").SetText("none")
						job.FirstOrAdd("status").SetText("waiting")
						util.Log(1, "INFO! Scheduling next instance of periodic job: %v", job)
						db.JobAddLocal(job)
					}
				})

			}
		}
	}()
}
Example #11
// If job belongs to an unknown client or a client registered here or if
// job has <progress>forward-failed</progress> or if the job's <headertag>
// is trigger_action_wake, trigger_action_lock, trigger_action_localboot or send_user_msg,
// this function returns false.
// Otherwise this function removes the job from the jobdb and then tries to
// forward the job to the siserver where the client is registered.
// If forwarding fails, the job is re-added to the jobdb but marked with
// <progress>forward-failed</progress>. Whether forwarding succeeds or fails,
// if it is attempted this function returns true. Note that the re-added job has
// a different id from the original job (which has been removed from the database)
// and will independently come up in PendingActions. This is why it doesn't make
// sense to return false in the case of a failed forward.
func Forward(job *xml.Hash) bool {
	if job.Text("progress") == "forward-failed" {
		return false
	}
	switch job.Text("headertag") {
	case "send_user_msg":
		return false
	case "trigger_action_wake", "trigger_action_lock", "trigger_action_localboot":
		return false
	}

	macaddress := job.Text("macaddress")

	client := db.ClientWithMAC(macaddress)
	if client == nil || client.Text("source") == config.ServerSourceAddress {
		return false
	}

	siserver := client.Text("source")
	headertag := job.Text("headertag")

	util.Log(1, "INFO! %v for client %v must be forwarded to server %v where client is registered", headertag, macaddress, siserver)

	if message.Peer(siserver).Downtime() != 0 {
		util.Log(0, "ERROR! Peer %v is down => Will try to execute %v for client %v myself.", siserver, headertag, macaddress)
		return false
	}

	// remove job with stop_periodic=true after setting progress="forward" to suppress forcing "localboot"
	db.JobsModifyLocal(xml.FilterSimple("id", job.Text("id")), xml.NewHash("job", "progress", "forward"))
	db.JobsRemoveLocal(xml.FilterSimple("id", job.Text("id")), true)

	if !message.Peer(siserver).IsGoSusi() {
		// Wait if the peer is not a go-susi, to prevent the fju caused by
		// db.JobsRemoveLocal() above from killing the forwarded job; which
		// might otherwise happen because gosa-si uses macaddress+headertag to
		// identify jobs and therefore cannot differentiate between the old and
		// the new job.
		time.Sleep(5 * time.Second)
	}

	util.Log(1, "INFO! Forwarding %v for client %v to server %v", headertag, macaddress, siserver)

	// gosa-si-server does not seem to process some jobs when sent as job_...
	// So we use gosa_.... However this means that <periodic> won't work properly with
	// non-go-susi peers :-(
	// We make an exception for reinstall and update because these need to be sent
	// as job_ or they won't appear in deployment status.
	header := "gosa_" + headertag
	if headertag == "trigger_action_update" || headertag == "trigger_action_reinstall" {
		header = "job_" + headertag
	}
	gosa_trigger_action := xml.NewHash("xml", "header", header)
	gosa_trigger_action.Add("source", "GOSA")
	gosa_trigger_action.Add("macaddress", macaddress)
	gosa_trigger_action.Add("target", macaddress)
	if job.First("timestamp") != nil {
		gosa_trigger_action.Add("timestamp", job.Text("timestamp"))
	}
	if job.First("periodic") != nil {
		gosa_trigger_action.Add("periodic", job.Text("periodic"))
	}

	request := gosa_trigger_action.String()

	// clone job, because we want to use it in a new goroutine and don't want to risk
	// having it changed concurrently.
	job_clone := job.Clone()

	go util.WithPanicHandler(func() {
		util.Log(2, "DEBUG! Forwarding to %v: %v", siserver, request)
		conn, _ := security.SendLnTo(siserver, request, config.ModuleKey["[GOsaPackages]"], true)
		if conn != nil {
			conn.Close()
			return
		}

		util.Log(0, "ERROR! Could not forward %v for client %v to server %v => Will try to execute job myself.", headertag, macaddress, siserver)

		job_clone.FirstOrAdd("result").SetText("none")
		job_clone.FirstOrAdd("progress").SetText("forward-failed")
		job_clone.FirstOrAdd("status").SetText("waiting")

		util.Log(1, "INFO! Re-Scheduling job tagged with \"forward-failed\": %v", job_clone)
		db.JobAddLocal(job_clone)
	})

	return true
}
Example #12
// Handles the message "CLMSG_save_fai_log".
//  buf: the decrypted message
func clmsg_save_fai_log(buf *bytes.Buffer) {
	macaddress := ""
	action := ""
	start := 0
	end := 0
	data := buf.Bytes()
	for i := 0; i < len(data)-19; i++ {
		if data[i] == '<' {
			if i+12+17 <= len(data) && match(data, i, "<macaddress>") {
				macaddress = string(data[i+12 : i+12+17])
			} else if match(data, i, "<fai_action>") {
				for k := i + 12; k < len(data); k++ {
					if data[k] == '<' {
						action = string(data[i+12 : k])
						i = k
						break
					}
				}
			} else if match(data, i, "<CLMSG_save_fai_log>") {
				start = i + 20
			} else if match(data, i, "</CLMSG_save_fai_log>") {
				end = i
			}
		}
	}

	if !macAddressRegexp.MatchString(macaddress) {
		util.Log(0, "ERROR! CLMSG_save_fai_log with illegal <macaddress> \"%v\"", macaddress)
		return
	}

	if !actionRegexp.MatchString(action) {
		util.Log(0, "ERROR! CLMSG_save_fai_log with illegal <fai_action> \"%v\"", action)
		return
	}

	util.Log(1, "INFO! Received log files from client %v. Assuming CLMSG_PROGRESS 100", macaddress)
	progress_msg := xml.NewHash("xml", "CLMSG_PROGRESS", "100")
	progress_msg.Add("macaddress", macaddress)
	clmsg_progress(progress_msg)

	timestamp := util.MakeTimestamp(time.Now())
	logname := action + "_" + timestamp[0:8] + "_" + timestamp[8:]
	logdir := path.Join(config.FAILogPath, strings.ToLower(macaddress), logname)

	// NOTE: 1kB = 1000B, 1kiB = 1024B
	util.Log(1, "INFO! Storing %vkB of %v log files from %v in %v", len(data)/1000, action, macaddress, logdir)

	err := os.MkdirAll(logdir, 0755)
	if err != nil {
		util.Log(0, "ERROR! Error creating log directory \"%v\": %v", logdir, err)
		return
	}

	// Create convenience symlink with the system's name as alias for MAC address.
	go util.WithPanicHandler(func() {
		if plainname := db.SystemPlainnameForMAC(macaddress); plainname != "none" {
			linkpath := path.Join(config.FAILogPath, strings.ToLower(plainname))
			link_target, err := os.Readlink(linkpath)
			if err != nil && !os.IsNotExist(err.(*os.PathError).Err) {
				util.Log(0, "ERROR! %v exists but is not a symlink: %v", linkpath, err)
				return
			}
			if err == nil {
				if link_target == strings.ToLower(macaddress) {
					return // symlink is already correct => nothing to do
				}

				util.Log(0, "WARNING! Machine %v has a new MAC %v . Removing old symlink %v => %v", plainname, macaddress, linkpath, link_target)
				err = os.Remove(linkpath)
				if err != nil {
					util.Log(0, "ERROR! Removing %v failed: %v", linkpath, err)
					// Don't bail out. Maybe we can create the new symlink anyway.
				}
			}
			err = os.Symlink(strings.ToLower(macaddress), linkpath)
			if err != nil && !os.IsExist(err.(*os.LinkError).Err) {
				util.Log(0, "ERROR! Could not create symlink %v => %v: %v", linkpath, strings.ToLower(macaddress), err)
			}
		}
	})

	files := []int{}
	for i := start; i < end; i++ {
		if data[i] == ':' && match(data, i-8, "log_file") {
			k := i
			i++
			for i < end {
				if data[i] == ':' {
					if k+1 < i {
						files = append(files, k+1, i)
					}
					break
				}
				i++
			}
		}
	}

	files = append(files, end+8)

	for i := 0; i < len(files)-1; i += 2 {
		fname := string(data[files[i]:files[i+1]])
		logdata := data[files[i+1]+1 : files[i+2]-8]
		util.Log(1, "INFO! Processing \"%v\" (%vkB)", fname, len(logdata)/1000)

		logdata = util.Base64DecodeInPlace(logdata)

		// As a precaution, make sure fname contains no slashes.
		fname = strings.Replace(fname, "/", "_", -1)
		err = ioutil.WriteFile(path.Join(logdir, fname), logdata, 0644)
		if err != nil {
			util.Log(0, "ERROR! Could not store \"%v\": %v", path.Join(logdir, fname), err)
			continue
		}
	}
}
Example #13
// Tell(msg, ttl): Tries to send text to the client.
//                 The ttl determines how long the message will be buffered for
//                 resend attempts if sending fails. ttl values smaller than
//                 100ms will be treated as 100ms.
func (conn *ClientConnection) Tell(text string, ttl time.Duration) {
	if ttl < 100*time.Millisecond {
		ttl = 100 * time.Millisecond
	}
	util.Log(2, "DEBUG! Tell(): Queuing message for client %v with TTL %v: %v", conn.addr, ttl, text)

	msg := &ClientMessage{text, time.Now().Add(ttl)}

	go util.WithPanicHandler(func() {
		var try uint = 0

		if msg.Expires.Before(time.Now()) {
			util.Log(0, "ERROR! Scheduling of goroutine for sending message to %v delayed more than TTL %v => Message will not be sent", conn.addr, ttl)
		} else {
			for {
				if try > 0 {
					expiry := msg.Expires.Sub(time.Now())
					if expiry <= 0 {
						break
					}
					delay := (1 << try) * time.Second
					if delay > 60*time.Second {
						delay = 60 * time.Second
					}
					if delay > expiry {
						delay = expiry - 1*time.Second
					}
					if delay <= 0 {
						break
					}
					util.Log(2, "DEBUG! Sleeping %v before next send attempt", delay)
					time.Sleep(delay)
				}

				try++

				util.Log(1, "INFO! Attempt #%v to send message to %v: %v", try, conn.addr, msg.Text)

				client := db.ClientWithAddress(conn.addr)
				if client == nil {
					if conn.addr == config.ServerSourceAddress {
						// If sending to myself (e.g. new_ldap_config), fake a client object
						client = xml.NewHash("xml", "source", config.ServerSourceAddress)
						key := "" // default to empty key which signals TLS
						if config.TLSClientConfig == nil {
							key = config.ModuleKey["[ClientPackages]"]
						}
						client.Add("key", key)
					} else {
						util.Log(0, "ERROR! Client %v not found in clientdb", conn.addr)
						continue
					}
				}

				// if client is registered at a foreign server
				if client.Text("source") != config.ServerSourceAddress {
					util.Log(1, "INFO! Client %v is registered at %v => Forwarding message", conn.addr, client.Text("source"))

					// MESSAGE FORWARDING NOT YET IMPLEMENTED
					util.Log(0, "ERROR! Message forwarding not yet implemented")
					break

				} else { // if client is registered at our server

					keys := client.Get("key")
					if len(keys) == 0 {
						// This case should be impossible. A client's here_i_am message always contains a key (unless the client is buggy).
						util.Log(0, "ERROR! No key known for client %v", conn.addr)
						break
					}

					encrypted := msg.Text // default is unencrypted for TLS connection

					var tcpConn net.Conn
					var err error

					if keys[0] == "" { // TLS client
						// We just use security.SendLnTo() to establish the TLS connection
						// The empty line that is sent is ignored by the receiving go-susi.
						tcpConn, _ = security.SendLnTo(conn.addr, "", "", true)
						if tcpConn == nil {
							// Error message already logged by SendLnTo()
							continue
						}
					} else { // non-TLS client
						encrypted = security.GosaEncrypt(msg.Text, keys[0])

						tcpConn, err = net.Dial("tcp", conn.addr)
						if err != nil {
							util.Log(0, "ERROR! Dial() could not connect to %v: %v", conn.addr, err)
							continue
						}

						err = tcpConn.(*net.TCPConn).SetKeepAlive(true)
						if err != nil {
							util.Log(0, "ERROR! SetKeepAlive: %v", err)
							// This is not fatal => Don't abort send attempt
						}
					}

					if msg.Expires.Before(time.Now()) {
						util.Log(0, "ERROR! Connection to %v established, but TTL %v has expired in the meantime => Message will not be sent", conn.addr, ttl)
						tcpConn.Close()
						break
					}

					util.Log(2, "DEBUG! Sending message to %v encrypted with key %v", conn.addr, keys[0])
					err = util.SendLn(tcpConn, encrypted, config.Timeout)
					tcpConn.Close()
					if err == nil {
						util.Log(2, "DEBUG! Successfully sent message to %v: %v", conn.addr, msg.Text)
						return // not break! break would cause an error message to be logged
					} else {
						util.Log(0, "ERROR! SendLn() to %v failed: %v", conn.addr, err)
					}
				}
			}
		}

		util.Log(0, "ERROR! Cannot send message to %v: %v", conn.addr, msg.Text)
	})
}
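A short usage sketch (a hypothetical caller; the payload and TTL are made up, and the ClientConnection is assumed to have been obtained via message.Client() as in Example #10):

// Hypothetical caller; payload and TTL are illustrative only.
func exampleTellUsage(conn *ClientConnection) {
	msg := "<xml><header>usr_msg</header><source>" + config.ServerSourceAddress + "</source></xml>"

	// Returns immediately; delivery attempts (with growing delays capped at
	// 60s between tries) run in a background goroutine until the TTL expires.
	conn.Tell(msg, 5*time.Minute)

	// TTLs below 100ms are bumped to 100ms, per the doc comment above.
	conn.Tell(msg, 0)
}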
Example #14
// Sends all local jobs and clients to the peer. If the peer is not a go-susi, also
// requests all of the peer's local jobs and converts them to a <sync>all</sync>
// message and feeds it into foreign_job_updates().
func (conn *PeerConnection) SyncAll() {

	// send all our clients as new_foreign_client messages
	for nfc := db.ClientsRegisteredAtThisServer().First("xml"); nfc != nil; nfc = nfc.Next() {
		nfc.FirstOrAdd("target").SetText(conn.addr)
		conn.Tell(nfc.String(), "")
	}

	if conn.IsGoSusi() {
		util.Log(1, "INFO! Full sync (go-susi protocol) with %v", conn.addr)
		db.JobsSyncAll(conn.addr, nil)
	} else { // peer is not go-susi (or not known to be one, yet)
		go util.WithPanicHandler(func() {
			util.Log(1, "INFO! Full sync (gosa-si fallback) with %v", conn.addr)

			// Query the peer's database for
			// * all jobs the peer is responsible for
			// * all jobs the peer thinks we are responsible for
			query := xml.NewHash("xml", "header", "gosa_query_jobdb")
			query.Add("source", "GOSA")
			query.Add("target", "GOSA")
			clause := query.Add("where").Add("clause")
			clause.Add("connector", "or")
			clause.Add("phrase").Add("siserver", "localhost")
			clause.Add("phrase").Add("siserver", conn.addr)
			clause.Add("phrase").Add("siserver", config.ServerSourceAddress)

			jobs_str := <-conn.Ask(query.String(), config.ModuleKey["[GOsaPackages]"])
			jobs, err := xml.StringToHash(jobs_str)
			if err != nil {
				util.Log(0, "ERROR! gosa_query_jobdb: Error decoding reply from peer %v: %v", conn.addr, err)
				// Bail out. Otherwise we would end up removing all of the peer's jobs from
				// our database if the peer is down. While that would be one way of dealing
				// with this case, we prefer to keep those jobs and convert them into
				// state "error" with an error message about the downtime. This happens
				// in gosa_query_jobdb.go.
				return
			}

			if jobs.First("error_string") != nil {
				util.Log(0, "ERROR! gosa_query_jobdb: Peer %v returned error: %v", conn.addr, jobs.Text("error_string"))
				// Bail out. See explanation further above.
				return
			}

			// Now we extract from jobs those that are the responsibility of the
			// peer and synthesize a foreign_job_updates with <sync>all</sync> from them.
			// This leaves in jobs those the peer believes belong to us.

			fju := jobs.Remove(xml.FilterOr([]xml.HashFilter{xml.FilterSimple("siserver", "localhost"), xml.FilterSimple("siserver", conn.addr)}))
			fju.Rename("xml")
			fju.Add("header", "foreign_job_updates")
			fju.Add("source", conn.addr)
			fju.Add("target", config.ServerSourceAddress)
			fju.Add("sync", "all")

			util.Log(2, "DEBUG! Queuing synthetic fju: %v", fju)
			foreign_job_updates(fju)

			db.JobsSyncAll(conn.addr, jobs)
		})
	}
}
Example #15
// Encrypts request with key, sends it to the peer and returns a channel
// from which the peer's reply can be received (already decrypted with
// the same key). It is guaranteed that a reply will
// be available from this channel even if the peer connection breaks
// or the peer does not reply within a certain time. In the case of
// an error, the reply will be an error reply (as returned by
// message.ErrorReply()). The returned channel will be buffered and
// the producer goroutine will close it after writing the reply. This
// means it is permissible to ignore the reply without risk of a
// goroutine leak.
// If key == "" the first key from db.ServerKeys(peer) is used.
func (conn *PeerConnection) Ask(request, key string) <-chan string {
	c := make(chan string, 1)

	if conn.err != nil {
		c <- ErrorReply(conn.err)
		close(c)
		return c
	}

	keys := db.ServerKeys(conn.addr)
	// If we use TLS and the target does, too
	if config.TLSClientConfig != nil && len(keys) > 0 && keys[0] == "" {
		key = ""
	} else if key == "" {
		if len(keys) == 0 {
			c <- ErrorReply("PeerConnection.Ask: No key known for peer " + conn.addr)
			close(c)
			return c
		}
		key = keys[0]
	}

	go util.WithPanicHandler(func() {
		defer close(c)
		var tcpconn net.Conn
		var err error
		if key == "" { // TLS
			// We just use security.SendLnTo() to establish the TLS connection
			// The empty line that is sent is ignored by the receiving go-susi.
			tcpconn, _ = security.SendLnTo(conn.addr, "", "", true)
			if tcpconn == nil {
				// Unfortunately we don't have the actual error from SendLnTo(), so generate
				// a generic one.
				err = fmt.Errorf("Could not establish TLS connection to %v", conn.addr)
			}
		} else {
			tcpconn, err = net.Dial("tcp", conn.addr)
		}

		if err != nil {
			c <- ErrorReply(err)
			// make sure handleConnection()/monitorConnection() notice that the peer is unreachable
			if conn.tcpConn != nil {
				conn.tcpConn.Close()
			}
		} else {
			defer tcpconn.Close()
			util.Log(1, "INFO! Asking %v: %v", conn.addr, request)
			encrypted := request
			if key != "" {
				encrypted = security.GosaEncrypt(request, key)
			}
			err = util.SendLn(tcpconn, encrypted, config.Timeout)
			// make sure handleConnection()/monitorConnection() notice that the peer is unreachable
			if err != nil && conn.tcpConn != nil {
				conn.tcpConn.Close()
			}
			reply, err := util.ReadLn(tcpconn, config.Timeout)
			if err != nil && err != io.EOF {
				util.Log(0, "ERROR! ReadLn(): %v", err)
			}
			if key != "" {
				reply = security.GosaDecrypt(reply, key)
			}
			if reply == "" {
				reply = ErrorReply("Communication error in Ask()")
				// make sure handleConnection()/monitorConnection() notice that the peer is unreachable
				if conn.tcpConn != nil {
					conn.tcpConn.Close()
				}
			}
			util.Log(1, "INFO! Reply from %v: %v", conn.addr, reply)
			c <- reply
		}
	})
	return c
}
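A short usage sketch relying only on the contract stated in the doc comment above (exactly one reply, buffered channel, closed by the producer); the query payload mirrors the one built in Example #14, and the address is made up:

// Hypothetical caller inside the same package.
func exampleAskUsage() {
	query := "<xml><header>gosa_query_jobdb</header><source>GOSA</source><target>GOSA</target></xml>"

	// Blocking use: a reply (possibly an error reply) is guaranteed to arrive.
	reply := <-Peer("siserver.example.com:20081").Ask(query, "")
	util.Log(2, "DEBUG! jobdb reply: %v", reply)

	// Fire-and-forget is also safe: the producer goroutine writes its single
	// reply into the buffered channel and closes it, so nothing leaks.
	_ = Peer("siserver.example.com:20081").Ask(query, "")
}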
Example #16
// Run KernelListHook() and PackageListHook() to update the respective databases.
// This happens in the background. This function does not wait for them to complete.
// startup == true => This is the initial call right after go-susi starts.
func HooksExecute(startup bool) {
	go util.WithPanicHandler(func() { runHooks(startup) })
	go util.WithPanicHandler(FAIReleasesListUpdate)
}
Example #17
// Sends a new_server message to all known peer servers.
func Broadcast_new_server() {
	for _, server := range db.ServerAddresses() {
		srv := server
		go util.WithPanicHandler(func() { Send_new_server("new_server", srv) })
	}
}
Example #18
// Unit tests for the package github.com/mbenkmann/golib/util.
func Util_test() {
	fmt.Printf("\n==== util ===\n\n")

	addr, err := util.Resolve("1.2.3.4", "")
	check(err, nil)
	check(addr, "1.2.3.4")

	addr, err = util.Resolve("1.2.3.4:5", "")
	check(err, nil)
	check(addr, "1.2.3.4:5")

	addr, err = util.Resolve("::1:5", "")
	check(err, nil)
	check(addr, "[::1:5]")

	addr, err = util.Resolve("localhost:65535", "")
	check(err, nil)
	check(addr, "127.0.0.1:65535")

	addr, err = util.Resolve("localhost", "")
	check(err, nil)
	check(addr, "127.0.0.1")

	addr, err = util.Resolve("::1", "")
	check(err, nil)
	check(addr, "127.0.0.1")

	addr, err = util.Resolve("[::1]", "")
	check(err, nil)
	check(addr, "127.0.0.1")

	addr, err = util.Resolve("[::1]:12345", "")
	check(err, nil)
	check(addr, "127.0.0.1:12345")

	addr, err = util.Resolve("localhost:65535", "foo")
	check(err, nil)
	check(addr, "foo:65535")

	addr, err = util.Resolve("localhost", "foo")
	check(err, nil)
	check(addr, "foo")

	addr, err = util.Resolve("::1", "foo")
	check(err, nil)
	check(addr, "foo")

	addr, err = util.Resolve("[::1]", "foo")
	check(err, nil)
	check(addr, "foo")

	addr, err = util.Resolve("[::1]:12345", "foo")
	check(err, nil)
	check(addr, "foo:12345")

	addr, err = util.Resolve("", "")
	check(hasWords(err, "no", "such", "host"), "")
	check(addr, "")

	addr, err = util.Resolve(":10", "")
	check(hasWords(err, "no", "such", "host"), "")
	check(addr, ":10")

	check(util.WaitForDNS(3*time.Second), true)

	h, _ := exec.Command("hostname").CombinedOutput()
	hostname := strings.TrimSpace(string(h))

	ipp, _ := exec.Command("hostname", "-I").CombinedOutput()
	ips := strings.Fields(strings.TrimSpace(string(ipp)))
	addr, err = util.Resolve(hostname+":234", config.IP)
	check(err, nil)
	ip := ""
	for _, ip2 := range ips {
		if addr == ip2+":234" {
			ip = ip2
		}
	}
	check(addr, ip+":234")

	testLogging()

	buf := make([]byte, 80)
	for i := range buf {
		buf[i] = byte(util_test_rng.Intn(26) + 'a')
	}

	crap1 := &crappyConnection1{}
	n, err := util.WriteAll(crap1, buf)
	check(string(*crap1), string(buf))
	check(n, len(buf))
	check(err, nil)

	crap2 := &crappyConnection2{}
	n, err = util.WriteAll(crap2, buf)
	check(string(*crap2), string(buf))
	check(n, len(buf))
	check(err, nil)

	stalled1 := &stalledConnection1{}
	n, err = util.WriteAll(stalled1, buf)
	check(string(*stalled1), string(buf[0:16]))
	check(n, 16)
	check(err, io.ErrShortWrite)

	stalled2 := &stalledConnection2{}
	n, err = util.WriteAll(stalled2, buf)
	check(string(*stalled2), string(buf[0:16]))
	check(n, 16)
	check(err, io.ErrShortWrite)

	broken := &brokenConnection{}
	n, err = util.WriteAll(broken, buf)
	check(string(*broken), string(buf[0:16]))
	check(n, 16)
	check(err, io.ErrClosedPipe)

	panicker := func() {
		foobar = "bar"
		panic("foo")
	}

	var buffy bytes.Buffer
	util.LoggersSuspend()
	util.LoggerAdd(&buffy)
	defer util.LoggersRestore()

	util.WithPanicHandler(panicker)
	time.Sleep(200 * time.Millisecond) // make sure log message is written out
	check(foobar, "bar")
	check(len(buffy.String()) > 10, true)

	listener, err := net.Listen("tcp", "127.0.0.1:39390")
	if err != nil {
		panic(err)
	}

	go func() {
		r, err := listener.Accept()
		if err != nil {
			panic(err)
		}
		buf := make([]byte, 1)
		r.Read(buf)
		time.Sleep(10 * time.Second)
		r.Read(buf)
	}()
	long := make([]byte, 10000000)
	longstr := string(long)
	buffy.Reset()
	t0 := time.Now()
	util.SendLnTo("127.0.0.1:39390", longstr, 5*time.Second)
	duration := time.Since(t0)
	check(duration > 4*time.Second && duration < 6*time.Second, true)
	time.Sleep(200 * time.Millisecond) // make sure log message is written out
	check(strings.Contains(buffy.String(), "ERROR"), true)

	go func() {
		conn, err := listener.Accept()
		if err != nil {
			panic(err)
		}
		ioutil.ReadAll(conn)
	}()
	long = make([]byte, 10000000)
	longstr = string(long)
	buffy.Reset()
	t0 = time.Now()
	util.SendLnTo("127.0.0.1:39390", longstr, 5*time.Second)
	duration = time.Since(t0)
	check(duration < 2*time.Second, true)
	time.Sleep(200 * time.Millisecond) // make sure log message is written out
	check(buffy.String(), "")

	// Test that ReadLn() times out properly
	go func() {
		_, err := net.Dial("tcp", "127.0.0.1:39390")
		if err != nil {
			panic(err)
		}
	}()
	conn, err := listener.Accept()
	if err != nil {
		panic(err)
	}
	t0 = time.Now()
	st, err := util.ReadLn(conn, 5*time.Second)
	duration = time.Since(t0)
	check(duration > 4*time.Second && duration < 6*time.Second, true)
	check(st, "")
	check(hasWords(err, "timeout"), "")

	// Test that ReadLn() returns io.EOF if last line not terminated by \n
	go func() {
		conn, err := net.Dial("tcp", "127.0.0.1:39390")
		if err != nil {
			panic(err)
		}
		conn.Write([]byte("foo\r"))
		conn.Close()
	}()
	conn, err = listener.Accept()
	if err != nil {
		panic(err)
	}
	st, err = util.ReadLn(conn, 5*time.Second)
	check(err, io.EOF)
	check(st, "foo")

	go func() {
		conn, err := net.Dial("tcp", "127.0.0.1:39390")
		if err != nil {
			panic(err)
		}
		conn.Write([]byte("\r\r\n\rfo\ro\nbar\r\nfoxtrott"))
		conn.Close()
	}()
	conn, err = listener.Accept()
	if err != nil {
		panic(err)
	}
	// Test proper trimming of multiple \r
	st, err = util.ReadLn(conn, 0)
	check(err, nil)
	check(st, "")
	// Test that the empty first line has actually been read
	// and that the next ReadLn() reads the 2nd line
	// Also test that negative timeouts work the same as timeout==0
	// Also test that \r is not trimmed at start and within line.
	st, err = util.ReadLn(conn, -1*time.Second)
	check(err, nil)
	check(st, "\rfo\ro")
	// Check 3rd line
	st, err = util.ReadLn(conn, 0)
	check(err, nil)
	check(st, "bar")
	// Check 4th line and io.EOF error
	st, err = util.ReadLn(conn, 0)
	check(err, io.EOF)
	check(st, "foxtrott")

	// Test that delayed reads work with timeout==0
	go func() {
		conn, err := net.Dial("tcp", "127.0.0.1:39390")
		if err != nil {
			panic(err)
		}
		time.Sleep(1 * time.Second)
		_, err = conn.Write([]byte("foo\r\n"))
		if err != nil {
			panic(err)
		}
		time.Sleep(2 * time.Second)
	}()
	conn, err = listener.Accept()
	if err != nil {
		panic(err)
	}
	t0 = time.Now()
	st, err = util.ReadLn(conn, time.Duration(0))
	duration = time.Since(t0)
	check(duration < 2*time.Second, true)
	check(duration > 800*time.Millisecond, true)
	check(err, nil)
	check(st, "foo")

	counter := util.Counter(13)
	var b1 UintArray = make([]uint64, 100)
	var b2 UintArray = make([]uint64, 100)
	done := make(chan bool)
	fill := func(b UintArray) {
		for i := 0; i < 100; i++ {
			b[i] = <-counter
			time.Sleep(1 * time.Millisecond)
		}
		done <- true
	}
	go fill(b1)
	go fill(b2)
	<-done
	<-done
	check(sort.IsSorted(&b1), true)
	check(sort.IsSorted(&b2), true)
	var b3 UintArray = make([]uint64, 200)
	i := 0
	j := 0
	k := 0
	for i < 100 || j < 100 {
		if i == 100 {
			b3[k] = b2[j]
			j++
			k++
			continue
		}
		if j == 100 {
			b3[k] = b1[i]
			i++
			k++
			continue
		}
		if b1[i] == b2[j] {
			check(b1[i] != b2[j], true)
			break
		}
		if b1[i] < b2[j] {
			b3[k] = b1[i]
			i++
		} else {
			b3[k] = b2[j]
			j++
		}
		k++
	}

	one_streak := true
	b5 := make([]uint64, 200)
	for i := 0; i < 200; i++ {
		if i < 100 && b1[i] != uint64(13+i) && b2[i] != uint64(13+i) {
			one_streak = false
		}
		b5[i] = uint64(13 + i)
	}

	check(b3, b5)
	check(one_streak, false) // Check whether goroutines were actually executed concurrently rather than in sequence

	tempdir, err := ioutil.TempDir("", "util-test-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tempdir)
	fpath := tempdir + "/foo.log"
	logfile := util.LogFile(fpath)
	check(logfile.Close(), nil)
	n, err = util.WriteAll(logfile, []byte("Test"))
	check(err, nil)
	check(n, 4)
	check(logfile.Close(), nil)
	n, err = util.WriteAll(logfile, []byte("12"))
	check(err, nil)
	check(n, 2)
	n, err = util.WriteAll(logfile, []byte("3"))
	check(err, nil)
	check(n, 1)
	check(os.Rename(fpath, fpath+".old"), nil)
	n, err = util.WriteAll(logfile, []byte("Fo"))
	check(err, nil)
	check(n, 2)
	f2, _ := os.OpenFile(fpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
	f2.Write([]byte("o"))
	f2.Close()
	n, err = util.WriteAll(logfile, []byte("bar"))
	check(err, nil)
	check(n, 3)
	check(logfile.Close(), nil)
	data, err := ioutil.ReadFile(fpath)
	check(err, nil)
	if err == nil {
		check(string(data), "Foobar")
	}
	data, err = ioutil.ReadFile(fpath + ".old")
	check(err, nil)
	if err == nil {
		check(string(data), "Test123")
	}

	test_time := time.Date(2013, time.January, 20, 14, 7, 21, 0, time.Local)
	check(util.MakeTimestamp(test_time), "20130120140721")
	test_time = time.Date(2013, time.January, 20, 14, 7, 21, 0, time.UTC)
	check(util.MakeTimestamp(test_time), "20130120140721")
	test_time = time.Date(2013, time.January, 20, 14, 7, 21, 0, time.FixedZone("Fooistan", 45678))
	check(util.MakeTimestamp(test_time), "20130120140721")
	illegal := time.Unix(0, 0)
	buffy.Reset()
	check(util.ParseTimestamp(""), illegal)
	time.Sleep(200 * time.Millisecond) // make sure log message is written out
	check(strings.Contains(buffy.String(), "ERROR"), true)
	buffy.Reset()
	check(util.ParseTimestamp("20139910101010"), illegal)
	time.Sleep(200 * time.Millisecond) // make sure log message is written out
	check(strings.Contains(buffy.String(), "ERROR"), true)
	check(util.ParseTimestamp("20131110121314"), time.Date(2013, time.November, 10, 12, 13, 14, 0, time.Local))
	check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))
	test_time = test_time.Add(2400 * time.Hour)
	check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))
	test_time = test_time.Add(2400 * time.Hour)
	check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))
	test_time = test_time.Add(2400 * time.Hour)
	check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))
	test_time = test_time.Add(2400 * time.Hour)
	check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))

	diff := time.Since(util.ParseTimestamp(util.MakeTimestamp(time.Now())))
	if diff < time.Second {
		diff = 0
	}
	check(diff, time.Duration(0))

	t0 = time.Now()
	util.WaitUntil(t0.Add(-10 * time.Second))
	util.WaitUntil(t0.Add(-100 * time.Minute))
	dur := time.Now().Sub(t0)
	if dur < 1*time.Second {
		dur = 0
	}
	check(dur, 0)
	t0 = time.Now()
	util.WaitUntil(t0.Add(1200 * time.Millisecond))
	dur = time.Now().Sub(t0)
	if dur >= 1200*time.Millisecond && dur <= 1300*time.Millisecond {
		dur = 1200 * time.Millisecond
	}
	check(dur, 1200*time.Millisecond)

	mess := "WaitUntil(Jesus first birthday) takes forever"
	go func() {
		util.WaitUntil(time.Date(1, time.December, 25, 0, 0, 0, 0, time.UTC))
		mess = ""
	}()
	time.Sleep(100 * time.Millisecond)
	check(mess, "")

	mess = "WaitUntil(1000-11-10 00:00:00) takes forever"
	go func() {
		util.WaitUntil(time.Date(1000, time.October, 11, 0, 0, 0, 0, time.UTC))
		mess = ""
	}()
	time.Sleep(100 * time.Millisecond)
	check(mess, "")

	testBase64()
}
Example #19
func main() {
	// Intercept signals asap (in particular intercept SIGTTOU before the first output)
	signals := make(chan os.Signal, 32)
	signals_to_watch := []os.Signal{syscall.SIGUSR1, syscall.SIGUSR2, syscall.SIGTTOU, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGINT}
	signal.Notify(signals, signals_to_watch...)

	config.Init()
	config.ReadArgs(os.Args[1:])

	if config.PrintVersion {
		fmt.Printf(`go-susi %v (revision %v)
Copyright (c) 2013 Matthias S. Benkmann
This is free software; see the source for copying conditions.  There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

`, config.Version, config.Revision)
	}

	if config.PrintHelp {
		fmt.Println(`USAGE: go-susi [args]

--help       print this text and exit
--version    print version and exit
--stats      print sistats info from running go-susi process

-v           print operator debug messages (INFO)
-vv          print developer debug messages (DEBUG)
             ATTENTION! developer messages include keys!

-f           start with a fresh database; discard old /var/lib/go-susi

--test=<dir> test mode:
             * read config files from <dir> instead of /etc/gosa-si
             * use <dir>/go-susi.log as log file
             * use <dir> as database directory instead of /var/lib/go-susi

-c <file>    read config from <file> instead of default location
`)
	}

	if config.PrintVersion || config.PrintHelp {
		os.Exit(0)
	}

	config.ReadConfig()
	config.ReadCertificates() // after config.ReadConfig()

	if config.TLSRequired && config.TLSServerConfig == nil {
		util.Log(0, "ERROR! No cert, no keys => no service")
		util.LoggersFlush(5 * time.Second)
		os.Exit(1)
	}

	logfile, err := os.OpenFile(config.LogFilePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
	if err != nil {
		util.Log(0, "ERROR! %v", err)
		// Do not exit. We can go on without logging to a file.

	} else {
		logfile.Close() // will be re-opened on the first write
		// Add file to loggers list. os.Stderr is on it by default.
		util.LoggerAdd(util.LogFile(logfile.Name()))
	}
	util.LogLevel = config.LogLevel

	if config.PrintStats {
		// We need ReadNetwork() to determine config.IP, which is necessary
		// for TLS certificate validation. We call it inside the if config.PrintStats
		// block instead of outside because a go-susi daemon should
		// wait for DNS before calling ReadNetwork(), which is something
		// we don't want to do for the --stats call.
		config.ReadNetwork()
		code := printStats()
		util.LoggersFlush(5 * time.Second)
		os.Exit(code)
	}

	util.Log(0, "=============================================================================")
	util.Log(0, "INFO! go-susi %v started", config.Version)

	if !config.RunServer {
		util.Log(1, "INFO! No ldap-admin-dn configured => Will run in client-only mode")
	}

	util.Log(1, "INFO! Expecting standard clients to communicate on these ports: %v", config.ClientPorts)

	util.Log(1, "INFO! Waiting up to 5 minutes for DNS to be available")
	if !util.WaitForDNS(5 * time.Minute) {
		util.Log(0, "ERROR! DNS not available")
		util.LoggersFlush(5 * time.Second)
		os.Exit(1)
	}
	util.Log(1, "INFO! DNS available")

	config.ReadNetwork() // after config.ReadConfig()

	if config.TLSServerConfig != nil {
		util.Log(1, "INFO! [SECURITY] CA certificate:\n%v", security.CertificateInfo(config.CACert[0]))
		util.Log(1, "INFO! [SECURITY] My certificate:\n%v", security.CertificateInfo(config.TLSServerConfig.Certificates[0].Leaf))
	}

	// ATTENTION! DO NOT MOVE THE FOLLOWING CODE FURTHER DOWN!
	// We want to try listening on our socket as early in the program as possible,
	// so that we can bail out if another go-susi instance is already running
	// before potentially damaging the databases.
	tcp_addr, err := net.ResolveTCPAddr("tcp4", config.ServerListenAddress)
	if err != nil {
		util.Log(0, "ERROR! ResolveTCPAddr: %v", err)
		util.LoggersFlush(5 * time.Second)
		os.Exit(1)
	}
	listener, err := net.ListenTCP("tcp4", tcp_addr)
	if err != nil {
		util.Log(0, "ERROR! ListenTCP: %v", err)
		util.LoggersFlush(5 * time.Second)
		os.Exit(1)
	}

	if config.RunServer {
		util.Log(1, "INFO! Waiting up to 5 minutes for %v to be available", config.LDAPURI)
		if !db.LDAPAvailable(5 * time.Minute) {
			util.Log(0, "ERROR! LDAP not available")
			util.LoggersFlush(5 * time.Second)
			os.Exit(1)
		}
		util.Log(1, "INFO! LDAP available")

		setConfigUnitTag() // after config.ReadNetwork()
		config.FAIBase = db.LDAPFAIBase()
		util.Log(1, "INFO! FAI base: %v", config.FAIBase)
		util.Log(1, "INFO! ou=servers.conf: %v", config.LDAPServerOUs)
		os.MkdirAll(path.Dir(config.JobDBPath), 0750)
		db.ServersInit()      // after config.ReadNetwork()
		db.JobsInit()         // after config.ReadConfig()
		db.ClientsInit()      // after config.ReadConfig()
		db.HooksExecute(true) // after config.ReadConfig()
		action.Init()
	}

	// Create channels for receiving events.
	// The main() goroutine receives on all these channels
	// and spawns new goroutines to handle the incoming events.
	tcp_connections := make(chan *net.TCPConn, 32)
	// NOTE: signals channel is created at the beginning of main()

	util.Log(1, "INFO! Intercepting these signals: %v", signals_to_watch)

	util.Log(1, "INFO! Accepting gosa-si protocol connections on TCP port %v", strings.SplitN(config.ServerSourceAddress, ":", 2)[1])
	go acceptConnections(listener, tcp_connections)

	go util.WithPanicHandler(faiProgressWatch)

	if config.RunServer {
		if config.FAIMonPort != "disabled" {
			util.Log(1, "INFO! Accepting FAI monitoring messages on TCP port %v", config.FAIMonPort)
			go faimon(":" + config.FAIMonPort)
		}

		util.Log(1, "INFO! Accepting TFTP requests on UDP port %v", config.TFTPPort)
		go tftp.ListenAndServe(":"+config.TFTPPort, config.TFTPRegexes, config.TFTPReplies)

		go message.CheckPossibleClients()
		go message.Broadcast_new_server()
		go message.DistributeForeignJobUpdates()
	}

	// http server for profiling
	//go func(){http.ListenAndServe("localhost:6060", nil)}()

	go message.RegistrationHandler()

	/********************  main event loop ***********************/
	for {
		select {
		case sig := <-signals: //os.Signal
			if sig != syscall.SIGTTOU { // don't log SIGTTOU as that may cause another SIGTTOU
				util.Log(1, "INFO! Received signal \"%v\"", sig)
			}
			if sig == syscall.SIGUSR2 && config.RunServer {
				db.HooksExecute(false)
			}
			if sig == syscall.SIGHUP || sig == syscall.SIGTERM ||
				sig == syscall.SIGQUIT || sig == syscall.SIGINT {
				Shutdown = true
				util.Log(0, "WARNING! Shutting down!")
				util.Log(1, "INFO! Shutting down listener")
				listener.Close()
				if config.RunServer {
					wait := make(chan bool, 16)
					go func() { db.JobsShutdown(); wait <- true }()
					go func() { db.ServersShutdown(); wait <- true }()
					go func() { db.ClientsShutdown(); wait <- true }()
					<-wait // for jobdb
					<-wait // for serverdb
					<-wait // for clientdb
				}
				config.Shutdown()
				util.Log(1, "INFO! Average request processing time: %v", time.Duration((atomic.LoadInt64(&message.RequestProcessingTime)+50)/100))
				util.Log(1, "INFO! Databases have been saved => Exit program")
				util.LoggersFlush(5 * time.Second)
				os.Exit(0)
			}

		case conn := <-tcp_connections: // *net.TCPConn
			if Shutdown {
				util.Log(1, "INFO! Rejecting TCP request from %v because of go-susi shutdown", conn.RemoteAddr())
				conn.Close()
			} else {
				//util.Log(2, "DEBUG! Incoming TCP request from %v", conn.RemoteAddr())
				go util.WithPanicHandler(func() { handle_request(conn) })
			}
		}
	}
}