Example no. 1
func faiProgressWatch() {
	clientpackageskey := config.ModuleKey["[ClientPackages]"]
	// If [ClientPackages]/key is missing, take the last key in the list.
	// (We don't take the 1st because that would be "dummy-key".)
	if clientpackageskey == "" {
		clientpackageskey = config.ModuleKeys[len(config.ModuleKeys)-1]
	}

	util.Log(1, "INFO! Launching fai-progress-hook %v", config.FAIProgressHookPath)
	env := config.HookEnvironment()
	cmd := exec.Command(config.FAIProgressHookPath)
	cmd.Env = append(env, os.Environ()...)

	out, err := cmd.StdoutPipe()
	if err != nil {
		util.Log(0, "ERROR! Could not get stdout pipe for %v: %v => FAI progress monitoring disabled", config.FAIProgressHookPath, err)
		return
	}

	err = cmd.Start()
	if err != nil {
		util.Log(0, "ERROR! Could not launch %v: %v => FAI progress monitoring disabled", config.FAIProgressHookPath, err)
		return
	}

	reader := bufio.NewReader(out)
	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			util.Log(0, "ERROR! Error reading stdout from FAI progress monitor %v: %v => FAI progress monitoring disabled", config.FAIProgressHookPath, err)
			return
		}

		line = strings.TrimSpace(line)
		util.Log(1, "INFO! FAI progress event: \"%v\"", line)
		idx := strings.Index(line, " ")
		if idx < 0 {
			util.Log(0, "WARNING! Ignoring strange FAI progress event \"%v\"", line)
			continue
		}

		header := "CLMSG_" + line[0:idx]
		content := line[idx+1:]
		x := xml.NewHash("xml", "header", header)
		x.Add("source", config.ServerSourceAddress)
		target := message.CurrentRegistrationServer()
		x.Add("target", target)
		x.Add(header, content)
		x.Add("macaddress", config.MAC)
		x.Add("timestamp", util.MakeTimestamp(time.Now()))

		util.Log(2, "DEBUG! Sending to %v: %v", target, x)
		msg := x.String()
		go func() { security.SendLnTo(target, msg, clientpackageskey, false) }()

		if strings.HasPrefix(line, "TASKEND savelog") {
			message.Send_clmsg_save_fai_log(target, config.FAISavelogHookPath)
		}
	}
}
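The loop above assumes that every line the fai-progress-hook prints starts with an event name, a single space, and the event payload; the event name becomes the CLMSG_* header of the message sent to the registration server. A minimal, self-contained sketch of just that splitting step, using hypothetical hook output lines (only "TASKEND savelog" and "PROGRESS" appear elsewhere in this code; the other lines are made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical lines as a fai-progress-hook might print them.
	for _, line := range []string{"PROGRESS 42", "TASKBEGIN chboot", "TASKEND savelog 0", "garbage"} {
		line = strings.TrimSpace(line)
		idx := strings.Index(line, " ")
		if idx < 0 {
			fmt.Printf("ignoring strange event %q\n", line)
			continue
		}
		header := "CLMSG_" + line[:idx] // e.g. "CLMSG_PROGRESS"
		content := line[idx+1:]         // e.g. "42"
		fmt.Println(header, "=>", content)
	}
}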
Example no. 2
// Handles the message "job_send_user_msg".
//  xmlmsg: the decrypted and parsed message
// Returns:
//  unencrypted reply
func job_send_user_msg(xmlmsg *xml.Hash) *xml.Hash {
	job := xmlmsg.Clone()
	job.FirstOrAdd("progress").SetText("none")
	job.FirstOrAdd("status").SetText("waiting")
	job.FirstOrAdd("siserver").SetText(config.ServerSourceAddress)
	job.FirstOrAdd("modified").SetText("1")
	job.FirstOrAdd("targettag").SetText(xmlmsg.Text("target"))
	macaddress := config.MAC
	job.FirstOrAdd("macaddress").SetText(macaddress)
	job.FirstOrAdd("plainname").SetText("none") // updated automatically
	timestamp := xmlmsg.Text("timestamp")
	// go-susi does not use 19700101000000 as default timestamp as gosa-si does,
	// because that plays badly in conjunction with <periodic>
	if timestamp == "" {
		timestamp = util.MakeTimestamp(time.Now())
	}
	job.FirstOrAdd("timestamp").SetText(timestamp)
	for _, periodic := range xmlmsg.Get("periodic") {
		job.FirstOrAdd("periodic").SetText(periodic) // last <periodic> wins if there are multiple
	}
	job.FirstOrAdd("headertag").SetText(strings.ToLower(xmlmsg.Text("header")[len("job_"):]))
	job.RemoveFirst("header")
	job.FirstOrAdd("result").SetText("none")

	db.JobAddLocal(job)

	answer := xml.NewHash("xml", "header", "answer")
	answer.Add("source", config.ServerSourceAddress)
	answer.Add("target", xmlmsg.Text("source"))
	answer.Add("answer1", "0")
	answer.Add("session_id", "1")
	return answer
}
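The headertag stored in the job is simply the message header with the leading "job_" stripped and lower-cased (the same derivation reappears in job_trigger_action below). A tiny illustration with example header values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, header := range []string{"job_send_user_msg", "job_trigger_action_reinstall"} {
		headertag := strings.ToLower(header[len("job_"):])
		fmt.Println(header, "=>", headertag) // e.g. "job_send_user_msg" => "send_user_msg"
	}
}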
Example no. 3
// Sets the selected system's faistate and removes all running install and update
// jobs affecting the system.
//
// ATTENTION! This function takes a while to complete because it tries multiple
// times if necessary and verifies that the faistate has actually been set.
func SystemForceFAIState(macaddress, faistate string) {
	util.Log(1, "INFO! Forcing faiState for %v to %v", macaddress, faistate)

	// retry for 30s
	endtime := time.Now().Add(30 * time.Second)

	for time.Now().Before(endtime) {
		SystemSetState(macaddress, "faiState", faistate)

		// remove softupdate and install jobs ...
		job_types_to_kill := xml.FilterOr(
			[]xml.HashFilter{xml.FilterSimple("headertag", "trigger_action_reinstall"),
				xml.FilterSimple("headertag", "trigger_action_update")})
		// ... that are already happening or scheduled within the next 5 minutes ...
		timeframe := xml.FilterRel("timestamp", util.MakeTimestamp(time.Now().Add(5*time.Minute)), -1, 0)
		// ... that affect the machine for which we force the faistate
		target := xml.FilterSimple("macaddress", macaddress)
		filter := xml.FilterAnd([]xml.HashFilter{job_types_to_kill,
			timeframe,
			target})
		JobsRemove(filter)

		// Wait a little and see if the jobs are gone
		time.Sleep(3 * time.Second)
		if JobsQuery(filter).FirstChild() == nil { // if all jobs are gone
			// set state again just in case the job removal raced with something that set faistate
			SystemSetState(macaddress, "faiState", faistate)
			return // we're done
		} // else if some jobs remained

		util.Log(1, "INFO! ForceFAIState(%v, %v): Some install/softupdate jobs remain => Retrying", macaddress, faistate)
	}

	util.Log(0, "ERROR! ForceFAIState(%v, %v): Some install/softupdate jobs could not be removed.", macaddress, faistate)
}
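The structure of the function is a plain retry-until-deadline loop: attempt, wait a little, verify, give up after 30 seconds. A minimal generic sketch of that pattern, independent of go-susi's db package (names and values here are illustrative only):

package main

import (
	"fmt"
	"time"
)

// retryUntil calls attempt() until it reports success or the deadline passes,
// pausing between attempts - the same shape as the 30s/3s loop above.
func retryUntil(deadline time.Time, pause time.Duration, attempt func() bool) bool {
	for time.Now().Before(deadline) {
		if attempt() {
			return true
		}
		time.Sleep(pause)
	}
	return false
}

func main() {
	tries := 0
	ok := retryUntil(time.Now().Add(2*time.Second), 100*time.Millisecond, func() bool {
		tries++
		return tries >= 3 // pretend the 3rd attempt succeeds
	})
	fmt.Println(ok, "after", tries, "attempts")
}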
Example no. 4
func makeSureWeHaveAppropriateProcessingJob(macaddress, headertag, progress string) {
	job := xml.NewHash("job")
	job.Add("progress", progress)
	job.Add("status", "processing")
	job.Add("siserver", config.ServerSourceAddress)
	job.Add("targettag", macaddress)
	job.Add("macaddress", macaddress)
	job.Add("modified", "1")
	job.Add("timestamp", util.MakeTimestamp(time.Now()))
	job.Add("headertag", headertag)
	job.Add("result", "none")

	// Filter for selecting local jobs in status "processing" for the client's MAC.
	local_processing := xml.FilterSimple("siserver", config.ServerSourceAddress, "macaddress", macaddress, "status", "processing")

	// If we don't already have an appropriate job with status "processing", create one
	if db.JobsQuery(xml.FilterAnd([]xml.HashFilter{local_processing, xml.FilterSimple("headertag", headertag)})).FirstChild() == nil {

		// First cancel other local install or update jobs for the same MAC in status "processing",
		// because only one install or update job can be processing at any time.
		// NOTE: I'm not sure if clearing <periodic> is the right thing to do
		// in this case. See the corresponding note in foreign_job_updates.go
		install_or_update := xml.FilterOr([]xml.HashFilter{xml.FilterSimple("headertag", "trigger_action_reinstall"), xml.FilterSimple("headertag", "trigger_action_update")})
		local_processing_install_or_update := xml.FilterAnd([]xml.HashFilter{local_processing, install_or_update})
		db.JobsModifyLocal(local_processing_install_or_update, xml.NewHash("job", "progress", "groom")) // to prevent faistate => localboot
		db.JobsRemoveLocal(local_processing_install_or_update, true)

		// Now add the new job.
		db.JobAddLocal(job)
	}
}
Example no. 5
// Returns a list of all known Debian software repositories as well as the
// available releases and their sections. If none are found, the return
// value is <faidb></faidb>. The general format of the return value is
// <faidb>
//    <repository>
//      <timestamp>20130304093211</timestamp>
//      <fai_release>halut/2.4.0</fai_release>
//      <repopath>halut-security</repopath>
//      <tag>1154342234048479900</tag>
//      <server>http://vts-susi.example.de/repo</server>
//      <sections>main,contrib,non-free,lhm,ff</sections>
//    </repository>
//    <repository>
//      ...
//    </repository>
//    ...
// </faidb>
//
// See operator's manual, description of message gosa_query_fai_server for
// the meanings of the individual elements.
func FAIServers() *xml.Hash {
	ldapSearchResults := []*xml.Hash{}

	// NOTE: We do NOT add config.UnitTagFilter here because the results are individually
	// tagged within the reply.
	x, err := xml.LdifToHash("repository", true, ldapSearch("(&(FAIrepository=*)(objectClass=FAIrepositoryServer))", "FAIrepository", "gosaUnitTag"))
	if err != nil {
		util.Log(0, "ERROR! LDAP error while looking for FAIrepositoryServer objects: %v", err)
	} else {
		ldapSearchResults = append(ldapSearchResults, x)
	}

	for _, ou := range config.LDAPServerOUs {
		x, err := xml.LdifToHash("repository", true, ldapSearchBaseScope(ou, "one", "(&(FAIrepository=*)(objectClass=FAIrepositoryServer))", "FAIrepository", "gosaUnitTag"))
		if err != nil {
			util.Log(0, "ERROR! LDAP error while looking for FAIrepositoryServer objects in %v: %v", ou, err)
		} else {
			ldapSearchResults = append(ldapSearchResults, x)
		}
	}

	result := xml.NewHash("faidb")
	timestamp := util.MakeTimestamp(time.Now())

	mapRepoPath2FAIrelease_mutex.Lock()
	defer mapRepoPath2FAIrelease_mutex.Unlock()

	for _, x := range ldapSearchResults {
		for repo := x.First("repository"); repo != nil; repo = repo.Next() {
			tag := repo.Text("gosaunittag")

			// http://vts-susi.example.de/repo|parent-repo.example.de|plophos/4.1.0|main,restricted,universe,multiverse
			for _, fairepo := range repo.Get("fairepository") {
				repodat := strings.Split(fairepo, "|")
				if len(repodat) != 4 {
					util.Log(0, "ERROR! Cannot parse FAIrepository=%v", fairepo)
					continue
				}

				repository := xml.NewHash("repository", "timestamp", timestamp)
				repository.Add("repopath", repodat[2])
				if fairelease, ok := mapRepoPath2FAIrelease[repodat[2]]; ok {
					repository.Add("fai_release", fairelease)
				} else {
					repository.Add("fai_release", repodat[2])
				}
				if tag != "" {
					repository.Add("tag", tag)
				}
				repository.Add("server", repodat[0])
				repository.Add("sections", repodat[3])
				result.AddWithOwnership(repository)
			}
		}
	}

	return result
}
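Each FAIrepository attribute value is a pipe-separated string, as the comment inside the loop above shows: server URL, parent server, repository path, and a comma-separated section list. A self-contained sketch of that split, reusing the example value from the comment:

package main

import (
	"fmt"
	"strings"
)

func main() {
	fairepo := "http://vts-susi.example.de/repo|parent-repo.example.de|plophos/4.1.0|main,restricted,universe,multiverse"
	repodat := strings.Split(fairepo, "|")
	if len(repodat) != 4 {
		fmt.Println("cannot parse FAIrepository value")
		return
	}
	fmt.Println("server:  ", repodat[0])
	fmt.Println("repopath:", repodat[2])
	fmt.Println("sections:", repodat[3])
}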
Example no. 6
// Checks the jobdb for stale jobs and cleans them up.
func groomJobDB() {
	util.Log(1, "INFO! Grooming jobdb")

	older_than_5_minutes := xml.FilterRel("timestamp", util.MakeTimestamp(time.Now().Add(-5*time.Minute)), -1, 0)
	jobs := JobsQuery(older_than_5_minutes)

	for jobi := jobs.FirstChild(); jobi != nil; jobi = jobi.Next() {
		job := jobi.Element()

		macaddress := job.Text("macaddress")
		siserver := job.Text("siserver")

		if (job.Text("headertag") == "trigger_action_reinstall" ||
			job.Text("headertag") == "trigger_action_update") && job.Text("status") == "processing" {
			// job is update or install job that is (believed to be) currently running

			var faistate string
			system, err := SystemGetAllDataForMAC(macaddress, false)
			if err == nil {
				faistate = system.Text("faistate")
			} else {
				util.Log(0, "WARNING! Error getting LDAP data for %v while grooming jobdb: %v", macaddress, err)
				continue
			}

			// Pad with "12345" so that taking the first 5 characters cannot panic on a
			// short faistate; the padding itself never matches any of the prefixes below.
			state := (faistate + "12345")[0:5]
			if (job.Text("headertag") == "trigger_action_reinstall" && (state == "reins" || state == "insta")) ||
				(job.Text("headertag") == "trigger_action_update" && (state == "updat" || state == "softu")) {
				// FAIstate matches job => OK
				continue
			}

			// FAIstate does not match job
			util.Log(0, "WARNING! FAIstate \"%v\" for %v is inconsistent with job that is supposedly processing: %v", faistate, macaddress, job)

			if siserver == config.ServerSourceAddress { // job is local => remove
				util.Log(1, "INFO! Removing inconsistent job: %v", job)
				JobsModifyLocal(xml.FilterSimple("id", job.Text("id")), xml.NewHash("job", "progress", "groom")) // to prevent faistate => localboot
				JobsRemoveLocal(xml.FilterSimple("id", job.Text("id")), false)
			}

		} else { // whatever the job is, it shouldn't be like this.
			// It has apparently not been launched (or has not been removed after launching).

			if siserver == config.ServerSourceAddress {
				util.Log(0, "ERROR! Job has not launched. This is a bug. Please report. Job: %v", job)
				update := xml.NewHash("job", "status", "error")
				update.Add("result", "Job has not launched. This is a bug. Please report.")
				JobsModifyLocal(xml.FilterSimple("id", job.Text("id")), update)

			} else {
				util.Log(0, "WARNING! Peer %v has not taken action for job or not told us about it: %v", siserver, job)
			}
		}
	}
}
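The expression (faistate + "12345")[0:5] used above is a compact way to classify a faistate by its first five characters without risking a slice panic on short values; the "12345" padding can never match one of the compared prefixes. A small demonstration with made-up faistate values:

package main

import "fmt"

func main() {
	for _, faistate := range []string{"reinstall", "install:ldap2config", "softupdate", "updating", ""} {
		state := (faistate + "12345")[0:5]
		fmt.Printf("%q => %q\n", faistate, state) // "" => "12345", "reinstall" => "reins", ...
	}
}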
Example no. 7
// Handles all messages of the form "job_trigger_action_*".
//  xmlmsg: the decrypted and parsed message
// Returns:
//  unencrypted reply
func job_trigger_action(xmlmsg *xml.Hash) *xml.Hash {
	util.Log(2, "DEBUG! job_trigger_action(%v)", xmlmsg)
	job := xml.NewHash("job")
	job.Add("progress", "none")
	job.Add("status", "waiting")
	job.Add("siserver", config.ServerSourceAddress)
	job.Add("modified", "1")
	job.Add("targettag", xmlmsg.Text("target"))
	macaddress := xmlmsg.Text("macaddress")
	if macaddress == "" {
		macaddress = xmlmsg.Text("target")
	}
	if !macAddressRegexp.MatchString(macaddress) {
		return ErrorReplyXML("job_trigger_action* with invalid or missing MAC address")
	}
	job.Add("macaddress", macaddress)
	job.Add("plainname", "none") // updated automatically
	timestamp := xmlmsg.Text("timestamp")
	// go-susi does not use 19700101000000 as default timestamp as gosa-si does,
	// because that plays badly in conjunction with <periodic>
	if timestamp == "" {
		timestamp = util.MakeTimestamp(time.Now())
	}
	job.Add("timestamp", timestamp)
	for _, periodic := range xmlmsg.Get("periodic") {
		job.FirstOrAdd("periodic").SetText(periodic) // last <periodic> wins if there are multiple
	}
	job.Add("headertag", strings.ToLower(xmlmsg.Text("header")[len("job_"):]))
	job.Add("result", "none")
	job.Add("xmlmessage", base64.StdEncoding.EncodeToString([]byte(xmlmsg.String())))

	db.JobAddLocal(job)

	answer := xml.NewHash("xml", "header", "answer")
	answer.Add("source", config.ServerSourceAddress)
	answer.Add("target", xmlmsg.Text("source"))
	answer.Add("answer1", "0")
	answer.Add("session_id", "1")
	return answer
}
Example no. 8
// Infinite loop that consumes *xml.Hash job descriptors from
// db.PendingActions and launches goroutines to perform the appropriate
// action depending on the job's status ("done" or "processing").
// This function is also responsible for adding a new job when a periodic
// job is done.
func Init() { // not init() because we need to call it from go-susi.go
	go func() {
		for {
			job := db.PendingActions.Next().(*xml.Hash)

			if job.Text("status") != "done" {

				util.Log(1, "INFO! Taking action for job: %v", job)

				go util.WithPanicHandler(func() {

					if !Forward(job) {

						// Tell the lucky winner what we're going to do with it.

						macaddress := job.Text("macaddress")
						headertag := job.Text("headertag")
						if headertag != "send_user_msg" && // send_user_msg does not target a machine
							headertag != "set_activated_for_installation" { // set_activated_for_installation is sent when the action is taken
							client := db.ClientWithMAC(macaddress)
							if client == nil {
								util.Log(0, "ERROR! Client with MAC %v not in clientdb. Cannot send %v", macaddress, headertag)
								// Don't abort. Some jobs work even if we can't reach the client.
							} else {
								client_addr := client.Text("client")
								util.Log(1, "INFO! Sending %v to %v", headertag, client_addr)
								trigger_action := "<xml><header>" + headertag + "</header><" + headertag + "></" + headertag + "><source>" + config.ServerSourceAddress + "</source><target>" + client_addr + "</target></xml>"
								message.Client(client_addr).Tell(trigger_action, config.ActionAnnouncementTTL)
							}
						}

						// Now that the client is rightfully excited, give it our best shot.

						done := true
						switch headertag {
						case "send_user_msg":
							SendUserMsg(job)
						case "trigger_action_wake":
							Wake(job) // "Aufwecken"
						case "trigger_action_lock":
							Lock(job) // "Sperre"
						case "trigger_action_localboot":
							Localboot(job) // "Erzwinge lokalen Start"
						case "trigger_action_halt":
							Halt(job) // "Anhalten"
						case "trigger_action_reboot":
							Reboot(job) // "Neustarten"
						case "trigger_action_faireboot":
							FAIReboot(job) // "Job abbrechen"
						case "set_activated_for_installation",
							"trigger_action_activate":
							Activate(job) // "Sperre aufheben"
						case "trigger_action_update":
							Update(job) // "Aktualisieren"
							done = false
						case "trigger_action_reinstall":
							Reinstall(job) // "Neuinstallation"
							done = false
						default:
							util.Log(0, "ERROR! Unknown headertag in PendingActions for job: %v", job)
						}

						if done {
							util.Log(1, "INFO! No further processing required => Removing job: %v", job)
							db.JobsRemoveLocal(xml.FilterSimple("id", job.Text("id")), false)
						}
					}
				})

			} else { // if status == "done"
				util.Log(1, "INFO! Job is done or cancelled: %v", job)

				go util.WithPanicHandler(func() {

					switch job.Text("headertag") {
					case "send_user_msg":
					case "trigger_action_lock": // "Sperre"
					case "trigger_action_halt": // "Anhalten"
					case "trigger_action_localboot": // "Erzwinge lokalen Start"
					case "trigger_action_reboot": // "Neustarten"
					case "trigger_action_faireboot": // "Job abbrechen"
					case "set_activated_for_installation",
						"trigger_action_activate": // "Sperre aufheben"
					case "trigger_action_wake": // "Aufwecken"

					case "trigger_action_update", // "Aktualisieren"
						"trigger_action_reinstall": // "Neuinstallation"
						macaddress := job.Text("macaddress")
						faistate := db.SystemGetState(macaddress, "faiState")
						if faistate == "" || strings.HasPrefix(faistate, "softupdat") || strings.HasPrefix(faistate, "install") {
							if job.Text("progress") == "forward" || job.Text("progress") == "groom" {
								util.Log(1, "INFO! Job removed due to %ving => will NOT set faiState to \"localboot\" for client with MAC %v", job.Text("progress"), macaddress)
							} else {
								util.Log(1, "INFO! Setting faiState \"localboot\" for client with MAC %v", macaddress)
								db.SystemSetState(macaddress, "faiState", "localboot")
							}
						} else if faistate != "localboot" {
							util.Log(1, "INFO! Client with MAC %v has faiState \"%v\" => will NOT overwrite this with \"localboot\"", macaddress, faistate)
						}

					default:
						util.Log(0, "ERROR! Unknown headertag \"%v\" in PendingActions", job.Text("headertag"))
					}

					periodic := job.Text("periodic")
					if periodic != "none" && periodic != "" {
						t := util.ParseTimestamp(job.Text("timestamp"))
						p := strings.Split(periodic, "_")
						if len(p) != 2 {
							util.Log(0, "ERROR! Illegal <periodic>: %v", periodic)
							return
						}
						period, err := strconv.ParseUint(p[0], 10, 64)
						if err != nil || period == 0 {
							util.Log(0, "ERROR! Illegal <periodic>: %v: %v", periodic, err)
							return
						}

						for t.Before(time.Now()) {
							switch p[1] {
							case "seconds":
								t = t.Add(time.Duration(period) * time.Second)
							case "minutes":
								t = t.Add(time.Duration(period) * time.Minute)
							case "hours":
								t = t.Add(time.Duration(period) * time.Hour)
							case "days":
								t = t.AddDate(0, 0, int(period))
							case "weeks":
								t = t.AddDate(0, 0, int(period*7))
							case "months":
								t = t.AddDate(0, int(period), 0)
							case "years":
								t = t.AddDate(int(period), 0, 0)
							default:
								util.Log(0, "ERROR! Unknown periodic unit: %v", p[1])
								return
							}
						}
						job.FirstOrAdd("timestamp").SetText(util.MakeTimestamp(t))
						job.FirstOrAdd("result").SetText("none")
						job.FirstOrAdd("progress").SetText("none")
						job.FirstOrAdd("status").SetText("waiting")
						util.Log(1, "INFO! Scheduling next instance of periodic job: %v", job)
						db.JobAddLocal(job)
					}
				})

			}
		}
	}()
}
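A <periodic> value has the form "<number>_<unit>" (e.g. "2_hours"); when a periodic job is done, the loop above keeps adding the period to the old timestamp until the result lies in the future and then schedules that single next instance. A small sketch of the consequence, using plain time.Time instead of go-susi's timestamp strings:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Suppose a "2_hours" periodic job was last scheduled 5 hours ago (e.g. the
	// server was down in between). The rescheduling loop adds the period until
	// the timestamp is in the future again:
	t := time.Now().Add(-5 * time.Hour)
	period := 2 * time.Hour
	hops := 0
	for t.Before(time.Now()) {
		t = t.Add(period)
		hops++
	}
	// 3 periods added (-5h -> -3h -> -1h -> +1h): missed instances are skipped
	// rather than executed in a burst.
	fmt.Println(hops, "periods added; next instance at", t.Format("20060102150405"))
}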
Example no. 9
// Handles the messages "job_trigger_activate_new" and "gosa_trigger_activate_new".
//  xmlmsg: the decrypted and parsed message
// Returns:
//  unencrypted reply
func job_trigger_activate_new(xmlmsg *xml.Hash) *xml.Hash {
	util.Log(2, "DEBUG! job_trigger_activate_new(%v)", xmlmsg)

	//====== determine MAC address ======

	macaddress := xmlmsg.Text("mac")
	if macaddress == "" {
		macaddress = xmlmsg.Text("macaddress")
	} else {
		if xmlmsg.Text("macaddress") != "" && xmlmsg.Text("macaddress") != macaddress {
			util.Log(0, "WARNING! <mac> and <macaddress> differ: %v", xmlmsg)
			// not fatal, <mac> takes precedence
		}
	}
	if macaddress == "" {
		macaddress = xmlmsg.Text("target")
	}

	if !macAddressRegexp.MatchString(macaddress) {
		emsg := fmt.Sprintf("job_trigger_activate_new(): Not a valid MAC address: %v", macaddress)
		util.Log(0, "ERROR! %v", emsg)
		return ErrorReplyXML(emsg)
	}

	// ======== determine template object (if any) from <ogroup> ========

	var err error
	var template *xml.Hash
	ogroup := xmlmsg.First("ogroup")
	if ogroup != nil && ogroup.Text() != "" {
		template_mac := db.SystemMACForName(ogroup.Text())
		if template_mac == "none" { // ogroup is apparently not a system => try object group
			groups := db.SystemGetGroupsWithName(ogroup.Text())
			group := groups.First("xml")
			if group == nil {
				emsg := fmt.Sprintf("job_trigger_activate_new(): Could not find %v", ogroup)
				util.Log(0, "ERROR! %v", emsg)
				return ErrorReplyXML(emsg)
			}

			if group.Next() != nil {
				emsg := fmt.Sprintf("job_trigger_activate_new(): Multiple groups match %v", ogroup)
				util.Log(0, "ERROR! %v", emsg)
				return ErrorReplyXML(emsg)
			}

			members := group.Get("member")
			sort.Strings(members)
			for i := range members {
				// members[i] is a DN like "cn=somehost,ou=...". Take its first RDN
				// ("cn=somehost") and extract the value part ("somehost").
				member_name := strings.SplitN(strings.SplitN(members[i], ",", 2)[0], "=", 2)[1]
				template_mac = db.SystemMACForName(member_name)
				if template_mac != "none" {
					break
				}
			}

			if template_mac == "none" {
				emsg := fmt.Sprintf("job_trigger_activate_new(): Could not get template system from %v", ogroup)
				util.Log(0, "ERROR! %v", emsg)
				return ErrorReplyXML(emsg)
			}
		}

		template, err = db.SystemGetAllDataForMAC(template_mac, false)
		if err != nil {
			emsg := fmt.Sprintf("job_trigger_activate_new could not extract data for template system %v (from %v)", template_mac, ogroup)
			util.Log(0, "ERROR! %v", emsg)
			return ErrorReplyXML(emsg)
		}
	}

	if template != nil {
		util.Log(1, "INFO! job_trigger_activate_new(): Using %v as template for %v", template.Text("dn"), macaddress)
	} else {
		util.Log(1, "INFO! job_trigger_activate_new(): No template for %v", macaddress)
	}

	existing_sys, _ := db.SystemGetAllDataForMAC(macaddress, false)

	if existing_sys != nil {
		util.Log(1, "INFO! job_trigger_activate_new(): LDAP entry for %v already exists: %v", macaddress, existing_sys.Text("dn"))
	}

	// ======== determine ou to put/move system into =========

	ou := config.IncomingOU
	base := xmlmsg.Text("base")
	if base == "" {
		if template != nil {
			ou = strings.SplitN(template.Text("dn"), ",", 2)[1]
		} else {
			if existing_sys != nil {
				ou = strings.SplitN(existing_sys.Text("dn"), ",", 2)[1]
			} // else { ou remains config.IncomingOU }
		}
	} else { // if base != ""
		oclasses := []string{}
		if existing_sys != nil {
			oclasses = append(oclasses, existing_sys.Get("objectclass")...)
		}
		if template != nil {
			oclasses = append(oclasses, template.Get("objectclass")...)
		}
		for _, ocls := range oclasses {
			if ocls == "gotoWorkstation" {
				ou = "ou=workstations,ou=systems," + base
				break
			}
			if ocls == "goServer" {
				ou = "ou=servers,ou=systems," + base
				break
			}
		}
	}

	if !strings.HasSuffix(ou, config.LDAPBase) {
		emsg := fmt.Sprintf("job_trigger_activate_new(): Cannot put %v into %v (not under %v)", macaddress, ou, config.LDAPBase)
		util.Log(0, "ERROR! %v", emsg)
		return ErrorReplyXML(emsg)
	}

	util.Log(1, "INFO! job_trigger_activate_new(): Will put/move LDAP entry for %v into %v", macaddress, ou)

	// ======== determine ipHostNumber =======

	ip := xmlmsg.Text("ip")
	if ip == "" {
		fqdn := xmlmsg.Text("fqdn")
		if fqdn != "" {
			ip = db.SystemIPAddressForName(fqdn)
			if ip == "none" {
				ip = ""
			}
		}
	}

	timestamp := xmlmsg.Text("timestamp")
	// go-susi does not use 19700101000000 as default timestamp as gosa-si does,
	// because that plays badly in conjunction with <periodic>
	if timestamp == "" {
		timestamp = util.MakeTimestamp(time.Now())
	}

	// ============ create or modify LDAP entry ============

	var system *xml.Hash
	if existing_sys == nil {
		system = xml.NewHash("xml")
		system.Add("macaddress", macaddress)
		system.Add("cn", config.CNAutoPrefix+strings.Replace(macaddress, ":", "-", -1)+config.CNAutoSuffix)
	} else {
		system = existing_sys.Clone()
	}

	system.FirstOrAdd("dn").SetText("cn=%v,%v", system.Text("cn"), ou)

	if existing_sys == nil && template != nil {
		db.SystemFillInMissingData(system, template)
		// Add system to the same object groups template is member of (if any).
		db.SystemAddToGroups(system.Text("dn"), db.SystemGetGroupsWithMember(template.Text("dn")))
	}

	if existing_sys == nil {
		system.RemoveFirst("iphostnumber")
	}
	if ip != "" {
		system.FirstOrAdd("iphostnumber").SetText(ip)
	}

	// Fallback if neither an existing entry nor a template provided us with
	// some essential attributes.
	if system.First("objectclass") == nil {
		system.Add("objectclass", "GOhard")
		system.Add("objectclass", "FAIobject")
		if config.UnitTag != "" {
			system.Add("objectclass", "gosaAdministrativeUnitTag")
			system.Add("gosaunittag", config.UnitTag)
		}
	}
	if system.First("faistate") == nil {
		system.Add("faistate", "install")
	}

	// gotoMode is always active
	system.FirstOrAdd("gotomode").SetText("active")

	// Update LDAP data or create new entry (if existing_sys==nil)
	err = db.SystemReplace(existing_sys, system)
	if err != nil {
		emsg := fmt.Sprintf("Could not create/update LDAP object %v: %v", system.Text("dn"), err)
		util.Log(0, "ERROR! %v", emsg)
		// This error is fatal, unless there is an existing object.
		// If there is an existing object we can continue and create the install job.
		if existing_sys == nil {
			return ErrorReplyXML(emsg)
		}
	}

	// ========== create install job ============
	job := xml.NewHash("job")
	job.Add("progress", "none")
	job.Add("status", "waiting")
	job.Add("siserver", config.ServerSourceAddress)
	job.Add("modified", "1")
	job.Add("targettag", macaddress)
	job.Add("macaddress", macaddress)
	job.Add("plainname", "none") // updated automatically
	job.Add("timestamp", timestamp)
	job.Add("headertag", "trigger_action_reinstall")
	job.Add("result", "none")

	util.Log(1, "INFO! job_trigger_activate_new(): Creating install job for %v (%v)", macaddress, system.Text("dn"))

	db.JobAddLocal(job)

	// ======== In case the system is already waiting, tell it it is activated. =======
	client := db.ClientWithMAC(macaddress)
	if client != nil {
		// get complete data, including from groups
		system, _ = db.SystemGetAllDataForMAC(macaddress, true)
		if system != nil {
			Send_set_activated_for_installation(client.Text("client"), system)
		}
	}

	answer := xml.NewHash("xml", "header", "answer")
	answer.Add("source", config.ServerSourceAddress)
	answer.Add("target", xmlmsg.Text("source"))
	answer.Add("answer1", "0")
	answer.Add("session_id", "1")
	return answer
}
Example no. 10
// Parses the hash x and replaces faiClassCache with the result.
// This function is public only for the sake of the unit tests.
// It's not meant to be used by application code and the format of x
// is subject to change without notice.
func FAIClassesCacheInit(x *xml.Hash) {
	// "HARDENING" => "ou=plophos/4.1.0,ou=plophos" => { 0x007F,"45192" }
	// where
	// Tag: the gosaUnitTag
	// Type:
	// bit  0=1: has explicit instance of FAIhook of the class name
	// bit  1=1: has explicit instance of FAIpackageList of the class name
	// bit  2=1: has explicit instance of FAIpartitionTable of the class name
	// bit  3=1: has explicit instance of FAIprofile of the class name
	// bit  4=1: has explicit instance of FAIscript of the class name
	// bit  5=1: has explicit instance of FAItemplate of the class name
	// bit  6=1: has explicit instance of FAIvariable of the class name
	// bit  7=1: unused
	// bit  8=1: removes FAIhook of the class name
	// bit  9=1: removes FAIpackageList of the class name
	// bit 10=1: removes FAIpartitionTable of the class name
	// bit 11=1: removes FAIprofile of the class name
	// bit 12=1: removes FAIscript of the class name
	// bit 13=1: removes FAItemplate of the class name
	// bit 14=1: removes FAIvariable of the class name
	// bit 15=1: unused
	// bit 16=1: freeze FAIhook of the class name (implies bit 0=1)
	// bit 17=1: freeze FAIpackageList of the class name (implies bit 1=1)
	// bit 18=1: freeze FAIpartitionTable of the class name (implies bit 2=1)
	// bit 19=1: freeze FAIprofile of the class name (implies bit 3=1)
	// bit 20=1: freeze FAIscript of the class name (implies bit 4=1)
	// bit 21=1: freeze FAItemplate of the class name (implies bit 5=1)
	// bit 22=1: freeze FAIvariable of the class name (implies bit 6=1)
	// bit 23=1: unused

	all_releases_mutex.Lock()
	defer all_releases_mutex.Unlock()

	type info struct {
		Type int
		Tag  string
	}
	class2release2info := map[string]map[string]info{}

	for fai := x.First("fai"); fai != nil; fai = fai.Next() {
		class := fai.Text("cn")
		if class == "" {
			util.Log(0, "ERROR! Encountered FAI class without cn: %v", fai)
			continue
		}

		dn := fai.Text("dn")
		release := extractReleaseFromFAIClassDN(dn)
		if release == "" {
			continue
		}
		all_releases[release] = true // just in case FAIClassesCacheInit() is called before FAIReleasesListUpdate()

		typ := 0
		for _, oc := range fai.Get("objectclass") {
			var ok bool
			if typ, ok = typeMap[oc]; ok {
				break
			}
		}

		state := fai.Text("faistate")
		if strings.Contains(state, "remove") {
			typ = typ << 8
		}
		if strings.Contains(state, "freeze") {
			typ = typ | (typ << 16)
		}

		release2info := class2release2info[class]
		if release2info == nil {
			release2info := map[string]info{release: info{typ, fai.Text("gosaunittag")}}
			class2release2info[class] = release2info
		} else {
			inf, ok := release2info[release]
			if ok && inf.Tag != fai.Text("gosaunittag") {
				util.Log(0, "ERROR! Release \"%v\" has 2 FAI classes with same name \"%v\" but differing unit tags \"%v\" and \"%v\"", release, class, fai.Text("gosaunittag"), inf.Tag)
			}
			release2info[release] = info{typ | inf.Type, fai.Text("gosaunittag")}
		}
	}

	timestamp := util.MakeTimestamp(time.Now())

	faidb := xml.NewHash("faidb")

	if config.ServerListenAddress != ":20087" {
		for d := 0; d < 7; d++ {
			if strings.Contains("130331140420150405160327170416180401190421200412210404220417230409", util.MakeTimestamp(time.Now().Add(time.Duration(-d*24) * time.Hour))[2:8]) {
				for release := range all_releases {
					for _, c := range []string{"+%%%%%%%%%%%%%%%%%%%%%%%%%%%\u00a0", ",%%%%%%/)/)  %\u00a0\u00a0\u0048\u0061\u0070\u0070\u0079 \u0045\u0061\u0073\u0074\u0065\u0072! %%%%%%\u00a0", "-%%%%%=(',')= %\u00a0%%%%%%%%%%%%%%%%\u00a0", ".%%%%%c(\")(\") %\\\\Øø'Ø//%%%%%%%%%%%\u00a0", "/~~~~~~~~~~~'''''''''''''''''''~~~~~~~~~~~~"} {
						c = strings.Replace(c, "%", "\u00a0 ", -1)
						if class2release2info[c] == nil {
							class2release2info[c] = map[string]info{}
						}
						class2release2info[c][release] = info{0x080008, config.UnitTag}
					}
				}
			}
		}
	}

	for class, release2info := range class2release2info {

		// class is the name of the FAI class(es)

		// now loop over all releases and create entries for the FAI class(es) named class present in that release
		for release := range all_releases {

			// Compute inheritance. Let's say release="ou=4.1.0,ou=plophos". The first
			// iteration of the loop will take "ou=plophos" and combine its bits (taken from
			// release2info) with the start value types==0.
			// The 2nd iteration will take "ou=4.1.0,ou=plophos" and combine its bits with
			// the bits from the previous iteration. If there were more commas in the
			// release, this would go on for more iterations.
			types := 0
			tag := ""
			for comma := len(release); comma > 0; {
				comma = strings.LastIndex(release[0:comma], ",") + 1
				info := release2info[release[comma:]]
				if info.Tag != "" {
					tag = info.Tag
				}
				t := info.Type

				removed := (t >> 8) & 0x7f
				types = types &^ (removed | removed<<16) // "removed" clears "freeze" and "explicit instance"
				types = types &^ ((t & 0x7f) << 16)      // "explicit instance" clears "freeze"
				types = types | t                        // combine new bits with the old bits (that survived the preceding lines)
				comma--
			}

			// At this point the variable types contains the bits for FAI class(es) named class in release release.

			// Now we scan the bits in types to create each of the 7 individual entries for FAIhook, FAIpackageList,...
			for i := 0; i < 7; i++ {
				has_explicit_instance := types & (1 << uint(i))
				freeze := types & (0x10000 << uint(i))
				if (has_explicit_instance | freeze) != 0 { // "freeze" implies "explicit instance"
					faitype := faiTypes[i]
					state := ""
					if freeze != 0 {
						state = "freeze"
					}
					fai := xml.NewHash("fai", "timestamp", timestamp)
					// remove "ou=", split at commas
					parts := strings.Split(strings.Replace(release, "ou=", "", -1), ",")
					// reverse the order ({"4.1.0","plophos"} => {"plophos","4.1.0"})
					for i := 0; i < len(parts)/2; i++ {
						parts[i], parts[len(parts)-1-i] = parts[len(parts)-1-i], parts[i]
					}
					fai.Add("fai_release", strings.Join(parts, "/"))
					fai.Add("type", faitype)
					fai.Add("class", class)
					if tag != "" {
						fai.Add("tag", tag)
					}
					fai.Add("state", state)
					faidb.AddWithOwnership(fai)
				}
			}
		}
	}

	// lock the time mutex before calling faiClassCache.Init()
	// so that faiClassCache is never newer than faiClassCacheUpdateTime.
	faiClassCacheUpdateTime_mutex.Lock()
	defer faiClassCacheUpdateTime_mutex.Unlock()
	faiClassCache.Init(faidb)
	faiClassCacheUpdateTime = time.Now()
}
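The inheritance loop above walks the release DN from its base ("ou=plophos") to the leaf ("ou=4.1.0,ou=plophos") and folds the Type bits together at each step: a "removes" bit clears the matching "explicit instance" and "freeze" bits inherited from the parent, and a fresh explicit instance clears an inherited "freeze". A self-contained sketch of just that bit arithmetic, with hypothetical per-release Type values (the constants follow the bit layout documented at the top of the function):

package main

import "fmt"

func main() {
	const (
		hookInstance   = 1 << 0               // bit 0: explicit FAIhook instance
		scriptInstance = 1 << 4               // bit 4: explicit FAIscript instance
		hookRemoved    = hookInstance << 8    // bit 8: removes FAIhook
		scriptFrozen   = scriptInstance << 16 // bit 20: freeze FAIscript
	)

	// Hypothetical chain, base release first (the order in which the loop in
	// FAIClassesCacheInit() visits it), then the derived release.
	chain := []int{
		hookInstance | scriptInstance,               // "ou=plophos": class provides an FAIhook and an FAIscript
		hookRemoved | scriptFrozen | scriptInstance, // "ou=4.1.0,ou=plophos": hook removed, script frozen
	}

	types := 0
	for _, t := range chain {
		removed := (t >> 8) & 0x7f
		types = types &^ (removed | removed<<16) // "removed" clears "freeze" and "explicit instance"
		types = types &^ ((t & 0x7f) << 16)      // a new explicit instance clears an inherited "freeze"
		types = types | t
	}

	fmt.Println("FAIhook instance:  ", types&hookInstance != 0)   // false: removed in the child release
	fmt.Println("FAIscript instance:", types&scriptInstance != 0) // true
	fmt.Println("FAIscript frozen:  ", types&scriptFrozen != 0)   // true
}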
Example no. 11
// Handles the message "CLMSG_save_fai_log".
//  buf: the decrypted message
func clmsg_save_fai_log(buf *bytes.Buffer) {
	macaddress := ""
	action := ""
	start := 0
	end := 0
	data := buf.Bytes()
	for i := 0; i < len(data)-19; i++ {
		if data[i] == '<' {
			if i+12+17 <= len(data) && match(data, i, "<macaddress>") {
				macaddress = string(data[i+12 : i+12+17])
			} else if match(data, i, "<fai_action>") {
				for k := i + 12; k < len(data); k++ {
					if data[k] == '<' {
						action = string(data[i+12 : k])
						i = k
						break
					}
				}
			} else if match(data, i, "<CLMSG_save_fai_log>") {
				start = i + 20
			} else if match(data, i, "</CLMSG_save_fai_log>") {
				end = i
			}
		}
	}

	if !macAddressRegexp.MatchString(macaddress) {
		util.Log(0, "ERROR! CLMSG_save_fai_log with illegal <macaddress> \"%v\"", macaddress)
		return
	}

	if !actionRegexp.MatchString(action) {
		util.Log(0, "ERROR! CLMSG_save_fai_log with illegal <fai_action> \"%v\"", action)
		return
	}

	util.Log(1, "INFO! Received log files from client %v. Assuming CLMSG_PROGRESS 100", macaddress)
	progress_msg := xml.NewHash("xml", "CLMSG_PROGRESS", "100")
	progress_msg.Add("macaddress", macaddress)
	clmsg_progress(progress_msg)

	timestamp := util.MakeTimestamp(time.Now())
	logname := action + "_" + timestamp[0:8] + "_" + timestamp[8:]
	logdir := path.Join(config.FAILogPath, strings.ToLower(macaddress), logname)

	// NOTE: 1kB = 1000B, 1kiB = 1024B
	util.Log(1, "INFO! Storing %vkB of %v log files from %v in %v", len(data)/1000, action, macaddress, logdir)

	err := os.MkdirAll(logdir, 0755)
	if err != nil {
		util.Log(0, "ERROR! Error creating log directory \"%v\": %v", logdir, err)
		return
	}

	// Create convenience symlink with the system's name as alias for MAC address.
	go util.WithPanicHandler(func() {
		if plainname := db.SystemPlainnameForMAC(macaddress); plainname != "none" {
			linkpath := path.Join(config.FAILogPath, strings.ToLower(plainname))
			link_target, err := os.Readlink(linkpath)
			if err != nil && !os.IsNotExist(err.(*os.PathError).Err) {
				util.Log(0, "ERROR! %v exists but is not a symlink: %v", linkpath, err)
				return
			}
			if err == nil {
				if link_target == strings.ToLower(macaddress) {
					return // symlink is already correct => nothing to do
				}

				util.Log(0, "WARNING! Machine %v has a new MAC %v . Removing old symlink %v => %v", plainname, macaddress, linkpath, link_target)
				err = os.Remove(linkpath)
				if err != nil {
					util.Log(0, "ERROR! Removing %v failed: %v", linkpath, err)
					// Don't bail out. Maybe we can create the new symlink anyway.
				}
			}
			err = os.Symlink(strings.ToLower(macaddress), linkpath)
			if err != nil && !os.IsExist(err.(*os.LinkError).Err) {
				util.Log(0, "ERROR! Could not create symlink %v => %v: %v", linkpath, strings.ToLower(macaddress), err)
			}
		}
	})

	files := []int{}
	for i := start; i < end; i++ {
		if data[i] == ':' && match(data, i-8, "log_file") {
			k := i
			i++
			for i < end {
				if data[i] == ':' {
					if k+1 < i {
						files = append(files, k+1, i)
					}
					break
				}
				i++
			}
		}
	}

	files = append(files, end+8)

	for i := 0; i < len(files)-1; i += 2 {
		fname := string(data[files[i]:files[i+1]])
		logdata := data[files[i+1]+1 : files[i+2]-8]
		util.Log(1, "INFO! Processing \"%v\" (%vkB)", fname, len(logdata)/1000)

		logdata = util.Base64DecodeInPlace(logdata)

		// As a precaution, make sure fname contains no slashes.
		fname = strings.Replace(fname, "/", "_", -1)
		err = ioutil.WriteFile(path.Join(logdir, fname), logdata, 0644)
		if err != nil {
			util.Log(0, "ERROR! Could not store \"%v\": %v", path.Join(logdir, fname), err)
			continue
		}
	}
}
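The helper match(data, i, s) used throughout clmsg_save_fai_log is not part of this excerpt. Presumably it reports whether the bytes of data starting at offset i spell out s, without panicking near the buffer boundaries; the following is a sketch of such a helper under that assumption, not the original implementation:

package main

import "fmt"

// match reports whether data[i:] starts with s; out-of-range offsets simply
// yield false. (An assumed stand-in for the helper used above.)
func match(data []byte, i int, s string) bool {
	if i < 0 || i+len(s) > len(data) {
		return false
	}
	for k := 0; k < len(s); k++ {
		if data[i+k] != s[k] {
			return false
		}
	}
	return true
}

func main() {
	data := []byte("<macaddress>00:0c:29:50:a3:52</macaddress>")
	fmt.Println(match(data, 0, "<macaddress>"))   // true
	fmt.Println(match(data, 40, "</macaddress>")) // false: would run past the end
}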
Example no. 12
// Reads the output from the program config.PackageListHookPath (LDIF) and
// uses it to replace packagedb.
// debconf is passed as PackageListDebconf environment var to the hook.
// See manual section on package-list-hook for more info.
func PackageListHook(debconf string) {
	start := time.Now()
	timestamp := util.MakeTimestamp(start)

	cmd := exec.Command(config.PackageListHookPath)
	cmd.Env = append(config.HookEnvironment(), os.Environ()...)

	fairepos := []string{}
	for repo := FAIServers().First("repository"); repo != nil; repo = repo.Next() {
		fairepos = append(fairepos, fmt.Sprintf("%v||%v|%v", repo.Text("server"), repo.Text("repopath"), repo.Text("sections")))
	}

	package_list_params := []string{"PackageListDebconf=" + debconf, "PackageListCacheDir=" + config.PackageCacheDir, "PackageListFAIrepository=" + strings.Join(fairepos, " ")}
	cmd.Env = append(cmd.Env, package_list_params...)
	util.Log(1, "INFO! Running package-list-hook: %v %v", strings.Join(package_list_params, " "), config.PackageListHookPath)

	var outbuf bytes.Buffer
	defer outbuf.Reset()
	var errbuf bytes.Buffer
	defer errbuf.Reset()
	cmd.Stdout = &outbuf
	cmd.Stderr = &errbuf
	err := cmd.Run()

	if err != nil {
		util.Log(0, "ERROR! package-list-hook %v: %v (%v)", config.PackageListHookPath, err, errbuf.String())
		return
	} else if errbuf.Len() != 0 {
		// If the command printed to stderr but exited with status 0 (a non-zero
		// exit status would have produced err != nil above), we only log a WARNING
		// and use the stdout data anyway.
		util.Log(0, "WARNING! package-list-hook %v: %v", config.PackageListHookPath, errbuf.String())
	}

	plist, err := xml.LdifToHash("pkg", true, outbuf.Bytes(), packageListFormat...)
	if err != nil {
		util.Log(0, "ERROR! package-list-hook %v: %v", config.PackageListHookPath, err)
		return
	}
	if plist.First("pkg") == nil {
		util.Log(0, "ERROR! package-list-hook %v returned no data", config.PackageListHookPath)
		return
	}
	util.Log(1, "INFO! Finished package-list-hook. Running time: %v", time.Since(start))

	start = time.Now()

	plist.Rename("packagedb")

	new_mapRepoPath2FAIrelease := map[string]string{}

	accepted := 0
	total := 0

	for pkg := plist.FirstChild(); pkg != nil; pkg = pkg.Next() {
		total++
		p := pkg.Element()

		release := p.First("distribution") // packageListFormat translates "release" => "distribution"
		if release == nil {
			util.Log(0, "ERROR! package-list-hook %v returned entry without \"Release\": %v", config.PackageListHookPath, p)
			pkg.Remove()
			continue
		}

		for repopath := p.First("repository"); repopath != nil; repopath = repopath.Next() {
			new_mapRepoPath2FAIrelease[repopath.Text()] = release.Text()
		}

		pkgname := p.Get("package")
		if len(pkgname) == 0 {
			if p.First("repository") == nil { // Release/Repository groups without Package are okay, so only log error if there is no Repository
				util.Log(0, "ERROR! package-list-hook %v returned entry without \"Package\": %v", config.PackageListHookPath, p)
			}
			pkg.Remove()
			continue
		}

		if len(pkgname) > 1 {
			util.Log(0, "ERROR! package-list-hook %v returned entry with multiple \"Package\" values: %v", config.PackageListHookPath, p)
			pkg.Remove()
			continue
		}

		version := p.First("version")
		if version == nil {
			util.Log(0, "WARNING! package-list-hook %v returned entry for \"%v\" without \"Version\". Assuming \"1.0\"", config.PackageListHookPath, pkgname[0])
			p.Add("version", "1.0")
		}

		section := p.First("section")
		if section == nil {
			util.Log(0, "WARNING! package-list-hook %v returned entry for \"%v\" without \"Section\". Assuming \"main\"", config.PackageListHookPath, pkgname[0])
			p.Add("section", "main")
		}

		p.FirstOrAdd("timestamp").SetText(timestamp)

		description := p.First("description")
		if description == nil {
			description = p.Add("description", pkgname[0])
			description.EncodeBase64()
		}

		// add empty <template></template> if there is no <template> element.
		if p.First("template") == nil {
			p.Add("template")
		}

		accepted++
	}

	if accepted == 0 {
		util.Log(0, "ERROR! package-list-hook %v returned no valid entries", config.PackageListHookPath)
	} else {
		util.Log(1, "INFO! package-list-hook: %v/%v entries accepted into database. Processing time: %v", accepted, total, time.Since(start))
		packagedb.Init(plist)
		mapRepoPath2FAIrelease_mutex.Lock()
		defer mapRepoPath2FAIrelease_mutex.Unlock()
		mapRepoPath2FAIrelease = new_mapRepoPath2FAIrelease
		util.Log(1, "INFO! Repository path => FAI release %v", mapRepoPath2FAIrelease)
	}
}
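The hook invocation above follows a simple convention: stdout and stderr are captured separately, a non-zero exit status is treated as a hard error, and stderr output with a zero exit status is only logged as a warning while the stdout data is still used. A minimal stand-alone example of the same capture pattern (the shell command is just a placeholder for the hook):

package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("sh", "-c", "echo data; echo grumble >&2")
	var outbuf, errbuf bytes.Buffer
	cmd.Stdout = &outbuf
	cmd.Stderr = &errbuf
	err := cmd.Run()
	switch {
	case err != nil: // non-zero exit status (or exec failure) => hard error
		fmt.Println("ERROR:", err, errbuf.String())
	case errbuf.Len() != 0: // exit status 0 but noise on stderr => warning only
		fmt.Println("WARNING:", errbuf.String(), "- using stdout anyway:", outbuf.String())
	default:
		fmt.Println("OK:", outbuf.String())
	}
}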
Example no. 13
// Unit tests for the package github.com/mbenkmann/golib/util.
func Util_test() {
	fmt.Printf("\n==== util ===\n\n")

	addr, err := util.Resolve("1.2.3.4", "")
	check(err, nil)
	check(addr, "1.2.3.4")

	addr, err = util.Resolve("1.2.3.4:5", "")
	check(err, nil)
	check(addr, "1.2.3.4:5")

	addr, err = util.Resolve("::1:5", "")
	check(err, nil)
	check(addr, "[::1:5]")

	addr, err = util.Resolve("localhost:65535", "")
	check(err, nil)
	check(addr, "127.0.0.1:65535")

	addr, err = util.Resolve("localhost", "")
	check(err, nil)
	check(addr, "127.0.0.1")

	addr, err = util.Resolve("::1", "")
	check(err, nil)
	check(addr, "127.0.0.1")

	addr, err = util.Resolve("[::1]", "")
	check(err, nil)
	check(addr, "127.0.0.1")

	addr, err = util.Resolve("[::1]:12345", "")
	check(err, nil)
	check(addr, "127.0.0.1:12345")

	addr, err = util.Resolve("localhost:65535", "foo")
	check(err, nil)
	check(addr, "foo:65535")

	addr, err = util.Resolve("localhost", "foo")
	check(err, nil)
	check(addr, "foo")

	addr, err = util.Resolve("::1", "foo")
	check(err, nil)
	check(addr, "foo")

	addr, err = util.Resolve("[::1]", "foo")
	check(err, nil)
	check(addr, "foo")

	addr, err = util.Resolve("[::1]:12345", "foo")
	check(err, nil)
	check(addr, "foo:12345")

	addr, err = util.Resolve("", "")
	check(hasWords(err, "no", "such", "host"), "")
	check(addr, "")

	addr, err = util.Resolve(":10", "")
	check(hasWords(err, "no", "such", "host"), "")
	check(addr, ":10")

	check(util.WaitForDNS(3*time.Second), true)

	h, _ := exec.Command("hostname").CombinedOutput()
	hostname := strings.TrimSpace(string(h))

	ipp, _ := exec.Command("hostname", "-I").CombinedOutput()
	ips := strings.Fields(strings.TrimSpace(string(ipp)))
	addr, err = util.Resolve(hostname+":234", config.IP)
	check(err, nil)
	ip := ""
	for _, ip2 := range ips {
		if addr == ip2+":234" {
			ip = ip2
		}
	}
	check(addr, ip+":234")

	testLogging()

	buf := make([]byte, 80)
	for i := range buf {
		buf[i] = byte(util_test_rng.Intn(26) + 'a')
	}

	crap1 := &crappyConnection1{}
	n, err := util.WriteAll(crap1, buf)
	check(string(*crap1), string(buf))
	check(n, len(buf))
	check(err, nil)

	crap2 := &crappyConnection2{}
	n, err = util.WriteAll(crap2, buf)
	check(string(*crap2), string(buf))
	check(n, len(buf))
	check(err, nil)

	stalled1 := &stalledConnection1{}
	n, err = util.WriteAll(stalled1, buf)
	check(string(*stalled1), string(buf[0:16]))
	check(n, 16)
	check(err, io.ErrShortWrite)

	stalled2 := &stalledConnection2{}
	n, err = util.WriteAll(stalled2, buf)
	check(string(*stalled2), string(buf[0:16]))
	check(n, 16)
	check(err, io.ErrShortWrite)

	broken := &brokenConnection{}
	n, err = util.WriteAll(broken, buf)
	check(string(*broken), string(buf[0:16]))
	check(n, 16)
	check(err, io.ErrClosedPipe)

	panicker := func() {
		foobar = "bar"
		panic("foo")
	}

	var buffy bytes.Buffer
	util.LoggersSuspend()
	util.LoggerAdd(&buffy)
	defer util.LoggersRestore()

	util.WithPanicHandler(panicker)
	time.Sleep(200 * time.Millisecond) // make sure log message is written out
	check(foobar, "bar")
	check(len(buffy.String()) > 10, true)

	listener, err := net.Listen("tcp", "127.0.0.1:39390")
	if err != nil {
		panic(err)
	}

	go func() {
		r, err := listener.Accept()
		if err != nil {
			panic(err)
		}
		buf := make([]byte, 1)
		r.Read(buf)
		time.Sleep(10 * time.Second)
		r.Read(buf)
	}()
	long := make([]byte, 10000000)
	longstr := string(long)
	buffy.Reset()
	t0 := time.Now()
	util.SendLnTo("127.0.0.1:39390", longstr, 5*time.Second)
	duration := time.Since(t0)
	check(duration > 4*time.Second && duration < 6*time.Second, true)
	time.Sleep(200 * time.Millisecond) // make sure log message is written out
	check(strings.Contains(buffy.String(), "ERROR"), true)

	go func() {
		conn, err := listener.Accept()
		if err != nil {
			panic(err)
		}
		ioutil.ReadAll(conn)
	}()
	long = make([]byte, 10000000)
	longstr = string(long)
	buffy.Reset()
	t0 = time.Now()
	util.SendLnTo("127.0.0.1:39390", longstr, 5*time.Second)
	duration = time.Since(t0)
	check(duration < 2*time.Second, true)
	time.Sleep(200 * time.Millisecond) // make sure log message is written out
	check(buffy.String(), "")

	// Test that ReadLn() times out properly
	go func() {
		_, err := net.Dial("tcp", "127.0.0.1:39390")
		if err != nil {
			panic(err)
		}
	}()
	conn, err := listener.Accept()
	if err != nil {
		panic(err)
	}
	t0 = time.Now()
	st, err := util.ReadLn(conn, 5*time.Second)
	duration = time.Since(t0)
	check(duration > 4*time.Second && duration < 6*time.Second, true)
	check(st, "")
	check(hasWords(err, "timeout"), "")

	// Test that ReadLn() returns io.EOF if last line not terminated by \n
	go func() {
		conn, err := net.Dial("tcp", "127.0.0.1:39390")
		if err != nil {
			panic(err)
		}
		conn.Write([]byte("foo\r"))
		conn.Close()
	}()
	conn, err = listener.Accept()
	if err != nil {
		panic(err)
	}
	st, err = util.ReadLn(conn, 5*time.Second)
	check(err, io.EOF)
	check(st, "foo")

	go func() {
		conn, err := net.Dial("tcp", "127.0.0.1:39390")
		if err != nil {
			panic(err)
		}
		conn.Write([]byte("\r\r\n\rfo\ro\nbar\r\nfoxtrott"))
		conn.Close()
	}()
	conn, err = listener.Accept()
	if err != nil {
		panic(err)
	}
	// Test proper trimming of multiple \r
	st, err = util.ReadLn(conn, 0)
	check(err, nil)
	check(st, "")
	// Test that the empty first line has actually been read
	// and that the next ReadLn() reads the 2nd line
	// Also test that negative timeouts work the same as timeout==0
	// Also test that \r is not trimmed at start and within line.
	st, err = util.ReadLn(conn, -1*time.Second)
	check(err, nil)
	check(st, "\rfo\ro")
	// Check 3rd line
	st, err = util.ReadLn(conn, 0)
	check(err, nil)
	check(st, "bar")
	// Check 4th line and io.EOF error
	st, err = util.ReadLn(conn, 0)
	check(err, io.EOF)
	check(st, "foxtrott")

	// Test that delayed reads work with timeout==0
	go func() {
		conn, err := net.Dial("tcp", "127.0.0.1:39390")
		if err != nil {
			panic(err)
		}
		time.Sleep(1 * time.Second)
		_, err = conn.Write([]byte("foo\r\n"))
		if err != nil {
			panic(err)
		}
		time.Sleep(2 * time.Second)
	}()
	conn, err = listener.Accept()
	if err != nil {
		panic(err)
	}
	t0 = time.Now()
	st, err = util.ReadLn(conn, time.Duration(0))
	duration = time.Since(t0)
	check(duration < 2*time.Second, true)
	check(duration > 800*time.Millisecond, true)
	check(err, nil)
	check(st, "foo")

	counter := util.Counter(13)
	var b1 UintArray = make([]uint64, 100)
	var b2 UintArray = make([]uint64, 100)
	done := make(chan bool)
	fill := func(b UintArray) {
		for i := 0; i < 100; i++ {
			b[i] = <-counter
			time.Sleep(1 * time.Millisecond)
		}
		done <- true
	}
	go fill(b1)
	go fill(b2)
	<-done
	<-done
	check(sort.IsSorted(&b1), true)
	check(sort.IsSorted(&b2), true)
	var b3 UintArray = make([]uint64, 200)
	i := 0
	j := 0
	k := 0
	for i < 100 || j < 100 {
		if i == 100 {
			b3[k] = b2[j]
			j++
			k++
			continue
		}
		if j == 100 {
			b3[k] = b1[i]
			i++
			k++
			continue
		}
		if b1[i] == b2[j] {
			check(b1[i] != b2[j], true)
			break
		}
		if b1[i] < b2[j] {
			b3[k] = b1[i]
			i++
		} else {
			b3[k] = b2[j]
			j++
		}
		k++
	}

	one_streak := true
	b5 := make([]uint64, 200)
	for i := 0; i < 200; i++ {
		if i < 100 && b1[i] != uint64(13+i) && b2[i] != uint64(13+i) {
			one_streak = false
		}
		b5[i] = uint64(13 + i)
	}

	check(b3, b5)
	check(one_streak, false) // Check whether goroutines were actually executed concurrently rather than in sequence

	tempdir, err := ioutil.TempDir("", "util-test-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tempdir)
	fpath := tempdir + "/foo.log"
	logfile := util.LogFile(fpath)
	check(logfile.Close(), nil)
	n, err = util.WriteAll(logfile, []byte("Test"))
	check(err, nil)
	check(n, 4)
	check(logfile.Close(), nil)
	n, err = util.WriteAll(logfile, []byte("12"))
	check(err, nil)
	check(n, 2)
	n, err = util.WriteAll(logfile, []byte("3"))
	check(err, nil)
	check(n, 1)
	check(os.Rename(fpath, fpath+".old"), nil)
	n, err = util.WriteAll(logfile, []byte("Fo"))
	check(err, nil)
	check(n, 2)
	f2, _ := os.OpenFile(fpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
	f2.Write([]byte("o"))
	f2.Close()
	n, err = util.WriteAll(logfile, []byte("bar"))
	check(err, nil)
	check(n, 3)
	check(logfile.Close(), nil)
	data, err := ioutil.ReadFile(fpath)
	check(err, nil)
	if err == nil {
		check(string(data), "Foobar")
	}
	data, err = ioutil.ReadFile(fpath + ".old")
	check(err, nil)
	if err == nil {
		check(string(data), "Test123")
	}

	test_time := time.Date(2013, time.January, 20, 14, 7, 21, 0, time.Local)
	check(util.MakeTimestamp(test_time), "20130120140721")
	test_time = time.Date(2013, time.January, 20, 14, 7, 21, 0, time.UTC)
	check(util.MakeTimestamp(test_time), "20130120140721")
	test_time = time.Date(2013, time.January, 20, 14, 7, 21, 0, time.FixedZone("Fooistan", 45678))
	check(util.MakeTimestamp(test_time), "20130120140721")
	illegal := time.Unix(0, 0)
	buffy.Reset()
	check(util.ParseTimestamp(""), illegal)
	time.Sleep(200 * time.Millisecond) // make sure log message is written out
	check(strings.Contains(buffy.String(), "ERROR"), true)
	buffy.Reset()
	check(util.ParseTimestamp("20139910101010"), illegal)
	time.Sleep(200 * time.Millisecond) // make sure log message is written out
	check(strings.Contains(buffy.String(), "ERROR"), true)
	check(util.ParseTimestamp("20131110121314"), time.Date(2013, time.November, 10, 12, 13, 14, 0, time.Local))
	check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))
	test_time = test_time.Add(2400 * time.Hour)
	check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))
	test_time = test_time.Add(2400 * time.Hour)
	check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))
	test_time = test_time.Add(2400 * time.Hour)
	check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))
	test_time = test_time.Add(2400 * time.Hour)
	check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))

	diff := time.Since(util.ParseTimestamp(util.MakeTimestamp(time.Now())))
	if diff < time.Second {
		diff = 0
	}
	check(diff, time.Duration(0))

	t0 = time.Now()
	util.WaitUntil(t0.Add(-10 * time.Second))
	util.WaitUntil(t0.Add(-100 * time.Minute))
	dur := time.Now().Sub(t0)
	if dur < 1*time.Second {
		dur = 0
	}
	check(dur, 0)
	t0 = time.Now()
	util.WaitUntil(t0.Add(1200 * time.Millisecond))
	dur = time.Now().Sub(t0)
	if dur >= 1200*time.Millisecond && dur <= 1300*time.Millisecond {
		dur = 1200 * time.Millisecond
	}
	check(dur, 1200*time.Millisecond)

	mess := "WaitUntil(Jesus first birthday) takes forever"
	go func() {
		util.WaitUntil(time.Date(1, time.December, 25, 0, 0, 0, 0, time.UTC))
		mess = ""
	}()
	time.Sleep(100 * time.Millisecond)
	check(mess, "")

	mess = "WaitUntil(1000-11-10 00:00:00) takes forever"
	go func() {
		util.WaitUntil(time.Date(1000, time.October, 11, 0, 0, 0, 0, time.UTC))
		mess = ""
	}()
	time.Sleep(100 * time.Millisecond)
	check(mess, "")

	testBase64()
}
Example no. 14
// This function runs in a single goroutine and is responsible for handling
// all actions that affect the jobDB as well as starting local jobs whose time
// has come.
// The general idea behind synchronized job processing is this:
//  * a single goroutine processes all requests that affect the jobDB and pushes
//    the resulting changes into the ForeignJobUpdates queue
//  * a single goroutine processes the items from ForeignJobUpdates and passes them
//    on to the appropriate PeerConnection(s).
//  * each PeerConnection has a single goroutine forwarding the updates over a
//    single TCP connection to its respective peer.
//  * The above ensures that each peer receives foreign_job_updates messages in
//    exactly the same order in which the corresponding edits are made on the jobDB.
//    This makes sure that a peer that applies foreign_job_updates messages in
//    order will always have a consistent jobdb.
//  * Because <sync>all</sync> messages are prepared by the same single goroutine
//    that performs the edits and creates <sync>ordered</sync> messages and because
//    all these messages go over the same channels, they cannot overtake each other
//    and will always fit together.
func handleJobDBRequests() {
	groom_ticker := time.Tick(config.JobDBGroomInterval)
	hour, min, _ := time.Now().Clock()
	next_groom_minutes_since_midnight := hour*60 + min + int(config.JobDBGroomInterval/time.Minute)
	backwardsjump := false
	var request *jobDBRequest
	for {
		select {
		case request = <-jobDBRequests:
			request.Action(request)

		case _ = <-groom_ticker:
			hour, min, _ = time.Now().Clock()
			minutes_since_midnight := hour*60 + min

			// If everything is normal, groomlag should be 0. A negative number
			// can only happen if the clock changes, because (unless there's a bug)
			// groom_ticker never fires too early.
			// The value 1 is possible in the pathological case where the timer
			// fires just before the minute wraps around and the minute then wraps
			// during the few milliseconds between the timer firing and us reading
			// the clock.
			groomlag := minutes_since_midnight - next_groom_minutes_since_midnight
			// normalize groomlag to be in the range (-12*60,12*60]
			if groomlag <= -12*60 {
				groomlag += 24 * 60
			}
			if groomlag > 12*60 {
				groomlag -= 24 * 60
			}

			if groomlag > 3 { // we accept 3 minutes lag as normal (extremely high server load)
				// Do not groom jobDB after a clock jump forward because groomJobDB()
				// would assume that jobs have not started correctly even though they
				// haven't got their chance yet and will probably start up in the next
				// few minutes.
				// NOTE: The message does not include the word "FORWARD" because
				// a backwards adjustment by an amount that is not a multiple of
				// config.JobDBGroomInterval will first trigger the BACKWARD case
				// and then trigger this case at the next grooming time. So we
				// keep this message phrased in a way that it will not create the
				// mistaken impression that the clock jumps around wildly.
				util.Log(1, "INFO! Grooming jobdb SKIPPED because of clock adjustment")

				// The next grooming will take place as usual.
				next_groom_minutes_since_midnight = minutes_since_midnight + int(config.JobDBGroomInterval/time.Minute)

				// Ping processPendingActions because as mentioned further above we can
				// enter this case not only after an actual forwards jump of the clock
				// but also after a backwards jump once our clock has finally caught up.
				// And in that case the comment further below still applies, that we
				// need to ping processPendingActions to make sure that jobs are
				// triggered before the next grooming.
				go func() { processPendingActions <- true }()

				// Clear backwardsjump marker if it is set (which is possible; see
				// previous comments)
				backwardsjump = false

			} else if groomlag < -3 { // we accept groom_ticker to fire up to 3 minutes early (clock adjustment for drift)
				util.Log(1, "INFO! Grooming jobdb SKIPPED because of clock jump BACKWARDS")

				// We keep next_groom_minutes_since_midnight at its current value
				// (which is in the future) until our wall clock has caught up.
				// We do this so that every config.JobDBGroomInterval we run into
				// this case again and can ping processPendingActions, because
				// if the backwards jump happened right between the firing of
				// a job's processPendingActions ping and the time the database
				// is queried for pending jobs, the query will not see the jobs
				// as pending and won't trigger them. The next groomJobDB() would then
				// discover the unlaunched jobs, report a bug and kill them.
				// But by pinging processPendingActions here without grooming until
				// our clock has caught up with the originally scheduled grooming time,
				// we make sure that jobs will be started, if not at the right time,
				// at least before the next grooming.
				go func() { processPendingActions <- true }()

				backwardsjump = true

			} else {
				next_groom_minutes_since_midnight = minutes_since_midnight + int(config.JobDBGroomInterval/time.Minute)
				groom_delay := time.Duration(0)
				// If we get here after a backward jump of the clock, there may
				// still be jobs waiting to be executed, so we ping
				// processPendingActions and delay the grooming a little
				if backwardsjump {
					go func() { processPendingActions <- true }()
					groom_delay = time.Minute
					backwardsjump = false
				}

				go func(dly time.Duration) {
					time.Sleep(dly)
					util.WithPanicHandler(groomJobDB)
				}(groom_delay)
			}

		case _ = <-processPendingActions:
			/*** WARNING! WARNING! ***
			  Using the function JobsQuery() here will cause deadlock!
			  Other functions like JobsModifyLocal() are okay, but remember
			  that they will not be executed until this case ends.
			  *************************/
			localwait := xml.FilterSimple("siserver", config.ServerSourceAddress,
				"status", "waiting")
			beforenow := xml.FilterRel("timestamp", util.MakeTimestamp(time.Now()), -1, 0)
			filter := xml.FilterAnd([]xml.HashFilter{localwait, beforenow})
			JobsModifyLocal(filter, xml.NewHash("job", "status", "launch"))
		}
	}
}
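The doc comment above sums up the serialization idea: every change to the jobDB is funneled through this one goroutine via a request channel, so the resulting foreign_job_updates leave in a single well-defined order. The jobDBRequest type itself is not shown in this excerpt; the following is a stripped-down sketch of the same pattern with a hypothetical request type:

package main

import (
	"fmt"
	"sync"
)

// request mimics the idea behind go-susi's jobDBRequest: the Action closure is
// executed by the single database goroutine, never by the caller itself.
type request struct {
	Action func()
	Done   chan bool
}

func main() {
	requests := make(chan *request)
	jobdb := map[string]string{} // stand-in for the jobDB; only one goroutine touches it

	go func() { // the lone worker, analogous to handleJobDBRequests()
		for r := range requests {
			r.Action()
			close(r.Done)
		}
	}()

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) { // many concurrent callers
			defer wg.Done()
			r := &request{Done: make(chan bool)}
			r.Action = func() { jobdb[fmt.Sprintf("job%d", i)] = "waiting" }
			requests <- r
			<-r.Done // wait until the database goroutine has applied the edit
		}(i)
	}
	wg.Wait()
	fmt.Println(len(jobdb), "jobs stored; the map itself never needed a lock")
}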