// Like GosaDecrypt() but operates in-place on buf.
// Returns true if decryption successful and false if not.
// If false is returned, the buffer contents may be destroyed, but only
// if further decryption attempts with other keys would be pointless anyway,
// because of some fatal condition (such as the data not being a multiple of
// the cipher's block size).
func GosaDecryptBuffer(buf *bytes.Buffer, key string) bool {
  buf.TrimSpace()

  if buf.Len() < 11 { return false } // minimum length of unencrypted <xml></xml>

  data := buf.Bytes()
  if string(data[0:5]) == "<xml>" { return true }

  // Fixes the following:
  // * gosa-si bug in the following line:
  //     if( $client_answer =~ s/session_id=(\d+)$// ) {
  //   This leaves the "." before "session_id" which breaks base64
  // * new gosa-si protocol has ";IP:PORT" appended to message
  //   which also breaks base64
  for semicolon_period := 0; semicolon_period < len(data); semicolon_period++ {
    if data[semicolon_period] == ';' || data[semicolon_period] == '.' {
      buf.Trim(0, semicolon_period)
      data = buf.Bytes()
      break
    }
  }

  aescipher, _ := aes.NewCipher([]byte(util.Md5sum(key)))
  crypter := cipher.NewCBCDecrypter(aescipher, config.InitializationVector)

  cryptotest := make([]byte, (((3*aes.BlockSize)+2)/3)<<2)
  n := copy(cryptotest, data)
  cryptotest = cryptotest[0:n]
  cryptotest = util.Base64DecodeInPlace(cryptotest)
  n = (len(cryptotest) / aes.BlockSize) * aes.BlockSize
  cryptotest = cryptotest[0:n]
  crypter.CryptBlocks(cryptotest, cryptotest)
  if !strings.Contains(string(cryptotest), "<xml>") { return false }

  data = util.Base64DecodeInPlace(data)
  buf.Trim(0, len(data))
  data = buf.Bytes()

  if buf.Len()%aes.BlockSize != 0 {
    // this condition is fatal => further decryption attempts are pointless
    buf.Reset()
    return false
  }

  crypter = cipher.NewCBCDecrypter(aescipher, config.InitializationVector)
  crypter.CryptBlocks(data, data)

  buf.TrimSpace() // removes 0 padding, too

  return true
}
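// Illustrative usage sketch (added for this edit, not part of the original source):
// trying several candidate keys in turn. The keys parameter and function name are
// assumptions for this example; per the doc comment above, a false return with a
// destroyed buffer only happens when further attempts would be pointless anyway.
func decryptWithAnyKey(buf *bytes.Buffer, keys []string) bool {
  for _, key := range keys {
    if GosaDecryptBuffer(buf, key) { return true } // buf now holds "<xml>...</xml>"
  }
  return false
}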
func faiConnection(conn *net.TCPConn) {
  defer conn.Close()
  var err error

  err = conn.SetKeepAlive(true)
  if err != nil {
    util.Log(0, "ERROR! SetKeepAlive: %v", err)
  }

  var buf bytes.Buffer
  defer buf.Reset()
  readbuf := make([]byte, 4096)
  n := 1
  for n != 0 {
    n, err = conn.Read(readbuf)
    if err != nil && err != io.EOF {
      util.Log(0, "ERROR! Read: %v", err)
    }
    if n == 0 && err == nil {
      util.Log(0, "ERROR! Read 0 bytes but no error reported")
    }

    // Find complete lines terminated by '\n' and process them.
    for start := 0; ; {
      eol := start
      for ; eol < n; eol++ {
        if readbuf[eol] == '\n' { break }
      }

      // no \n found, append to buf and continue reading
      if eol == n {
        buf.Write(readbuf[start:n])
        break
      }

      // append the rest of the line to the buffered contents
      buf.Write(readbuf[start:eol])
      start = eol + 1

      buf.TrimSpace()

      util.Log(2, "DEBUG! FAI monitor message from %v: %v", conn.RemoteAddr(), buf.String())
      buf.Reset()
    }
  }

  if buf.Len() != 0 {
    util.Log(2, "DEBUG! Incomplete FAI monitor message (i.e. not terminated by \"\\n\") from %v: %v", conn.RemoteAddr(), buf.String())
  }
}
func main() {
  if len(os.Args) != 3 && len(os.Args) != 2 {
    fmt.Fprintf(os.Stderr, "USAGE: %v", USAGE)
    os.Exit(0)
  }

  var input bytes.Buffer
  defer input.Reset()

  if len(os.Args) == 3 {
    input.WriteString(os.Args[2])
  } else {
    buf, err := ioutil.ReadAll(os.Stdin)
    if err != nil {
      fmt.Fprintf(os.Stderr, "%v", err)
      os.Exit(1)
    }
    input.Write(buf)
  }

  security.GosaEncryptBuffer(&input, os.Args[1])
  fmt.Fprintln(os.Stdout, input.String())
}
// Handles the message "gosa_get_log_file_by_date_and_mac".
//  xmlmsg: the decrypted and parsed message
// Returns:
//  unencrypted reply
func gosa_get_log_file_by_date_and_mac(xmlmsg *xml.Hash) *xml.Hash {
  macaddress := xmlmsg.Text("mac")
  lmac := strings.ToLower(macaddress)
  subdir := xmlmsg.Text("date")
  log_file := xmlmsg.Text("log_file")

  header := "get_log_file_by_date_and_mac"
  x := xml.NewHash("xml", "header", header)
  x.Add(header)
  x.Add("source", config.ServerSourceAddress)
  x.Add("target", "GOSA")
  x.Add("session_id", "1")

  if !macAddressRegexp.MatchString(macaddress) {
    emsg := fmt.Sprintf("Illegal or missing <mac> element in message: %v", xmlmsg)
    util.Log(0, "ERROR! %v", emsg)
    return ErrorReplyXML(emsg)
  }

  // As a precaution, make sure subdir and log_file contain no slashes.
  subdir = strings.Replace(subdir, "/", "_", -1)
  log_file = strings.Replace(log_file, "/", "_", -1)

  if subdir == "" {
    // When you open the installation logs in GOsa for the first time, GOsa sends
    // a broken request that is characterized by an empty <date> and log_file==0.
    // If we return an error, GOsa presents it to the user which
    // gives a bad experience. So we instead return an empty reply in this special case.
    if log_file == "0" { return x }

    emsg := fmt.Sprintf("Missing or empty <date> element in message: %v", xmlmsg)
    util.Log(0, "ERROR! %v", emsg)
    return ErrorReplyXML(emsg)
  }

  if log_file == "" {
    emsg := fmt.Sprintf("Missing or empty <log_file> element in message: %v", xmlmsg)
    util.Log(0, "ERROR! %v", emsg)
    return ErrorReplyXML(emsg)
  }

  f, err := os.Open(path.Join(config.FAILogPath, lmac, subdir, log_file))
  if err != nil {
    emsg := fmt.Sprintf("gosa_get_log_file_by_date_and_mac: %v", err)
    util.Log(0, "ERROR! %v", emsg)
    return ErrorReplyXML(emsg)
  }
  defer f.Close()

  var b bytes.Buffer
  defer b.Reset()
  buffy := make([]byte, 65536)
  for {
    n, err := f.Read(buffy)
    b.Write(buffy[0:n])
    if err == io.EOF { break }
    if err != nil {
      emsg := fmt.Sprintf("gosa_get_log_file_by_date_and_mac: %v", err)
      util.Log(0, "ERROR! %v", emsg)
      return ErrorReplyXML(emsg)
    }
    if n == 0 {
      util.Log(0, "WARNING! Read returned 0 bytes but no error. Assuming EOF")
      break
    }
  }

  idx := (((b.Len() + 2) / 3) << 2) - b.Len()
  b.Write0(idx)
  data := b.Bytes()
  copy(data[idx:], data)
  data = util.Base64EncodeInPlace(data, idx)

  data_element := x.Add(log_file)
  // To reduce memory leak potential, we append in pieces rather than as one large string
  end := xml.MaxFragmentLength
  for ; end < len(data); end += xml.MaxFragmentLength {
    data_element.AppendString(string(data[end-xml.MaxFragmentLength : end]))
  }
  data_element.AppendString(string(data[end-xml.MaxFragmentLength:]))

  return x
}
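// Worked example (added for illustration, not part of the original source) of the
// in-place base64 sizing used above: for a payload of m bytes the encoded form needs
// ((m+2)/3)*4 bytes, so idx = encodedLen - m zero bytes are appended with Write0, the
// payload is shifted to the end of the buffer, and the encoder writes its output from
// the front over the gap, avoiding a second buffer.
// E.g. m = 10  =>  encodedLen = ((10+2)/3)<<2 = 16  =>  idx = 6.
func base64InPlaceSpace(m int) (encodedLen, idx int) {
  encodedLen = ((m + 2) / 3) << 2
  idx = encodedLen - m
  return encodedLen, idx
}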
// Converts LDIF data into a Hash. The outermost tag will always be "xml".
// If an error occurs, the returned Hash may contain partial data, but it
// is never nil.
//
//  itemtag: If non-empty, each object in the LDIF will be inside an element
//       whose outermost tag is itemtag. If itemtag == "", all objects in the
//       LDIF are merged, i.e. all their combined attributes are directly
//       inside the surrounding "xml" tag.
//  casefold: If true, all attribute names are converted to lowercase.
//       If false, they are left exactly as found in the LDIF.
//  ldif: A []byte, string, io.Reader or *exec.Cmd that provides the LDIF data.
//       Understands all ldapsearch formats with an arbitrary number of "-L" switches.
//  elementInfo: If one or more ElementInfo structs are passed, only attributes
//       matching one of them will be accepted, and the first match in the
//       elementInfo list determines how the attribute in the LDIF will be
//       converted to an element in the result Hash.
//       If casefold==true, matching is done case-insensitively. This requires that
//       the LDIFAttributeName fields are all lowercase.
func LdifToHash(itemtag string, casefold bool, ldif interface{}, elementInfo ...*ElementInfo) (xml *Hash, err error) {
  x := NewHash("xml")

  var xmldata []byte
  switch ld := ldif.(type) {
    case []byte: xmldata = ld
    case string: xmldata = []byte(ld)
    case io.Reader:
      xmldata, err = ioutil.ReadAll(ld)
      if err != nil { return x, err }
    case *exec.Cmd:
      var outbuf bytes.Buffer
      defer outbuf.Reset()
      var errbuf bytes.Buffer
      defer errbuf.Reset()
      oldout := ld.Stdout
      olderr := ld.Stderr
      ld.Stdout = &outbuf
      ld.Stderr = &errbuf
      err := ld.Run()
      ld.Stdout = oldout
      ld.Stderr = olderr
      errstr := errbuf.String()
      if errstr != "" { err = fmt.Errorf(errstr) }
      if err != nil { return x, err }
      xmldata = outbuf.Bytes()
    default:
      return x, fmt.Errorf("ldif argument has unsupported type")
  }

  item := x
  var attr *Hash
  new_item := true
  end := len(xmldata)
  b64 := false
  var info *ElementInfo = nil
  skip := false
  i := 0
  start := 0

  if !match(xmldata, i, "version:") { goto wait_for_item }

  ///////////////////////////////////////////////////////////////////////
  skip_line:
  ///////////////////////////////////////////////////////////////////////
  for {
    if i == end { goto end_of_input }
    if xmldata[i] == '\n' { break }
    i++
  }
  // Even comments can have line continuations in LDIF, so we need to
  // continue skipping if there is a continuation.
  i++
  if i < end && (xmldata[i] == ' ' || xmldata[i] == '\t') { goto skip_line }

  ///////////////////////////////////////////////////////////////////////
  wait_for_item:
  ///////////////////////////////////////////////////////////////////////
  new_item = true
  for {
    if i == end { goto end_of_input }
    if ch := xmldata[i]; ch > ' ' {
      if match(xmldata, i, "# search result") { goto end_of_input }
      if ch < 'A' { goto skip_line } // skip garbage (typically comments)
      break
    }
    i++
  }

  ///////////////////////////////////////////////////////////////////////
  scan_attribute_name:
  ///////////////////////////////////////////////////////////////////////
  start = i
  b64 = false
  info = nil
  skip = false
  for {
    if i == end { goto end_of_input }
    if xmldata[i] == '#' { goto skip_broken_attribute }
    if xmldata[i] == ':' {
      colon := i
      if colon == start { goto skip_broken_attribute } // line that starts with ":" => Skip
      i++
      if i < end && xmldata[i] == ':' {
        b64 = true
        i++
      }

      // See if we have an ElementInfo.
      if len(elementInfo) > 0 {
        for _, inf := range elementInfo {
          if equals(casefold, xmldata[start:colon], inf.LDIFAttributeName) {
            info = inf
            break
          }
        }
        skip = (info == nil)
      }

      // if separate items are requested, create <itemtag></itemtag> element as new item;
      // otherwise item == x stays as it is
      if new_item && itemtag != "" && !skip {
        item = x.Add(itemtag)
        new_item = false
      }

      // If no elementInfo arguments have been provided, we create a new element
      // directly from the LDIF attribute's name.
      if len(elementInfo) == 0 {
        attrname := string(xmldata[start:colon])
        if casefold { attrname = strings.ToLower(attrname) }
        attr = item.Add(attrname)
      } else { // We create the new element from the ElementInfo
        if !skip { attr = item.Add(info.ElementName) }
      }

      if i == end { goto end_of_input }

      // skip 1 space or tab after colon
      if xmldata[i] == ' ' || xmldata[i] == '\t' { i++ }

      break
    }
    i++
  }

  ///////////////////////////////////////////////////////////////////////
  //scan_value_fragment:
  ///////////////////////////////////////////////////////////////////////
  start = i
  for {
    if i-start > MaxFragmentLength {
      if !skip { attr.AppendString(string(xmldata[start:i])) }
      start = i
    }

    if i == end || xmldata[i] == '\n' {
      if !skip { attr.AppendString(string(xmldata[start:i])) }
      i++
      // 1 tab or space is a line continuation, everything else ends the value
      if i >= end || (xmldata[i] != ' ' && xmldata[i] != '\t') { break }
      start = i + 1 // start next fragment after line continuation marker
    }

    i++
  }

  ///////////////////////////////////////////////////////////////////////
  //attribute_value_scanned:
  ///////////////////////////////////////////////////////////////////////
  if !skip {
    if info != nil {
      if b64 {
        if !info.Base64 { attr.DecodeBase64() }
      } else {
        if info.Base64 { attr.EncodeBase64() }
      }
    } else {
      if b64 { attr.DecodeBase64() }
    }
  }

  if i >= end { goto end_of_input }
  if xmldata[i] == '\n' { goto wait_for_item } // empty line => next item
  goto scan_attribute_name

  ///////////////////////////////////////////////////////////////////////
  skip_broken_attribute:
  ///////////////////////////////////////////////////////////////////////
  for {
    if i == end { goto end_of_input }
    if xmldata[i] == '\n' { break }
    i++
  }
  i++
  if i >= end { goto end_of_input }
  if xmldata[i] == '\n' { goto wait_for_item } // empty line => next item
  // Even comments can have line continuations in LDIF, so we need to
  // continue skipping if there is a continuation.
  if xmldata[i] == ' ' || xmldata[i] == '\t' { goto skip_broken_attribute }
  goto scan_attribute_name

  ///////////////////////////////////////////////////////////////////////
  end_of_input:
  ///////////////////////////////////////////////////////////////////////
  // NOTE:
  // Don't assume anything about i here.
  // i > len(xmldata) is possible, as well as i < len(xmldata)

  return x, nil
}
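// Illustrative usage sketch (added for this edit, not part of the original source):
// converting a small LDIF snippet into a Hash with one <item> element per LDAP object
// and lowercase attribute names. The LDIF content and variable names are made up.
func ldifToHashExample() {
  ldif := `version: 1
dn: cn=foo,ou=systems,o=example
cn: foo
macAddress: 00:0C:29:50:A3:52

dn: cn=bar,ou=systems,o=example
cn: bar
`
  hash, err := LdifToHash("item", true, ldif)
  if err != nil {
    // hash may still contain partial data; it is never nil
  }
  _ = hash // e.g. hash.First("item").Text("macaddress") should yield "00:0C:29:50:A3:52"
}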
// Executes program and reads from its standard output the log files to transfer to
// the target server. See fai-savelog-hook in the manual.
func Send_clmsg_save_fai_log(target string, program string) {
  var buffy bytes.Buffer
  defer buffy.Reset()

  clientpackageskey := config.ModuleKey["[ClientPackages]"]
  // If [ClientPackages]/key is missing, take the last key in the list
  // (We don't take the 1st because that would be "dummy-key").
  if clientpackageskey == "" { clientpackageskey = config.ModuleKeys[len(config.ModuleKeys)-1] }

  util.Log(1, "INFO! Launching fai-savelog-hook %v", program)
  start := time.Now()
  env := config.HookEnvironment()
  cmd := exec.Command(program)
  cmd.Env = append(env, os.Environ()...)
  out, err := cmd.StdoutPipe()
  if err != nil {
    util.Log(0, "ERROR! Could not get stdout pipe for %v: %v", program, err)
    return
  }
  defer out.Close()

  in, err := cmd.StdinPipe()
  if err != nil {
    util.Log(0, "ERROR! Could not get stdin pipe for %v: %v", program, err)
    return
  }
  defer in.Close()

  err = cmd.Start()
  if err != nil {
    util.Log(0, "ERROR! Could not launch %v: %v", program, err)
    return
  }

  buffy.WriteString("<xml><header>CLMSG_save_fai_log</header><source>")
  buffy.WriteString(config.ServerSourceAddress)
  buffy.WriteString("</source>")
  buffy.WriteString("<target>")
  buffy.WriteString(target)
  buffy.WriteString("</target>")
  buffy.WriteString("<macaddress>")
  buffy.WriteString(config.MAC)
  buffy.WriteString("</macaddress>")
  buffy.WriteString("<CLMSG_save_fai_log>")

  reader := bufio.NewReader(out)
  fai_action := ""
  for {
    line, err := reader.ReadString('\n')
    if err != nil {
      util.Log(0, "ERROR! Error reading stdout from %v: %v", program, err)
      return
    }
    line = strings.TrimSpace(line)
    if line == "install" || line == "softupdate" {
      fai_action = line
      break
    }
    buffy.WriteString(line)
  }

  util.Log(1, "INFO! Received %v bytes in %v from fai-savelog-hook", buffy.Len(), time.Since(start))

  buffy.WriteString("</CLMSG_save_fai_log>")
  buffy.WriteString("<fai_action>")
  buffy.WriteString(fai_action)
  buffy.WriteString("</fai_action>")
  buffy.WriteString("</xml>")

  util.Log(1, "INFO! Sending %v bytes of log files to %v", buffy.Len(), target)
  security.SendLnTo(target, buffy.String(), clientpackageskey, false)

  in.Write([]byte{'\n'}) // notify hook that transfer is complete
}
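// Minimal counterpart sketch (added for illustration, not part of the original source)
// of the hook protocol implied by the reader loop above: the hook writes its log data
// to stdout, then a line containing "install" or "softupdate", and finally waits on
// stdin until Send_clmsg_save_fai_log signals that the transfer is complete. The
// placeholder line below is made up; the actual line format is defined by the
// fai-savelog-hook documentation in the manual.
func exampleSaveFaiLogHook() {
  fmt.Println("...log data lines as described in the manual...")
  fmt.Println("install") // the fai_action line ("install" or "softupdate") ends the data
  bufio.NewReader(os.Stdin).ReadString('\n') // wait for the parent's "transfer complete" signal
}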
func testBuffer() {
  var b bytes.Buffer
  check(b.String(), "") // String() on fresh variable
  b.Reset()             // Reset() on fresh variable
  check(b.String(), "") // String() after Reset()
  b.Reset()             // Reset() after Reset()
  check(b.String(), "")
  check(b.Len(), 0)

  // same tests as above with pointer
  b2 := &bytes.Buffer{}
  check(b2.String(), "")
  b2.Reset()
  check(b2.String(), "")
  b2.Reset()
  check(b2.String(), "")
  check(b2.Len(), 0)

  b2.WriteString("Dies ist ein Test!")
  check(b2.String(), "Dies ist ein Test!")
  check(b2.Len(), 18)

  n, err := b.Write(nil)
  check(n, 0)
  check(err, nil)
  check(b.String(), "")

  n, err = b.Write([]byte{})
  check(n, 0)
  check(err, nil)
  check(b.String(), "")

  check(b.Pointer(), nil)
  check(b.Capacity(), 0)
  check(b.Len(), 0)

  func() {
    defer func() {
      check(recover(), bytes.ErrTooLarge)
    }()
    b.Grow(-1)
  }()

  n, err = b.Write([]byte{'a'})
  check(n, 1)
  check(err, nil)
  check(b.String(), "a")
  check(b.Capacity() >= 1, true)
  check(b.Len(), 1)
  check(b.Pointer() != nil, true)

  check(b.Grow(11), 1)
  check(b.Capacity() >= 12, true)
  c := b.Capacity()
  p := b.Pointer()
  check(b.Grow(11), 1) // should not cause actual growth
  check(b.Pointer(), p)
  check(b.Capacity(), c)
  check(b.Len(), 1)

  ((*[2]byte)(b.Pointer()))[1] = 'z'
  check(b.Contains("z"), false)

  n, err = b.WriteString("Hallo")
  check(n, 5)
  check(err, nil)
  check(b.String(), "aHallo")
  check(b.Pointer(), p)
  check(b.Capacity(), c)
  check(b.Len(), 6)

  b.Reset()
  check(b.String(), "")
  check(b.Pointer(), nil)
  check(b.Capacity(), 0)
  check(b.Contains(""), true)
  check(b.Contains("a"), false)

  b.WriteString("Hallo")
  b.WriteByte(' ')
  b.Write([]byte{'d', 'i', 'e', 's'})
  b.WriteByte(' ')
  b.WriteString("ist ")
  b.WriteString("ein ")
  b.Write([]byte("Test"))
  check(b.String(), "Hallo dies ist ein Test")

  check(b.Contains("Hallo dies ist ein Test"), true)
  check(b.Contains("Test"), true)
  check(b.Contains("Hallo"), true)
  check(b.Contains("allo"), true)
  check(b.Contains(""), true)

  check(b.Split(" "), []string{"Hallo", "dies", "ist", "ein", "Test"})
  check(b.Split("X"), []string{"Hallo dies ist ein Test"})
  check(b.Split("Hallo dies ist ein Test"), []string{"", ""})
  check(b.Split("H"), []string{"", "allo dies ist ein Test"})
  check(b.Split("Test"), []string{"Hallo dies ist ein ", ""})
  check(b.Split("es"), []string{"Hallo di", " ist ein T", "t"})

  b.Reset()
  b.WriteString(" \n\t   Hallo \t\v\n")
  check(b.Len(), 15)
  p = b.Pointer()
  b.TrimSpace()
  check(b.String(), "Hallo")
  check(b.Len(), 5)
  check(b.Pointer(), p)

  b.Reset()
  b.WriteString(" \n\t    \t\v\n")
  check(b.Len(), 10)
  b.TrimSpace()
  check(b.Pointer(), nil)
  check(b.Len(), 0)
  check(b.Capacity(), 0)
  b.TrimSpace()
  check(b.Pointer(), nil)
  check(b.Len(), 0)
  check(b.Capacity(), 0)

  b.Reset()
  b.WriteString(" \n\t  Hallo")
  check(b.Len(), 10)
  p = b.Pointer()
  b.TrimSpace()
  check(b.String(), "Hallo")
  check(b.Len(), 5)
  check(b.Pointer(), p)

  b.Reset()
  b.WriteString("Hallo  \t\v\n")
  check(b.Len(), 10)
  p = b.Pointer()
  b.TrimSpace()
  check(b.String(), "Hallo")
  check(b.Len(), 5)
  check(b.Pointer(), p)

  b.Reset()
  b.WriteString(" ")
  check(b.Len(), 1)
  b.TrimSpace()
  check(b.Pointer(), nil)
  check(b.Len(), 0)
  check(b.Capacity(), 0)

  b.Reset()
  b.WriteString("Der Cottbuser Postkutscher kotzt in den Cottbuser Postkotzkasten")
  n = b.Len()
  c = b.Capacity()
  p = b.Pointer()
  b.Trim(-10, 2000)
  check(b.Len(), n)
  check(b.Capacity(), c)
  check(b.Pointer(), p)
  b.Trim(2000, -10)
  check(b.Len(), 0)
  check(b.Capacity(), 0)
  check(b.Pointer(), nil)

  b.WriteString("Der Cottbuser Postkutscher kotzt in den Cottbuser Postkotzkasten")
  b.Trim(4, 4)
  check(b.Len(), 0)
  check(b.Capacity(), 0)
  check(b.Pointer(), nil)

  b.WriteString("Der Cottbuser Postkutscher kotzt in den Cottbuser Postkotzkasten")
  n = b.Len()
  c = b.Capacity()
  p = b.Pointer()
  b.Trim(0, b.Len()-6)
  check(b.Len(), n-6)
  check(b.Capacity(), c)
  check(b.Pointer(), p)
  check(b.String(), "Der Cottbuser Postkutscher kotzt in den Cottbuser Postkotz")
  b.Trim(27, b.Len())
  check(b.Len(), n-6-27)
  check(b.Capacity(), c)
  check(b.Pointer(), p)
  check(b.String(), "kotzt in den Cottbuser Postkotz")
  b.Trim(1, b.Len()-1)
  check(b.Len(), n-6-27-2)
  check(b.Capacity(), c)
  check(b.Pointer(), p)
  check(b.String(), "otzt in den Cottbuser Postkot")

  b.Reset()
  b.Write0(-1)
  b.Write0(-100)
  b.Write0(0)
  check(b.Len(), 0)
  check(b.Capacity(), 0)
  check(b.Pointer(), nil)
  b.Write0(1)
  check(b.Len(), 1)
  check(b.Capacity(), 1)
  check(b.Bytes(), []byte{0})
  b.WriteByte(111)
  b.Write0(1)
  b.WriteByte(222)
  b.Write0(2)
  b.WriteByte(99)
  check(b.Len(), 7)
  check(b.Bytes(), []byte{0, 111, 0, 222, 0, 0, 99})

  b2.Reset()
  slices := [][]byte{}
  total := 0
  numfakeerrs := 0
  for total < 100000 {
    c = rand.Intn(30000)
    total += c
    sl := make([]byte, c)
    for i := range sl {
      sl[i] = byte(rand.Intn(256))
    }
    slices = append(slices, sl)
    b2.Write(sl)
    if total/30000 > numfakeerrs {
      slices = append(slices, nil)
      numfakeerrs++
    }
  }
  check(numfakeerrs, 3)
  slcopy := make([][]byte, len(slices))
  copy(slcopy, slices)
  slret := &sliceReturner{Slices: slcopy}
  b.Reset()
  check(b.Capacity(), 0)
  check(b.Len(), 0)
  check(b.Pointer(), nil)
  n = 0
  for i := 0; i < numfakeerrs; i++ {
    n64, err := b.ReadFrom(slret)
    n += int(n64)
    check(err, FakeError)
  }
  n64, err := b.ReadFrom(slret)
  n += int(n64)
  check(err, nil)
  check(n, total)
  check(b.Capacity() > b.Len(), true)
  check(b.Len(), total)

  contents := b.Bytes()
  contents2 := b2.Bytes()
  check(len(contents), len(contents2))
  n = 0
  for i := range contents {
    if contents[i] != contents2[i] { break }
    n++
  }
  check(n, total)

  b2.Reset()
  for i := range slices {
    for k := range slices[i] {
      slices[i][k] = 11
    }
    n, err = b.Read(slices[i])
    check(n, len(slices[i]))
    check(err, nil)
    b2.Write(slices[i])
  }
  check(b2.Len(), total)
  n, err = b.Read(slices[0])
  check(n, 0)
  check(err, io.EOF)

  contents = b.Bytes()
  contents2 = b2.Bytes()
  check(len(contents), len(contents2))
  n = 0
  for i := range contents {
    if contents[i] != contents2[i] { break }
    n++
  }
  check(n, total)

  b.WriteString("foo")
  foo := make([]byte, 10)
  n, err = b.Read(foo)
  check(n, 3)
  check(err, nil)
  check(string(foo[0:3]), "foo")

  n64, err = b.Seek(6700, 0)
  check(n64, 6700)
  check(err, nil)
  n64, err = b.Seek(-6000, 1)
  check(n64, 700)
  check(err, nil)
  n64, err = b.Seek(815, 1)
  check(n64, 1515)
  check(err, nil)
  n, err = b.Read(foo)
  check(n, len(foo))
  check(err, nil)
  check(foo, b2.Bytes()[1515:1515+len(foo)])
  n64, err = b.Seek(-3, 2)
  check(n64, total)
  check(err, nil)
  n, err = b.Read(foo)
  check(n, 3)
  check(err, nil)
  check(string(foo[0:3]), "foo")
  n64, err = b.Seek(999999, 0)
  check(n64, b.Len())
  check(err, nil)
  n64, err = b.Seek(-3, 1)
  check(n64, total)
  check(err, nil)
  n, err = b.Read(foo)
  check(n, 3)
  check(err, nil)
  check(string(foo[0:3]), "foo")
  n64, err = b.Seek(-815, 0)
  check(n64, 0)
  check(err, nil)
  n, err = b.Read(foo)
  check(n, len(foo))
  check(err, nil)
  check(foo, b2.Bytes()[0:len(foo)])
}
// Returns the data for the given request. request_re and reply are lists of
// equal length. If request matches request_re[i], then reply[i] specifies the
// data to return for the request. If reply[i] == "",
// then this function returns (nil, nil). If reply[i] starts with the
// character '|', the remainder is taken as the path of a hook to execute
// to generate the data. Otherwise reply[i] is taken as the path of the
// file whose contents to return as data.
//
// When executing a hook, an environment variable called "tftp_request"
// is passed containing the request string. If request_re[i] has a capturing
// group named "macaddress", the captured substring will be converted to
// a MAC address by converting to lowercase, removing all characters
// except 0-9a-f, left-padding to length 12 with 0s or truncating to
// length 12, and inserting ":"s. The result will be added to
// the hook environment in a variable named "macaddress", and if there
// is an LDAP object for that macaddress, its attributes will be added
// to the environment, too.
// Other named subexpressions in request_re[i] will be exported to the hook
// verbatim in like-named environment variables.
//
// ATTENTION! Do not forget to call Release() on the returned cacheEntry when you're
// done using it.
func getFile(request string, request_re []*regexp.Regexp, reply []string) (cacheEntry, error) {
  for i := range request_re {
    if subs := request_re[i].FindStringSubmatch(request); subs != nil {
      if reply[i] == "" { return nil, nil }

      if reply[i][0] != '|' { // plain file
        subsidx := request_re[i].FindStringSubmatchIndex(request)
        fpath := string(request_re[i].ExpandString(nil, reply[i], request, subsidx))
        util.Log(1, "INFO! TFTP mapping \"%v\" => \"%v\"", request, fpath)

        // We use fpath as cache key instead of request because
        // multiple requests may map to the same fpath and we want to avoid
        // caching the same file multiple times.
        entry := getCacheEntry(fpath, 60*time.Second)
        entry.Mutex.Lock()
        defer entry.Mutex.Unlock()

        if entry.LoadCount == 0 {
          file, err := os.Open(fpath)
          entry.Err = err
          if err == nil {
            defer file.Close()
            buffy := make([]byte, 65536)
            for {
              n, err := file.Read(buffy)
              entry.Data.Write(buffy[0:n])
              if err == io.EOF { break }
              if err != nil {
                entry.Data.Reset()
                entry.Err = err
              }
              if n == 0 {
                util.Log(0, "WARNING! Read returned 0 bytes but no error. Assuming EOF")
                break
              }
            }
          }
        } else {
          util.Log(1, "INFO! TFTP: Serving %v from cache", fpath)
        }

        entry.LoadCount++
        return entry, entry.Err

      } else { // hook
        hook := reply[i][1:] // cut off '|'

        // We need a few seconds of afterlife to deal with multiple requests in
        // short succession by the same loader due to delayed UDP packets.
        entry := getCacheEntry(request, 5*time.Second)
        entry.Mutex.Lock()
        defer entry.Mutex.Unlock()

        if entry.LoadCount == 0 {
          util.Log(1, "INFO! TFTP: Calling %v to generate %v", hook, request)
          env := config.HookEnvironment()
          env = append(env, "tftp_request="+request)
          for k, varname := range request_re[i].SubexpNames() {
            if varname == "" { continue }
            value := subs[k]
            if varname == "macaddress" {
              format_mac := func(r rune) rune {
                switch {
                  case r >= 'a' && r <= 'f': return r
                  case r >= '0' && r <= '9': return r
                  case r >= 'A' && r <= 'F': return 'a' + (r - 'A')
                }
                return -1
              }
              value = "000000000000" + strings.Map(format_mac, value)
              value = value[len(value)-12:]
              value = value[0:2] + ":" + value[2:4] + ":" + value[4:6] + ":" + value[6:8] + ":" + value[8:10] + ":" + value[10:12]
              sys, err := db.SystemGetAllDataForMAC(value, true)
              if err != nil {
                if _, not_found := err.(db.SystemNotFoundError); not_found {
                  util.Log(1, "INFO! TFTP: %v", err)
                } else {
                  util.Log(0, "ERROR! TFTP: %v", err)
                }
                // Don't abort. The hook will generate a default config.
              } else {
                // Add environment variables with the system's data for the hook
                for _, tag := range sys.Subtags() {
                  env = append(env, tag+"="+strings.Join(sys.Get(tag), "\n"))
                }
              }
            }
            env = append(env, varname+"="+value)
          }

          hook_fields := strings.Fields(hook)
          cmd := exec.Command(hook_fields[0], hook_fields[1:]...)
          cmd.Env = append(env, os.Environ()...)
          var errbuf bytes.Buffer
          defer errbuf.Reset()
          cmd.Stdout = &entry.Data
          cmd.Stderr = &errbuf
          err := cmd.Run()
          if err != nil {
            util.Log(0, "ERROR! TFTP: error executing %v: %v (%v)", hook, err, errbuf.String())
            entry.Err = err
          } else {
            if entry.Data.Len() > 512 {
              util.Log(1, "INFO! TFTP: Generated %v: %v bytes", request, entry.Data.Len())
            } else {
              util.Log(1, "INFO! TFTP: Generated %v:\n%v", request, entry.Data.String())
            }
          }
        } else {
          util.Log(1, "INFO! TFTP: Serving %v from cache", request)
        }

        entry.LoadCount++
        return entry, entry.Err
      }
    }
  }

  errentry := &bufCacheEntry{LoadCount: 1000, Err: fmt.Errorf("TFTP not configured to serve file \"%v\"", request)}
  return errentry, errentry.Err
}
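// Standalone sketch (added for illustration, not part of the original source) of the
// MAC normalization described in the getFile comment above: keep only 0-9a-f (folding
// A-F to lowercase), left-pad with 0s / truncate to the last 12 digits, then insert
// ":"s. E.g. normalizeMAC("01-02-0C29A3") == "00:01:02:0c:29:a3".
func normalizeMAC(s string) string {
  keep := func(r rune) rune {
    switch {
      case r >= '0' && r <= '9', r >= 'a' && r <= 'f': return r
      case r >= 'A' && r <= 'F': return 'a' + (r - 'A')
    }
    return -1 // drop everything else
  }
  v := "000000000000" + strings.Map(keep, s)
  v = v[len(v)-12:]
  return v[0:2] + ":" + v[2:4] + ":" + v[4:6] + ":" + v[6:8] + ":" + v[8:10] + ":" + v[10:12]
}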
// Reads the output from the program config.PackageListHookPath (LDIF) and
// uses it to replace packagedb.
// debconf is passed to the hook in the PackageListDebconf environment variable.
// See the manual section on package-list-hook for more info.
func PackageListHook(debconf string) {
  start := time.Now()
  timestamp := util.MakeTimestamp(start)

  cmd := exec.Command(config.PackageListHookPath)
  cmd.Env = append(config.HookEnvironment(), os.Environ()...)

  fairepos := []string{}
  for repo := FAIServers().First("repository"); repo != nil; repo = repo.Next() {
    fairepos = append(fairepos, fmt.Sprintf("%v||%v|%v", repo.Text("server"), repo.Text("repopath"), repo.Text("sections")))
  }

  package_list_params := []string{"PackageListDebconf=" + debconf,
    "PackageListCacheDir=" + config.PackageCacheDir,
    "PackageListFAIrepository=" + strings.Join(fairepos, " ")}
  cmd.Env = append(cmd.Env, package_list_params...)

  util.Log(1, "INFO! Running package-list-hook: %v %v", strings.Join(package_list_params, " "), config.PackageListHookPath)

  var outbuf bytes.Buffer
  defer outbuf.Reset()
  var errbuf bytes.Buffer
  defer errbuf.Reset()
  cmd.Stdout = &outbuf
  cmd.Stderr = &errbuf
  err := cmd.Run()
  if err != nil {
    util.Log(0, "ERROR! package-list-hook %v: %v (%v)", config.PackageListHookPath, err, errbuf.String())
    return
  } else if errbuf.Len() != 0 {
    // if the command prints to stderr but does not return a non-0 exit status (which
    // would result in err != nil), we just log a WARNING, but use the stdout data
    // anyway.
    util.Log(0, "WARNING! package-list-hook %v: %v", config.PackageListHookPath, errbuf.String())
  }

  plist, err := xml.LdifToHash("pkg", true, outbuf.Bytes(), packageListFormat...)
  if err != nil {
    util.Log(0, "ERROR! package-list-hook %v: %v", config.PackageListHookPath, err)
    return
  }
  if plist.First("pkg") == nil {
    util.Log(0, "ERROR! package-list-hook %v returned no data", config.PackageListHookPath)
    return
  }

  util.Log(1, "INFO! Finished package-list-hook. Running time: %v", time.Since(start))

  start = time.Now()

  plist.Rename("packagedb")
  new_mapRepoPath2FAIrelease := map[string]string{}

  accepted := 0
  total := 0

  for pkg := plist.FirstChild(); pkg != nil; pkg = pkg.Next() {
    total++
    p := pkg.Element()

    release := p.First("distribution") // packageListFormat translates "release" => "distribution"
    if release == nil {
      util.Log(0, "ERROR! package-list-hook %v returned entry without \"Release\": %v", config.PackageListHookPath, p)
      pkg.Remove()
      continue
    }

    for repopath := p.First("repository"); repopath != nil; repopath = repopath.Next() {
      new_mapRepoPath2FAIrelease[repopath.Text()] = release.Text()
    }

    pkgname := p.Get("package")
    if len(pkgname) == 0 {
      // Release/Repository groups without Package are okay, so only log an error if there is no Repository
      if p.First("repository") == nil {
        util.Log(0, "ERROR! package-list-hook %v returned entry without \"Package\": %v", config.PackageListHookPath, p)
      }
      pkg.Remove()
      continue
    }

    if len(pkgname) > 1 {
      util.Log(0, "ERROR! package-list-hook %v returned entry with multiple \"Package\" values: %v", config.PackageListHookPath, p)
      pkg.Remove()
      continue
    }

    version := p.First("version")
    if version == nil {
      util.Log(0, "WARNING! package-list-hook %v returned entry for \"%v\" without \"Version\". Assuming \"1.0\"", config.PackageListHookPath, pkgname[0])
      p.Add("version", "1.0")
    }

    section := p.First("section")
    if section == nil {
      util.Log(0, "WARNING! package-list-hook %v returned entry for \"%v\" without \"Section\". Assuming \"main\"", config.PackageListHookPath, pkgname[0])
      p.Add("section", "main")
    }

    p.FirstOrAdd("timestamp").SetText(timestamp)

    description := p.First("description")
    if description == nil {
      description = p.Add("description", pkgname[0])
      description.EncodeBase64()
    }

    // add empty <template></template> if there is no <template> element.
    if p.First("template") == nil { p.Add("template") }

    accepted++
  }

  if accepted == 0 {
    util.Log(0, "ERROR! package-list-hook %v returned no valid entries", config.PackageListHookPath)
  } else {
    util.Log(1, "INFO! package-list-hook: %v/%v entries accepted into database. Processing time: %v", accepted, total, time.Since(start))
    packagedb.Init(plist)
    mapRepoPath2FAIrelease_mutex.Lock()
    defer mapRepoPath2FAIrelease_mutex.Unlock()
    mapRepoPath2FAIrelease = new_mapRepoPath2FAIrelease
    util.Log(1, "INFO! Repository path => FAI release %v", mapRepoPath2FAIrelease)
  }
}
// Unit tests for the package github.com/mbenkmann/golib/util.
func Util_test() {
  fmt.Printf("\n==== util ===\n\n")

  addr, err := util.Resolve("1.2.3.4", "")
  check(err, nil)
  check(addr, "1.2.3.4")

  addr, err = util.Resolve("1.2.3.4:5", "")
  check(err, nil)
  check(addr, "1.2.3.4:5")

  addr, err = util.Resolve("::1:5", "")
  check(err, nil)
  check(addr, "[::1:5]")

  addr, err = util.Resolve("localhost:65535", "")
  check(err, nil)
  check(addr, "127.0.0.1:65535")

  addr, err = util.Resolve("localhost", "")
  check(err, nil)
  check(addr, "127.0.0.1")

  addr, err = util.Resolve("::1", "")
  check(err, nil)
  check(addr, "127.0.0.1")

  addr, err = util.Resolve("[::1]", "")
  check(err, nil)
  check(addr, "127.0.0.1")

  addr, err = util.Resolve("[::1]:12345", "")
  check(err, nil)
  check(addr, "127.0.0.1:12345")

  addr, err = util.Resolve("localhost:65535", "foo")
  check(err, nil)
  check(addr, "foo:65535")

  addr, err = util.Resolve("localhost", "foo")
  check(err, nil)
  check(addr, "foo")

  addr, err = util.Resolve("::1", "foo")
  check(err, nil)
  check(addr, "foo")

  addr, err = util.Resolve("[::1]", "foo")
  check(err, nil)
  check(addr, "foo")

  addr, err = util.Resolve("[::1]:12345", "foo")
  check(err, nil)
  check(addr, "foo:12345")

  addr, err = util.Resolve("", "")
  check(hasWords(err, "no", "such", "host"), "")
  check(addr, "")

  addr, err = util.Resolve(":10", "")
  check(hasWords(err, "no", "such", "host"), "")
  check(addr, ":10")

  check(util.WaitForDNS(3*time.Second), true)

  h, _ := exec.Command("hostname").CombinedOutput()
  hostname := strings.TrimSpace(string(h))
  ipp, _ := exec.Command("hostname", "-I").CombinedOutput()
  ips := strings.Fields(strings.TrimSpace(string(ipp)))
  addr, err = util.Resolve(hostname+":234", config.IP)
  check(err, nil)
  ip := ""
  for _, ip2 := range ips {
    if addr == ip2+":234" { ip = ip2 }
  }
  check(addr, ip+":234")

  testLogging()

  buf := make([]byte, 80)
  for i := range buf {
    buf[i] = byte(util_test_rng.Intn(26) + 'a')
  }

  crap1 := &crappyConnection1{}
  n, err := util.WriteAll(crap1, buf)
  check(string(*crap1), string(buf))
  check(n, len(buf))
  check(err, nil)

  crap2 := &crappyConnection2{}
  n, err = util.WriteAll(crap2, buf)
  check(string(*crap2), string(buf))
  check(n, len(buf))
  check(err, nil)

  stalled1 := &stalledConnection1{}
  n, err = util.WriteAll(stalled1, buf)
  check(string(*stalled1), string(buf[0:16]))
  check(n, 16)
  check(err, io.ErrShortWrite)

  stalled2 := &stalledConnection2{}
  n, err = util.WriteAll(stalled2, buf)
  check(string(*stalled2), string(buf[0:16]))
  check(n, 16)
  check(err, io.ErrShortWrite)

  broken := &brokenConnection{}
  n, err = util.WriteAll(broken, buf)
  check(string(*broken), string(buf[0:16]))
  check(n, 16)
  check(err, io.ErrClosedPipe)

  panicker := func() {
    foobar = "bar"
    panic("foo")
  }

  var buffy bytes.Buffer
  util.LoggersSuspend()
  util.LoggerAdd(&buffy)
  defer util.LoggersRestore()

  util.WithPanicHandler(panicker)
  time.Sleep(200 * time.Millisecond) // make sure log message is written out
  check(foobar, "bar")
  check(len(buffy.String()) > 10, true)

  listener, err := net.Listen("tcp", "127.0.0.1:39390")
  if err != nil { panic(err) }

  go func() {
    r, err := listener.Accept()
    if err != nil { panic(err) }
    buf := make([]byte, 1)
    r.Read(buf)
    time.Sleep(10 * time.Second)
    r.Read(buf)
  }()
  long := make([]byte, 10000000)
  longstr := string(long)
  buffy.Reset()
  t0 := time.Now()
  util.SendLnTo("127.0.0.1:39390", longstr, 5*time.Second)
  duration := time.Since(t0)
  check(duration > 4*time.Second && duration < 6*time.Second, true)
  time.Sleep(200 * time.Millisecond) // make sure log message is written out
  check(strings.Contains(buffy.String(), "ERROR"), true)
  go func() {
    conn, err := listener.Accept()
    if err != nil { panic(err) }
    ioutil.ReadAll(conn)
  }()
  long = make([]byte, 10000000)
  longstr = string(long)
  buffy.Reset()
  t0 = time.Now()
  util.SendLnTo("127.0.0.1:39390", longstr, 5*time.Second)
  duration = time.Since(t0)
  check(duration < 2*time.Second, true)
  time.Sleep(200 * time.Millisecond) // make sure log message is written out
  check(buffy.String(), "")

  // Test that ReadLn() times out properly
  go func() {
    _, err := net.Dial("tcp", "127.0.0.1:39390")
    if err != nil { panic(err) }
  }()
  conn, err := listener.Accept()
  if err != nil { panic(err) }
  t0 = time.Now()
  st, err := util.ReadLn(conn, 5*time.Second)
  duration = time.Since(t0)
  check(duration > 4*time.Second && duration < 6*time.Second, true)
  check(st, "")
  check(hasWords(err, "timeout"), "")

  // Test that ReadLn() returns io.EOF if last line not terminated by \n
  go func() {
    conn, err := net.Dial("tcp", "127.0.0.1:39390")
    if err != nil { panic(err) }
    conn.Write([]byte("foo\r"))
    conn.Close()
  }()
  conn, err = listener.Accept()
  if err != nil { panic(err) }
  st, err = util.ReadLn(conn, 5*time.Second)
  check(err, io.EOF)
  check(st, "foo")

  go func() {
    conn, err := net.Dial("tcp", "127.0.0.1:39390")
    if err != nil { panic(err) }
    conn.Write([]byte("\r\r\n\rfo\ro\nbar\r\nfoxtrott"))
    conn.Close()
  }()
  conn, err = listener.Accept()
  if err != nil { panic(err) }
  // Test proper trimming of multiple \r
  st, err = util.ReadLn(conn, 0)
  check(err, nil)
  check(st, "")
  // Test that the empty first line has actually been read
  // and that the next ReadLn() reads the 2nd line.
  // Also test that negative timeouts work the same as timeout==0.
  // Also test that \r is not trimmed at start and within line.
  st, err = util.ReadLn(conn, -1*time.Second)
  check(err, nil)
  check(st, "\rfo\ro")
  // Check 3rd line
  st, err = util.ReadLn(conn, 0)
  check(err, nil)
  check(st, "bar")
  // Check 4th line and io.EOF error
  st, err = util.ReadLn(conn, 0)
  check(err, io.EOF)
  check(st, "foxtrott")

  // Test that delayed reads work with timeout==0
  go func() {
    conn, err := net.Dial("tcp", "127.0.0.1:39390")
    if err != nil { panic(err) }
    time.Sleep(1 * time.Second)
    _, err = conn.Write([]byte("foo\r\n"))
    if err != nil { panic(err) }
    time.Sleep(2 * time.Second)
  }()
  conn, err = listener.Accept()
  if err != nil { panic(err) }
  t0 = time.Now()
  st, err = util.ReadLn(conn, time.Duration(0))
  duration = time.Since(t0)
  check(duration < 2*time.Second, true)
  check(duration > 800*time.Millisecond, true)
  check(err, nil)
  check(st, "foo")

  counter := util.Counter(13)
  var b1 UintArray = make([]uint64, 100)
  var b2 UintArray = make([]uint64, 100)
  done := make(chan bool)
  fill := func(b UintArray) {
    for i := 0; i < 100; i++ {
      b[i] = <-counter
      time.Sleep(1 * time.Millisecond)
    }
    done <- true
  }
  go fill(b1)
  go fill(b2)
  <-done
  <-done
  check(sort.IsSorted(&b1), true)
  check(sort.IsSorted(&b2), true)
  var b3 UintArray = make([]uint64, 200)
  i := 0
  j := 0
  k := 0
  for i < 100 || j < 100 {
    if i == 100 {
      b3[k] = b2[j]
      j++
      k++
      continue
    }
    if j == 100 {
      b3[k] = b1[i]
      i++
      k++
      continue
    }
    if b1[i] == b2[j] {
      check(b1[i] != b2[j], true)
      break
    }
    if b1[i] < b2[j] {
      b3[k] = b1[i]
      i++
    } else {
      b3[k] = b2[j]
      j++
    }
    k++
  }
  one_streak := true
  b5 := make([]uint64, 200)
  for i := 0; i < 200; i++ {
    if i < 100 && b1[i] != uint64(13+i) && b2[i] != uint64(13+i) { one_streak = false }
    b5[i] = uint64(13 + i)
  }
  check(b3, b5)
  check(one_streak, false) // Check whether goroutines were actually executed concurrently rather than in sequence

  tempdir, err := ioutil.TempDir("", "util-test-")
  if err != nil { panic(err) }
  defer os.RemoveAll(tempdir)
  fpath := tempdir + "/foo.log"
  logfile := util.LogFile(fpath)
  check(logfile.Close(), nil)
  n, err = util.WriteAll(logfile, []byte("Test"))
  check(err, nil)
  check(n, 4)
  check(logfile.Close(), nil)
  n, err = util.WriteAll(logfile, []byte("12"))
  check(err, nil)
  check(n, 2)
  n, err = util.WriteAll(logfile, []byte("3"))
  check(err, nil)
  check(n, 1)
  check(os.Rename(fpath, fpath+".old"), nil)
  n, err = util.WriteAll(logfile, []byte("Fo"))
  check(err, nil)
  check(n, 2)
  f2, _ := os.OpenFile(fpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
  f2.Write([]byte("o"))
  f2.Close()
  n, err = util.WriteAll(logfile, []byte("bar"))
  check(err, nil)
  check(n, 3)
  check(logfile.Close(), nil)

  data, err := ioutil.ReadFile(fpath)
  check(err, nil)
  if err == nil { check(string(data), "Foobar") }
  data, err = ioutil.ReadFile(fpath + ".old")
  check(err, nil)
  if err == nil { check(string(data), "Test123") }

  test_time := time.Date(2013, time.January, 20, 14, 7, 21, 0, time.Local)
  check(util.MakeTimestamp(test_time), "20130120140721")
  test_time = time.Date(2013, time.January, 20, 14, 7, 21, 0, time.UTC)
  check(util.MakeTimestamp(test_time), "20130120140721")
  test_time = time.Date(2013, time.January, 20, 14, 7, 21, 0, time.FixedZone("Fooistan", 45678))
  check(util.MakeTimestamp(test_time), "20130120140721")

  illegal := time.Unix(0, 0)
  buffy.Reset()
  check(util.ParseTimestamp(""), illegal)
  time.Sleep(200 * time.Millisecond) // make sure log message is written out
  check(strings.Contains(buffy.String(), "ERROR"), true)
  buffy.Reset()
  check(util.ParseTimestamp("20139910101010"), illegal)
  time.Sleep(200 * time.Millisecond) // make sure log message is written out
  check(strings.Contains(buffy.String(), "ERROR"), true)
  check(util.ParseTimestamp("20131110121314"), time.Date(2013, time.November, 10, 12, 13, 14, 0, time.Local))
  check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))
  test_time = test_time.Add(2400 * time.Hour)
  check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))
  test_time = test_time.Add(2400 * time.Hour)
  check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))
  test_time = test_time.Add(2400 * time.Hour)
  check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))
  test_time = test_time.Add(2400 * time.Hour)
  check(util.MakeTimestamp(util.ParseTimestamp(util.MakeTimestamp(test_time))), util.MakeTimestamp(test_time))

  diff := time.Since(util.ParseTimestamp(util.MakeTimestamp(time.Now())))
  if diff < time.Second { diff = 0 }
  check(diff, time.Duration(0))

  t0 = time.Now()
  util.WaitUntil(t0.Add(-10 * time.Second))
  util.WaitUntil(t0.Add(-100 * time.Minute))
  dur := time.Now().Sub(t0)
  if dur < 1*time.Second { dur = 0 }
  check(dur, 0)
  t0 = time.Now()
  util.WaitUntil(t0.Add(1200 * time.Millisecond))
  dur = time.Now().Sub(t0)
  if dur >= 1200*time.Millisecond && dur <= 1300*time.Millisecond { dur = 1200 * time.Millisecond }
  check(dur, 1200*time.Millisecond)

  mess := "WaitUntil(Jesus first birthday) takes forever"
  go func() {
    util.WaitUntil(time.Date(1, time.December, 25, 0, 0, 0, 0, time.UTC))
    mess = ""
  }()
  time.Sleep(100 * time.Millisecond)
  check(mess, "")

  mess = "WaitUntil(1000-11-10 00:00:00) takes forever"
  go func() {
    util.WaitUntil(time.Date(1000, time.October, 11, 0, 0, 0, 0, time.UTC))
    mess = ""
  }()
  time.Sleep(100 * time.Millisecond)
  check(mess, "")

  testBase64()
}
// Handles one or more messages received over conn. Each message is a single
// line terminated by \n. The message may be encrypted as by security.GosaEncrypt().
func handle_request(tcpconn *net.TCPConn) {
  defer tcpconn.Close()
  defer atomic.AddInt32(&ActiveConnections, -1)
  // defer util.Log(2, "DEBUG! Connection to %v closed", tcpconn.RemoteAddr())

  // util.Log(2, "DEBUG! Connection from %v", tcpconn.RemoteAddr())

  var err error

  err = tcpconn.SetKeepAlive(true)
  if err != nil {
    util.Log(0, "ERROR! SetKeepAlive: %v", err)
  }

  var buf bytes.Buffer
  defer buf.Reset()
  readbuf := make([]byte, 4096)
  var conn net.Conn
  conn = tcpconn
  n := 1

  if config.TLSServerConfig != nil {
    // If TLS is required, we need to see a STARTTLS before the timeout.
    // If TLS is optional, we need to accept idle connections for backwards compatibility.
    if config.TLSRequired {
      conn.SetDeadline(time.Now().Add(config.TimeoutTLS))
    }

    for i := range starttls {
      n, err = conn.Read(readbuf[0:1])
      if n == 0 {
        if i != 0 { // Do not log an error for a port scan that just opens a connection and closes it immediately
          util.Log(0, "ERROR! Read error while looking for STARTTLS from %v: %v", conn.RemoteAddr(), err)
        }
        return
      }

      buf.Write(readbuf[0:1])

      if readbuf[0] == '\r' && starttls[i] == '\n' {
        // Read the \n that must follow \r (we don't support lone CR line endings)
        conn.Read(readbuf[0:1]) // ignore error. It will pop up again further down the line.
      }

      if readbuf[0] != starttls[i] {
        if config.TLSRequired {
          util.Log(0, "ERROR! No STARTTLS from %v, but TLS is required", conn.RemoteAddr())
          util.WriteAll(conn, []byte(message.ErrorReply("STARTTLS is required to connect")))
          return
        }
        break
      }

      if readbuf[0] == '\n' {
        buf.Reset() // purge STARTTLS\n from buffer
        conn = tls.Server(conn, config.TLSServerConfig)
      }
    }
  }

  context := security.ContextFor(conn)
  if context == nil { return }

  for n != 0 {
    //util.Log(2, "DEBUG! Receiving from %v", conn.RemoteAddr())
    n, err = conn.Read(readbuf)
    if err != nil && err != io.EOF {
      util.Log(0, "ERROR! Read: %v", err)
    }
    if err == io.EOF {
      util.Log(2, "DEBUG! Connection closed by %v", conn.RemoteAddr())
    }
    if n == 0 && err == nil {
      util.Log(0, "ERROR! Read 0 bytes but no error reported")
    }

    // Find complete lines terminated by '\n' and process them.
    for start := 0; ; {
      eol := start
      for ; eol < n; eol++ {
        if readbuf[eol] == '\n' { break }
      }

      // no \n found, append to buf and continue reading
      if eol == n {
        buf.Write(readbuf[start:n])
        break
      }

      // append the rest of the line to the buffered contents
      buf.Write(readbuf[start:eol])
      start = eol + 1

      buf.TrimSpace()

      // process the message and get a reply (if applicable)
      if buf.Len() > 0 { // ignore empty lines
        request_start := time.Now()
        reply, disconnect := message.ProcessEncryptedMessage(&buf, context)
        buf.Reset()
        request_time := time.Since(request_start)
        RequestProcessingTimes.Push(request_time)
        request_time -= RequestProcessingTimes.Next().(time.Duration)
        atomic.AddInt64(&message.RequestProcessingTime, int64(request_time))

        if reply.Len() > 0 {
          util.Log(2, "DEBUG! Sending %v bytes reply to %v", reply.Len(), conn.RemoteAddr())
          var deadline time.Time // zero value means "no deadline"
          if config.Timeout >= 0 { deadline = time.Now().Add(config.Timeout) }
          conn.SetWriteDeadline(deadline)
          _, err := util.WriteAll(conn, reply.Bytes())
          if err != nil {
            util.Log(0, "ERROR! WriteAll: %v", err)
          }
          reply.Reset()
          util.WriteAll(conn, []byte{'\r', '\n'})
        }

        if disconnect {
          util.Log(1, "INFO! Forcing disconnect of %v because of error", conn.RemoteAddr())
          return
        }

        if Shutdown {
          util.Log(1, "INFO! Forcing disconnect of %v because of go-susi shutdown", conn.RemoteAddr())
          return
        }
      }
    }
  }

  if buf.Len() != 0 {
    util.Log(0, "ERROR! Incomplete message from %v (i.e. not terminated by \"\\n\") of %v bytes: %v", conn.RemoteAddr(), buf.Len(), buf.String())
  }
}
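// Client-side sketch (added for illustration, not part of the original source) of the
// framing handle_request expects: one message per line, terminated by '\n', with the
// reply also arriving as a single line. STARTTLS and message encryption are omitted
// here; the address, port and message content are made up for this example.
func exampleSendOneMessage() error {
  conn, err := net.Dial("tcp", "127.0.0.1:20081")
  if err != nil { return err }
  defer conn.Close()

  _, err = conn.Write([]byte("<xml><header>gosa_ping</header><source>GOSA</source><target>GOSA</target></xml>\n"))
  if err != nil { return err }

  reply, err := bufio.NewReader(conn).ReadString('\n') // server terminates its reply with \r\n
  if err != nil { return err }
  _ = reply
  return nil
}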