Example #1
// Opens a connection to target (e.g. "foo.example.com:20081"),
// sends msg followed by \r\n.
// If keep_open == false, the connection is closed, otherwise it is
// returned together with the corresponding security.Context.
// The connection will be secured according to
// the config settings. If a certificate is configured, the connection
// will use TLS (and the key argument will be ignored). Otherwise, key
// will be used to GosaEncrypt() the message before sending it over
// a non-TLS connection.
// If an error occurs, it is logged and nil is returned even if keep_open.
func SendLnTo(target, msg, key string, keep_open bool) (net.Conn, *Context) {
	conn, err := net.Dial("tcp", target)
	if err != nil {
		util.Log(0, "ERROR! Could not connect to %v: %v\n", target, err)
		return nil, nil
	}
	if !keep_open {
		defer conn.Close()
	}

	// enable keep alive to avoid connections hanging forever in case of routing issues etc.
	err = conn.(*net.TCPConn).SetKeepAlive(true)
	if err != nil {
		util.Log(0, "ERROR! SetKeepAlive: %v", err)
		// This is not fatal => Don't abort send attempt
	}

	if config.TLSClientConfig != nil {
		conn.SetDeadline(time.Now().Add(config.TimeoutTLS)) // don't allow stalling on STARTTLS

		_, err = util.WriteAll(conn, starttls)
		if err != nil {
			util.Log(0, "ERROR! [SECURITY] Could not send STARTTLS to %v: %v\n", target, err)
			conn.Close() // even if keep_open
			return nil, nil
		}

		var no_deadline time.Time
		conn.SetDeadline(no_deadline)

		conn = tls.Client(conn, config.TLSClientConfig)
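		// Note: tls.Client() only wraps the existing connection; the actual TLS
		// handshake happens implicitly on the first Read or Write (the SendLn below).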

	} else {
		msg = GosaEncrypt(msg, key)
	}

	context := ContextFor(conn)
	if context == nil {
		conn.Close() // even if keep_open
		return nil, nil
	}

	err = util.SendLn(conn, msg, config.Timeout)
	if err != nil {
		util.Log(0, "ERROR! [SECURITY] While sending message to %v: %v\n", target, err)
		conn.Close() // even if keep_open
		return nil, nil
	}

	if keep_open {
		return conn, context
	}

	return nil, nil
}
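A minimal usage sketch, assuming a caller outside the security package; the target address, message text and module key name are illustrative placeholders, not taken from the original code:
func pingPeer() {
	conn, context := security.SendLnTo("foo.example.com:20081", "<xml><header>gosa_ping</header></xml>", "", true)
	if conn == nil {
		return // error already logged by SendLnTo(); context is nil as well
	}
	defer conn.Close()
	_ = context // e.g. consult the security.Context for access control decisions

	// Fire-and-forget variant: keep_open == false, so SendLnTo() closes the
	// connection itself; without TLS the key is used for GosaEncrypt().
	security.SendLnTo("foo.example.com:20081", "<xml><header>gosa_ping</header></xml>",
		config.ModuleKey["[GOsaPackages]"], false)
}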
Example #2
// Like gosa() but if read_reply == false, no reply will be read and the
// request hash x (with header/source/target filled in) is returned as-is.
func gosa2(typ string, x *xml.Hash, read_reply bool) *xml.Hash {
	if !strings.HasPrefix(typ, "gosa_") && !strings.HasPrefix(typ, "job_") {
		typ = "gosa_" + typ
	}
	if x.First("header") == nil {
		x.Add("header", typ)
	}
	if x.First("source") == nil {
		x.Add("source", "GOSA")
	}
	if x.First("target") == nil {
		x.Add("target", "GOSA")
	}
	conn, err := net.Dial("tcp", config.ServerSourceAddress)
	if err != nil {
		util.Log(0, "ERROR! Dial: %v", err)
		return xml.NewHash("error")
	}
	defer conn.Close()
	util.SendLn(conn, security.GosaEncrypt(x.String(), config.ModuleKey["[GOsaPackages]"]), config.Timeout)
	if read_reply {
		reply, err := util.ReadLn(conn, config.Timeout)
		reply = security.GosaDecrypt(reply, config.ModuleKey["[GOsaPackages]"])
		if err == nil {
			x, err = xml.StringToHash(reply)
		}
		if err != nil {
			x = xml.NewHash("error")
			util.Log(0, "ERROR! While reading reply in test-helpers.go:gosa(): %v\nIf this is a timeout error for a request that does not return a reply, use gosa_noreply() instead of gosa()", err)
			time.Sleep(60 * time.Second)
		}
	}
	return x
}
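A short sketch of a test calling gosa2(); the query_jobdb header, the empty where filter and the answer1 check are illustrative, not taken from the original file:
func queryAllJobs() *xml.Hash {
	x := xml.NewHash("xml")
	x.Add("where") // empty filter: match all jobs (illustrative)
	reply := gosa2("query_jobdb", x, true) // header becomes "gosa_query_jobdb"
	if reply.First("answer1") == nil {
		util.Log(1, "INFO! query_jobdb returned no answers (or an error)")
	}
	return reply
}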
Example #3
// Tell(text, ttl): Tries to send text to the client.
//                 The ttl determines how long the message will be buffered for
//                 resend attempts if sending fails. ttl values smaller than
//                 100ms will be treated as 100ms.
func (conn *ClientConnection) Tell(text string, ttl time.Duration) {
	if ttl < 100*time.Millisecond {
		ttl = 100 * time.Millisecond
	}
	util.Log(2, "DEBUG! Tell(): Queuing message for client %v with TTL %v: %v", conn.addr, ttl, text)

	msg := &ClientMessage{text, time.Now().Add(ttl)}

	go util.WithPanicHandler(func() {
		var try uint = 0

		if msg.Expires.Before(time.Now()) {
			util.Log(0, "ERROR! Scheduling of goroutine for sending message to %v delayed more than TTL %v => Message will not be sent", conn.addr, ttl)
		} else {
			for {
				if try > 0 {
					expiry := msg.Expires.Sub(time.Now())
					if expiry <= 0 {
						break
					}
					delay := (1 << try) * time.Second
					if delay > 60*time.Second {
						delay = 60 * time.Second
					}
					if delay > expiry {
						delay = expiry - 1*time.Second
					}
					if delay <= 0 {
						break
					}
					util.Log(2, "DEBUG! Sleeping %v before next send attempt", delay)
					time.Sleep(delay)
				}

				try++

				util.Log(1, "INFO! Attempt #%v to send message to %v: %v", try, conn.addr, msg.Text)

				client := db.ClientWithAddress(conn.addr)
				if client == nil {
					if conn.addr == config.ServerSourceAddress {
						// If sending to myself (e.g. new_ldap_config), fake a client object
						client = xml.NewHash("xml", "source", config.ServerSourceAddress)
						key := "" // default to empty key which signals TLS
						if config.TLSClientConfig == nil {
							key = config.ModuleKey["[ClientPackages]"]
						}
						client.Add("key", key)
					} else {
						util.Log(0, "ERROR! Client %v not found in clientdb", conn.addr)
						continue
					}
				}

				// if client is registered at a foreign server
				if client.Text("source") != config.ServerSourceAddress {
					util.Log(1, "INFO! Client %v is registered at %v => Forwarding message", conn.addr, client.Text("source"))

					// MESSAGE FORWARDING NOT YET IMPLEMENTED
					util.Log(0, "ERROR! Message forwarding not yet implemented")
					break

				} else { // if client is registered at our server

					keys := client.Get("key")
					if len(keys) == 0 {
						// This case should be impossible. A client's here_i_am message always contains a key (unless the client is buggy).
						util.Log(0, "ERROR! No key known for client %v", conn.addr)
						break
					}

					encrypted := msg.Text // default is unencrypted for TLS connection

					var tcpConn net.Conn
					var err error

					if keys[0] == "" { // TLS client
						// We just use security.SendLnTo() to establish the TLS connection
						// The empty line that is sent is ignored by the receiving go-susi.
						tcpConn, _ = security.SendLnTo(conn.addr, "", "", true)
						if tcpConn == nil {
							// Error message already logged by SendLnTo()
							continue
						}
					} else { // non-TLS client
						encrypted = security.GosaEncrypt(msg.Text, keys[0])

						tcpConn, err = net.Dial("tcp", conn.addr)
						if err != nil {
							util.Log(0, "ERROR! Dial() could not connect to %v: %v", conn.addr, err)
							continue
						}

						err = tcpConn.(*net.TCPConn).SetKeepAlive(true)
						if err != nil {
							util.Log(0, "ERROR! SetKeepAlive: %v", err)
							// This is not fatal => Don't abort send attempt
						}
					}

					if msg.Expires.Before(time.Now()) {
						util.Log(0, "ERROR! Connection to %v established, but TTL %v has expired in the meantime => Message will not be sent", conn.addr, ttl)
						tcpConn.Close()
						break
					}

					util.Log(2, "DEBUG! Sending message to %v encrypted with key %v", conn.addr, keys[0])
					err = util.SendLn(tcpConn, encrypted, config.Timeout)
					tcpConn.Close()
					if err == nil {
						util.Log(2, "DEBUG! Successfully sent message to %v: %v", conn.addr, msg.Text)
						return // not break! break would cause an error message to be logged
					} else {
						util.Log(0, "ERROR! SendLn() to %v failed: %v", conn.addr, err)
					}
				}
			}
		}

		util.Log(0, "ERROR! Cannot send message to %v: %v", conn.addr, msg.Text)
	})
}
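A caller sketch (not from the original code); how the ClientConnection is obtained is outside this excerpt, and the message text is a placeholder:
func notifyClient(conn *ClientConnection) {
	// Tell() returns immediately; the goroutine it spawns retries in the
	// background with delays of 2s, 4s, 8s, 16s, 32s, then 60s (capped),
	// but never beyond the 5 minute TTL given here.
	conn.Tell("<xml><header>usr_msg</header><usr_msg>Hello</usr_msg></xml>", 5*time.Minute)
}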
Example #4
// Must be called in a separate goroutine for each newly created PeerConnection.
// Will not return without first removing all jobs associated with the peer from
// jobdb and removing the PeerConnection itself from the connections list.
func (conn *PeerConnection) handleConnection() {
	var err error
	var pingerRunning int32

	for {
		// gosa-si puts incoming messages into incomingdb and then
		// processes them in the order they are returned by the database
		// which causes messages to be processed in the wrong order.
		// To counteract this we wait a little between messages.
		// The wait time may seem long, but even with as much as 250ms
		// I observed the fju (foreign_job_updates) for a new job and the fju that adds the
		// plainname getting mixed up. Apparently gosa-si takes time
		// in the seconds range to process messages.
		// If we have >= 10 messages backlog, we don't wait. It's likely
		// that the later messages have more recent fju data anyway.
		if !conn.IsGoSusi() && conn.queue.Count() < 10 {
			time.Sleep(1000 * time.Millisecond)
		}

		message := conn.queue.Next().(string)
		if conn.tcpConn != nil {
			err = util.SendLn(conn.tcpConn, message, config.Timeout)
		} else {
			err = peerDownError
		}

		if err != nil {
			util.Log(2, "DEBUG! handleConnection() SendLn #1 to %v failed: %v", conn.addr, err)
			if conn.tcpConn != nil {
				conn.tcpConn.Close()
			} // make sure connection is closed in case the error didn't

			// try to re-establish connection
			keys := db.ServerKeys(conn.addr)
			// If we use TLS and the peer does, too, or we don't know => use TLS
			if config.TLSClientConfig != nil && (len(keys) == 0 || keys[0] == "") {
				// We just use security.SendLnTo() to establish the TLS connection
				// The empty line that is sent is ignored by the receiving go-susi.
				conn.tcpConn, _ = security.SendLnTo(conn.addr, "", "", true)
				if conn.tcpConn == nil {
					// Unfortunately we don't have the actual error from SendLnTo(), so generate
					// a generic one.
					err = fmt.Errorf("Could not establish TLS connection to %v", conn.addr)
				}
			} else {
				conn.tcpConn, err = net.Dial("tcp", conn.addr)
				if err == nil {
					errkeepalive := conn.tcpConn.(*net.TCPConn).SetKeepAlive(true)
					if errkeepalive != nil {
						util.Log(0, "ERROR! SetKeepAlive: %v", errkeepalive)
					}
				}
			}

			if err == nil {
				util.Log(2, "DEBUG! handleConnection() re-connected to %v", conn.addr)

				conn.stopDowntime()
				go monitorConnection(conn.tcpConn, &conn.queue)
				// try to re-send message
				err = util.SendLn(conn.tcpConn, message, config.Timeout)
				if err != nil {
					util.Log(2, "DEBUG! handleConnection() SendLn #2 to %v failed: %v", conn.addr, err)
					conn.tcpConn.Close() // if resending failed, make sure connection is closed
					// NOTE: There will be no further retransmission attempts of the message.
					//       It is now lost. However if the peer comes online again, we will do
					//       a full sync to make up for any lost foreign_job_updates messages.
				} else {
					// resending succeeded => We're back in business. Do full sync.
					// If peer is not go-susi it generates a new key after re-starting,
					// so we need to send it a key, so that it understands our fju.
					// go-susi doesn't need this. See the long comment in db/serverdb.go:addServer()
					// However it is possible that the database of a go-susi has been nuked,
					// so just to be on the safe side we send new_server anyway.
					// The new_server/confirm_new_server exchange will automatically trigger
					// a full sync.
					Send_new_server("new_server", conn.addr)
				}
			}

			// If either re-establishing the connection or resending the message failed
			// we wait a little and then ping the queue which will trigger another attempt
			// to re-establish the connection.
			// We increase the wait interval based on the length of the downtime.
			// After a downtime of config.MaxPeerDowntime we give up, clean remaining
			// jobs associated with the peer from the jobdb, remove the peer from serverdb
			// and then remove this PeerConnection from the list of connections and terminate.
			//
			// NOTE: Every message that comes in due to other events will also result
			//       in an attempt to re-establish the connection. In particular if
			//       the peer actually went down and then comes up again, it should
			//       send us a new_server message which in turn will cause a Tell()
			//       with our confirm_new_server message that will cause the
			//       connection to be re-established.
			//       The ping here is only a fallback for the case where nothing
			//       happens on our end and the peer doesn't send us new_server
			//       (e.g. because its dns-lookup is disabled).
			if err != nil {
				util.Log(2, "DEBUG! handleConnection() connection to %v failed: %v", conn.addr, err)

				// start downtime if it's not already running
				if atomic.LoadInt64(&(conn.whendown)) == 0 {
					conn.startDowntime()
				}

				// if we don't already have a pinger running, start one to ping us
				// after some time to make us try connecting again.
				if atomic.LoadInt32(&pingerRunning) == 0 {
					atomic.AddInt32(&pingerRunning, 1)

					down := conn.Downtime()
					maxdelay := config.MaxPeerDowntime - down
					var delay time.Duration
					// For the first 10 minutes we try every 10s to re-establish the connection
					if maxdelay > 0 && down < 10*time.Minute {
						delay = 10 * time.Second
					} else if maxdelay > 0 && down < 24*time.Hour {
						// For the first day we try every 10 minutes
						delay = 10 * time.Minute
					} else if maxdelay > 0 {
						// Then we go to 30 minute intervals
						delay = 30 * time.Minute
					} else {
						// Finally we give up
						util.Log(2, "DEBUG! handleConnection() giving up. Removing jobs and PeerConnection for %v", conn.addr)
						db.JobsRemoveForeign(xml.FilterSimple("siserver", conn.addr))
						db.ServerRemove(conn.addr)
						connections_mutex.Lock()
						delete(connections, conn.addr)
						connections_mutex.Unlock()
						return
					}

					if delay > maxdelay {
						delay = maxdelay
					}

					// Wait and ping in the background, so that we don't miss other messages
					go func() {
						time.Sleep(delay)
						atomic.AddInt32(&pingerRunning, -1)
						conn.queue.Push("")
					}()
				}
			}
		}
	}
}
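The retry cadence buried in the branches above can be summarized as a small standalone helper; this is a sketch for illustration only (retryDelay is not part of the original code):
// retryDelay mirrors the schedule used by handleConnection() above:
// ping every 10s during the first 10 minutes of downtime, every 10 minutes
// during the first day, every 30 minutes afterwards, and give up
// (ok == false) once the downtime reaches maxPeerDowntime.
func retryDelay(down, maxPeerDowntime time.Duration) (delay time.Duration, ok bool) {
	maxdelay := maxPeerDowntime - down
	switch {
	case maxdelay <= 0:
		return 0, false
	case down < 10*time.Minute:
		delay = 10 * time.Second
	case down < 24*time.Hour:
		delay = 10 * time.Minute
	default:
		delay = 30 * time.Minute
	}
	if delay > maxdelay {
		delay = maxdelay
	}
	return delay, true
}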
Example #5
// Encrypts request with key, sends it to the peer and returns a channel
// from which the peer's reply can be received (already decrypted with
// the same key). It is guaranteed that a reply will
// be available from this channel even if the peer connection breaks
// or the peer does not reply within a certain time. In the case of
// an error, the reply will be an error reply (as returned by
// message.ErrorReply()). The returned channel will be buffered and
// the producer goroutine will close it after writing the reply. This
// means it is permissible to ignore the reply without risk of a
// goroutine leak.
// If key == "" the first key from db.ServerKeys(peer) is used.
func (conn *PeerConnection) Ask(request, key string) <-chan string {
	c := make(chan string, 1)

	if conn.err != nil {
		c <- ErrorReply(conn.err)
		close(c)
		return c
	}

	keys := db.ServerKeys(conn.addr)
	// If we use TLS and the target does, too
	if config.TLSClientConfig != nil && len(keys) > 0 && keys[0] == "" {
		key = ""
	} else if key == "" {
		if len(keys) == 0 {
			c <- ErrorReply("PeerConnection.Ask: No key known for peer " + conn.addr)
			close(c)
			return c
		}
		key = keys[0]
	}

	go util.WithPanicHandler(func() {
		defer close(c)
		var tcpconn net.Conn
		var err error
		if key == "" { // TLS
			// We just use security.SendLnTo() to establish the TLS connection
			// The empty line that is sent is ignored by the receiving go-susi.
			tcpconn, _ = security.SendLnTo(conn.addr, "", "", true)
			if tcpconn == nil {
				// Unfortunately we don't have the actual error from SendLnTo(), so generate
				// a generic one.
				err = fmt.Errorf("Could not establish TLS connection to %v", conn.addr)
			}
		} else {
			tcpconn, err = net.Dial("tcp", conn.addr)
		}

		if err != nil {
			c <- ErrorReply(err)
			// make sure handleConnection()/monitorConnection() notice that the peer is unreachable
			if conn.tcpConn != nil {
				conn.tcpConn.Close()
			}
		} else {
			defer tcpconn.Close()
			util.Log(1, "INFO! Asking %v: %v", conn.addr, request)
			encrypted := request
			if key != "" {
				encrypted = security.GosaEncrypt(request, key)
			}
			err = util.SendLn(tcpconn, encrypted, config.Timeout)
			// make sure handleConnection()/monitorConnection() notice that the peer is unreachable
			if err != nil && conn.tcpConn != nil {
				conn.tcpConn.Close()
			}
			reply, err := util.ReadLn(tcpconn, config.Timeout)
			if err != nil && err != io.EOF {
				util.Log(0, "ERROR! ReadLn(): %v", err)
			}
			if key != "" {
				reply = security.GosaDecrypt(reply, key)
			}
			if reply == "" {
				reply = ErrorReply("Communication error in Ask()")
				// make sure handleConnection()/monitorConnection() notice that the peer is unreachable
				if conn.tcpConn != nil {
					conn.tcpConn.Close()
				}
			}
			util.Log(1, "INFO! Reply from %v: %v", conn.addr, reply)
			c <- reply
		}
	})
	return c
}
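Because the channel is buffered and always receives exactly one (possibly error) reply before being closed, callers can simply block on it; a sketch with a placeholder request:
func askPeerForJobs(conn *PeerConnection) string {
	// key == "" makes Ask() pick the first key from db.ServerKeys()
	// (or use TLS if both sides are configured for it).
	reply := <-conn.Ask("<xml><header>gosa_query_jobdb</header><where></where></xml>", "")
	// reply is either the peer's decrypted answer or an ErrorReply();
	// exactly one value is always delivered, so this receive cannot hang.
	return reply
}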
Example #6
// handles an individual connection received by listen().
func handleConnection(conn net.Conn, is_client bool) {
	defer conn.Close()
	active_connections.Push(conn)
	defer active_connections.Remove(conn)

	senderIP, _, _ := net.SplitHostPort(conn.RemoteAddr().String())
	// translate loopback address to our own external IP
	if senderIP == "127.0.0.1" {
		senderIP = config.IP
	}

	conn.(*net.TCPConn).SetKeepAlive(true)

	var err error

	var buf = make([]byte, 65536)
	i := 0
	n := 1
	for n != 0 {
		n, err = conn.Read(buf[i:])
		i += n

		if err != nil && err != io.EOF {
			break
		}
		if err == io.EOF {
			err = nil
			break
		}
		if n == 0 && err == nil {
			err = fmt.Errorf("Read 0 bytes but no error reported")
			break
		}

		if i == len(buf) {
			buf_new := make([]byte, len(buf)+65536)
			copy(buf_new, buf)
			buf = buf_new
		}

		// Find complete lines terminated by '\n' and process them.
		for start := 0; ; {
			eol := bytes.IndexByte(buf[start:i], '\n')

			// no \n found, go back to reading from the connection
			// after purging the bytes processed so far
			if eol < 0 {
				copy(buf[0:], buf[start:i])
				i -= start
				break
			}

			// process the message and get a reply (if applicable)
			reply := processMessage(string(buf[start:start+eol]), senderIP, is_client)
			if reply != "" {
				util.SendLn(conn, reply, 5*time.Second)
			}
			start += eol + 1
		}
	}

	if i != 0 {
		err = fmt.Errorf("ERROR! Incomplete message (i.e. not terminated by \"\\n\") of %v bytes: %v", i, buf[0:i])
	}

	if err != nil {
		msg := queueElement{IsClientMessage: is_client}
		msg.XML = hash("error(%v)", err)
		msg.Time = time.Now()
		msg.SenderIP = senderIP

		queue_mutex.Lock()
		defer queue_mutex.Unlock()
		queue = append(queue, &msg)
	}
}
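Messages handled above are framed by a single newline; a sketch of the sending side under that framing (address and message are placeholders, not taken from the original code):
// A peer/client sketch talking to the listener served by handleConnection().
// Every message must end with '\n'; unterminated trailing data is reported
// as an "Incomplete message" error by the handler above.
func sendOneMessage() error {
	conn, err := net.Dial("tcp", "localhost:20081")
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = conn.Write([]byte("<xml><header>gosa_ping</header></xml>\n"))
	return err
}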