func connector(context *cli.Context) int {
	config, configFilename, err := lib.GetConfig(context)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to read config file: %s", err)
		return 1
	}

	logFileMaxBytes := config.LogFileMaxMegabytes * 1024 * 1024
	var logWriter io.Writer
	logWriter, err = log.NewLogRoller(config.LogFileName, logFileMaxBytes, config.LogMaxFiles)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to start log roller: %s", err)
		return 1
	}

	if context.Bool("log-to-console") {
		logWriter = io.MultiWriter(logWriter, os.Stderr)
	}
	logLevel, ok := log.LevelFromString(config.LogLevel)
	if !ok {
		fmt.Fprintf(os.Stderr, "Log level %s is not recognized", config.LogLevel)
		return 1
	}
	log.SetLevel(logLevel)
	log.SetWriter(logWriter)

	if configFilename == "" {
		log.Info("No config file was found, so using defaults")
	}

	log.Info(lib.FullName)
	fmt.Println(lib.FullName)

	if !config.CloudPrintingEnable && !config.LocalPrintingEnable {
		log.Error("Cannot run connector with both local_printing_enable and cloud_printing_enable set to false")
		return 1
	}

	if _, err := os.Stat(config.MonitorSocketFilename); !os.IsNotExist(err) {
		if err != nil {
			log.Errorf("Failed to stat monitor socket: %s", err)
		} else {
			log.Errorf(
				"A connector is already running, or the monitoring socket %s wasn't cleaned up properly",
				config.MonitorSocketFilename)
		}
		return 1
	}

	jobs := make(chan *lib.Job, 10)
	xmppNotifications := make(chan xmpp.PrinterNotification, 5)

	var g *gcp.GoogleCloudPrint
	var x *xmpp.XMPP
	if config.CloudPrintingEnable {
		xmppPingTimeout, err := time.ParseDuration(config.XMPPPingTimeout)
		if err != nil {
			log.Fatalf("Failed to parse xmpp ping timeout: %s", err)
			return 1
		}
		xmppPingInterval, err := time.ParseDuration(config.XMPPPingInterval)
		if err != nil {
			log.Fatalf("Failed to parse xmpp ping interval default: %s", err)
			return 1
		}

		g, err = gcp.NewGoogleCloudPrint(config.GCPBaseURL, config.RobotRefreshToken,
			config.UserRefreshToken, config.ProxyName, config.GCPOAuthClientID,
			config.GCPOAuthClientSecret, config.GCPOAuthAuthURL, config.GCPOAuthTokenURL,
			config.GCPMaxConcurrentDownloads, jobs)
		if err != nil {
			log.Error(err)
			return 1
		}

		x, err = xmpp.NewXMPP(config.XMPPJID, config.ProxyName, config.XMPPServer, config.XMPPPort,
			xmppPingTimeout, xmppPingInterval, g.GetRobotAccessToken, xmppNotifications)
		if err != nil {
			log.Error(err)
			return 1
		}
		defer x.Quit()
	}

	cupsConnectTimeout, err := time.ParseDuration(config.CUPSConnectTimeout)
	if err != nil {
		log.Fatalf("Failed to parse CUPS connect timeout: %s", err)
		return 1
	}
	c, err := cups.NewCUPS(config.CopyPrinterInfoToDisplayName, config.PrefixJobIDToJobTitle,
		config.DisplayNamePrefix, config.CUPSPrinterAttributes, config.CUPSMaxConnections,
		cupsConnectTimeout)
	if err != nil {
		log.Fatal(err)
		return 1
	}
	defer c.Quit()

	var s *snmp.SNMPManager
	if config.SNMPEnable {
		log.Info("SNMP enabled")
		s, err = snmp.NewSNMPManager(config.SNMPCommunity, config.SNMPMaxConnections)
		if err != nil {
			log.Error(err)
			return 1
		}
		defer s.Quit()
	}

	var priv *privet.Privet
	if config.LocalPrintingEnable {
		if g == nil {
			priv, err = privet.NewPrivet(jobs, config.GCPBaseURL, nil)
		} else {
			priv, err = privet.NewPrivet(jobs, config.GCPBaseURL, g.ProximityToken)
		}
		if err != nil {
			log.Error(err)
			return 1
		}
		defer priv.Quit()
	}

	cupsPrinterPollInterval, err := time.ParseDuration(config.CUPSPrinterPollInterval)
	if err != nil {
		log.Fatalf("Failed to parse CUPS printer poll interval: %s", err)
		return 1
	}
	pm, err := manager.NewPrinterManager(c, g, priv, s, cupsPrinterPollInterval,
		config.CUPSJobQueueSize, config.CUPSJobFullUsername, config.CUPSIgnoreRawPrinters,
		config.ShareScope, jobs, xmppNotifications)
	if err != nil {
		log.Error(err)
		return 1
	}
	defer pm.Quit()

	m, err := monitor.NewMonitor(c, g, priv, pm, config.MonitorSocketFilename)
	if err != nil {
		log.Error(err)
		return 1
	}
	defer m.Quit()

	if config.CloudPrintingEnable {
		if config.LocalPrintingEnable {
			log.Errorf("Ready to rock as proxy '%s' and in local mode", config.ProxyName)
			fmt.Printf("Ready to rock as proxy '%s' and in local mode\n", config.ProxyName)
		} else {
			log.Errorf("Ready to rock as proxy '%s'", config.ProxyName)
			fmt.Printf("Ready to rock as proxy '%s'\n", config.ProxyName)
		}
	} else {
		log.Error("Ready to rock in local-only mode")
		fmt.Println("Ready to rock in local-only mode")
	}

	waitIndefinitely()

	log.Error("Shutting down")
	fmt.Println("")
	fmt.Println("Shutting down")

	return 0
}
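
waitIndefinitely is not shown in this listing. A minimal sketch of what it plausibly does, assuming it simply blocks until the process receives an interrupt or termination signal (this helper is an illustration, not the connector's actual implementation):

import (
	"os"
	"os/signal"
	"syscall"
)

// waitIndefinitely blocks until the process receives SIGINT or SIGTERM,
// giving the deferred Quit calls above a chance to run on shutdown.
func waitIndefinitely() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	<-ch
}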
Example #2
// handleClientStateChange makes clean transitions as the connection with
// avahi-daemon changes.
//export handleClientStateChange
func handleClientStateChange(client *C.AvahiClient, newState C.AvahiClientState, userdata unsafe.Pointer) {
	z := instance
	z.spMutex.Lock()
	defer z.spMutex.Unlock()

	// Name conflict.
	if newState == C.AVAHI_CLIENT_S_COLLISION {
		log.Warning("Avahi reports a host name collision.")
	}

	// Transition from not connecting to connecting. Warn in logs.
	if newState == C.AVAHI_CLIENT_CONNECTING {
		log.Warning("Cannot find Avahi daemon. Is it running?")
	}

	// Transition from running to not running. Free all groups.
	if newState != C.AVAHI_CLIENT_S_RUNNING {
		log.Info("Local printing disabled (Avahi client is not running).")
		for name, r := range z.printers {
			if r.group != nil {
				if errstr := C.removeAvahiGroup(z.threadedPoll, r.group); errstr != nil {
					err := errors.New(C.GoString(errstr))
					log.Errorf("Failed to remove Avahi group: %s", err)
				}
				r.group = nil
				z.printers[name] = r
			}
		}
	}

	// Transition from not running to running. Recreate all groups.
	if newState == C.AVAHI_CLIENT_S_RUNNING {
		log.Info("Local printing enabled (Avahi client is running).")
		for name, r := range z.printers {
			txt := prepareTXT(r.ty, r.url, r.id, r.online)
			defer C.avahi_string_list_free(txt)

			if errstr := C.addAvahiGroup(z.threadedPoll, z.client, &r.group, r.name, C.ushort(r.port), txt); errstr != nil {
				err := errors.New(C.GoString(errstr))
				log.Errorf("Failed to add Avahi group: %s", err)
			}

			z.printers[name] = r
		}
	}

	// Transition from not failure to failure. Recreate thread poll and client.
	if newState == C.AVAHI_CLIENT_FAILURE {
		z.restart <- struct{}{}
	}

	z.state = newState
}
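
The failure branch above only signals z.restart; a separate goroutine is expected to drain that channel and rebuild the threaded poll and client. A hedged sketch of such a consumer (the zeroconf receiver type and the restartClient helper are assumptions, not shown in this listing):

// watchRestarts is a hypothetical consumer of the restart channel: it
// tears down and recreates the Avahi threaded poll and client after a failure.
func watchRestarts(z *zeroconf) {
	for range z.restart {
		log.Warning("Avahi client failed; restarting.")
		if err := restartClient(z); err != nil {
			log.Errorf("Failed to restart Avahi client: %s", err)
		}
	}
}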
Example #3
// isXMLErrorClosedConnection simplifies an xml.Decoder error.
//
// If the error is a variation of "connection closed", it logs a suitable message and returns true.
// Otherwise, returns false.
func isXMLErrorClosedConnection(err error) bool {
	if strings.Contains(err.Error(), "use of closed network connection") {
		log.Info("XMPP connection was closed")
		return true
	} else if strings.Contains(err.Error(), "connection reset by peer") {
		log.Info("XMPP connection was forcibly closed by server")
		return true
	} else if err == io.EOF {
		log.Info("XMPP connection failed")
		return true
	}
	return false
}
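
A sketch of how isXMLErrorClosedConnection would typically be used in a decoder read loop (the surrounding loop is an assumption, not part of this listing):

// readStanzas is a hypothetical read loop: it exits quietly when the
// connection was closed and escalates every other decoder error.
func readStanzas(decoder *xml.Decoder) error {
	for {
		if _, err := decoder.Token(); err != nil {
			if isXMLErrorClosedConnection(err) {
				return nil
			}
			return fmt.Errorf("XMPP read failed: %s", err)
		}
		// ...dispatch the token here...
	}
}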
func (pm *PrinterManager) syncPrinters(ignorePrivet bool) error {
	log.Info("Synchronizing printers, stand by")

	// Get current snapshot of CUPS printers.
	cupsPrinters, err := pm.cups.GetPrinters()
	if err != nil {
		return fmt.Errorf("Sync failed while calling GetPrinters(): %s", err)
	}
	if pm.ignoreRawPrinters {
		cupsPrinters, _ = lib.FilterRawPrinters(cupsPrinters)
	}

	// Augment CUPS printers with extra information from SNMP.
	if pm.snmp != nil {
		err = pm.snmp.AugmentPrinters(cupsPrinters)
		if err != nil {
			log.Warningf("Failed to augment printers with SNMP data: %s", err)
		}
	}

	// Set tags hash and CapsHash on all printers.
	for i := range cupsPrinters {
		h := adler32.New()
		lib.DeepHash(cupsPrinters[i].Tags, h)
		cupsPrinters[i].Tags["tagshash"] = fmt.Sprintf("%x", h.Sum(nil))

		h = adler32.New()
		lib.DeepHash(cupsPrinters[i].Description, h)
		cupsPrinters[i].CapsHash = fmt.Sprintf("%x", h.Sum(nil))
	}

	// Compare the snapshot to what we know currently.
	diffs := lib.DiffPrinters(cupsPrinters, pm.printers.GetAll())
	if diffs == nil {
		log.Infof("Printers are already in sync; there are %d", len(cupsPrinters))
		return nil
	}

	// Update GCP.
	ch := make(chan lib.Printer, len(diffs))
	for i := range diffs {
		go pm.applyDiff(&diffs[i], ch, ignorePrivet)
	}
	currentPrinters := make([]lib.Printer, 0, len(diffs))
	for range diffs {
		p := <-ch
		if p.Name != "" {
			currentPrinters = append(currentPrinters, p)
		}
	}

	// Update what we know.
	pm.printers.Refresh(currentPrinters)
	log.Infof("Finished synchronizing %d printers", len(currentPrinters))

	return nil
}
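
syncPrinters is typically driven on a timer at the configured printer poll interval. A minimal sketch of such a driver (the method name, interval plumbing, and quit channel are assumptions for illustration):

// syncPrintersPeriodically is a hypothetical driver: it re-syncs printers
// at a fixed interval until quit is closed.
func (pm *PrinterManager) syncPrintersPeriodically(interval time.Duration, quit <-chan struct{}) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			if err := pm.syncPrinters(false); err != nil {
				log.Errorf("Printer sync failed: %s", err)
			}
		case <-quit:
			return
		}
	}
}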
Example #5
func (m *Monitor) listen(listener net.Listener) {
	ch := make(chan net.Conn)
	quitReq := make(chan bool, 1)
	quitAck := make(chan bool)

	go func() {
		for {
			conn, err := listener.Accept()
			if err != nil {
				select {
				case <-quitReq:
					// The listener was closed intentionally; acknowledge and exit.
					quitAck <- true
					return
				default:
					log.Errorf("Error listening to monitor socket: %s", err)
				}
			} else {
				ch <- conn
			}
		}
	}()

	for {
		select {
		case conn := <-ch:
			log.Info("Received monitor request")
			stats, err := m.getStats()
			if err != nil {
				log.Warningf("Monitor request failed: %s", err)
				conn.Write([]byte("error"))
			} else {
				conn.Write([]byte(stats))
			}
			conn.Close()

		case <-m.listenerQuit:
			quitReq <- true
			listener.Close()
			<-quitAck
			m.listenerQuit <- true
			return
		}
	}
}
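
Because listen answers each accepted connection with a stats blob and then closes it, a monitor client only needs to dial the socket and read until EOF. A minimal sketch, assuming the listener is a UNIX domain socket at the configured MonitorSocketFilename:

// dumpMonitorStats is a hypothetical client for the socket served above:
// connect, read the full response, and print it.
func dumpMonitorStats(socketFilename string) error {
	conn, err := net.Dial("unix", socketFilename)
	if err != nil {
		return fmt.Errorf("failed to dial monitor socket: %s", err)
	}
	defer conn.Close()

	stats, err := ioutil.ReadAll(conn)
	if err != nil {
		return fmt.Errorf("failed to read monitor stats: %s", err)
	}
	fmt.Printf("%s\n", stats)
	return nil
}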
func (x *internalXMPP) pingPeriodically(timeout, interval time.Duration, dying <-chan struct{}) {
	t := time.NewTimer(interval)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			if success, err := x.ping(timeout); success {
				t.Reset(interval)
			} else {
				log.Info(err)
				x.Quit()
			}
		case <-dying:
			// Signal death externally.
			x.dead <- struct{}{}
			return
		}
	}
}
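
A sketch of how pingPeriodically would be started when the connection comes up (the pingTimeout, pingInterval, and dying variables are assumed wiring):

// Hypothetical startup: ping in the background until the connection
// signals that it is dying.
go x.pingPeriodically(pingTimeout, pingInterval, dying)

Using time.NewTimer with a manual Reset, as above, ensures that a slow or failed ping can never overlap with the next one, which a free-running time.Ticker would not prevent.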
func connector(context *cli.Context) int {
	config, configFilename, err := lib.GetConfig(context)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to read config file: %s", err)
		return 1
	}

	log.SetLogToConsole(context.Bool("log-to-console"))

	logLevel, ok := log.LevelFromString(config.LogLevel)
	if !ok {
		fmt.Fprintf(os.Stderr, "Log level %s is not recognized", config.LogLevel)
		return 1
	}
	log.SetLevel(logLevel)

	if configFilename == "" {
		log.Info("No config file was found, so using defaults")
	}

	log.Info(lib.FullName)
	fmt.Println(lib.FullName)

	if !config.CloudPrintingEnable && !config.LocalPrintingEnable {
		log.Fatal("Cannot run connector with both local_printing_enable and cloud_printing_enable set to false")
		return 1
	} else if config.LocalPrintingEnable {
		log.Fatal("Local printing has not been implemented in this version of the Windows connector.")
		return 1
	}

	jobs := make(chan *lib.Job, 10)
	xmppNotifications := make(chan xmpp.PrinterNotification, 5)

	var g *gcp.GoogleCloudPrint
	var x *xmpp.XMPP
	if config.CloudPrintingEnable {
		xmppPingTimeout, err := time.ParseDuration(config.XMPPPingTimeout)
		if err != nil {
			log.Fatalf("Failed to parse xmpp ping timeout: %s", err)
			return 1
		}
		xmppPingInterval, err := time.ParseDuration(config.XMPPPingInterval)
		if err != nil {
			log.Fatalf("Failed to parse xmpp ping interval default: %s", err)
			return 1
		}

		g, err = gcp.NewGoogleCloudPrint(config.GCPBaseURL, config.RobotRefreshToken,
			config.UserRefreshToken, config.ProxyName, config.GCPOAuthClientID,
			config.GCPOAuthClientSecret, config.GCPOAuthAuthURL, config.GCPOAuthTokenURL,
			config.GCPMaxConcurrentDownloads, jobs)
		if err != nil {
			log.Fatal(err)
			return 1
		}

		x, err = xmpp.NewXMPP(config.XMPPJID, config.ProxyName, config.XMPPServer, config.XMPPPort,
			xmppPingTimeout, xmppPingInterval, g.GetRobotAccessToken, xmppNotifications)
		if err != nil {
			log.Fatal(err)
			return 1
		}
		defer x.Quit()
	}

	ws, err := winspool.NewWinSpool(config.PrefixJobIDToJobTitle, config.DisplayNamePrefix, config.PrinterBlacklist)
	if err != nil {
		log.Fatal(err)
		return 1
	}

	nativePrinterPollInterval, err := time.ParseDuration(config.NativePrinterPollInterval)
	if err != nil {
		log.Fatalf("Failed to parse printer poll interval: %s", err)
		return 1
	}
	pm, err := manager.NewPrinterManager(ws, g, nil, nil, nativePrinterPollInterval,
		config.NativeJobQueueSize, false, false, config.ShareScope, jobs, xmppNotifications)
	if err != nil {
		log.Fatal(err)
		return 1
	}
	defer pm.Quit()

	// Local printing was rejected above, so cloud printing must be enabled here.
	log.Infof("Ready to rock as proxy '%s'", config.ProxyName)
	fmt.Printf("Ready to rock as proxy '%s'\n", config.ProxyName)

	waitIndefinitely()

	log.Info("Shutting down")
	fmt.Println("")
	fmt.Println("Shutting down")

	return 0
}