// NewBot creates a new instance of Bot func NewBot(host, nick string, options ...func(*Bot)) (*Bot, error) { // Defaults are set here bot := Bot{ Incoming: make(chan *Message, 16), outgoing: make(chan string, 16), started: time.Now(), unixastr: fmt.Sprintf("@%s-%s/bot", host, nick), Host: host, Nick: nick, ThrottleDelay: 200 * time.Millisecond, PingTimeout: 300 * time.Second, HijackSession: false, SSL: false, SASL: false, Channels: []string{"#test"}, Password: "", } for _, option := range options { option(&bot) } // Discard logs by default bot.Logger = log.New("id", logext.RandId(8), "host", bot.Host, "nick", log.Lazy{bot.getNick}) bot.Logger.SetHandler(log.DiscardHandler()) bot.AddTrigger(pingPong) bot.AddTrigger(joinChannels) return &bot, nil }
// NewServer constructs a new SIFT Server, using the SIFT database at the // provided path (or creating a new one it does not exist). Be sure to start // the Server with Serve() func NewServer(dbpath string) (*Server, error) { newDB, err := db.Open(dbpath) if err != nil { return nil, fmt.Errorf("could not open sift db: %v", err) } authorizor := auth.New() notifier := notif.New(authorizor) return &Server{ SiftDB: newDB, dbpath: dbpath, Authorizor: authorizor, Provider: notifier, Receiver: notifier, factoriesByDescriptionID: make(map[string]adapter.Factory), adapters: make(map[string]adapter.Adapter), updatesFromAdapters: make(chan updatePackage, updateChanWidth), prioritizer: lib.NewPrioritizer(nil), // uses default sorting ipv4Scan: ipv4.NewContinousScanner(ipv4ScanFrequency), stop: make(chan struct{}), stopped: make(chan struct{}), log: Log.New("obj", "server", "id", logext.RandId(8)), }, nil }
func (st *SyncTable) triggerHandler() func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { q := r.URL.Query() vars := mux.Vars(r) ns := vars["ns"] log := st.log.New("trigger_id", logext.RandId(6), "ns", ns) url := q.Get("url") log.Info("Starting sync...", "url", url) apiKey := q.Get("api_key") rawState := st.generateTree(ns) defer rawState.Close() state := &State{ Namespace: ns, Root: rawState.Root(), Count: rawState.Count(), Leafs: rawState.Level1(), } client := NewSyncTableClient(state, st.blobstore, st.nsdb, ns, url, apiKey, st.blobs) stats, err := client.Sync() if err != nil { panic(err) } httputil.WriteJSON(w, stats) } }
// New creates a new notifier. func New(authorizor auth.Authorizor) *Notifier { return &Notifier{ authorizor: authorizor, lock: &sync.RWMutex{}, channelLocks: make(map[chan interface{}]sync.Mutex), filtersByChanel: make(map[chan interface{}][]interface{}), authTokenByChannel: make(map[chan interface{}]auth.Token), allNotificationListeners: make(map[chan interface{}]ActionsMask), // Indices for Drivers driverListenersFilteredByType: make(map[string]map[chan interface{}]ActionsMask), driverListenersFilteredByID: make(map[string]map[chan interface{}]ActionsMask), unfilteredDriverListeners: make(map[chan interface{}]ActionsMask), // Indices for Devices deviceListenersFilteredByType: make(map[string]map[chan interface{}]ActionsMask), deviceListenersFilteredByID: make(map[types.DeviceID]map[chan interface{}]ActionsMask), unfilteredDeviceListeners: make(map[chan interface{}]ActionsMask), componentListenersFilteredByType: make(map[string]map[chan interface{}]ActionsMask), componentListenersFilteredByID: make(map[types.ComponentID]map[chan interface{}]ActionsMask), unfilteredComponentListeners: make(map[chan interface{}]ActionsMask), log: Log.New("obj", "notifier", "id", logext.RandId(8)), } }
// NewAllAtOnceDiffer properly instantiates an AllAtOnceDiffer func NewAllAtOnceDiffer() *AllAtOnceDiffer { return &AllAtOnceDiffer{ lastKnownDevices: make(map[types.ExternalDeviceID]types.Device), lock: &sync.Mutex{}, log: Log.New("obj", "differ", "id", logext.RandId(8)), } }
// AppHandler returns the HTTP handler that executes a registered Lua app.
// It looks the app up by the "appID" route var, emits identifying response
// headers, enforces the app's ACL, runs the script, and records per-status
// request stats on the app.
func (lua *LuaExt) AppHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		// Try to fetch the app
		appID := vars["appID"]
		lua.appMutex.Lock()
		// NOTE(review): this defer holds appMutex for the ENTIRE request,
		// including lua.exec below, so all app requests are serialized.
		// The "release the mutex" comment before the script copy suggests
		// the lock was meant to be dropped before execution — confirm intent.
		defer lua.appMutex.Unlock()
		app, ok := lua.registeredApps[appID]
		if !ok {
			// Unknown app IDs are treated as a programming/registration error.
			panic("no such app")
		}
		// Per-request ID used both for logging and the response header.
		reqID := logext.RandId(8)
		reqLogger := lua.logger.New("reqID", reqID, "appID", appID)
		reqLogger.Info("Starting", "app", app.String())
		w.Header().Add("BlobStash-App-ID", appID)
		w.Header().Add("BlobStash-App-Req-ID", reqID)
		w.Header().Add("BlobStash-App-Script-Hash", app.Hash)
		// Out the hash script on HEAD request to allow app manager/owner
		// to verify if the script exists, and compare local version
		if r.Method == "HEAD" {
			reqLogger.Debug("HEAD request, aborting...")
			return
		}
		// Check the app ACL: private apps require the request to pass authFunc.
		if !app.Public && !lua.authFunc(r) {
			w.Header().Set("WWW-Authenticate", "Basic realm=\""+app.AppID+"\"")
			http.Error(w, "Not Authorized", http.StatusUnauthorized)
			return
		}
		// Copy the script so we can release the mutex
		script := make([]byte, len(app.Script))
		copy(script[:], app.Script[:])
		// Execute the script, timing it for the stats and response header.
		start := time.Now()
		status := strconv.Itoa(lua.exec(reqLogger, app, appID, reqID, string(script), w, r))
		// Increment the internal stats. Re-fetch the app in case the registry
		// changed while the script ran.
		// NOTE(review): assumes registeredApps stores pointers — if it holds
		// values, these stat updates mutate a copy; verify the map's type.
		app, ok = lua.registeredApps[appID]
		if !ok {
			panic("App seems to have been deleted")
		}
		app.Stats.Requests++
		if _, ok := app.Stats.Statuses[status]; !ok {
			app.Stats.Statuses[status] = 1
		} else {
			app.Stats.Statuses[status]++
		}
		app.Stats.TotalTime += time.Since(start)
		w.Header().Add("BlobStash-App-Script-Execution-Time", time.Since(start).String())
	}
}
// NewServer properly instantiates a Server func NewServer(port uint16) *Server { return &Server{ port: uint16(port), devices: make(map[string]Device), notify: make(chan struct{}, 10), listeners: make([]chan bool, 0), log: Log.New("obj", "example_server", "id", logext.RandId(8)), stop: make(chan struct{}), } }
// NewPrioritizer properly instantiates a Prioritizer func NewPrioritizer(sortFns []lessFunc) *Prioritizer { if sortFns == nil { sortFns = defaultLessFuncs } return &Prioritizer{ dest: make(chan interface{}, outputChanLen), sortFns: sortFns, adapterChannelsByToken: make(map[string]chan interface{}), rankedAdapterIDsByDeviceID: make(map[types.ExternalDeviceID][]string), rankedAdapterDescsByDeviceID: make(map[types.ExternalDeviceID][]AdapterDescription), rlock: &sync.Mutex{}, log: Log.New("obj", "prioritizer", "id", logext.RandId(8)), } }
func newIPv4Adapter(context *ipv4.ServiceContext) *ipv4Adapter { log := Log.New("obj", "Connected By TCP IPv4 Adapter", "id", logext.RandId(8), "adapting", context.IP.String()) adapter := &ipv4Adapter{ updateChan: make(chan interface{}, 100), context: context, differ: lib.NewAllAtOnceDiffer(), stop: make(chan struct{}), log: log, } if err := adapter.differ.SetOutput(adapter.updateChan); err != nil { panic(fmt.Sprintf("newAdapter() could not set output: %v", err)) } go adapter.Serve() return adapter }
func newAdapter(port uint16, context *ipv4.ServiceContext) *ipv4Adapter { log := Log.New("obj", "example ipv4 adapter", "id", logext.RandId(8), "adapting", context.IP.String()) log.Info("example adapter created") adapter := &ipv4Adapter{ port: port, updateChan: make(chan interface{}, 100), context: context, differ: lib.NewAllAtOnceDiffer(), stop: make(chan struct{}), debgForceRefresh: make(chan struct{}, 10), log: log, } if err := adapter.differ.SetOutput(adapter.updateChan); err != nil { panic(fmt.Sprintf("newAdapter() could not set output: %v", err)) } go adapter.Serve() return adapter }
// NewScanner properly instantiates a Scanner. func NewScanner() *Scanner { s := &Scanner{ interfaces: make(map[string]net.Interface), ilock: sync.RWMutex{}, descriptionsByID: make(map[string]ServiceDescription), dlock: sync.RWMutex{}, activeServicesByIP: make(map[string]struct{}), slock: &sync.RWMutex{}, log: Log.New("obj", "ipv4.scanner", "id", logext.RandId(8)), } err := s.refreshInterfaces() if err != nil { panic("ipv4.NewScanner(): scanner could not refresh interfaces: " + err.Error()) } return s }
// Open opens the SIFT database at the provided file path. If the file does not // exist, it is created and initialized with the SIFT schema. func Open(pathToDBFile string) (*SiftDB, error) { var tempFile *os.File // will be populated if caller requests a temporary file switch pathToDBFile { case "": file, err := ioutil.TempFile(os.TempDir(), "siftdb_") if err != nil { return nil, fmt.Errorf("could not open temporary DB file: %v", err) } tempFile = file pathToDBFile = file.Name() case ":memory:": return nil, fmt.Errorf("SiftDB cannot be opened with :memory:") } // Open a connection to the file at the specified path db, err := sqlx.Connect("sqlite3", pathToDBFile) if err != nil { Log.Error("could not open database", "err", err, "filename", pathToDBFile) return nil, fmt.Errorf("could not open database at path %v: %v", pathToDBFile, err) } defer db.Close() // Check that this is a valid SIFT DB. If it isn't, initialize it. if validErr := isDBValid(db); validErr != nil { Log.Debug("could not validate database; this may be a new file. Initializing", "filename", pathToDBFile, "validation_error", validErr) if err := dbInitByGoFile(db); err != nil { return nil, fmt.Errorf("error initializing sift DB: %v", err) } } return &SiftDB{ dbpath: pathToDBFile, tempFile: tempFile, log: Log.New("obj", "components_db", "id", logext.RandId(8)), }, nil }
// query returns a JSON list as []byte for the given query.
// Docs are unmarshalled to JSON only when needed (i.e. only when a query
// must be matched against the document's fields).
//
// Pagination starts at `cursor` and the key-value store is scanned in
// batches of `limit` keys; `stats` records examined/returned counts and
// timing.
func (docstore *DocStoreExt) query(collection string, query map[string]interface{}, cursor string, limit int) ([]byte, *executionStats, error) {
	js := []byte("[")
	tstart := time.Now()
	stats := &executionStats{}
	// Key range for the scan: from the cursor up to the end of the collection.
	start := fmt.Sprintf(KeyFmt, collection, cursor)
	end := fmt.Sprintf(KeyFmt, collection, "\xff")
	if query == nil || len(query) == 0 {
		// Prefetch more docs since there's a lot of chance the query will
		// match every documents
		limit = int(float64(limit) * 1.3)
	}
	qLogger := docstore.logger.New("query", query, "id", logext.RandId(8))
	qLogger.Info("new query")
	var noQuery bool
	if len(query) == 0 {
		noQuery = true
	}
	var lastKey string
	for {
		qLogger.Debug("internal query", "limit", limit, "start", start, "end", end, "nreturned", stats.NReturned)
		res, err := docstore.kvStore.Keys(start, end, limit) // Prefetch more docs
		if err != nil {
			// NOTE(review): the signature returns an error, but all failures
			// here panic instead — confirm callers expect panics.
			panic(err)
		}
		for _, kv := range res {
			jsPart := []byte{}
			_id := hashFromKey(collection, kv.Key)
			if _, err := docstore.fetchDoc(collection, _id, &jsPart); err != nil {
				panic(err)
			}
			stats.TotalDocsExamined++
			if noQuery {
				// No query, so we just add every docs
				js = append(js, addID(jsPart, _id)...)
				js = append(js, []byte(",")...)
				stats.NReturned++
				stats.LastID = _id
				if stats.NReturned == limit {
					// NOTE(review): this break skips the lastKey update below,
					// and if len(res) == limit the outer loop continues with a
					// stale lastKey and NReturned already at limit (so the
					// == limit check can never fire again) — looks like the
					// result set can overrun `limit`; verify.
					break
				}
			} else {
				// A query is present: decode the doc and test it.
				doc := map[string]interface{}{}
				if err := json.Unmarshal(jsPart, &doc); err != nil {
					panic(err)
				}
				if matchQuery(qLogger, query, doc) {
					js = append(js, addID(jsPart, _id)...)
					js = append(js, []byte(",")...)
					stats.NReturned++
					stats.LastID = _id
					if stats.NReturned == limit {
						break
					}
				}
			}
			lastKey = kv.Key
		}
		// A short (or empty) batch means the key range is exhausted.
		if len(res) == 0 || len(res) < limit {
			break
		}
		start = nextKey(lastKey)
	}
	if stats.NReturned > 0 {
		// Drop the trailing comma left by the append loop.
		js = js[0 : len(js)-1]
	}
	duration := time.Since(tstart)
	qLogger.Debug("scan done", "duration", duration, "nReturned", stats.NReturned, "scanned", stats.TotalDocsExamined)
	stats.ExecutionTimeMillis = int(duration.Nanoseconds() / 1e6)
	js = append(js, []byte("]")...)
	return js, stats, nil
}
func (st *SyncTable) syncHandler() func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { w.WriteHeader(http.StatusMethodNotAllowed) return } vars := mux.Vars(r) ns := vars["ns"] log := st.log.New("sync_id", logext.RandId(6), "ns", ns) log.Info("sync triggered") state := st.generateTree(ns) defer state.Close() local_state := &State{ Namespace: ns, Root: state.Root(), Leafs: state.Level1(), Count: state.Count(), } log.Debug("local state computed", "local_state", local_state.String()) remote_state := &State{} if err := json.NewDecoder(r.Body).Decode(remote_state); err != nil { panic(err) } log.Debug("remote state decoded", "remote_state", remote_state.String()) // First check the root, if the root hash is the same, then we can't stop here, we are in sync. if local_state.Root == remote_state.Root { log.Debug("No sync needed") w.WriteHeader(http.StatusNoContent) return } // The root differs, found out the leafs we need to inspect leafsNeeded := []string{} leafsToSend := []string{} leafsConflict := []string{} for lleaf, lh := range local_state.Leafs { if rh, ok := remote_state.Leafs[lleaf]; ok { if lh != rh { leafsConflict = append(leafsConflict, lleaf) } } else { // This leaf is only present locally, we can send blindly all the blobs belonging to this leaf leafsToSend = append(leafsToSend, lleaf) // If an entire leaf is missing, this means we can send/receive the entire hashes for the missing leaf } } // Find out the leafs present only on the remote-side for rleaf, _ := range remote_state.Leafs { if _, ok := local_state.Leafs[rleaf]; !ok { leafsNeeded = append(leafsNeeded, rleaf) } } httputil.WriteJSON(w, map[string]interface{}{ "conflicted": leafsConflict, "needed": leafsNeeded, "missing": leafsToSend, }) } }
// New creates a new SiftAuthorizor func New() *SiftAuthorizor { return &SiftAuthorizor{ log: Log.New("obj", "authorizor", "id", logext.RandId(8)), } }