func (conn *CassConnection) CreateAccount(username, email, password string) (datalayer.Account, error) {
    salt := conn.dl.cfg.OptPasswordSecretSalt()
    hashCost := conn.dl.cfg.OptPasswordHashCost()
    password_hash, err := bcrypt.GenerateFromPassword([]byte(password+salt), int(hashCost))
    if err != nil {
        return nil, err
    }

    err = validateUsername(username)
    if err != nil {
        return nil, err
    }

    err = validateEmail(email)
    if err != nil {
        return nil, err
    }

    err = validatePassword(password)
    if err != nil {
        return nil, err
    }

    activation_code, err := random.Base64String(24)
    if err != nil {
        return nil, err
    }

    now := time.Now()

    // TODO: transactionize
    if err := conn.session.Query(`
            INSERT INTO accounts (
                username,
                email,
                password_hash,
                activated,
                activation_code,
                password_reset_code,
                password_reset_code_expiry)
            VALUES (?, ?, ?, ?, ?, ?, ?)
    `, username, email, password_hash, false, activation_code, "", now).Exec(); err != nil {
        canolog.Error("Error creating account:", err)
        return nil, err
    }

    if err := conn.session.Query(`
            INSERT INTO account_emails (email, username)
            VALUES (?, ?)
    `, email, username).Exec(); err != nil {
        canolog.Error("Error setting account email:", err)
        return nil, err
    }

    return &CassAccount{conn, username, email, password_hash, false, activation_code, "", now}, nil
}
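// Illustrative sketch, not part of the original source: verifying a password
// against the hash produced by CreateAccount above. Because CreateAccount
// appends the configured secret salt before hashing, verification must append
// the same salt. The function name verifyPasswordSketch is hypothetical.
func verifyPasswordSketch(account *CassAccount, password string) bool {
    salt := account.conn.dl.cfg.OptPasswordSecretSalt()
    // CompareHashAndPassword returns nil when the salted password matches the stored hash.
    err := bcrypt.CompareHashAndPassword(account.password_hash, []byte(password+salt))
    return err == nil
}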
func (conn *CassConnection) LookupAccount(usernameOrEmail string) (datalayer.Account, error) {
    var account CassAccount
    var username string

    canolog.Info("Looking up account: ", usernameOrEmail)
    if strings.Contains(usernameOrEmail, "@") {
        canolog.Info("It is an email address")
        // Email address provided. Lookup username based on email.
        err := conn.session.Query(`
                SELECT email, username FROM account_emails
                WHERE email = ?
                LIMIT 1
        `, usernameOrEmail).Consistency(gocql.One).Scan(&account.email, &username)
        if err != nil {
            canolog.Error("Error looking up account", err)
            return nil, err
        }
    } else {
        canolog.Info("It is not an email address")
        username = usernameOrEmail
    }

    canolog.Info("fetching info for: ", username)
    // Lookup account info based on username
    err := conn.session.Query(`
            SELECT username, email, password_hash, activated, activation_code,
                   password_reset_code, password_reset_code_expiry
            FROM accounts
            WHERE username = ?
            LIMIT 1
    `, username).Consistency(gocql.One).Scan(
        &account.username,
        &account.email,
        &account.password_hash,
        &account.activated,
        &account.activation_code,
        &account.password_reset_code,
        &account.password_reset_code_expiry)
    if err != nil {
        canolog.Error("Error looking up account", err)
        return nil, err
    }

    canolog.Info("Success")
    account.conn = conn
    return &account, nil
}
func (account *CassAccount) SetEmail(newEmail string) error {
    // validate new email address
    err := validateEmail(newEmail)
    if err != nil {
        return err
    }

    // generate new activation code
    newActivationCode, err := random.Base64String(24)
    if err != nil {
        return err
    }

    // TODO: transactionize
    // update accounts table
    err = account.conn.session.Query(`
            UPDATE accounts
            SET email = ?, activated = false, activation_code = ?
            WHERE username = ?
    `, newEmail, newActivationCode, account.Username()).Exec()
    if err != nil {
        canolog.Error("Error changing email address to", newEmail, ":", err)
        return err
    }

    // Update account_emails table
    // Remove old email address
    err = account.conn.session.Query(`
            DELETE FROM account_emails
            WHERE email = ?
    `, account.Email()).Exec()
    if err != nil {
        canolog.Error("Error removing old email while changing email address to", newEmail, ":", err)
        return err
    }

    // Add new email address
    err = account.conn.session.Query(`
            INSERT INTO account_emails (email, username)
            VALUES (?, ?)
    `, newEmail, account.Username()).Exec()
    if err != nil {
        canolog.Error("Error adding new email while changing email address to", newEmail, ":", err)
        return err
    }

    // update local copy
    account.activated = false
    account.activation_code = newActivationCode
    account.email = newEmail
    return nil
}
func (conn *CassConnection) DeleteAccount(username string) error {
    // TODO: We should archive the account, not actually delete it.
    // TODO: If we are really deleting it, then we need to also clean up
    // all the other data (permissions, orphaned devices, etc).
    account, err := conn.LookupAccount(username)
    if err != nil {
        canolog.Error("Error looking up account for deletion: ", err)
        return err
    }
    email := account.Email()

    // TODO: Transactionize. This might be done by adding a txn state field to
    // the table.
    err = conn.session.Query(`
            DELETE FROM device_group
            WHERE username = ?
    `, username).Exec()
    if err != nil {
        canolog.Error("Error deleting account's device groups", err)
        return err
    }

    err = conn.session.Query(`
            DELETE FROM device_permissions
            WHERE username = ?
    `, username).Exec()
    if err != nil {
        canolog.Error("Error deleting account's permissions", err)
        return err
    }

    err = conn.session.Query(`
            DELETE FROM account_emails
            WHERE email = ?
    `, email).Exec()
    if err != nil {
        canolog.Error("Error deleting account email", err)
        return err
    }

    err = conn.session.Query(`
            DELETE FROM accounts
            WHERE username = ?
    `, username).Exec()
    if err != nil {
        canolog.Error("Error deleting account", err)
        return err
    }

    return nil
}
func (outbox *PigeonOutbox) send(hostname string, request *PigeonRequest, respChan chan<- Response) error {
    resp := &PigeonResponse{}

    // Dial the server
    // TODO: Inefficient to dial each time?
    canolog.Info("RPC Dialing")
    rpcClient, err := rpc.DialHTTP("tcp", hostname+":1888")
    if err != nil {
        return fmt.Errorf("Pigeon: (dialing) %s", err.Error())
    }
    defer rpcClient.Close()

    // Make the call
    canolog.Info("RPC Calling")
    err = rpcClient.Call("PigeonServer.RPCHandleRequest", request, resp)
    if err != nil {
        canolog.Error("Pigeon: (calling) ", err.Error())
        // Send error response to channel
        respChan <- resp
        return fmt.Errorf("Pigeon: (calling) %s", err.Error())
    }

    // Send response to channel
    respChan <- resp
    return nil
}
func (conn *CassConnection) LookupDevice(deviceId gocql.UUID) (datalayer.Device, error) {
    var device CassDevice

    device.deviceId = deviceId
    device.conn = conn

    var last_seen time.Time
    var ws_connected bool

    err := conn.session.Query(`
            SELECT friendly_name, location_note, secret_key, sddl, last_seen, ws_connected
            FROM devices
            WHERE device_id = ?
            LIMIT 1`, deviceId).Consistency(gocql.One).Scan(
        &device.name,
        &device.locationNote,
        &device.secretKey,
        &device.docString,
        &last_seen,
        &ws_connected)
    if err != nil {
        canolog.Error(err)
        return nil, err
    }

    // This scan returns Jan 1, 1970 UTC if last_seen is NULL.
    if last_seen.Before(time.Unix(1, 0)) {
        device.last_seen = nil
    } else {
        device.last_seen = &last_seen
    }

    device.wsConnected = ws_connected

    if device.docString != "" {
        device.doc, err = sddl.Sys.ParseDocumentString(device.docString)
        if err != nil {
            canolog.Error("Error parsing class string for device: ", device.docString, err)
            return nil, err
        }
    } else {
        device.doc = sddl.Sys.NewEmptyDocument()
    }

    return &device, nil
}
func (device *CassDevice) ExtendSDDL(jsn map[string]interface{}) error {
    // TODO: Race condition?
    doc := device.SDDLDocument()

    err := doc.Extend(jsn)
    if err != nil {
        canolog.Error("Error extending class ", jsn, err)
        return err
    }

    // save modified SDDL class to DB
    err = device.SetSDDLDocument(doc)
    if err != nil {
        canolog.Error("Error saving SDDL: ", err)
        return err
    }

    return nil
}
func (sys *SDDLSys) ParseDocumentString(doc string) (Document, error) {
    var jsn map[string]interface{}

    err := json.Unmarshal([]byte(doc), &jsn)
    if err != nil {
        canolog.Error("Error JSON decoding SDDL document: ", doc, err)
        return nil, err
    }

    return sys.ParseDocument(jsn)
}
func (conn *CassConnection) LookupDeviceByStringID(id string) (datalayer.Device, error) {
    deviceId, err := gocql.ParseUUID(id)
    if err != nil {
        canolog.Error(err)
        return nil, err
    }
    return conn.LookupDevice(deviceId)
}
func (dl *CassDatalayer) EraseDb(keyspace string) error {
    cluster := gocql.NewCluster("127.0.0.1")
    session, err := cluster.CreateSession()
    if err != nil {
        canolog.Error("Error creating DB session: ", err)
        return err
    }
    // Close the session when done; this function creates its own session
    // rather than reusing a connection.
    defer session.Close()

    err = session.Query(`DROP KEYSPACE ` + keyspace).Exec()
    return err
}
func (dl *CassDatalayer) PrepDb(keyspace string) error {
    cluster := gocql.NewCluster("127.0.0.1")
    session, err := cluster.CreateSession()
    if err != nil {
        canolog.Error("Error creating DB session: ", err)
        return err
    }

    // Create keyspace.
    err = session.Query(`
            CREATE KEYSPACE ` + keyspace + `
            WITH REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor' : 1}
    `).Exec()
    if err != nil {
        // Ignore errors (just log them).
        canolog.Warn("(IGNORED) ", err)
    }
    // Done with the keyspace-less session.
    session.Close()

    // Create a new session connecting to that keyspace.
    cluster = gocql.NewCluster("127.0.0.1")
    cluster.Keyspace = keyspace
    cluster.Consistency = gocql.Quorum
    session, err = cluster.CreateSession()
    if err != nil {
        canolog.Error("Error creating DB session: ", err)
        return err
    }
    defer session.Close()

    // Perform all creation queries.
    for _, query := range creationQueries {
        if err := session.Query(query).Exec(); err != nil {
            // Ignore errors (just log them).
            // This allows PrepDb to be used to add new tables. Eventually, we
            // should come up with a proper migration strategy.
            canolog.Warn("(IGNORED) ", query, ": ", err)
        }
    }
    return nil
}
func (conn *CassConnection) DeleteDevice(deviceId gocql.UUID) error {
    // TODO: Should we archive the device, not actually delete it?
    device, err := conn.LookupDevice(deviceId)
    if err != nil {
        canolog.Error("Error deleting device", err)
        return err
    }

    err = conn.session.Query(`
            DELETE FROM devices
            WHERE device_id = ?
    `, device.ID()).Exec()
    if err != nil {
        canolog.Error("Error deleting from devices table", err)
        return err
    }

    // TODO: How to cleanup device_permissions?
    // TODO: transactionize
    // TODO: Cleanup cloud variable data
    return nil
}
func (dl *CassDatalayer) MigrateDB(keyspace, startVersion, endVersion string) error {
    var err error

    cluster := gocql.NewCluster("127.0.0.1")
    cluster.Keyspace = keyspace
    session, err := cluster.CreateSession()
    if err != nil {
        canolog.Error("Error creating DB session: ", err)
        return err
    }

    curVersion := startVersion
    for curVersion != endVersion {
        canolog.Info("Migrating from ", curVersion, " to next version")
        // Migrate from the current version (not startVersion), so the loop
        // actually advances toward endVersion.
        curVersion, err = dl.migrateNext(session, curVersion)
        if err != nil {
            canolog.Error("Failed migrating from ", curVersion, ": ", err)
            return err
        }
    }
    canolog.Info("Migration complete! DB is now version: ", curVersion)
    return nil
}
// Insert a cloud variable data sample.
func (device *CassDevice) InsertSample(varDef sddl.VarDef, t time.Time, value interface{}) error {
    // Convert to UTC before inserting
    t = t.UTC()

    canolog.Info("Inserting sample", varDef.Name(), t)
    // check last update time
    lastUpdateTime, err := device.varLastUpdateTime(varDef.Name())
    if err != nil {
        canolog.Error("Error inserting sample:", err.Error())
        return err
    }

    canolog.Info("Last update time was", lastUpdateTime)
    if t.Before(lastUpdateTime) {
        canolog.Error("Insertion time before last update time: ", t, lastUpdateTime)
        return fmt.Errorf("Insertion time %s before last update time %s", t, lastUpdateTime)
    }

    // update last update time
    err = device.varSetLastUpdateTime(varDef.Name(), t)
    if err != nil {
        return err
    }

    // For each LOD, insert or discard sample based on our
    // stratification algorithm.
    for lod := LOD_0; lod < LOD_END; lod++ {
        err = device.insertOrDiscardSampleLOD(varDef, lastUpdateTime, lod, t, value)
        if err != nil {
            // TODO: Transactionize/rollback?
            return err
        }
    }

    // TODO: Do we need to update in-memory device object?
    return nil
}
func (pigeonsys *CassPigeonSystem) GetListeners(key string) ([]string, error) {
    var workers []string

    rows, err := pigeonsys.conn.session.Query(`
            SELECT * FROM listeners
            WHERE key = ?
    `, key).Consistency(gocql.One).Iter().SliceMap()
    if err != nil {
        canolog.Error(err)
        return nil, err
    }

    if len(rows) != 1 {
        return nil, fmt.Errorf("Expected 1 DB row for listener %s", key)
    }

    workers = rows[0]["workers"].([]string)
    return workers, nil
}
// Carries out the side-effect actions.
// Specifically:
//
//  1) Sends emails
//  2) Appends "set-cookies" and "clear-cookies" to the response object, as
//     appropriate.
func (sideEffect *RestSideEffects) Perform(req jobqueue.Request, resp jobqueue.Response) error {
    if len(sideEffect.setCookies) > 0 {
        resp.AppendToBody("set-cookies", sideEffect.setCookies)
    }
    if len(sideEffect.clearCookies) > 0 {
        resp.AppendToBody("clear-cookies", sideEffect.clearCookies)
    }
    for _, email := range sideEffect.sendEmails {
        err := sideEffect.mailer.Send(email)
        if err != nil {
            // Log the error, but do not affect the HTTP response
            canolog.Error("Error sending email: ", err.Error())
        }
    }
    return nil
}
func (dl *CassDatalayer) Connect(keyspace string) (datalayer.Connection, error) {
    cluster := gocql.NewCluster("127.0.0.1")
    cluster.Keyspace = keyspace
    cluster.Consistency = gocql.Any

    session, err := cluster.CreateSession()
    if err != nil {
        canolog.Error("Error creating DB session: ", err)
        return nil, err
    }
    return &CassConnection{
        dl:      dl,
        session: session,
    }, nil
}
func (conn *CassConnection) LookupDeviceVerifySecretKey(deviceId gocql.UUID, secret string) (datalayer.Device, error) {
    device, err := conn.LookupDevice(deviceId)
    if err != nil {
        return nil, err
    }

    if device.SecretKey() != secret {
        canolog.Error("Invalid secret key")
        return nil, datalayer.InvalidPasswordError
    }

    return device, nil
}
// Insert a sample into the database for a particular LOD level, discarding the
// sample if the stratification chunk already contains a sample.
func (device *CassDevice) insertOrDiscardSampleLOD(varDef sddl.VarDef, lastUpdateTime time.Time, lod lodEnum, t time.Time, value interface{}) error {
    // Discard sample if it doesn't cross a stratification boundary
    stratificationSize := lodStratificationSize[lod]
    if !crossesStratificationBoundary(lastUpdateTime, t, stratificationSize) {
        // discard sample
        canolog.Info("LOD", lod, "discarded")
        return nil
    }

    // Get table name
    tableName, err := varTableNameByDatatype(varDef.Datatype())
    if err != nil {
        return err
    }

    // insert sample
    bucket := getBucket(t, lod)
    propname := varDef.Name()
    err = device.conn.session.Query(`
            INSERT INTO `+tableName+` (device_id, propname, timeprefix, time, value)
            VALUES (?, ?, ?, ?, ?)
    `, device.ID(), propname, bucket.Name(), t, value).Exec()
    if err != nil {
        return err
    }
    canolog.Info("LOD", lod, "sample inserted into bucket", bucket.Name())

    // Track new bucket (if any) for garbage collection purposes.
    // And garbage collect.
    if crossesBucketBoundary(lastUpdateTime, t, bucket.BucketSize()) {
        err := device.addBucket(propname, &bucket)
        canolog.Info("New bucket", bucket, "created")
        if err != nil {
            canolog.Error("Error adding sample bucket: ", err)
            // don't return! We still need to do garbage collection!
        }
        device.garbageCollectLOD(t, varDef, lod, false)
    }
    return nil
}
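// Illustrative sketch only; this is an assumption about how a boundary check
// like crossesStratificationBoundary could be implemented, not the project's
// actual helper. The idea: truncate both timestamps to the stratification
// period and keep the sample only when it lands in a new chunk. The name and
// the time.Duration parameter are hypothetical.
func crossesStratificationBoundarySketch(lastUpdateTime, t time.Time, size time.Duration) bool {
    // A sample survives downsampling at this LOD when it falls into a
    // different stratification chunk than the previous sample did.
    return !t.Truncate(size).Equal(lastUpdateTime.Truncate(size))
}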
// Use with care. Erases all sensor data.
func (conn *CassConnection) ClearSensorData() {
    tables := []string{
        "propval_int",
        "propval_float",
        "propval_double",
        "propval_timestamp",
        "propval_boolean",
        "propval_void",
        "propval_string",
    }
    for _, table := range tables {
        err := conn.session.Query(`TRUNCATE ` + table).Exec()
        if err != nil {
            canolog.Error("Error truncating ", table, ":", err)
        }
    }
}
// RPC entrypoint
func (server *PigeonServer) rpcHandleRequest(req *PigeonRequest, resp *PigeonResponse) (outErr error) {
    // Log crashes in the RPC code
    defer func() {
        r := recover()
        if r != nil {
            var buf [4096]byte
            n := runtime.Stack(buf[:], false)
            canolog.Error("RPC PANIC ", r, string(buf[:n]))
            canolog.Info("Recovered")
            outErr = fmt.Errorf("Crash in %s", req.ReqJobKey)
        }
    }()

    canolog.Info("RPC Handling", req.ReqJobKey)
    // Lookup the handler for that job type
    inboxes, ok := server.inboxesByMsgKey[req.ReqJobKey]
    if !ok {
        // NOT FOUND (NO INBOX LIST)
        return fmt.Errorf("Pigeon Server: No inbox for msg key %s on server %s", req.ReqJobKey, server.hostname)
    }
    if len(inboxes) == 0 {
        // NOT FOUND (NO INBOXES IN LIST)
        return fmt.Errorf("Pigeon Server: No inboxes for msg key %s on server %s", req.ReqJobKey, server.hostname)
    }

    // TODO: handle broadcast & idempotent request
    // For now, send to random inbox
    inbox := inboxes[rand.Intn(len(inboxes))]
    if inbox.handler == nil {
        return fmt.Errorf("Pigeon Server: Expected handler for msg key %s on inbox %v", req.ReqJobKey, inbox)
    }

    // Call the handler
    canolog.Info("Calling Registered handler")
    canolog.Info(req)
    canolog.Info(resp)
    canolog.Info("inbox: ", inbox)
    inbox.handler.Handle(req.ReqJobKey, inbox.userCtx, req, resp)
    canolog.Info("All done")
    return nil
}
func (conn *CassConnection) CreateDevice(name string, uuid *gocql.UUID, secretKey string, publicAccessLevel datalayer.AccessLevel) (datalayer.Device, error) {
    // TODO: validate parameters
    var id gocql.UUID
    var err error
    if uuid == nil {
        id, err = gocql.RandomUUID()
        if err != nil {
            return nil, err
        }
    } else {
        id = *uuid
    }

    if secretKey == "" {
        secretKey, err = random.Base64String(24)
        if err != nil {
            return nil, err
        }
    }

    err = conn.session.Query(`
            INSERT INTO devices (device_id, secret_key, friendly_name, public_access_level)
            VALUES (?, ?, ?, ?)
    `, id, secretKey, name, publicAccessLevel).Exec()
    if err != nil {
        canolog.Error("Error creating device:", err)
        return nil, err
    }

    return &CassDevice{
        conn:              conn,
        deviceId:          id,
        secretKey:         secretKey,
        name:              name,
        doc:               sddl.Sys.NewEmptyDocument(),
        docString:         "",
        publicAccessLevel: publicAccessLevel,
        locationNote:      "",
        wsConnected:       false,
    }, nil
}
func NewGenericRestError(statusCode int, errorType string, msg string) *GenericRestError {
    body := map[string]interface{}{
        "result":     "error",
        "error_type": errorType,
    }
    if msg != "" {
        body["error_msg"] = msg
    }

    jsonBytes, err := json.MarshalIndent(body, "", " ")
    if err != nil {
        canolog.Error("Error marshalling error response. That's ironic.", err)
        return &GenericRestError{
            statusCode:   statusCode,
            responseBody: `{"result" : "error", "error_type" : "internal_error", "error_msg" : "Error encoding error response"}`,
        }
    }

    responseBody := string(jsonBytes)
    return &GenericRestError{
        statusCode:   statusCode,
        responseBody: responseBody,
    }
}
// This handler forwards an HTTP request along as a Pigeon job.
func CanopyRestJobForwarder(
        jobKey string,
        cookieStore *sessions.CookieStore,
        allowOrigin string,
        outbox jobqueue.Outbox) http.HandlerFunc {

    return func(w http.ResponseWriter, r *http.Request) {
        // Log crashes
        defer func() {
            // (named rec so the *http.Request r is not shadowed)
            rec := recover()
            if rec != nil {
                var buf [4096]byte
                n := runtime.Stack(buf[:], false)
                canolog.Error("PANIC ", rec, string(buf[:n]))
                w.WriteHeader(http.StatusInternalServerError)
                fmt.Fprintf(w, "{\"result\" : \"error\", \"error_type\" : \"crash\"}")
            }
        }()

        // Log request
        canolog.Info("Request: ", r.Method, r.URL, " BY ", r.RemoteAddr)

        // Check for session-based AUTH
        cookieUsername := ""
        if cookieStore != nil {
            session, _ := cookieStore.Get(r, "canopy-login-session")
            cookieUsername, _ = session.Values["logged_in_username"].(string)
        }

        // Read message body
        bodyBytes, err := ioutil.ReadAll(r.Body)
        if err != nil {
            fmt.Fprintf(w, "{\"error\" : \"reading_body\"}")
            return
        }
        bodyString := string(bodyBytes)

        // Launch backend job
        payload := map[string]interface{}{
            "url-vars":        mux.Vars(r),
            "query":           r.URL.Query(), // map[string][]string
            "auth-header":     r.Header["Authorization"],
            "cookie-username": cookieUsername,
            "http-body":       bodyString,
        }
        // canolog.Info("Launching job", jobKey)
        respChan, err := outbox.Launch(jobKey, payload)
        if err != nil {
            w.WriteHeader(http.StatusInternalServerError)
            fmt.Fprintf(w, "{\"result\" : \"error\", \"error_type\" : \"failed_to_launch_job\"}")
            return
        }

        w.Header().Set("Content-Type", "application/json")
        if allowOrigin != "" {
            w.Header().Set("Access-Control-Allow-Origin", allowOrigin)
        }

        // Wait for pigeon response
        resp := (<-respChan).Body()

        // Parse pigeon response
        httpStatus, ok := resp["http-status"].(int)
        if !ok {
            w.WriteHeader(http.StatusInternalServerError)
            fmt.Fprintf(w, "{\"result\" : \"error\", \"error\" : \"Expected int http-status\"}")
            return
        }

        clearCookies, ok := resp["clear-cookies"].([]string)
        if ok {
            session, _ := cookieStore.Get(r, "canopy-login-session")
            for _, cookie := range clearCookies {
                canolog.Info("Clearing cookie: ", session, session.Values, cookie)
                session.Values[cookie] = ""
                canolog.Info("Cleared")
            }
            err := session.Save(r, w)
            if err != nil {
                w.WriteHeader(http.StatusInternalServerError)
                fmt.Fprintf(w, "{\"result\" : \"error\", \"error\" : \"error_saving_session\"}")
                return
            }
        }

        setCookies, ok := resp["set-cookies"].(map[string]string)
        if ok {
            session, _ := cookieStore.Get(r, "canopy-login-session")
            for key, value := range setCookies {
                canolog.Info("Setting cookie: ", key, ":", value)
                session.Values[key] = value
            }
            err := session.Save(r, w)
            if err != nil {
                w.WriteHeader(http.StatusInternalServerError)
                fmt.Fprintf(w, "{\"result\" : \"error\", \"error\" : \"error_saving_session\"}")
                return
            }
        }

        // Write HTTP Response
        w.WriteHeader(httpStatus)
        fmt.Fprint(w, resp["http-body"])
    }
}
func NewCanopyWebsocketServer(cfg config.Config, outbox jobqueue.Outbox, pigeonServer jobqueue.Server) func(ws *websocket.Conn) {
    // Main websocket server routine.
    // This event loop runs until the websocket connection is broken.
    return func(ws *websocket.Conn) {
        canolog.Websocket("Websocket connection established")

        var cnt int32
        var device datalayer.Device
        var inbox jobqueue.Inbox
        var inboxReciever jobqueue.RecieveHandler
        lastPingTime := time.Now()

        cnt = 0

        // connect to cassandra
        dl := cassandra_datalayer.NewDatalayer(cfg)
        conn, err := dl.Connect("canopy")
        if err != nil {
            canolog.Error("Could not connect to database: ", err)
            return
        }
        defer conn.Close()

        for {
            var in string

            // check for message from client
            ws.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
            err := websocket.Message.Receive(ws, &in)
            if err == nil {
                // success, payload received
                cnt++
                resp := service.ProcessDeviceComm(cfg, conn, device, "", "", in)
                if resp.Device == nil {
                    canolog.Error("Error processing device communications: ", resp.Err)
                } else {
                    device = resp.Device
                    if inbox == nil {
                        deviceIdString := device.ID().String()
                        inbox, err = pigeonServer.CreateInbox("canopy_ws:" + deviceIdString)
                        if err != nil {
                            canolog.Error("Error initializing inbox:", err)
                            return
                        }
                        inboxReciever = jobqueue.NewRecieveHandler()
                        inbox.SetHandler(inboxReciever)

                        err = device.UpdateWSConnected(true)
                        if err != nil {
                            canolog.Error("Unexpected error: ", err)
                        }
                    }
                }
            } else if err == io.EOF {
                canolog.Websocket("Websocket connection closed")
                // connection closed
                if inbox != nil {
                    if device != nil {
                        err = device.UpdateWSConnected(false)
                        if err != nil {
                            canolog.Error("Unexpected error: ", err)
                        }
                    }
                    inbox.Close()
                }
                return
            } else if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
                // timeout reached, no data for me this time
            } else {
                canolog.Error("Unexpected error: ", err)
            }

            // Periodically send blank message
            if time.Now().After(lastPingTime.Add(30 * time.Second)) {
                err := websocket.Message.Send(ws, "{}")
                if err != nil {
                    canolog.Websocket("Websocket connection closed during ping")
                    // connection closed
                    if inbox != nil {
                        if device != nil {
                            err = device.UpdateWSConnected(false)
                            if err != nil {
                                canolog.Error("Unexpected error: ", err)
                            }
                        }
                        inbox.Close()
                    }
                    return
                }
                canolog.Info("Pinging WS")
                lastPingTime = time.Now()
            }

            if inbox != nil {
                msg, _ := inboxReciever.Recieve(time.Duration(100 * time.Millisecond))
                if msg != nil {
                    msgString, err := json.Marshal(msg)
                    if err != nil {
                        canolog.Error("Unexpected error: ", err)
                    }
                    canolog.Info("Websocket sending", msgString)
                    canolog.Websocket("Websocket sending: ", msgString)
                    websocket.Message.Send(ws, msgString)
                }
            }
        }
    }
}
// Wrapper for handling pigeon requests that originated from
// CanopyRestJobForwarder
func RestJobWrapper(handler RestJobHandler) jobqueue.HandlerFunc {
    return func(jobKey string, userCtxItf interface{}, req jobqueue.Request, resp jobqueue.Response) {
        // This expects to receive the following over the wire from the Pigeon
        // client:
        //  {
        //      "url-vars" : map[string]string,
        //      "auth-header" : string,
        //      "cookie-username" : string,
        //      "http-body" : string,
        //  }
        //
        // This sends the following response to the Pigeon client:
        //  {
        //      "http-status" : int,
        //      "http-body" : string,
        //      "clear-cookies" : []string,
        //      "set-cookies" : map[string]string,
        //  }
        var ok bool

        defer func() {
            // Catch exceptions and return callstack
            r := recover()
            if r != nil {
                var buf [4096]byte
                runtime.Stack(buf[:], false)
                n := bytes.Index(buf[:], []byte{0})
                canolog.Error(string(buf[:n]))
                RestSetError(resp, InternalServerError(fmt.Sprint("Crash: ", string(buf[:n]))))
            }
        }()

        canolog.Info("Handling job", jobKey)
        info := &RestRequestInfo{}

        body := req.Body()
        canolog.Info("Request:", body)

        // Get URL vars from job request
        info.URLVars, ok = body["url-vars"].(map[string]string)
        if !ok {
            RestSetError(resp, InternalServerError("Expected map[string]string for 'url-vars'").Log())
            return
        }

        // Get URL query parameters from job request
        info.Query, ok = body["query"].(url.Values)
        if !ok {
            RestSetError(resp, InternalServerError("Expected url.Values for 'query'").Log())
            return
        }

        userCtx, ok := userCtxItf.(map[string]interface{})
        if !ok {
            RestSetError(resp, InternalServerError("Expected map[string]interface{} for userCtx").Log())
            return
        }

        // Get DB Connection from userCtx
        info.Conn, ok = userCtx["db-conn"].(datalayer.Connection)
        conn := info.Conn
        if !ok {
            RestSetError(resp, InternalServerError("Expected datalayer.Connection for 'db-conn'").Log())
            return
        }

        // Get Config from userCtx
        info.Config, ok = userCtx["cfg"].(config.Config)
        if !ok {
            RestSetError(resp, InternalServerError("Expected config.Config for 'cfg'").Log())
            return
        }

        // Get MailClient from userCtx
        mailer, ok := userCtx["mailer"].(mail.MailClient)
        if !ok {
            RestSetError(resp, InternalServerError("Expected MailClient for 'mailer'").Log())
            return
        }

        // Check for BASIC AUTH
        authHeader, ok := body["auth-header"].([]string)
        if !ok {
            RestSetError(resp, InternalServerError("Expected []string for 'auth-header'").Log())
            return
        }

        username_string, password, err := parseBasicAuth(authHeader)
        if err == nil {
            // was a UUID provided?
            if len(username_string) == 36 {
                device, err := info.Conn.LookupDeviceByStringID(username_string)
                if err != nil {
                    RestSetError(resp, IncorrectUsernameOrPasswordError().Log())
                    return
                }

                if device.SecretKey() != password {
                    RestSetError(resp, IncorrectUsernameOrPasswordError().Log())
                    return
                }

                info.AuthType = CANOPY_REST_AUTH_DEVICE_BASIC
                info.Device = device

                // update last_seen for this device
                err = device.UpdateLastActivityTime(nil)
                if err != nil {
                    RestSetError(resp, InternalServerError("Updating last seen time "+err.Error()).Log())
                    return
                }

                canolog.Info("Device BASIC auth provided")
            } else {
                // otherwise, assume user account username/password provided
                acct, err := conn.LookupAccountVerifyPassword(username_string, password)
                if err != nil {
                    if err == datalayer.InvalidPasswordError {
                        RestSetError(resp, IncorrectUsernameOrPasswordError().Log())
                        return
                    } else {
                        RestSetError(resp, InternalServerError("Account lookup failed").Log())
                        return
                    }
                }

                canolog.Info("Basic auth provided")
                info.AuthType = CANOPY_REST_AUTH_BASIC
                info.Account = acct
            }
        }

        // Check for session-based AUTH
        info.Cookies = make(map[string]string)
        info.Cookies["username"], ok = body["cookie-username"].(string)
        if !ok {
            RestSetError(resp, InternalServerError("Expected string for 'cookie-username'").Log())
            return
        }

        username, ok := info.Cookies["username"]
        if ok && username != "" {
            canolog.Info("Looking up account: ", username)
            acct, err := conn.LookupAccount(username)
            if err != nil {
                // TODO: Handle clear cookie logic on client side as well
                RestSetErrorClearCookies(resp, InternalServerError("Account lookup failed").Log())
                return
            }
            canolog.Info("Session auth provided")
            info.AuthType = CANOPY_REST_AUTH_SESSION
            info.Account = acct
        }

        httpBody, ok := body["http-body"].(string)
        if !ok {
            RestSetError(resp, InternalServerError("Expected string for 'http-body'").Log())
            return
        }

        // Decode httpBody JSON
        var bodyObj map[string]interface{}
        if httpBody != "" {
            decoder := json.NewDecoder(strings.NewReader(httpBody))
            err := decoder.Decode(&bodyObj)
            if err != nil {
                RestSetError(resp, BadInputError("JSON decode failed: "+err.Error()).Log())
                return
            }
        }
        info.BodyObj = bodyObj

        // Call the wrapped handler.
        sideEffects := NewRestSideEffects(mailer)
        respObj, restErr := handler(info, sideEffects)
        if restErr != nil {
            // Send the error response
            RestSetError(resp, restErr)
            return
        }

        // Marshall the success response
        jsonBytes, err := json.MarshalIndent(respObj, "", " ")
        if err != nil {
            RestSetError(resp, InternalServerError("Error JSON-encoding Response").Log())
            return
        }

        resp.SetBody(map[string]interface{}{
            "http-body":   string(jsonBytes),
            "http-status": http.StatusOK,
        })

        // Perform deferred side effects
        // This must occur after resp.SetBody
        sideEffects.Perform(req, resp)
    }
}
func main() {
    /*if true {
        passed := device_filter.RunTests()
        fmt.Println(passed)
        return
    }*/
    r := mux.NewRouter()

    cfg := config.NewDefaultConfig(buildVersion, buildDate, buildCommit)
    err := cfg.LoadConfig()
    if err != nil {
        logFilename := config.JustGetOptLogFile()
        err2 := canolog.Init(logFilename)
        if err2 != nil {
            fmt.Println(err)
            return
        }
        canolog.Info("Starting Canopy Cloud Service")
        canolog.Error("Configuration error: ", err)
        canolog.Info("Exiting")
        return
    }

    err = canolog.Init(cfg.OptLogFile())
    if err != nil {
        fmt.Println(err)
        return
    }
    canolog.Info("Starting Canopy Cloud Service")
    canolog.Info("Version:", cfg.BuildVersion())
    canolog.Info("Build Date:", cfg.BuildDate())
    canolog.Info("Build Commit:", cfg.BuildCommit())

    // Log crashes
    defer func() {
        // (named rec so the router variable r is not shadowed)
        rec := recover()
        if rec != nil {
            var buf [4096]byte
            n := runtime.Stack(buf[:], false)
            canolog.Error("PANIC ", rec, string(buf[:n]))
            panic(rec)
        }
        shutdown()
    }()

    // handle SIGINT & SIGTERM
    c := make(chan os.Signal, 1)
    c2 := make(chan os.Signal, 1)
    signal.Notify(c, os.Interrupt)
    signal.Notify(c2, syscall.SIGTERM)
    go func() {
        <-c
        canolog.Info("SIGINT received")
        shutdown()
        os.Exit(1)
    }()
    go func() {
        <-c2
        canolog.Info("SIGTERM received")
        shutdown()
        os.Exit(1)
    }()

    if cfg.OptHostname() == "" {
        canolog.Error("You must set the configuration option \"hostname\"")
        return
    }

    if cfg.OptPasswordSecretSalt() == "" {
        canolog.Error("You must set the configuration option \"password-secret-salt\"")
        return
    }

    canolog.Info(cfg.ToString())

    pigeonSys, err := jobqueue.NewPigeonSystem(cfg)
    if err != nil {
        canolog.Error("Error initializing messaging system (Pigeon):", err)
        return
    }

    pigeonServer, err := pigeonSys.StartServer("localhost") // TODO use configured host
    if err != nil {
        canolog.Error("Unable to start messaging server (Pigeon):", err)
        return
    }

    pigeonOutbox := pigeonSys.NewOutbox()

    err = jobs.InitJobServer(cfg, pigeonServer)
    if err != nil {
        canolog.Error("Unable to initialize Job Server", err)
        return
    }

    if cfg.OptForwardOtherHosts() != "" {
        canolog.Info("Requests to hosts other than ", cfg.OptHostname(), " will be forwarded to ", cfg.OptForwardOtherHosts())
        targetUrl, _ := url.Parse(cfg.OptForwardOtherHosts())
        reverseProxy := httputil.NewSingleHostReverseProxy(targetUrl)
        http.Handle("/", reverseProxy)
    } else {
        canolog.Info("No reverse proxy for other hosts configured.")
    }

    hostname := cfg.OptHostname()
    webManagerPath := cfg.OptWebManagerPath()
    jsClientPath := cfg.OptJavascriptClientPath()

    http.Handle(hostname+"/echo", websocket.Handler(ws.NewCanopyWebsocketServer(cfg, pigeonOutbox, pigeonServer)))

    webapp.AddRoutes(r)
    rest.AddRoutes(r, cfg, pigeonSys)
    http.Handle(hostname+"/", r)

    if webManagerPath != "" {
        http.Handle(hostname+"/mgr/", http.StripPrefix("/mgr/", http.FileServer(http.Dir(webManagerPath))))
    }

    if jsClientPath != "" {
        http.Handle(hostname+"/canopy-js-client/", http.StripPrefix("/canopy-js-client/", http.FileServer(http.Dir(jsClientPath))))
    }

    // Run HTTP and HTTPS servers simultaneously (if both are enabled)
    httpResultChan := make(chan error)
    httpsResultChan := make(chan error)
    if cfg.OptEnableHTTP() {
        go func() {
            httpPort := cfg.OptHTTPPort()
            srv := &http.Server{
                Addr:    fmt.Sprintf(":%d", httpPort),
                Handler: context.ClearHandler(http.DefaultServeMux),
            }
            err := srv.ListenAndServe()
            httpResultChan <- err
        }()
    }
    if cfg.OptEnableHTTPS() {
        go func() {
            httpsPort := cfg.OptHTTPSPort()
            httpsCertFile := cfg.OptHTTPSCertFile()
            httpsPrivKeyFile := cfg.OptHTTPSPrivKeyFile()

            srv := &http.Server{
                Addr:    fmt.Sprintf(":%d", httpsPort),
                Handler: context.ClearHandler(http.DefaultServeMux),
            }
            err := srv.ListenAndServeTLS(httpsCertFile, httpsPrivKeyFile)
            httpsResultChan <- err
        }()
    }

    // Exit if either server has error
    select {
    case err := <-httpResultChan:
        canolog.Error(err)
    case err := <-httpsResultChan:
        canolog.Error(err)
    }
}
func (device *CassDevice) getLatestData_generic(varname string, datatype sddl.DatatypeEnum) (*cloudvar.CloudVarSample, error) {
    var timestamp time.Time
    var sample *cloudvar.CloudVarSample

    // Get table name
    tableName, err := varTableNameByDatatype(datatype)
    if err != nil {
        return nil, err
    }

    // Get most recent LOD0 bucket
    query := device.conn.session.Query(`
            SELECT timeprefix FROM var_buckets
            WHERE device_id = ? AND var_name = ? AND lod = ?
            ORDER BY timeprefix DESC
            LIMIT 1
    `, device.ID(), varname, LOD_0).Consistency(gocql.One)
    var timeprefix string
    err = query.Scan(&timeprefix)
    if err != nil {
        canolog.Error("Error getting most recent LOD_0 bucket", err)
        return nil, err
    }

    // Get most recent sample in most recent LOD0 bucket
    query = device.conn.session.Query(`
            SELECT time, value FROM `+tableName+`
            WHERE device_id = ? AND propname = ? AND timeprefix = ?
            ORDER BY time DESC
            LIMIT 1
    `, device.ID(), varname, timeprefix).Consistency(gocql.One)

    switch datatype {
    case sddl.DATATYPE_VOID:
        err = query.Scan(&timestamp)
        sample = &cloudvar.CloudVarSample{timestamp, nil}
    case sddl.DATATYPE_STRING:
        var value string
        err = query.Scan(&timestamp, &value)
        sample = &cloudvar.CloudVarSample{timestamp, value}
    case sddl.DATATYPE_BOOL:
        var value bool
        err = query.Scan(&timestamp, &value)
        sample = &cloudvar.CloudVarSample{timestamp, value}
    case sddl.DATATYPE_INT8:
        var value int8
        err = query.Scan(&timestamp, &value)
        sample = &cloudvar.CloudVarSample{timestamp, value}
    case sddl.DATATYPE_UINT8:
        var value uint8
        err = query.Scan(&timestamp, &value)
        sample = &cloudvar.CloudVarSample{timestamp, value}
    case sddl.DATATYPE_INT16:
        var value int16
        err = query.Scan(&timestamp, &value)
        sample = &cloudvar.CloudVarSample{timestamp, value}
    case sddl.DATATYPE_UINT16:
        var value uint16
        err = query.Scan(&timestamp, &value)
        sample = &cloudvar.CloudVarSample{timestamp, value}
    case sddl.DATATYPE_INT32:
        var value int32
        err = query.Scan(&timestamp, &value)
        sample = &cloudvar.CloudVarSample{timestamp, value}
    case sddl.DATATYPE_UINT32:
        var value uint32
        err = query.Scan(&timestamp, &value)
        sample = &cloudvar.CloudVarSample{timestamp, value}
    case sddl.DATATYPE_FLOAT32:
        var value float32
        err = query.Scan(&timestamp, &value)
        sample = &cloudvar.CloudVarSample{timestamp, value}
    case sddl.DATATYPE_FLOAT64:
        var value float64
        err = query.Scan(&timestamp, &value)
        sample = &cloudvar.CloudVarSample{timestamp, value}
    case sddl.DATATYPE_DATETIME:
        var value time.Time
        err = query.Scan(&timestamp, &value)
        sample = &cloudvar.CloudVarSample{timestamp, value}
    case sddl.DATATYPE_INVALID:
        return nil, fmt.Errorf("Cannot get property values for DATATYPE_INVALID")
    default:
        return nil, fmt.Errorf("Cannot get property values for datatype %d", datatype)
    }

    if err != nil {
        return nil, fmt.Errorf("Error reading latest property value: %s", err)
    }

    return sample, nil
}
// Remove old buckets for a single cloud variable and LOD.
// Set <deleteAll> to false for normal garbage collection (only expired buckets
// are removed). Set <deleteAll> to true to delete all data, expired or not.
func (device *CassDevice) garbageCollectLOD(curTime time.Time, varDef sddl.VarDef, lod lodEnum, deleteAll bool) error {
    canolog.Info("Running garbage collection for ", varDef.Name(), "LOD", lod)

    // Get list of expired buckets for that LOD
    var bucketName string
    bucketsToRemove := []string{}
    query := device.conn.session.Query(`
            SELECT timeprefix, endtime
            FROM var_buckets
            WHERE device_id = ? AND var_name = ? AND lod = ?
            ORDER BY timeprefix DESC
    `, device.ID(), varDef.Name(), lod).Consistency(gocql.One)
    iter := query.Iter()

    var endTime time.Time

    // NOTE: As a special case, we never delete the most recent LOD0 bucket,
    // even if it has expired, because we need it for LastUpdateTime.
    skipFirst := (lod == LOD_0)
    for iter.Scan(&bucketName, &endTime) {
        // determine expiration time
        // TODO: Handle tiers
        if deleteAll || bucketExpired(curTime, endTime, TIER_STANDARD, lod) {
            if skipFirst {
                skipFirst = false
            } else {
                bucketsToRemove = append(bucketsToRemove, bucketName)
            }
        }
    }

    err := iter.Close()
    if err != nil {
        return fmt.Errorf("Error garbage collecting cloudvar: %s", err.Error())
    }

    // Remove buckets
    for _, bucketName := range bucketsToRemove {
        // Get table name
        tableName, err := varTableNameByDatatype(varDef.Datatype())
        if err != nil {
            return err
        }

        // Remove expired bucket
        canolog.Info("Removing expired bucket", varDef.Name(), bucketName)
        err = device.conn.session.Query(`
                DELETE FROM `+tableName+`
                WHERE device_id = ? AND propname = ? AND timeprefix = ?
        `, device.ID(), varDef.Name(), bucketName).Consistency(gocql.One).Exec()

        if err != nil {
            canolog.Error("Problem deleting bucket ", device.ID(), varDef.Name(), bucketName)
        } else {
            // Cleanup var_buckets table, but only if we actually deleted the
            // bucket in the previous step
            err := device.conn.session.Query(`
                    DELETE FROM var_buckets
                    WHERE device_id = ? AND var_name = ? AND lod = ? AND timeprefix = ?
            `, device.ID(), varDef.Name(), lod, bucketName).Consistency(gocql.One).Exec()
            if err != nil {
                canolog.Error("Problem cleaning var_buckets ", device.ID(), varDef.Name(), bucketName, ":", err)
            }
        }
    }
    return nil
}