Example #1
0
// Fetch historic time series data for a cloud variable. The resolution is
// automatically selected.
func (device *CassDevice) HistoricData(
	varDef sddl.VarDef,
	curTime,
	startTime,
	endTime time.Time) ([]cloudvar.CloudVarSample, error) {

	canolog.Info("Fetching historic data for", varDef.Name(), startTime, endTime)

	// Figure out which resolution to use.
	// Pick the highest resolution that covers the entire requested period.
	var lod lodEnum
	for lod = LOD_0; lod < LOD_END; lod++ {
		lodDuration := cloudVarLODDuration(TIER_STANDARD, lod)
		// TODO: Should we use curTime or lastUpdateTime for this?
		if startTime.After(curTime.Add(-lodDuration)) {
			break
		}
	}
	if lod == LOD_END {
		lod = LOD_5
	}

	canolog.Info("Using LOD", lod)

	// Fetch the data from that LOD
	return device.historicDataLOD(varDef, startTime, endTime, lod)
}
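A quick usage sketch (not from the source; lastDayOfSamples is a hypothetical helper): fetching the most recent 24 hours of samples for a cloud variable.

// Usage sketch (assumption): fetch the last 24 hours of samples for one
// cloud variable; HistoricData picks the LOD internally.
func lastDayOfSamples(device *CassDevice, varDef sddl.VarDef) ([]cloudvar.CloudVarSample, error) {
	now := time.Now().UTC()
	return device.HistoricData(varDef, now, now.Add(-24*time.Hour), now)
}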
Example #2
0
func (outbox *PigeonOutbox) Launch(key string, payload map[string]interface{}) (<-chan Response, error) {
	canolog.Info("Launching ", key)

	req := PigeonRequest{
		ReqJobKey: key,
		ReqBody:   payload,
	}

	// Get list of all workers interested in this key
	serverHosts, err := outbox.sys.dl.GetListeners(key)
	if err != nil {
		return nil, err
	}

	if len(serverHosts) == 0 {
		canolog.Info("No listeners found", key)
		return nil, fmt.Errorf("Pigeon: No listeners found for %s", key)
	}

	// For now, pick one at random
	serverHost := serverHosts[rand.Intn(len(serverHosts))]

	canolog.Info("Making RPC call ", key)
	respChan := make(chan Response)
	go outbox.send(serverHost, &req, respChan)
	canolog.Info("Returned from send", key)

	return respChan, nil
}
Example #3
0
func (outbox *PigeonOutbox) send(hostname string, request *PigeonRequest, respChan chan<- Response) error {
	resp := &PigeonResponse{}

	// Dial the server
	// TODO: Inefficient to dial each time?
	canolog.Info("RPC Dialing")
	rpcClient, err := rpc.DialHTTP("tcp", hostname+":1888")
	if err != nil {
		return fmt.Errorf("Pigeon: (dialing) %s", err.Error())
	}
	defer rpcClient.Close()

	// Make the call
	canolog.Info("RPC Calling")
	err = rpcClient.Call("PigeonServer.RPCHandleRequest", request, resp)
	if err != nil {
		canolog.Error("Pigeon: (calling) ", err.Error())
		// Send error response to channel
		respChan <- resp
		return fmt.Errorf("Pigeon: (calling) %s", err.Error())
	}

	// Send response to channel
	respChan <- resp

	return nil
}
func (conn *CassConnection) LookupAccount(
	usernameOrEmail string) (datalayer.Account, error) {
	var account CassAccount
	var username string

	canolog.Info("Looking up account: ", usernameOrEmail)

	if strings.Contains(usernameOrEmail, "@") {
		canolog.Info("It is an email address")
		// email address provided.  Lookup username based on email
		err := conn.session.Query(`
                SELECT email, username FROM account_emails
                WHERE email = ?
                LIMIT 1
        `, usernameOrEmail).Consistency(gocql.One).Scan(
			&account.email, &username)

		if err != nil {
			canolog.Error("Error looking up account", err)
			return nil, err
		}
	} else {
		canolog.Info("It is not an email address")
		username = usernameOrEmail
	}

	canolog.Info("fetching info for: ", username)
	// Lookup account info based on username
	err := conn.session.Query(`
            SELECT 
                username, 
                email, 
                password_hash, 
                activated, 
                activation_code, 
                password_reset_code, 
                password_reset_code_expiry 
            FROM accounts 
            WHERE username = ?
            LIMIT 1
    `, username).Consistency(gocql.One).Scan(
		&account.username,
		&account.email,
		&account.password_hash,
		&account.activated,
		&account.activation_code,
		&account.password_reset_code,
		&account.password_reset_code_expiry)

	if err != nil {
		canolog.Error("Error looking up account", err)
		return nil, err
	}

	canolog.Info("Success")
	account.conn = conn
	return &account, nil
}
Example #5
0
func (server *PigeonServer) RPCHandleRequest(req *PigeonRequest, resp *PigeonResponse) error {
	// defer does not seem to work correctly inside main RPC routine.  So this
	// is our workaround.
	err := server.rpcHandleRequest(req, resp)
	canolog.Info("Leaving RPCHandleRequest")
	return err
}
Example #6
0
// Backend implementation of the /api/activate endpoint.
// Activates a user account (i.e., email address confirmation).
//
func ApiActivateHandler(info *RestRequestInfo, sideEffects *RestSideEffects) (map[string]interface{}, RestError) {
	canolog.Info("api/activate REST job started")
	if info.Account == nil {
		return nil, NotLoggedInError().Log()
	}

	username, ok := info.BodyObj["username"].(string)
	if !ok {
		return nil, BadInputError(`String "username" expected`).Log()
	}

	code, ok := info.BodyObj["code"].(string)
	if !ok {
		return nil, BadInputError(`String "code" expected`).Log()
	}

	err := info.Account.Activate(username, code)
	if err != nil {
		// TODO: Report InternalServerError different from InvalidCode
		//return nil, rest_errors.NewBadInputError("Unable to activate account")
		return nil, BadInputError("Unable to activate account").Log()
	}

	return map[string]interface{}{
		"result": "ok",
	}, nil
}
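The wiring that binds a job key such as "api/activate" to this handler is not shown in these examples; presumably jobs.InitJobServer does something along these lines. The sketch below is an assumption based on the Pigeon APIs that appear elsewhere in this file (registerActivateJob is a hypothetical name, and SetHandler accepting the RestJobWrapper result directly is an assumption).

// Hypothetical sketch: expose ApiActivateHandler as a Pigeon job so that the
// route registered for "api/activate" can reach it over RPC.
func registerActivateJob(pigeonServer jobqueue.Server) error {
	inbox, err := pigeonServer.CreateInbox("api/activate")
	if err != nil {
		return err
	}
	inbox.SetHandler(RestJobWrapper(ApiActivateHandler))
	return nil
}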
Example #7
0
// Insert a sample into the database for a particular LOD level, discarding the
// sample if the stratification chunk already contains a sample.
func (device *CassDevice) insertOrDiscardSampleLOD(varDef sddl.VarDef,
	lastUpdateTime time.Time,
	lod lodEnum,
	t time.Time,
	value interface{}) error {

	// Discard sample if it doesn't cross a stratification boundary
	stratificationSize := lodStratificationSize[lod]
	if !crossesStratificationBoundary(lastUpdateTime, t, stratificationSize) {
		// discard sample
		canolog.Info("LOD", lod, "discarded")
		return nil
	}

	// Get table name
	tableName, err := varTableNameByDatatype(varDef.Datatype())
	if err != nil {
		return err
	}

	// insert sample
	bucket := getBucket(t, lod)
	propname := varDef.Name()
	err = device.conn.session.Query(`
            INSERT INTO `+tableName+` 
                (device_id, propname, timeprefix, time, value)
            VALUES (?, ?, ?, ?, ?)
    `, device.ID(), propname, bucket.Name(), t, value).Exec()
	if err != nil {
		return err
	}
	canolog.Info("LOD", lod, "sample inserted into bucket", bucket.Name())

	// Track new bucket (if any) for garbage collection purposes.
	// And garbage collect.
	if crossesBucketBoundary(lastUpdateTime, t, bucket.BucketSize()) {
		err := device.addBucket(propname, &bucket)
		canolog.Info("New bucket", bucket, "created")
		if err != nil {
			canolog.Error("Error adding sample bucket: ", err)
			// don't return!  We still need to do garbage collection!
		}
		device.garbageCollectLOD(t, varDef, lod, false)
	}

	return nil
}
Example #8
0
// Determine whether the span from <t0> to <t1> crosses a stratification
// boundary, i.e. whether the two times fall in different stratification chunks.
func crossesStratificationBoundary(t0, t1 time.Time,
	stratification stratificationSizeEnum) bool {

	period := stratificationPeriod[stratification]
	sb0 := stratificationBoundary(t0, period)
	sb1 := stratificationBoundary(t1, period)
	canolog.Info("Stratification boundary", sb0, "(matches/mismatches ", sb1, ")")
	return !sb0.Equal(sb1)
}
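crossesStratificationBoundary relies on a stratificationBoundary helper that is not shown in these examples. A minimal sketch of what such a helper could look like, assuming stratification periods are plain time.Duration values:

// Sketch (assumption): snap a timestamp to the start of its stratification
// chunk. Two timestamps that truncate to the same instant lie in the same
// chunk; different results mean the span between them crosses a boundary.
func stratificationBoundary(t time.Time, period time.Duration) time.Time {
	return t.Truncate(period)
}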
func Migrate_0_9_1_to_15_04_03(session *gocql.Session) error {
	// Perform all migration queries.
	for _, query := range migrationQueries_0_9_1_to_15_04_03 {
		canolog.Info(query)
		if err := session.Query(query).Exec(); err != nil {
			// Ignore errors (just print them).
			canolog.Warn(query, ": ", err)
		}
	}
	return nil
}
Example #10
0
func (client *CanopySGClient) Send(m MailMessage) error {
	mail, ok := m.(*CanopySGMail)
	if !ok {
		return errors.New("Message was not constructed with CanopySGClient")
	}

	canolog.Info("Sending email: " + fmt.Sprint(mail.sgmail))
	err := client.sg.Send(mail.sgmail)
	if err != nil {
		canolog.Warn("Error sending email: " + err.Error())
	}
	return err
}
Example #11
0
func (dl *CassDatalayer) MigrateDB(keyspace, startVersion, endVersion string) error {
	var err error
	cluster := gocql.NewCluster("127.0.0.1")
	cluster.Keyspace = keyspace

	session, err := cluster.CreateSession()
	if err != nil {
		canolog.Error("Error creating DB session: ", err)
		return err
	}
	defer session.Close()

	curVersion := startVersion
	for curVersion != endVersion {
		canolog.Info("Migrating from ", curVersion, " to next version")
		curVersion, err = dl.migrateNext(session, curVersion)
		if err != nil {
			canolog.Error("Failed migrating from ", curVersion, ": ", err)
			return err
		}
	}
	canolog.Info("Migration complete!  DB is now version: ", curVersion)
	return nil
}
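MigrateDB relies on a migrateNext helper that is not shown. A sketch of one plausible shape for it, assuming migrations are registered per version hop (migrationStep and migrationSteps are hypothetical names, not the project's actual types):

// Sketch (assumption): one migration hop per call. Looks up the step
// registered for fromVersion, runs it, and returns the resulting version.
type migrationStep struct {
	toVersion string
	migrate   func(*gocql.Session) error
}

var migrationSteps = map[string]migrationStep{
	"0.9.1": {"15.04.03", Migrate_0_9_1_to_15_04_03},
}

func (dl *CassDatalayer) migrateNext(session *gocql.Session, fromVersion string) (string, error) {
	step, ok := migrationSteps[fromVersion]
	if !ok {
		return fromVersion, fmt.Errorf("no migration registered for version %s", fromVersion)
	}
	if err := step.migrate(session); err != nil {
		return fromVersion, err
	}
	return step.toVersion, nil
}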
Example #12
0
// Insert a cloud variable data sample.
func (device *CassDevice) InsertSample(varDef sddl.VarDef, t time.Time, value interface{}) error {
	// Convert to UTC before inserting
	t = t.UTC()
	canolog.Info("Inserting sample", varDef.Name(), t)

	// check last update time
	lastUpdateTime, err := device.varLastUpdateTime(varDef.Name())
	if err != nil {
		canolog.Error("Error inserting sample:", err.Error())
		return err
	}
	canolog.Info("Last update time was", lastUpdateTime)

	if t.Before(lastUpdateTime) {
		canolog.Error("Insertion time before last update time: ", t, lastUpdateTime)
		return fmt.Errorf("Insertion time %s before last update time %s", t, lastUpdateTime)
	}

	// update last update time
	err = device.varSetLastUpdateTime(varDef.Name(), t)
	if err != nil {
		return err
	}

	// For each LOD, insert or discard sample based on our
	// stratification algorithm.
	for lod := LOD_0; lod < LOD_END; lod++ {
		err = device.insertOrDiscardSampleLOD(varDef, lastUpdateTime, lod, t, value)
		if err != nil {
			// TODO: Transactionize/rollback?
			return err
		}
	}

	// TODO: Do we need to update in-memory device object?
	return nil
}
Example #13
0
// Fetch the historic timeseries data for a particular LOD.
func (device *CassDevice) historicDataLOD(
	varDef sddl.VarDef,
	start,
	end time.Time,
	lod lodEnum) ([]cloudvar.CloudVarSample, error) {

	var err error
	samples := []cloudvar.CloudVarSample{}

	// Get list of all buckets containing samples we are interested in.
	// TODO: This could happen in parallel w/ map-reduce-like algo
	buckets := getBucketsForTimeRange(start, end, lod)
	canolog.Info("Using buckets: ", buckets)
	for _, bucket := range buckets {
		samples, err = device.fetchAndAppendBucketSamples(varDef, samples, start, end, bucket.Name())

		if err != nil {
			canolog.Info("Error: ", err)
			return samples, err
		}
		canolog.Info("Fetched ", len(samples), "samples from bucket", bucket.Name())
	}
	return samples, nil
}
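The TODO above mentions fetching buckets in parallel. A rough sketch of how that might look, assuming fetchAndAppendBucketSamples accepts a nil starting slice and that per-bucket results can simply be concatenated in bucket order (historicDataLODParallel is hypothetical and needs the sync package):

// Sketch (assumption): fetch each bucket concurrently, then concatenate the
// per-bucket results in bucket order so samples stay time-ordered.
func (device *CassDevice) historicDataLODParallel(
	varDef sddl.VarDef,
	start, end time.Time,
	lod lodEnum) ([]cloudvar.CloudVarSample, error) {

	buckets := getBucketsForTimeRange(start, end, lod)
	results := make([][]cloudvar.CloudVarSample, len(buckets))
	errs := make([]error, len(buckets))

	var wg sync.WaitGroup
	for i, bucket := range buckets {
		wg.Add(1)
		go func(i int, bucketName string) {
			defer wg.Done()
			results[i], errs[i] = device.fetchAndAppendBucketSamples(
				varDef, nil, start, end, bucketName)
		}(i, bucket.Name())
	}
	wg.Wait()

	samples := []cloudvar.CloudVarSample{}
	for i := range buckets {
		if errs[i] != nil {
			return samples, errs[i]
		}
		samples = append(samples, results[i]...)
	}
	return samples, nil
}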
Example #14
0
func (conn *CassConnection) LookupAccountVerifyPassword(
	usernameOrEmail string,
	password string) (datalayer.Account, error) {
	account, err := conn.LookupAccount(usernameOrEmail)
	if err != nil {
		return nil, err
	}

	verified := account.VerifyPassword(password)
	if !verified {
		canolog.Info("Incorrect password for ", usernameOrEmail)
		return nil, datalayer.InvalidPasswordError
	}

	return account, nil
}
Example #15
0
// Inner RPC request handler (wrapped by RPCHandleRequest above).
func (server *PigeonServer) rpcHandleRequest(req *PigeonRequest, resp *PigeonResponse) (outErr error) {

	// Log crashes in the RPC code
	defer func() {
		r := recover()
		if r != nil {
			var buf [4096]byte
			n := runtime.Stack(buf[:], false)
			canolog.Error("RPC PANIC ", r, string(buf[:n]))
			canolog.Info("Recovered")
			outErr = fmt.Errorf("Crash in %s", req.ReqJobKey)
		}
	}()

	canolog.Info("RPC Handling", req.ReqJobKey)

	// Lookup the handler for that job type
	inboxes, ok := server.inboxesByMsgKey[req.ReqJobKey]
	if !ok {
		// NOT FOUND (NO INBOX LIST)
		return fmt.Errorf("Pigeon Server: No inbox for msg key %s on server %s", req.ReqJobKey, server.hostname)
	}
	if len(inboxes) == 0 {
		// NOT FOUND (NO INBOXES IN LIST)
		return fmt.Errorf("Pigeon Server: No inboxes for msg key %s on server %s", req.ReqJobKey, server.hostname)
	}
	// TODO: handle broadcast & idempotent request
	// For now, send to random inbox

	inbox := inboxes[rand.Intn(len(inboxes))]

	if inbox.handler == nil {
		return fmt.Errorf("Pigeon Server: Expected handler for inbox %s on inbox %s", req.ReqJobKey, inbox)
	}

	// Call the handler
	canolog.Info("Calling Registered handler")
	canolog.Info(req)
	canolog.Info(resp)
	canolog.Info("inbox: ", inbox)
	inbox.handler.Handle(req.ReqJobKey, inbox.userCtx, req, resp)
	canolog.Info("All done")

	return nil
}
Example #16
0
func AddRoutes(r *mux.Router, cfg config.Config, pigeonSys jobqueue.System) error {
	store := sessions.NewCookieStore([]byte(cfg.OptProductionSecret()))

	outbox := pigeonSys.NewOutbox()

	forwardAsPigeonJob := func(httpEndpoint, httpMethods, jobKey string) {
		canolog.Info("Registering route: ", httpEndpoint, "  to ", jobKey)
		r.HandleFunc(
			httpEndpoint,
			CanopyRestJobForwarder(
				jobKey,
				store,
				cfg.OptAllowOrigin(),
				outbox,
			),
		).Methods(httpMethods)
	}

	// TODO: Need to handle allow-origin correctly!
	// TODO: Can we automate all this?
	r.HandleFunc("/", rootRedirectHandler).Methods("GET")
	forwardAsPigeonJob("/api/activate", "POST", "api/activate")
	forwardAsPigeonJob("/api/create_devices", "POST", "api/create_devices")
	forwardAsPigeonJob("/api/create_user", "POST", "api/create_user")
	forwardAsPigeonJob("/api/device/{id}", "GET", "GET:api/device/id")
	forwardAsPigeonJob("/api/device/{id}", "POST", "POST:api/device/id")
	forwardAsPigeonJob("/api/device/{id}", "DELETE", "DELETE:api/device/id")
	forwardAsPigeonJob("/api/device/{id}/{var}", "GET", "api/device/id/var")
	forwardAsPigeonJob("/api/finish_share_transaction", "POST", "api/finish_share_transaction")
	forwardAsPigeonJob("/api/info", "GET", "api/info")
	forwardAsPigeonJob("/api/login", "POST", "api/login")
	forwardAsPigeonJob("/api/logout", "GET", "api/logout")
	forwardAsPigeonJob("/api/logout", "POST", "api/logout")
	forwardAsPigeonJob("/api/user/self", "GET", "GET:api/user/self")
	forwardAsPigeonJob("/api/user/self", "POST", "POST:api/user/self")
	forwardAsPigeonJob("/api/user/self", "DELETE", "DELETE:api/user/self")
	forwardAsPigeonJob("/api/user/self/devices", "GET", "api/devices")
	forwardAsPigeonJob("/api/reset_password", "POST", "api/reset_password")
	forwardAsPigeonJob("/api/share", "POST", "api/share")

	return nil
}
Example #17
0
// Remove old buckets for a single cloud variable and LOD
// Set <deleteAll> to false for normal garbage collection (only expired buckets
// are removed).  Set <deleteAll> to true to delete all data, expired or not.
func (device *CassDevice) garbageCollectLOD(curTime time.Time,
	varDef sddl.VarDef,
	lod lodEnum,
	deleteAll bool) error {

	canolog.Info("Running garbage collection for ", varDef.Name(), "LOD", lod)

	// Get list of expired buckets for that LOD
	var bucketName string
	bucketsToRemove := []string{}

	query := device.conn.session.Query(`
            SELECT timeprefix, endtime
            FROM var_buckets
            WHERE device_id = ?
                AND var_name = ?
                AND lod = ?
            ORDER BY timeprefix DESC
    `, device.ID(), varDef.Name(), lod).Consistency(gocql.One)

	iter := query.Iter()

	var endTime time.Time
	// NOTE: As a special case, we never delete the most recent LOD0 bucket,
	// even if it has expired, because we need it for LastUpdateTime.
	skipFirst := (lod == LOD_0)
	for iter.Scan(&bucketName, &endTime) {
		// determine expiration time
		// TODO: Handle tiers
		if deleteAll || bucketExpired(curTime, endTime, TIER_STANDARD, lod) {
			if skipFirst {
				skipFirst = false
			} else {
				bucketsToRemove = append(bucketsToRemove, bucketName)
			}
		}
	}

	err := iter.Close()
	if err != nil {
		return fmt.Errorf("Error garbage collecting cloudvar: %s", err.Error())
	}

	// Remove buckets
	for _, bucketName := range bucketsToRemove {
		// Get table name
		tableName, err := varTableNameByDatatype(varDef.Datatype())
		if err != nil {
			return err
		}

		// Remove expired bucket
		canolog.Info("Removing expired bucket", varDef.Name(), bucketName)
		err = device.conn.session.Query(`
                DELETE FROM `+tableName+`
                WHERE device_id = ?
                    AND propname = ?
                    AND timeprefix = ?
        `, device.ID(), varDef.Name(), bucketName).Consistency(gocql.One).Exec()
		if err != nil {
			canolog.Error("Problem deleting bucket ", device.ID(), varDef.Name(), bucketName)
		} else {
			// Cleanup var_buckets table, but only if we actually deleted the
			// bucket in the previous step
			err := device.conn.session.Query(`
                DELETE FROM var_buckets
                WHERE device_id = ?
                    AND var_name = ?
                    AND lod = ?
                    AND timeprefix = ?
            `, device.ID(), varDef.Name(), lod, bucketName).Consistency(gocql.One).Exec()
			if err != nil {
				canolog.Error("Problem cleaning var_buckets ", device.ID(), varDef.Name(), bucketName, ":", err)
			}
		}
	}
	return nil
}
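bucketExpired is called above but not shown. A minimal sketch of the expiration test, assuming the retention window comes from the same cloudVarLODDuration table used in Example #1 (the tierEnum type name is a guess):

// Sketch (assumption): a bucket is expired once its end time falls outside
// the retention window for the given tier and LOD.
func bucketExpired(curTime, bucketEndTime time.Time, tier tierEnum, lod lodEnum) bool {
	retention := cloudVarLODDuration(tier, lod)
	return bucketEndTime.Before(curTime.Add(-retention))
}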
Example #18
0
// Process communication payload from device (via websocket or REST)
//  {
//      "device_id" : "9dfe2a00-efe2-45f9-a84c-8afc69caf4e7",
//      "sddl" : {
//          "optional inbound bool onoff" : {}
//      },
//      "vars" : {
//          "temperature" : 38.0,
//          "gps" : {
//              "latitude" : 38.0,
//              "longitude" : 38.0
//          }
//      }
//  }
//
//  <conn> is an optional datalayer connection.  If provided, it is used.
//  Otherwise, a datalayer connection is opened by this routine.
//
//  <device> is the device that sent the communication.  If nil, then either
//  <deviceIdString> or, as a last resort, the payload's "device_id" will be
//  used.
//
//  <deviceIdString> is the string device ID of the device that sent the
//  communication.  It is ignored if <device> is not nil.  If empty, then the
//  payload's "device_id" will be used.
//
//  <secretKey> is the device's secret key. A secret key is required if
//  <device> is nil.  Either the value of <secretKey> or, as a last resort, the
//  payload's "secret_key" field will be used.
//
//  <payload> is a string containing the JSON payload.
func ProcessDeviceComm(
	cfg config.Config,
	conn datalayer.Connection,
	device datalayer.Device,
	deviceIdString string,
	secretKey string,
	payload string) ServiceResponse {
	var err error
	var out ServiceResponse
	var ok bool

	canolog.Info("ProcessDeviceComm STARTED")
	// If conn is nil, open a datalayer connection.
	if conn == nil {
		dl := cassandra_datalayer.NewDatalayer(cfg)
		conn, err = dl.Connect("canopy")
		if err != nil {
			return ServiceResponse{
				HttpCode: http.StatusInternalServerError,
				Err:      fmt.Errorf("Could not connect to database: %s", err),
				Response: `{"result" : "error", "error_type" : "could_not_connect_to_database"}`,
				Device:   nil,
			}
		}
		defer conn.Close()
	}

	// Parse JSON payload
	var payloadObj map[string]interface{}
	err = json.Unmarshal([]byte(payload), &payloadObj)
	if err != nil {
		return ServiceResponse{
			HttpCode: http.StatusBadRequest,
			Err:      fmt.Errorf("Error JSON decoding payload: %s", err),
			Response: `{"result" : "error", "error_type" : "decoding_paylaod"}`,
			Device:   nil,
		}
	}

	// Device can be provided to this routine in one of three ways:
	// 1) <device> parameter
	// 2) <deviceId> parameter
	// 3) "device_id" field in payload
	if device == nil && deviceIdString != "" {
		// Parse UUID
		uuid, err := gocql.ParseUUID(deviceIdString)
		if err != nil {
			return ServiceResponse{
				HttpCode: http.StatusBadRequest,
				Err:      fmt.Errorf("Invalid UUID %s: %s", deviceIdString, err),
				Response: `{"result" : "error", "error_type" : "device_uuid_required"}`,
				Device:   nil,
			}
		}

		// Get secret key from payload if necessary
		if secretKey == "" {
			secretKey, ok = payloadObj["secret_key"].(string)
			if !ok {
				return ServiceResponse{
					HttpCode: http.StatusBadRequest,
					Err:      fmt.Errorf("\"secret_key\" field must be string"),
					Response: `{"result" : "error", "error_type" : "bad_payload"}`,
					Device:   nil,
				}
			}
		}

		// lookup device
		device, err = conn.LookupDeviceVerifySecretKey(uuid, secretKey)
		if err != nil {
			return ServiceResponse{
				HttpCode: http.StatusInternalServerError,
				Err:      fmt.Errorf("Error looking up or verifying device: %s", err),
				Response: `{"result" : "error", "error_type" : "database_error"}`,
				Device:   nil,
			}
		}
	}

	// Is "device_id" provided in payload?
	_, ok = payloadObj["device_id"]
	if ok {
		deviceIdStringFromPayload, ok := payloadObj["device_id"].(string)
		if !ok {
			return ServiceResponse{
				HttpCode: http.StatusBadRequest,
				Err:      fmt.Errorf("\"device_id\" field must be string"),
				Response: `{"result" : "error", "error_type" : "bad_payload"}`,
				Device:   nil,
			}
		}

		// Parse UUID
		uuid, err := gocql.ParseUUID(deviceIdStringFromPayload)
		if err != nil {
			return ServiceResponse{
				HttpCode: http.StatusBadRequest,
				Err:      fmt.Errorf("Invalid UUID %s: %s", deviceIdStringFromPayload, err),
				Response: `{"result" : "error", "error_type" : "device_uuid_required"}`,
				Device:   nil,
			}
		}

		// Is <device> already set?
		// If not: set it.
		// If so: ensure consistency
		if device == nil {

			// Get secret key from payload if necessary
			if secretKey == "" {
				secretKey, ok = payloadObj["secret_key"].(string)
				if !ok {
					return ServiceResponse{
						HttpCode: http.StatusBadRequest,
						Err:      fmt.Errorf("\"secret_key\" field must be string"),
						Response: `{"result" : "error", "error_type" : "bad_payload"}`,
						Device:   nil,
					}
				}
			}

			// Lookup device
			device, err = conn.LookupDeviceVerifySecretKey(uuid, secretKey)
			if err != nil {
				return ServiceResponse{
					HttpCode: http.StatusInternalServerError,
					Err:      fmt.Errorf("Error looking up or verifying device: %s", err),
					Response: `{"result" : "error", "error_type" : "database_error"}`,
					Device:   nil,
				}
			}
		} else {
			if device.ID().String() != deviceIdStringFromPayload {
				return ServiceResponse{
					HttpCode: http.StatusBadRequest,
					Err:      fmt.Errorf("Inconsistent device ID: %s %s", device.ID().String(), deviceIdStringFromPayload),
					Response: `{"result" : "error", "error_type" : "bad_payload"}`,
					Device:   nil,
				}
			}
		}
	}

	// If device wasn't provided at all, throw error.
	if device == nil {
		return ServiceResponse{
			HttpCode: http.StatusBadRequest,
			Err:      fmt.Errorf("Device ID expected"),
			Response: `{"result" : "error", "error_type" : "bad_payload"}`,
			Device:   nil,
		}
	}
	out.Device = device

	device.UpdateLastActivityTime(nil)

	// If "sddl" is present, create new / reconfigure Cloud Variables.
	_, ok = payloadObj["sddl"]
	if ok {
		updateMap, ok := payloadObj["sddl"].(map[string]interface{})
		if !ok {
			return ServiceResponse{
				HttpCode: http.StatusBadRequest,
				Err:      fmt.Errorf("Expected object for \"sdd\" field"),
				Response: `{"result" : "error", "error_type" : "bad_payload"}`,
				Device:   nil,
			}
		}
		err = device.ExtendSDDL(updateMap)
		if err != nil {
			return ServiceResponse{
				HttpCode: http.StatusInternalServerError,
				Err:      fmt.Errorf("Error updating device's SDDL: %s", err),
				Response: `{"result" : "error", "error_type" : "database_error"}`,
				Device:   nil,
			}
		}
	}

	// If "vars" is present, update value of all Cloud Variables (creating new
	// Cloud Variables as necessary)
	doc := device.SDDLDocument()
	_, ok = payloadObj["vars"]
	canolog.Info("vars present:", ok)
	if ok {
		varsMap, ok := payloadObj["vars"].(map[string]interface{})
		if !ok {
			return ServiceResponse{
				HttpCode: http.StatusBadRequest,
				Err:      fmt.Errorf("Expected object for \"vars\" field"),
				Response: `{"result" : "error", "error_type" : "bad_payload"}`,
				Device:   nil,
			}
		}
		canolog.Info("varsMap: ", varsMap)
		for varName, value := range varsMap {
			varDef, err := doc.LookupVarDef(varName)
			// TODO: an error doesn't necessarily mean prop should be created?
			canolog.Info("Looking up property ", varName)
			if varDef == nil {
				// Property doesn't exist.  Add it.
				canolog.Info("Not found.  Add property ", varName)
				// TODO: What datatype?
				// TODO: What other parameters?
				varDef, err = doc.AddVarDef(varName, sddl.DATATYPE_FLOAT32)
				if err != nil {
					return ServiceResponse{
						HttpCode: http.StatusInternalServerError,
						Err:      fmt.Errorf("Error creating cloud variable %s: %s", varName, err),
						Response: `{"result" : "error", "error_type" : "database_error"}`,
						Device:   nil,
					}
				}

				// save modified SDDL
				// TODO: Save at the end?
				canolog.Info("SetSDDLDocument ", doc)
				err = device.SetSDDLDocument(doc)
				if err != nil {
					return ServiceResponse{
						HttpCode: http.StatusInternalServerError,
						Err:      fmt.Errorf("Error updating SDDL: %s", err),
						Response: `{"result" : "error", "error_type" : "database_error"}`,
						Device:   nil,
					}
				}
			}

			// Store property value.
			// Convert value datatype
			varVal, err := cloudvar.JsonToCloudVarValue(varDef, value)
			if err != nil {
				return ServiceResponse{
					HttpCode: http.StatusInternalServerError,
					Err:      fmt.Errorf("Error converting JSON to propertyValue: %s", err),
					Response: `{"result" : "error", "error_type" : "bad_payload"}`,
					Device:   nil,
				}
			}
			canolog.Info("InsertStample")
			err = device.InsertSample(varDef, time.Now(), varVal)
			if err != nil {
				return ServiceResponse{
					HttpCode: http.StatusInternalServerError,
					Err:      fmt.Errorf("Error inserting sample %s: %s", varName, err),
					Response: `{"result" : "error", "error_type" : "database_error"}`,
					Device:   nil,
				}
			}
		}
	}

	return ServiceResponse{
		HttpCode: http.StatusOK,
		Err:      nil,
		Response: `{"result" : "ok"}`,
		Device:   device,
	}
}
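A usage sketch (not from the source; reportTemperature is a hypothetical helper and the secret key value is a placeholder): reporting a single reading and letting ProcessDeviceComm open its own datalayer connection and resolve the device from the payload.

// Usage sketch (assumption): conn, device, and deviceIdString are all left
// empty, so the payload's "device_id" and "secret_key" identify the device.
func reportTemperature(cfg config.Config) ServiceResponse {
	payload := `{
        "device_id" : "9dfe2a00-efe2-45f9-a84c-8afc69caf4e7",
        "secret_key" : "<device-secret-key>",
        "vars" : { "temperature" : 38.0 }
    }`
	return ProcessDeviceComm(cfg, nil, nil, "", "", payload)
}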
Example #19
0
func main() {
	/*if true {
	    passed := device_filter.RunTests()
	    fmt.Println(passed)
	    return
	}*/
	r := mux.NewRouter()

	cfg := config.NewDefaultConfig(buildVersion, buildDate, buildCommit)
	err := cfg.LoadConfig()
	if err != nil {
		logFilename := config.JustGetOptLogFile()

		err2 := canolog.Init(logFilename)
		if err2 != nil {
			fmt.Println(err)
			return
		}
		canolog.Info("Starting Canopy Cloud Service")
		canolog.Error("Configuration error: %s", err)
		canolog.Info("Exiting")
		return
	}

	err = canolog.Init(cfg.OptLogFile())
	if err != nil {
		fmt.Println(err)
		return
	}

	canolog.Info("Starting Canopy Cloud Service")
	canolog.Info("Version:", cfg.BuildVersion())
	canolog.Info("Build Date:", cfg.BuildDate())
	canolog.Info("Build Commit:", cfg.BuildCommit())

	// Log crashes
	defer func() {
		r := recover()
		if r != nil {
			var buf [4096]byte
			n := runtime.Stack(buf[:], false)
			canolog.Error("PANIC ", r, string(buf[:n]))
			panic(r)
		}
		shutdown()
	}()
	// handle SIGINT & SIGTERM
	c := make(chan os.Signal, 1)
	c2 := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	signal.Notify(c2, syscall.SIGTERM)
	go func() {
		<-c
		canolog.Info("SIGINT recieved")
		shutdown()
		os.Exit(1)
	}()
	go func() {
		<-c2
		canolog.Info("SIGTERM recieved")
		shutdown()
		os.Exit(1)
	}()

	if cfg.OptHostname() == "" {
		canolog.Error("You must set the configuration option \"hostname\"")
		return
	}

	if cfg.OptPasswordSecretSalt() == "" {
		canolog.Error("You must set the configuration option \"password-secret-salt\"")
		return
	}
	canolog.Info(cfg.ToString())

	pigeonSys, err := jobqueue.NewPigeonSystem(cfg)
	if err != nil {
		canolog.Error("Error initializing messaging system (Pigeon):", err)
		return
	}

	pigeonServer, err := pigeonSys.StartServer("localhost") // TODO use configured host
	if err != nil {
		canolog.Error("Unable to start messaging server (Pigeon):", err)
		return
	}

	pigeonOutbox := pigeonSys.NewOutbox()

	err = jobs.InitJobServer(cfg, pigeonServer)
	if err != nil {
		canolog.Error("Unable to initialize Job Server", err)
		return
	}

	if cfg.OptForwardOtherHosts() != "" {
		canolog.Info("Requests to hosts other than ", cfg.OptHostname(), " will be forwarded to ", cfg.OptForwardOtherHosts())
		targetUrl, _ := url.Parse(cfg.OptForwardOtherHosts())
		reverseProxy := httputil.NewSingleHostReverseProxy(targetUrl)
		http.Handle("/", reverseProxy)
	} else {
		canolog.Info("No reverse proxy for other hosts consfigured.")
	}

	hostname := cfg.OptHostname()
	webManagerPath := cfg.OptWebManagerPath()
	jsClientPath := cfg.OptJavascriptClientPath()
	http.Handle(hostname+"/echo", websocket.Handler(ws.NewCanopyWebsocketServer(cfg, pigeonOutbox, pigeonServer)))

	webapp.AddRoutes(r)
	rest.AddRoutes(r, cfg, pigeonSys)

	http.Handle(hostname+"/", r)

	if webManagerPath != "" {
		http.Handle(hostname+"/mgr/", http.StripPrefix("/mgr/", http.FileServer(http.Dir(webManagerPath))))
	}

	if jsClientPath != "" {
		http.Handle(hostname+"/canopy-js-client/", http.StripPrefix("/canopy-js-client/", http.FileServer(http.Dir(jsClientPath))))
	}

	// Run HTTP and HTTPS servers simultaneously (if both are enabled)
	httpResultChan := make(chan error)
	httpsResultChan := make(chan error)
	if cfg.OptEnableHTTP() {
		go func() {
			httpPort := cfg.OptHTTPPort()
			srv := &http.Server{
				Addr:    fmt.Sprintf(":%d", httpPort),
				Handler: context.ClearHandler(http.DefaultServeMux),
			}
			err := srv.ListenAndServe()
			httpResultChan <- err
		}()
	}
	if cfg.OptEnableHTTPS() {
		go func() {
			httpsPort := cfg.OptHTTPSPort()
			httpsCertFile := cfg.OptHTTPSCertFile()
			httpsPrivKeyFile := cfg.OptHTTPSPrivKeyFile()
			srv := &http.Server{
				Addr:    fmt.Sprintf(":%d", httpsPort),
				Handler: context.ClearHandler(http.DefaultServeMux),
			}
			err := srv.ListenAndServeTLS(httpsCertFile, httpsPrivKeyFile)
			httpsResultChan <- err
		}()
	}

	// Exit if either server has error
	select {
	case err := <-httpResultChan:
		canolog.Error(err)
	case err := <-httpsResultChan:
		canolog.Error(err)
	}

}
// This handler forwards an HTTP request along as a Pigeon job.
func CanopyRestJobForwarder(
	jobKey string,
	cookieStore *sessions.CookieStore,
	allowOrigin string,
	outbox jobqueue.Outbox) http.HandlerFunc {

	return func(w http.ResponseWriter, r *http.Request) {

		// Log crashes
		defer func() {
			r := recover()
			if r != nil {
				var buf [4096]byte
				n := runtime.Stack(buf[:], false)
				canolog.Error("PANIC ", r, string(buf[:n]))
				w.WriteHeader(http.StatusInternalServerError)
				fmt.Fprintf(w, "{\"result\" : \"error\", \"error_type\" : \"crash\"}")
			}
		}()

		// Log request
		canolog.Info("Request: ", r.Method, r.URL, " BY ", r.RemoteAddr)

		// Check for session-based AUTH
		cookieUsername := ""
		if cookieStore != nil {
			session, _ := cookieStore.Get(r, "canopy-login-session")
			cookieUsername, _ = session.Values["logged_in_username"].(string)
		}

		// Read message body
		bodyBytes, err := ioutil.ReadAll(r.Body)
		if err != nil {
			fmt.Fprintf(w, "{\"error\" : \"reading_body\"}")
			return
		}
		bodyString := string(bodyBytes)

		// Launch backend job
		payload := map[string]interface{}{
			"url-vars":        mux.Vars(r),
			"query":           r.URL.Query(), // map[string][]string
			"auth-header":     r.Header["Authorization"],
			"cookie-username": cookieUsername,
			"http-body":       bodyString,
		}
		//
		canolog.Info("Launching job", jobKey)
		respChan, err := outbox.Launch(jobKey, payload)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "{\"result\" : \"error\", \"error_type\" : \"failed_to_launch_job\"}")
			return
		}

		w.Header().Set("Content-Type", "application/json")
		if allowOrigin != "" {
			w.Header().Set("Access-Control-Allow-Origin", allowOrigin)
		}

		// Wait for pigeon response
		resp := (<-respChan).Body()

		// Parse pigeon response
		httpStatus, ok := resp["http-status"].(int)
		if !ok {
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "{\"result\" : \"error\", \"error\" : \"Expected int http-status\"}")
			return
		}

		clearCookies, ok := resp["clear-cookies"].([]string)
		if ok {
			session, _ := cookieStore.Get(r, "canopy-login-session")
			for _, cookie := range clearCookies {
				canolog.Info("Clearing cookie: ", session, session.Values, cookie)
				session.Values[cookie] = ""
				canolog.Info("Cleared")
			}
			err := session.Save(r, w)
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				fmt.Fprintf(w, "{\"result\" : \"error\", \"error\" : \"error_saving_session\"}")
				return
			}
		}

		setCookies, ok := resp["set-cookies"].(map[string]string)
		if ok {
			session, _ := cookieStore.Get(r, "canopy-login-session")
			for key, value := range setCookies {
				canolog.Info("Setting cookie: ", key, ":", value)
				session.Values[key] = value
			}
			err := session.Save(r, w)
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				fmt.Fprintf(w, "{\"result\" : \"error\", \"error\" : \"error_saving_session\"}")
				return
			}
		}

		// Write HTTP Response
		w.WriteHeader(httpStatus)
		fmt.Fprint(w, resp["http-body"])
	}
}
Example #21
0
func NewCanopyWebsocketServer(cfg config.Config, outbox jobqueue.Outbox, pigeonServer jobqueue.Server) func(ws *websocket.Conn) {
	// Main websocket server routine.
	// This event loop runs until the websocket connection is broken.
	return func(ws *websocket.Conn) {
		canolog.Websocket("Websocket connection established")

		var cnt int32
		var device datalayer.Device
		var inbox jobqueue.Inbox
		var inboxReciever jobqueue.RecieveHandler
		lastPingTime := time.Now()

		cnt = 0

		// connect to cassandra
		dl := cassandra_datalayer.NewDatalayer(cfg)
		conn, err := dl.Connect("canopy")
		if err != nil {
			canolog.Error("Could not connect to database: ", err)
			return
		}
		defer conn.Close()

		for {
			var in string

			// check for message from client
			ws.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
			err := websocket.Message.Receive(ws, &in)
			if err == nil {
				// success, payload received
				cnt++
				resp := service.ProcessDeviceComm(cfg, conn, device, "", "", in)
				if resp.Device == nil {
					canolog.Error("Error processing device communications: ", resp.Err)
				} else {
					device = resp.Device
					if inbox == nil {
						deviceIdString := device.ID().String()
						inbox, err = pigeonServer.CreateInbox("canopy_ws:" + deviceIdString)
						if err != nil {
							canolog.Error("Error initializing inbox:", err)
							return
						}
						inboxReciever = jobqueue.NewRecieveHandler()
						inbox.SetHandler(inboxReciever)

						err = device.UpdateWSConnected(true)
						if err != nil {
							canolog.Error("Unexpected error: ", err)
						}
					}
				}
			} else if err == io.EOF {
				canolog.Websocket("Websocket connection closed")
				// connection closed
				if inbox != nil {
					if device != nil {
						err = device.UpdateWSConnected(false)
						if err != nil {
							canolog.Error("Unexpected error: ", err)
						}
					}
					inbox.Close()
				}
				return
			} else if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
				// timeout reached, no data for me this time
			} else {
				canolog.Error("Unexpected error: ", err)
			}

			// Periodically send blank message
			if time.Now().After(lastPingTime.Add(30 * time.Second)) {
				err := websocket.Message.Send(ws, "{}")
				if err != nil {
					canolog.Websocket("Websocket connection closed during ping")
					// connection closed
					if inbox != nil {
						if device != nil {
							err = device.UpdateWSConnected(false)
							if err != nil {
								canolog.Error("Unexpected error: ", err)
							}
						}
						inbox.Close()
					}
					return
				}
				canolog.Info("Pinging WS")
				lastPingTime = time.Now()
			}

			if inbox != nil {
				msg, _ := inboxReciever.Recieve(time.Duration(100 * time.Millisecond))
				if msg != nil {
					msgBytes, err := json.Marshal(msg)
					if err != nil {
						canolog.Error("Unexpected error: ", err)
					} else {
						canolog.Info("Websocket sending", string(msgBytes))
						canolog.Websocket("Websocket sending: ", string(msgBytes))
						websocket.Message.Send(ws, string(msgBytes))
					}
				}
			}
		}
	}
}
func (client *CanopyNoOpMailClient) Send(m MailMessage) error {
	canolog.Info("Noop Mail Client: not sending message")
	return nil
}
Example #23
0
// Wrapper for handling pigeon requests that originated from
// CanopyRestJobForwarder
func RestJobWrapper(handler RestJobHandler) jobqueue.HandlerFunc {
	return func(jobKey string, userCtxItf interface{}, req jobqueue.Request, resp jobqueue.Response) {
		// This expects to receive the following over the wire from the Pigeon
		// client:
		//  {
		//      "url-vars" : map[string]string,
		//      "auth-header" : string,
		//      "cookie-username" : string,
		//      "http-body" : string,
		//  }
		//
		// This sends the following response to the Pigeon client:
		//  {
		//      "http-status" : int,
		//      "http-body" : string,
		//      "clear-cookies" : []string,
		//      "set-cookies" : map[string]string,
		//  }
		var ok bool

		defer func() {
			// Catch exceptions and return callstack
			r := recover()
			if r != nil {
				var buf [4096]byte
				runtime.Stack(buf[:], false)
				n := bytes.Index(buf[:], []byte{0})
				canolog.Error(string(buf[:n]))
				RestSetError(resp, InternalServerError(fmt.Sprint("Crash: ", string(buf[:n]))))
			}
		}()

		canolog.Info("Handling job", jobKey)
		info := &RestRequestInfo{}
		body := req.Body()
		canolog.Info("Request:", body)

		// Get URL vars from job request
		info.URLVars, ok = body["url-vars"].(map[string]string)
		if !ok {
			RestSetError(resp, InternalServerError("Expected map[string]string for 'url-vars'").Log())
			return
		}

		// Get URL query parameters from job request
		info.Query, ok = body["query"].(url.Values)
		if !ok {
			RestSetError(resp, InternalServerError("Expected url.Values for 'query'").Log())
			return
		}

		userCtx, ok := userCtxItf.(map[string]interface{})
		if !ok {
			RestSetError(resp, InternalServerError("Expected map[string]interface{} for userCtx").Log())
			return
		}

		// Get DB Connection from userCtx
		info.Conn, ok = userCtx["db-conn"].(datalayer.Connection)
		conn := info.Conn
		if !ok {
			RestSetError(resp, InternalServerError("Expected datalayer.Connection for 'db-conn'").Log())
			return
		}

		// Get Config from userCtx
		info.Config, ok = userCtx["cfg"].(config.Config)
		if !ok {
			RestSetError(resp, InternalServerError("Expected config.Config for 'cfg'").Log())
			return
		}

		// Get MailClient from userCtx
		mailer, ok := userCtx["mailer"].(mail.MailClient)
		if !ok {
			RestSetError(resp, InternalServerError("Expected MailClient for 'mailer'").Log())
			return
		}

		// Check for BASIC AUTH
		authHeader, ok := body["auth-header"].([]string)
		if !ok {
			RestSetError(resp, InternalServerError("Expected []string for 'auth-header'").Log())
			return
		}
		username_string, password, err := parseBasicAuth(authHeader)
		if err == nil {
			// was a UUID provided?
			if len(username_string) == 36 {
				device, err := info.Conn.LookupDeviceByStringID(username_string)
				if err != nil {
					RestSetError(resp, IncorrectUsernameOrPasswordError().Log())
					return
				}

				if device.SecretKey() != password {
					RestSetError(resp, IncorrectUsernameOrPasswordError().Log())
					return
				}

				info.AuthType = CANOPY_REST_AUTH_DEVICE_BASIC
				info.Device = device

				// update last_seen for this device
				err = device.UpdateLastActivityTime(nil)
				if err != nil {
					RestSetError(resp, InternalServerError("Updating last seen time "+err.Error()).Log())
					return
				}
				canolog.Info("Device BASIC auth provided")
			} else {
				// otherwise, assume user account username/password provided
				acct, err := conn.LookupAccountVerifyPassword(username_string, password)
				if err != nil {
					if err == datalayer.InvalidPasswordError {
						RestSetError(resp, IncorrectUsernameOrPasswordError().Log())
						return
					} else {
						RestSetError(resp, InternalServerError("Account lookup failed").Log())
						return
					}
				}

				canolog.Info("Basic auth provided")
				info.AuthType = CANOPY_REST_AUTH_BASIC
				info.Account = acct
			}
		}

		// Check for session-based AUTH
		info.Cookies = make(map[string]string)
		info.Cookies["username"], ok = body["cookie-username"].(string)
		if !ok {
			RestSetError(resp, InternalServerError("Expected string for 'cookie-username'").Log())
			return
		}

		username, ok := info.Cookies["username"]
		if ok && username != "" {
			canolog.Info("Looking up account: ", username)
			acct, err := conn.LookupAccount(username)
			if err != nil {
				// TODO: Handle clear cookie logic on client side as well
				RestSetErrorClearCookies(resp, InternalServerError("Account lookup failed").Log())
				return
			}

			canolog.Info("Session auth provided")
			info.AuthType = CANOPY_REST_AUTH_SESSION
			info.Account = acct
		}

		httpBody, ok := body["http-body"].(string)
		if !ok {
			RestSetError(resp, InternalServerError("Expected string for 'http-body'").Log())
			return
		}

		// Decode httpBody JSON
		var bodyObj map[string]interface{}
		if httpBody != "" {
			decoder := json.NewDecoder(strings.NewReader(httpBody))
			err := decoder.Decode(&bodyObj)
			if err != nil {
				RestSetError(resp, BadInputError("JSON decode failed: %s "+err.Error()).Log())
				return
			}
		}
		info.BodyObj = bodyObj

		// Call the wrapped handler.
		sideEffects := NewRestSideEffects(mailer)
		respObj, restErr := handler(info, sideEffects)
		if restErr != nil {
			// Send the error response
			RestSetError(resp, restErr)
			return
		}

		// Marshal the success response
		jsonBytes, err := json.MarshalIndent(respObj, "", "    ")
		if err != nil {
			RestSetError(resp, InternalServerError("Error JSON-encoding Response").Log())
			return
		}
		resp.SetBody(map[string]interface{}{
			"http-body":   string(jsonBytes),
			"http-status": http.StatusOK,
		})

		// Perform deferred side effects
		// This must occur after resp.SetBody
		sideEffects.Perform(req, resp)
	}
}
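RestJobWrapper depends on a parseBasicAuth helper that is not shown in these examples. A minimal sketch of what it might look like, assuming the "auth-header" slice carries raw Authorization header values (uses encoding/base64, errors, and strings):

// Sketch (assumption): extract username and password from an HTTP Basic
// Authorization header of the form "Basic base64(user:pass)".
func parseBasicAuth(authHeader []string) (string, string, error) {
	if len(authHeader) == 0 {
		return "", "", errors.New("no Authorization header provided")
	}
	const prefix = "Basic "
	if !strings.HasPrefix(authHeader[0], prefix) {
		return "", "", errors.New("not HTTP Basic authentication")
	}
	decoded, err := base64.StdEncoding.DecodeString(authHeader[0][len(prefix):])
	if err != nil {
		return "", "", err
	}
	parts := strings.SplitN(string(decoded), ":", 2)
	if len(parts) != 2 {
		return "", "", errors.New("malformed Basic authentication credentials")
	}
	return parts[0], parts[1], nil
}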