Example #1
// Verify user identity and hand out a JWT.
func getJWT(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	addCommonJwtRespHeaders(w, r)
	// Verify identity
	user := r.FormValue(JWT_USER_ATTR)
	if user == "" {
		http.Error(w, "Please pass JWT 'user' parameter", http.StatusBadRequest)
		return
	}
	jwtCol := HttpDB.Use(JWT_COL_NAME)
	if jwtCol == nil {
		http.Error(w, "Server is missing JWT identity collection, please restart the server.", http.StatusInternalServerError)
		return
	}
	userQuery := map[string]interface{}{
		"eq": user,
		"in": []interface{}{JWT_USER_ATTR}}
	userQueryResult := make(map[int]struct{})
	if err := db.EvalQuery(userQuery, jwtCol, &userQueryResult); err != nil {
		tdlog.CritNoRepeat("Query failed in JWT identity collection : %v", err)
		http.Error(w, "Query failed in JWT identity collection", http.StatusInternalServerError)
		return
	}
	// Verify password
	pass := r.FormValue(JWT_PASS_ATTR)
	for recID := range userQueryResult {
		rec, err := jwtCol.Read(recID)
		if err != nil {
			break
		}
		if rec[JWT_PASS_ATTR] != pass {
			tdlog.CritNoRepeat("JWT: identitify verification failed from request sent by %s", r.RemoteAddr)
			break
		}
		// Successful password match
		token := jwt.New(jwt.GetSigningMethod("RS256"))
		token.Claims = jwt.MapClaims{
			JWT_USER_ATTR:        rec[JWT_USER_ATTR],
			JWT_COLLECTIONS_ATTR: rec[JWT_COLLECTIONS_ATTR],
			JWT_ENDPOINTS_ATTR:   rec[JWT_ENDPOINTS_ATTR],
			JWT_EXPIRY:           time.Now().Add(time.Hour * 72).Unix(),
		}
		var tokenString string
		var e error
		if tokenString, e = token.SignedString(privateKey); e != nil {
			panic(e)
		}
		w.Header().Set("Authorization", "Bearer "+tokenString)
		w.WriteHeader(http.StatusOK)
		return
	}
	// Password mismatch, unknown user, or failed record read
	http.Error(w, "Invalid password", http.StatusUnauthorized)
}
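A caller would obtain the token with a single form request and then present the Authorization header on later calls. Below is a minimal client sketch; the /getjwt path, the port, and the "user"/"pass" form-field names are assumed stand-ins for JWT_USER_ATTR and JWT_PASS_ATTR, not values taken from the snippet above.

// Hypothetical client: request a token and print the Authorization header.
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Field names "user" and "pass" are assumptions standing in for
	// JWT_USER_ATTR and JWT_PASS_ATTR.
	resp, err := http.PostForm("http://localhost:8080/getjwt",
		url.Values{"user": {"admin"}, "pass": {"secret"}})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// On success the handler returns the token in the response header.
	fmt.Println(resp.Header.Get("Authorization")) // "Bearer <token>"
}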
Example #2
// Close all file handles.
func (part *Partition) Close() error {

	var err error

	if e := part.col.Close(); e != nil {
		tdlog.CritNoRepeat("Failed to close %s: %v", part.col.Path, e)
		err = dberr.New(dberr.ErrorIO)
	}
	if e := part.lookup.Close(); e != nil {
		tdlog.CritNoRepeat("Failed to close %s: %v", part.lookup.Path, e)
		err = dberr.New(dberr.ErrorIO)
	}
	return err
}
Example #3
// Close all file handles.
func (part *Partition) Close() (err error) {
	var failure bool
	if err = part.col.Close(); err != nil {
		tdlog.CritNoRepeat("Failed to close %s: %v", part.col.Path, err)
		failure = true
	}
	if err = part.lookup.Close(); err != nil {
		tdlog.CritNoRepeat("Failed to close %s: %v", part.lookup.Path, err)
		failure = true
	}
	if failure {
		err = errors.New("Operation did not complete successfully")
	}
	return
}
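The two Close variants above differ only in how a partial failure is reported: Example #2 returns a generic dberr.ErrorIO and Example #3 replaces the last error with a generic message, so detail is lost either way. On Go 1.20 and later one alternative, shown purely as a sketch and not as tiedot's own approach, is to aggregate both errors with the standard library's errors.Join:

// Sketch only: keep both close errors instead of flattening them.
// Uses errors and fmt from the standard library; requires Go 1.20+.
func (part *Partition) Close() error {
	var errs []error
	if e := part.col.Close(); e != nil {
		errs = append(errs, fmt.Errorf("close %s: %w", part.col.Path, e))
	}
	if e := part.lookup.Close(); e != nil {
		errs = append(errs, fmt.Errorf("close %s: %w", part.lookup.Path, e))
	}
	return errors.Join(errs...) // nil when both closes succeed
}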
Example #4
File: db.go Project: W3SS/tiedot
// Load all collection schemas.
func (db *DB) load() error {
	// Create DB directory and PART_NUM_FILE if necessary
	var numPartsAssumed = false
	numPartsFilePath := path.Join(db.path, PART_NUM_FILE)
	if err := os.MkdirAll(db.path, 0700); err != nil {
		return err
	}
	if partNumFile, err := os.Stat(numPartsFilePath); err != nil {
		// A new database gets as many partitions as the number of CPUs recognized by the OS
		if err := ioutil.WriteFile(numPartsFilePath, []byte(strconv.Itoa(runtime.NumCPU())), 0600); err != nil {
			return err
		}
		numPartsAssumed = true
	} else if partNumFile.IsDir() {
		return fmt.Errorf("Database config file %s is actually a directory, is database path correct?", PART_NUM_FILE)
	}
	// Get number of partitions from the text file
	if numParts, err := ioutil.ReadFile(numPartsFilePath); err != nil {
		return err
	} else if db.numParts, err = strconv.Atoi(strings.Trim(string(numParts), "\r\n ")); err != nil {
		return err
	}
	// Look for collection directories and open the collections
	db.cols = make(map[string]*Col)
	dirContent, err := ioutil.ReadDir(db.path)
	if err != nil {
		return err
	}
	for _, maybeColDir := range dirContent {
		if !maybeColDir.IsDir() {
			continue
		}
		if numPartsAssumed {
			return fmt.Errorf("Please manually repair database partition number config file %s", numPartsFilePath)
		}
		if db.cols[maybeColDir.Name()], err = OpenCol(db, maybeColDir.Name()); err != nil {
			return err
		}
	}
	// Synchronize data files at regular interval
	if db.autoSync == nil {
		db.autoSync = time.NewTicker(AUTO_SYNC_INTERVAL * time.Millisecond)
		go func() {
			for {
				select {
				case <-db.autoSync.C:
					if err := db.Sync(); err != nil {
						tdlog.CritNoRepeat("Background Auto-Sync on %s: Failed with error: %v", db.path, err)
					}
				case <-db.autoSyncStop:
					db.autoSync.Stop()
					return
				}
			}
		}()
	}
	return err
}
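In upstream tiedot, load() runs as part of opening a database handle. A minimal usage sketch, assuming the OpenDB constructor and AllCols accessor of the HouzuoGuo/tiedot db package (the import path for this W3SS fork may differ):

package main

import (
	"fmt"

	"github.com/HouzuoGuo/tiedot/db"
)

func main() {
	// Opening (or creating) the directory triggers load(): the partition
	// count file is written on first use and every sub-directory is
	// opened as a collection.
	myDB, err := db.OpenDB("/tmp/tiedot_example")
	if err != nil {
		panic(err)
	}
	defer myDB.Close()
	fmt.Println(myDB.AllCols()) // collection names discovered by load()
}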
Example #5
// Return number of the next chained bucket.
func (ht *HashTable) nextBucket(bucket int) int {
	if bucket >= ht.numBuckets {
		return 0
	}
	bucketAddr := bucket * BUCKET_SIZE
	// binary.Varint returns the decoded value and the number of bytes
	// consumed; a negative count means the varint overflowed (corruption).
	nextUint, err := binary.Varint(ht.Buf[bucketAddr : bucketAddr+10])
	next := int(nextUint)
	if next == 0 {
		return 0
	} else if err < 0 || next <= bucket || next >= ht.numBuckets || next < INITIAL_BUCKETS {
		tdlog.CritNoRepeat("Bad hash table - repair ASAP %s", ht.Path)
		return 0
	} else {
		return next
	}
}
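The next-bucket pointer lives at the start of each bucket as a signed varint: 10 bytes is the maximum encoded length of an int64, and a negative second return value from binary.Varint signals an overflowing (corrupt) encoding, which the err < 0 check above treats as a broken hash table. A self-contained round trip of that encoding, not tied to tiedot's file layout:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Reserve 10 bytes: the longest possible signed varint for an int64.
	buf := make([]byte, 10)
	n := binary.PutVarint(buf, 42) // store "next bucket = 42"
	fmt.Println("encoded length:", n)

	next, read := binary.Varint(buf)
	// read > 0: ok; read == 0: buffer too short; read < 0: overflow.
	fmt.Println("decoded:", next, "bytes read:", read)
}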
Example #6
// Look for indexed integer values within the specified integer range.
func IntRange(intFrom interface{}, expr map[string]interface{}, src *Col, result *map[int]struct{}) (err error) {
	path, hasPath := expr["in"]
	if !hasPath {
		return errors.New("Missing path `in`")
	}
	// Figure out the path
	vecPath := make([]string, 0)
	if vecPathInterface, ok := path.([]interface{}); ok {
		for _, v := range vecPathInterface {
			vecPath = append(vecPath, fmt.Sprint(v))
		}
	} else {
		return errors.New(fmt.Sprintf("Expecting vector path `in`, but %v given", path))
	}
	// Figure out result number limit
	intLimit := int(0)
	if limit, hasLimit := expr["limit"]; hasLimit {
		if floatLimit, ok := limit.(float64); ok {
			intLimit = int(floatLimit)
		} else if _, ok := limit.(int); ok {
			intLimit = limit.(int)
		} else {
			return dberr.New(dberr.ErrorExpectingInt, limit)
		}
	}
	// Figure out the range ("from" value & "to" value)
	from, to := int(0), int(0)
	if floatFrom, ok := intFrom.(float64); ok {
		from = int(floatFrom)
	} else if _, ok := intFrom.(int); ok {
		from = intFrom.(int)
	} else {
		return dberr.New(dberr.ErrorExpectingInt, "int-from", from)
	}
	if intTo, ok := expr["int-to"]; ok {
		if floatTo, ok := intTo.(float64); ok {
			to = int(floatTo)
		} else if _, ok := intTo.(int); ok {
			to = intTo.(int)
		} else {
			return dberr.New(dberr.ErrorExpectingInt, "int-to", to)
		}
	} else if intTo, ok := expr["int to"]; ok {
		if floatTo, ok := intTo.(float64); ok {
			to = int(floatTo)
		} else if _, ok := intTo.(int); ok {
			to = intTo.(int)
		} else {
			return dberr.New(dberr.ErrorExpectingInt, "int to", to)
		}
	} else {
		return dberr.New(dberr.ErrorMissing, "int-to")
	}
	if to > from && to-from > 1000 || from > to && from-to > 1000 {
		tdlog.CritNoRepeat("Query %v involves index lookup on more than 1000 values, which can be very inefficient", expr)
	}
	counter := int(0) // Number of results already collected
	htPath := strings.Join(vecPath, ",")
	if _, indexScan := src.indexPaths[htPath]; !indexScan {
		return dberr.New(dberr.ErrorNeedIndex, vecPath, expr)
	}
	if from < to {
		// Forward scan - from low value to high value
		for lookupValue := from; lookupValue <= to; lookupValue++ {
			lookupStrValue := fmt.Sprint(lookupValue)
			hashValue := StrHash(lookupStrValue)
			vals := src.hashScan(htPath, hashValue, int(intLimit))
			for _, docID := range vals {
				if intLimit > 0 && counter == intLimit {
					break
				}
				counter += 1
				(*result)[docID] = struct{}{}
			}
		}
	} else {
		// Backward scan - from high value to low value
		for lookupValue := from; lookupValue >= to; lookupValue-- {
			lookupStrValue := fmt.Sprint(lookupValue)
			hashValue := StrHash(lookupStrValue)
			vals := src.hashScan(htPath, hashValue, int(intLimit))
			for _, docID := range vals {
				if intLimit > 0 && counter == intLimit {
					break
				}
				counter += 1
				(*result)[docID] = struct{}{}
			}
		}
	}
	return
}
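The expression shape IntRange expects can be read off the parsing code itself: an indexed path under "in", an upper bound under "int-to" (or "int to"), an optional "limit", with the lower bound arriving separately as the intFrom argument. A sketch of calling it directly from within the same package; the field name, bounds, and limit are made-up values, and col must already be indexed on the path:

// Sketch: drive IntRange directly for documents whose indexed "age"
// value lies in [18, 65]. Hypothetical helper, not part of tiedot.
func exampleAgeRange(col *Col) (map[int]struct{}, error) {
	expr := map[string]interface{}{
		"in":     []interface{}{"age"}, // indexed path, as parsed above
		"int-to": 65,                   // inclusive upper bound
		"limit":  100,                  // optional cap on result count
	}
	result := make(map[int]struct{})
	// The lower bound is passed as the separate intFrom parameter.
	err := IntRange(18, expr, col, &result)
	return result, err
}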