Example #1
func (ms *MapStorage) cacheAccounting(loadID string, alsKeys []string) error {
	CacheBeginTransaction()
	if alsKeys == nil {
		CacheRemPrefixKey(utils.ALIASES_PREFIX) // Forced until we can fine tune it
	}
	for k := range ms.dict {
		if strings.HasPrefix(k, utils.ALIASES_PREFIX) {
			// if the alias is already cached, remove its reverse cache keys
			// before reloading it
			if avs, err := CacheGet(k); err == nil && avs != nil {
				al := &Alias{Values: avs.(AliasValues)}
				al.SetId(k[len(utils.ALIASES_PREFIX):])
				al.RemoveReverseCache()
			}
			CacheRemKey(k)
			if _, err := ms.GetAlias(k[len(utils.ALIASES_PREFIX):], true); err != nil {
				CacheRollbackTransaction()
				return err
			}
		}
	}
	CacheCommitTransaction()

	loadHistList, err := ms.GetLoadHistory(1, true)
	if err != nil || len(loadHistList) == 0 {
		utils.Logger.Info(fmt.Sprintf("could not get load history: %v (%v)", loadHistList, err))
	}
	var loadHist *utils.LoadInstance
	if len(loadHistList) == 0 {
		loadHist = &utils.LoadInstance{
			RatingLoadID:     utils.GenUUID(),
			AccountingLoadID: utils.GenUUID(),
			LoadID:           loadID,
			LoadTime:         time.Now(),
		}
	} else {
		loadHist = loadHistList[0]
		loadHist.AccountingLoadID = utils.GenUUID()
		loadHist.LoadID = loadID
		loadHist.LoadTime = time.Now()
	}
	if err := ms.AddLoadHistory(loadHist, 10); err != nil {
		utils.Logger.Info(fmt.Sprintf("error saving load history: %v (%v)", loadHist, err))
		return err
	}
	ms.GetLoadHistory(1, true) // to load last instance in cache
	return utils.SaveCacheFileInfo(ms.cacheDumpDir, &utils.CacheFileInfo{Encoding: utils.MSGPACK, LoadInfo: loadHist})
}
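
All six examples end with the same load-history epilogue: fetch the most recent LoadInstance, refresh its load IDs, persist it and reload it into the cache. A minimal sketch of how that shared tail could be factored out; updateLoadHistory and the loadHistoryStorage interface are illustrative assumptions, not part of the original code:

// Hypothetical refactoring sketch; loadHistoryStorage is an assumed subset of
// the real storage interface.
type loadHistoryStorage interface {
	GetLoadHistory(limit int, skipCache bool) ([]*utils.LoadInstance, error)
	AddLoadHistory(ldInst *utils.LoadInstance, size int) error
}

// updateLoadHistory reproduces the epilogue shared by the cache* methods:
// reuse the latest LoadInstance if one exists, otherwise create a new one,
// stamp it with a fresh UUID, persist it and reload it into the cache.
func updateLoadHistory(s loadHistoryStorage, loadID string, size int, rating bool) (*utils.LoadInstance, error) {
	loadHistList, err := s.GetLoadHistory(1, true)
	if err != nil || len(loadHistList) == 0 {
		utils.Logger.Info(fmt.Sprintf("could not get load history: %v (%v)", loadHistList, err))
	}
	var loadHist *utils.LoadInstance
	if len(loadHistList) == 0 {
		loadHist = &utils.LoadInstance{
			RatingLoadID:     utils.GenUUID(),
			AccountingLoadID: utils.GenUUID(),
			LoadID:           loadID,
			LoadTime:         time.Now(),
		}
	} else {
		loadHist = loadHistList[0]
		if rating {
			loadHist.RatingLoadID = utils.GenUUID()
		} else {
			loadHist.AccountingLoadID = utils.GenUUID()
		}
		loadHist.LoadID = loadID
		loadHist.LoadTime = time.Now()
	}
	if err := s.AddLoadHistory(loadHist, size); err != nil {
		utils.Logger.Info(fmt.Sprintf("error saving load history: %v (%v)", loadHist, err))
		return nil, err
	}
	s.GetLoadHistory(1, true) // to load last instance in cache
	return loadHist, nil
}

With such a helper, the tail of Example #1 would reduce to loadHist, err := updateLoadHistory(ms, loadID, 10, false) followed by the utils.SaveCacheFileInfo call.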
Example #2
func (ms *MapStorage) cacheRating(loadID string, dKeys, rpKeys, rpfKeys, lcrKeys, dcsKeys, actKeys, aplKeys, shgKeys []string) error {
	CacheBeginTransaction()
	if dKeys == nil || (float64(CacheCountEntries(utils.DESTINATION_PREFIX))*utils.DESTINATIONS_LOAD_THRESHOLD < float64(len(dKeys))) {
		CacheRemPrefixKey(utils.DESTINATION_PREFIX)
	} else {
		CleanStalePrefixes(dKeys)
	}
	if rpKeys == nil {
		CacheRemPrefixKey(utils.RATING_PLAN_PREFIX)
	}
	if rpfKeys == nil {
		CacheRemPrefixKey(utils.RATING_PROFILE_PREFIX)
	}
	if lcrKeys == nil {
		CacheRemPrefixKey(utils.LCR_PREFIX)
	}
	if dcsKeys == nil {
		CacheRemPrefixKey(utils.DERIVEDCHARGERS_PREFIX)
	}
	if actKeys == nil {
		CacheRemPrefixKey(utils.ACTION_PREFIX) // Forced until we can fine tune it
	}
	if aplKeys == nil {
		CacheRemPrefixKey(utils.ACTION_PLAN_PREFIX)
	}
	if shgKeys == nil {
		CacheRemPrefixKey(utils.SHARED_GROUP_PREFIX) // Forced until we can fine tune it
	}
	for k := range ms.dict {
		if strings.HasPrefix(k, utils.DESTINATION_PREFIX) {
			if _, err := ms.GetDestination(k[len(utils.DESTINATION_PREFIX):]); err != nil {
				CacheRollbackTransaction()
				return err
			}
		}
		if strings.HasPrefix(k, utils.RATING_PLAN_PREFIX) {
			CacheRemKey(k)
			if _, err := ms.GetRatingPlan(k[len(utils.RATING_PLAN_PREFIX):], true); err != nil {
				CacheRollbackTransaction()
				return err
			}
		}
		if strings.HasPrefix(k, utils.RATING_PROFILE_PREFIX) {
			CacheRemKey(k)
			if _, err := ms.GetRatingProfile(k[len(utils.RATING_PROFILE_PREFIX):], true); err != nil {
				CacheRollbackTransaction()
				return err
			}
		}
		if strings.HasPrefix(k, utils.LCR_PREFIX) {
			CacheRemKey(k)
			if _, err := ms.GetLCR(k[len(utils.LCR_PREFIX):], true); err != nil {
				CacheRollbackTransaction()
				return err
			}
		}
		if strings.HasPrefix(k, utils.DERIVEDCHARGERS_PREFIX) {
			CacheRemKey(k)
			if _, err := ms.GetDerivedChargers(k[len(utils.DERIVEDCHARGERS_PREFIX):], true); err != nil {
				CacheRollbackTransaction()
				return err
			}
		}
		if strings.HasPrefix(k, utils.ACTION_PREFIX) {
			CacheRemKey(k)
			if _, err := ms.GetActions(k[len(utils.ACTION_PREFIX):], true); err != nil {
				CacheRollbackTransaction()
				return err
			}
		}
		if strings.HasPrefix(k, utils.ACTION_PLAN_PREFIX) {
			CacheRemKey(k)
			if _, err := ms.GetActionPlan(k[len(utils.ACTION_PLAN_PREFIX):], true); err != nil {
				CacheRollbackTransaction()
				return err
			}
		}
		if strings.HasPrefix(k, utils.SHARED_GROUP_PREFIX) {
			CacheRemKey(k)
			if _, err := ms.GetSharedGroup(k[len(utils.SHARED_GROUP_PREFIX):], true); err != nil {
				CacheRollbackTransaction()
				return err
			}
		}
	}
	CacheCommitTransaction()

	loadHistList, err := ms.GetLoadHistory(1, true)
	if err != nil || len(loadHistList) == 0 {
		utils.Logger.Info(fmt.Sprintf("could not get load history: %v (%v)", loadHistList, err))
	}
	var loadHist *utils.LoadInstance
	if len(loadHistList) == 0 {
		loadHist = &utils.LoadInstance{
			RatingLoadID:     utils.GenUUID(),
			AccountingLoadID: utils.GenUUID(),
			LoadID:           loadID,
			LoadTime:         time.Now(),
		}
	} else {
		loadHist = loadHistList[0]
		loadHist.AccountingLoadID = utils.GenUUID()
		loadHist.LoadID = loadID
		loadHist.LoadTime = time.Now()
	}
	if err := ms.AddLoadHistory(loadHist, 10); err != nil {
		utils.Logger.Info(fmt.Sprintf("error saving load history: %v (%v)", loadHist, err))
		return err
	}
	ms.GetLoadHistory(1, true) // to load last instance in cache
	return utils.SaveCacheFileInfo(ms.cacheDumpDir, &utils.CacheFileInfo{Encoding: utils.MSGPACK, LoadInfo: loadHist})
}
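
The destination check at the top of Examples #2, #4 and #6 is easy to misread: it compares the number of destinations already cached, scaled by utils.DESTINATIONS_LOAD_THRESHOLD, against the number of keys requested for reload, and falls back to a full flush when the request is too large. A tiny illustration with made-up numbers; the threshold value 0.1 is an assumption, not taken from the original code:

cached := 1000               // entries currently under utils.DESTINATION_PREFIX
threshold := 0.1             // assumed value of utils.DESTINATIONS_LOAD_THRESHOLD
dKeys := make([]string, 150) // destinations requested for reload
if float64(cached)*threshold < float64(len(dKeys)) {
	fmt.Println("flush the whole prefix and reload everything") // 100 < 150
} else {
	fmt.Println("reload only the listed keys and clean stale prefixes")
}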
Example #3
func (rs *RedisStorage) cacheAccounting(loadID string, alsKeys []string) (err error) {
	start := time.Now()
	CacheBeginTransaction()
	conn, err := rs.db.Get()
	if err != nil {
		CacheRollbackTransaction()
		return err
	}
	defer rs.db.Put(conn)
	if alsKeys == nil {
		utils.Logger.Info("Caching all aliases")
		if alsKeys, err = conn.Cmd("KEYS", utils.ALIASES_PREFIX+"*").List(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("aliases: %s", err.Error())
		}
		CacheRemPrefixKey(utils.ALIASES_PREFIX)
		CacheRemPrefixKey(utils.REVERSE_ALIASES_PREFIX)
	} else if len(alsKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching aliases: %v", alsKeys))
	}
	al := &Alias{}
	for _, key := range alsKeys {
		// if the alias is already cached, remove its reverse cache keys
		// before reloading it
		if avs, err := CacheGet(key); err == nil && avs != nil {
			al.Values = avs.(AliasValues)
			al.SetId(key[len(utils.ALIASES_PREFIX):])
			al.RemoveReverseCache()
		}
		CacheRemKey(key)
		if _, err = rs.GetAlias(key[len(utils.ALIASES_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("aliases: %s", err.Error())
		}
	}
	if len(alsKeys) != 0 {
		utils.Logger.Info("Finished aliases caching.")
	}
	utils.Logger.Info("Caching load history")
	if _, err = rs.GetLoadHistory(1, true); err != nil {
		CacheRollbackTransaction()
		return err
	}
	utils.Logger.Info("Finished load history caching.")
	CacheCommitTransaction()
	utils.Logger.Info(fmt.Sprintf("Cache accounting creation time: %v", time.Since(start)))

	loadHistList, err := rs.GetLoadHistory(1, true)
	if err != nil || len(loadHistList) == 0 {
		utils.Logger.Info(fmt.Sprintf("could not get load history: %v (%v)", loadHistList, err))
	}
	var loadHist *utils.LoadInstance
	if len(loadHistList) == 0 {
		loadHist = &utils.LoadInstance{
			RatingLoadID:     utils.GenUUID(),
			AccountingLoadID: utils.GenUUID(),
			LoadID:           loadID,
			LoadTime:         time.Now(),
		}
	} else {
		loadHist = loadHistList[0]
		loadHist.AccountingLoadID = utils.GenUUID()
		loadHist.LoadID = loadID
		loadHist.LoadTime = time.Now()
	}
	if err := rs.AddLoadHistory(loadHist, rs.loadHistorySize); err != nil {
		utils.Logger.Info(fmt.Sprintf("error saving load history: %v (%v)", loadHist, err))
		return err
	}

	rs.GetLoadHistory(1, true) // to load last instance in cache
	return utils.SaveCacheFileInfo(rs.cacheDumpDir, &utils.CacheFileInfo{Encoding: utils.MSGPACK, LoadInfo: loadHist})
}
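
A hypothetical call site for the method above; rs is assumed to be an initialized *RedisStorage, and the load ID and alias key are invented for illustration. Passing nil reloads every alias found in Redis and flushes both the alias and reverse-alias prefixes, while passing an explicit slice refreshes only those entries:

// Full reload of all aliases.
if err := rs.cacheAccounting("load-2015-01", nil); err != nil {
	utils.Logger.Info(fmt.Sprintf("full alias reload failed: %v", err))
}
// Partial reload of a single, made-up alias key.
partial := []string{utils.ALIASES_PREFIX + "cgrates.org:call:1001"}
if err := rs.cacheAccounting("load-2015-01", partial); err != nil {
	utils.Logger.Info(fmt.Sprintf("partial alias reload failed: %v", err))
}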
Example #4
func (rs *RedisStorage) cacheRating(loadID string, dKeys, rpKeys, rpfKeys, lcrKeys, dcsKeys, actKeys, aplKeys, shgKeys []string) (err error) {
	start := time.Now()
	CacheBeginTransaction()
	conn, err := rs.db.Get()
	if err != nil {
		CacheRollbackTransaction()
		return err
	}
	defer rs.db.Put(conn)
	if dKeys == nil || (float64(CacheCountEntries(utils.DESTINATION_PREFIX))*utils.DESTINATIONS_LOAD_THRESHOLD < float64(len(dKeys))) {
		// if more than half of the existing keys need to be reloaded, load them all
		utils.Logger.Info("Caching all destinations")
		if dKeys, err = conn.Cmd("KEYS", utils.DESTINATION_PREFIX+"*").List(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("destinations: %s", err.Error())
		}
		CacheRemPrefixKey(utils.DESTINATION_PREFIX)
	} else if len(dKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching destinations: %v", dKeys))
		CleanStalePrefixes(dKeys)
	}
	for _, key := range dKeys {
		if len(key) <= len(utils.DESTINATION_PREFIX) {
			utils.Logger.Warning(fmt.Sprintf("Got malformed destination id: %s", key))
			continue
		}
		if _, err = rs.GetDestination(key[len(utils.DESTINATION_PREFIX):]); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("destinations: %s", err.Error())
		}
	}
	if len(dKeys) != 0 {
		utils.Logger.Info("Finished destinations caching.")
	}
	if rpKeys == nil {
		utils.Logger.Info("Caching all rating plans")
		if rpKeys, err = conn.Cmd("KEYS", utils.RATING_PLAN_PREFIX+"*").List(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("rating plans: %s", err.Error())
		}
		CacheRemPrefixKey(utils.RATING_PLAN_PREFIX)
	} else if len(rpKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching rating plans: %v", rpKeys))
	}
	for _, key := range rpKeys {
		CacheRemKey(key)
		if _, err = rs.GetRatingPlan(key[len(utils.RATING_PLAN_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("rating plans: %s", err.Error())
		}
	}
	if len(rpKeys) != 0 {
		utils.Logger.Info("Finished rating plans caching.")
	}
	if rpfKeys == nil {
		utils.Logger.Info("Caching all rating profiles")
		if rpfKeys, err = conn.Cmd("KEYS", utils.RATING_PROFILE_PREFIX+"*").List(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("rating profiles: %s", err.Error())
		}
		CacheRemPrefixKey(utils.RATING_PROFILE_PREFIX)
	} else if len(rpfKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching rating profile: %v", rpfKeys))
	}
	for _, key := range rpfKeys {
		CacheRemKey(key)
		if _, err = rs.GetRatingProfile(key[len(utils.RATING_PROFILE_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("rating profiles: %s", err.Error())
		}
	}
	if len(rpfKeys) != 0 {
		utils.Logger.Info("Finished rating profile caching.")
	}
	if lcrKeys == nil {
		utils.Logger.Info("Caching LCR rules.")
		if lcrKeys, err = conn.Cmd("KEYS", utils.LCR_PREFIX+"*").List(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("lcr rules: %s", err.Error())
		}
		CacheRemPrefixKey(utils.LCR_PREFIX)
	} else if len(lcrKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching LCR rules: %v", lcrKeys))
	}
	for _, key := range lcrKeys {
		CacheRemKey(key)
		if _, err = rs.GetLCR(key[len(utils.LCR_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("lcr rules: %s", err.Error())
		}
	}
	if len(lcrKeys) != 0 {
		utils.Logger.Info("Finished LCR rules caching.")
	}
	// DerivedChargers caching
	if dcsKeys == nil {
		utils.Logger.Info("Caching all derived chargers")
		if dcsKeys, err = conn.Cmd("KEYS", utils.DERIVEDCHARGERS_PREFIX+"*").List(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("derived chargers: %s", err.Error())
		}
		CacheRemPrefixKey(utils.DERIVEDCHARGERS_PREFIX)
	} else if len(dcsKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching derived chargers: %v", dcsKeys))
	}
	for _, key := range dcsKeys {
		CacheRemKey(key)
		if _, err = rs.GetDerivedChargers(key[len(utils.DERIVEDCHARGERS_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("derived chargers: %s", err.Error())
		}
	}
	if len(dcsKeys) != 0 {
		utils.Logger.Info("Finished derived chargers caching.")
	}
	if actKeys == nil {
		utils.Logger.Info("Caching all actions")
		if actKeys, err = conn.Cmd("KEYS", utils.ACTION_PREFIX+"*").List(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("actions: %s", err.Error())
		}
		CacheRemPrefixKey(utils.ACTION_PREFIX)
	} else if len(actKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching actions: %v", actKeys))
	}
	for _, key := range actKeys {
		CacheRemKey(key)
		if _, err = rs.GetActions(key[len(utils.ACTION_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("actions: %s", err.Error())
		}
	}
	if len(actKeys) != 0 {
		utils.Logger.Info("Finished actions caching.")
	}

	if aplKeys == nil {
		utils.Logger.Info("Caching all action plans")
		if aplKeys, err = conn.Cmd("KEYS", utils.ACTION_PLAN_PREFIX+"*").List(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf(" %s", err.Error())
		}
		CacheRemPrefixKey(utils.ACTION_PLAN_PREFIX)
	} else if len(aplKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching action plan: %v", aplKeys))
	}
	for _, key := range aplKeys {
		CacheRemKey(key)
		if _, err = rs.GetActionPlan(key[len(utils.ACTION_PLAN_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf(" %s", err.Error())
		}
	}
	if len(aplKeys) != 0 {
		utils.Logger.Info("Finished action plans caching.")
	}

	if shgKeys == nil {
		utils.Logger.Info("Caching all shared groups")
		if shgKeys, err = conn.Cmd("KEYS", utils.SHARED_GROUP_PREFIX+"*").List(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("shared groups: %s", err.Error())
		}
		CacheRemPrefixKey(utils.SHARED_GROUP_PREFIX)
	} else if len(shgKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching shared groups: %v", shgKeys))
	}
	for _, key := range shgKeys {
		CacheRemKey(key)
		if _, err = rs.GetSharedGroup(key[len(utils.SHARED_GROUP_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("shared groups: %s", err.Error())
		}
	}
	if len(shgKeys) != 0 {
		utils.Logger.Info("Finished shared groups caching.")
	}

	CacheCommitTransaction()
	utils.Logger.Info(fmt.Sprintf("Cache rating creation time: %v", time.Since(start)))
	loadHistList, err := rs.GetLoadHistory(1, true)
	if err != nil || len(loadHistList) == 0 {
		utils.Logger.Info(fmt.Sprintf("could not get load history: %v (%v)", loadHistList, err))
	}
	var loadHist *utils.LoadInstance
	if len(loadHistList) == 0 {
		loadHist = &utils.LoadInstance{
			RatingLoadID:     utils.GenUUID(),
			AccountingLoadID: utils.GenUUID(),
			LoadID:           loadID,
			LoadTime:         time.Now(),
		}
	} else {
		loadHist = loadHistList[0]
		loadHist.RatingLoadID = utils.GenUUID()
		loadHist.LoadID = loadID
		loadHist.LoadTime = time.Now()
	}
	if err := rs.AddLoadHistory(loadHist, rs.loadHistorySize); err != nil {
		utils.Logger.Info(fmt.Sprintf("error saving load history: %v (%v)", loadHist, err))
		return err
	}
	rs.GetLoadHistory(1, true) // to load last instance in cache
	return utils.SaveCacheFileInfo(rs.cacheDumpDir, &utils.CacheFileInfo{Encoding: utils.MSGPACK, LoadInfo: loadHist})
}
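
Example #4 repeats the same step for every prefix: evict the cached entry, re-read it from Redis with the cache skipped, and roll the cache transaction back on failure. A sketch of that step expressed once; reloadKeys is a hypothetical helper, not an API of the original code:

// reloadKeys drops each cached entry and re-fetches it through the supplied
// getter (which is expected to repopulate the cache), rolling the cache
// transaction back on the first error.
func reloadKeys(keys []string, prefix, label string, get func(id string, skipCache bool) error) error {
	for _, key := range keys {
		CacheRemKey(key)
		if err := get(key[len(prefix):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("%s: %s", label, err.Error())
		}
	}
	return nil
}

The rating-plan loop above would then read: err = reloadKeys(rpKeys, utils.RATING_PLAN_PREFIX, "rating plans", func(id string, skipCache bool) error { _, err := rs.GetRatingPlan(id, skipCache); return err }).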
Example #5
func (ms *MongoStorage) cacheAccounting(loadID string, alsKeys []string) (err error) {
	start := time.Now()
	CacheBeginTransaction()
	var keyResult struct{ Key string }
	if alsKeys == nil {
		CacheRemPrefixKey(utils.ALIASES_PREFIX)
	}
	session := ms.session.Copy()
	defer session.Close()
	db := session.DB(ms.db)
	if alsKeys == nil {
		utils.Logger.Info("Caching all aliases")
		iter := db.C(colAls).Find(nil).Select(bson.M{"key": 1}).Iter()
		alsKeys = make([]string, 0)
		for iter.Next(&keyResult) {
			alsKeys = append(alsKeys, utils.ALIASES_PREFIX+keyResult.Key)
		}
		if err := iter.Close(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("aliases: %s", err.Error())
		}
	} else if len(alsKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching aliases: %v", alsKeys))
	}
	for _, key := range alsKeys {
		// if the alias is already cached, remove its reverse cache keys
		// before reloading it
		if avs, err := CacheGet(key); err == nil && avs != nil {
			al := &Alias{Values: avs.(AliasValues)}
			al.SetId(key[len(utils.ALIASES_PREFIX):])
			al.RemoveReverseCache()
		}
		CacheRemKey(key)
		if _, err = ms.GetAlias(key[len(utils.ALIASES_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("aliases: %s", err.Error())
		}
	}
	if len(alsKeys) != 0 {
		utils.Logger.Info("Finished aliases caching.")
	}
	utils.Logger.Info("Caching load history")
	loadHistList, err := ms.GetLoadHistory(1, true)
	if err != nil {
		CacheRollbackTransaction()
		return err
	}
	utils.Logger.Info("Finished load history caching.")
	utils.Logger.Info(fmt.Sprintf("Cache accounting creation time: %v", time.Since(start)))

	var loadHist *utils.LoadInstance
	if len(loadHistList) == 0 {
		loadHist = &utils.LoadInstance{
			RatingLoadID:     utils.GenUUID(),
			AccountingLoadID: utils.GenUUID(),
			LoadID:           loadID,
			LoadTime:         time.Now(),
		}
	} else {
		loadHist = loadHistList[0]
		loadHist.AccountingLoadID = utils.GenUUID()
		loadHist.LoadID = loadID
		loadHist.LoadTime = time.Now()
	}
	if err := ms.AddLoadHistory(loadHist, ms.loadHistorySize); err != nil {
		utils.Logger.Info(fmt.Sprintf("error saving load history: %v (%v)", loadHist, err))
		return err
	}
	ms.GetLoadHistory(1, true) // to load last instance in cache
	return utils.SaveCacheFileInfo(ms.cacheDumpDir, &utils.CacheFileInfo{Encoding: utils.MSGPACK, LoadInfo: loadHist})
}
Example #6
func (ms *MongoStorage) cacheRating(loadID string, dKeys, rpKeys, rpfKeys, lcrKeys, dcsKeys, actKeys, aplKeys, shgKeys []string) (err error) {
	start := time.Now()
	CacheBeginTransaction()
	keyResult := struct{ Key string }{}
	idResult := struct{ Id string }{}
	session := ms.session.Copy()
	defer session.Close()
	db := session.DB(ms.db)
	if dKeys == nil || (float64(CacheCountEntries(utils.DESTINATION_PREFIX))*utils.DESTINATIONS_LOAD_THRESHOLD < float64(len(dKeys))) {
		// if more than half of the existing keys need to be reloaded, load them all
		utils.Logger.Info("Caching all destinations")
		iter := db.C(colDst).Find(nil).Select(bson.M{"key": 1}).Iter()
		dKeys = make([]string, 0)
		for iter.Next(&keyResult) {
			dKeys = append(dKeys, utils.DESTINATION_PREFIX+keyResult.Key)
		}
		if err := iter.Close(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("destinations: %s", err.Error())
		}
		CacheRemPrefixKey(utils.DESTINATION_PREFIX)
	} else if len(dKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching destinations: %v", dKeys))
		CleanStalePrefixes(dKeys)
	}
	for _, key := range dKeys {
		if len(key) <= len(utils.DESTINATION_PREFIX) {
			utils.Logger.Warning(fmt.Sprintf("Got malformed destination id: %s", key))
			continue
		}
		if _, err = ms.GetDestination(key[len(utils.DESTINATION_PREFIX):]); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("destinations: %s", err.Error())
		}
	}
	if len(dKeys) != 0 {
		utils.Logger.Info("Finished destinations caching.")
	}
	if rpKeys == nil {
		utils.Logger.Info("Caching all rating plans")
		iter := db.C(colRpl).Find(nil).Select(bson.M{"key": 1}).Iter()
		rpKeys = make([]string, 0)
		for iter.Next(&keyResult) {
			rpKeys = append(rpKeys, utils.RATING_PLAN_PREFIX+keyResult.Key)
		}
		if err := iter.Close(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("rating plans: %s", err.Error())
		}
		CacheRemPrefixKey(utils.RATING_PLAN_PREFIX)
	} else if len(rpKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching rating plans: %v", rpKeys))
	}
	for _, key := range rpKeys {
		CacheRemKey(key)
		if _, err = ms.GetRatingPlan(key[len(utils.RATING_PLAN_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("rating plans: %s", err.Error())
		}
	}
	if len(rpKeys) != 0 {
		utils.Logger.Info("Finished rating plans caching.")
	}
	if rpfKeys == nil {
		utils.Logger.Info("Caching all rating profiles")
		iter := db.C(colRpf).Find(nil).Select(bson.M{"id": 1}).Iter()
		rpfKeys = make([]string, 0)
		for iter.Next(&idResult) {
			rpfKeys = append(rpfKeys, utils.RATING_PROFILE_PREFIX+idResult.Id)
		}
		if err := iter.Close(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("rating profiles: %s", err.Error())
		}
		CacheRemPrefixKey(utils.RATING_PROFILE_PREFIX)
	} else if len(rpfKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching rating profile: %v", rpfKeys))
	}
	for _, key := range rpfKeys {
		CacheRemKey(key)
		if _, err = ms.GetRatingProfile(key[len(utils.RATING_PROFILE_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("rating profiles: %s", err.Error())
		}
	}
	if len(rpfKeys) != 0 {
		utils.Logger.Info("Finished rating profile caching.")
	}
	if lcrKeys == nil {
		utils.Logger.Info("Caching LCR rules.")
		iter := db.C(colLcr).Find(nil).Select(bson.M{"key": 1}).Iter()
		lcrKeys = make([]string, 0)
		for iter.Next(&keyResult) {
			lcrKeys = append(lcrKeys, utils.LCR_PREFIX+keyResult.Key)
		}
		if err := iter.Close(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("lcr rules: %s", err.Error())
		}
		CacheRemPrefixKey(utils.LCR_PREFIX)
	} else if len(lcrKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching LCR rules: %v", lcrKeys))
	}
	for _, key := range lcrKeys {
		CacheRemKey(key)
		if _, err = ms.GetLCR(key[len(utils.LCR_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("lcr rules: %s", err.Error())
		}
	}
	if len(lcrKeys) != 0 {
		utils.Logger.Info("Finished LCR rules caching.")
	}
	// DerivedChargers caching
	if dcsKeys == nil {
		utils.Logger.Info("Caching all derived chargers")
		iter := db.C(colDcs).Find(nil).Select(bson.M{"key": 1}).Iter()
		dcsKeys = make([]string, 0)
		for iter.Next(&keyResult) {
			dcsKeys = append(dcsKeys, utils.DERIVEDCHARGERS_PREFIX+keyResult.Key)
		}
		if err := iter.Close(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("derived chargers: %s", err.Error())
		}
		CacheRemPrefixKey(utils.DERIVEDCHARGERS_PREFIX)
	} else if len(dcsKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching derived chargers: %v", dcsKeys))
	}
	for _, key := range dcsKeys {
		CacheRemKey(key)
		if _, err = ms.GetDerivedChargers(key[len(utils.DERIVEDCHARGERS_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("derived chargers: %s", err.Error())
		}
	}
	if len(dcsKeys) != 0 {
		utils.Logger.Info("Finished derived chargers caching.")
	}
	if actKeys == nil {
		utils.Logger.Info("Caching all actions")
		iter := db.C(colAct).Find(nil).Select(bson.M{"key": 1}).Iter()
		actKeys = make([]string, 0)
		for iter.Next(&keyResult) {
			actKeys = append(actKeys, utils.ACTION_PREFIX+keyResult.Key)
		}
		if err := iter.Close(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("actions: %s", err.Error())
		}
		CacheRemPrefixKey(utils.ACTION_PREFIX)
	} else if len(actKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching actions: %v", actKeys))
	}
	for _, key := range actKeys {
		CacheRemKey(key)
		if _, err = ms.GetActions(key[len(utils.ACTION_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("actions: %s", err.Error())
		}
	}
	if len(actKeys) != 0 {
		utils.Logger.Info("Finished actions caching.")
	}

	if aplKeys == nil {
		utils.Logger.Info("Caching all action plans")
		iter := db.C(colApl).Find(nil).Select(bson.M{"key": 1}).Iter()
		aplKeys = make([]string, 0)
		for iter.Next(&keyResult) {
			aplKeys = append(aplKeys, utils.ACTION_PLAN_PREFIX+keyResult.Key)
		}
		if err := iter.Close(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("action plans: %s", err.Error())
		}
		CacheRemPrefixKey(utils.ACTION_PLAN_PREFIX)
	} else if len(aplKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching action plans: %v", aplKeys))
	}
	for _, key := range aplKeys {
		CacheRemKey(key)
		if _, err = ms.GetActionPlan(key[len(utils.ACTION_PLAN_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("action plans: %s", err.Error())
		}
	}
	if len(aplKeys) != 0 {
		utils.Logger.Info("Finished action plans caching.")
	}

	if shgKeys == nil {
		CacheRemPrefixKey(utils.SHARED_GROUP_PREFIX)
	}
	if shgKeys == nil {
		utils.Logger.Info("Caching all shared groups")
		iter := db.C(colShg).Find(nil).Select(bson.M{"id": 1}).Iter()
		shgKeys = make([]string, 0)
		for iter.Next(&idResult) {
			shgKeys = append(shgKeys, utils.SHARED_GROUP_PREFIX+idResult.Id)
		}
		if err := iter.Close(); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("shared groups: %s", err.Error())
		}
	} else if len(shgKeys) != 0 {
		utils.Logger.Info(fmt.Sprintf("Caching shared groups: %v", shgKeys))
	}
	for _, key := range shgKeys {
		CacheRemKey(key)
		if _, err = ms.GetSharedGroup(key[len(utils.SHARED_GROUP_PREFIX):], true); err != nil {
			CacheRollbackTransaction()
			return fmt.Errorf("shared groups: %s", err.Error())
		}
	}
	if len(shgKeys) != 0 {
		utils.Logger.Info("Finished shared groups caching.")
	}
	CacheCommitTransaction()
	utils.Logger.Info(fmt.Sprintf("Cache rating creation time: %v", time.Since(start)))
	loadHistList, err := ms.GetLoadHistory(1, true)
	if err != nil || len(loadHistList) == 0 {
		utils.Logger.Info(fmt.Sprintf("could not get load history: %v (%v)", loadHistList, err))
	}
	var loadHist *utils.LoadInstance
	if len(loadHistList) == 0 {
		loadHist = &utils.LoadInstance{
			RatingLoadID:     utils.GenUUID(),
			AccountingLoadID: utils.GenUUID(),
			LoadID:           loadID,
			LoadTime:         time.Now(),
		}
	} else {
		loadHist = loadHistList[0]
		loadHist.RatingLoadID = utils.GenUUID()
		loadHist.LoadID = loadID
		loadHist.LoadTime = time.Now()
	}
	if err := ms.AddLoadHistory(loadHist, ms.loadHistorySize); err != nil {
		utils.Logger.Info(fmt.Sprintf("error saving load history: %v (%v)", loadHist, err))
		return err
	}
	ms.GetLoadHistory(1, true) // to load last instance in cache
	return utils.SaveCacheFileInfo(ms.cacheDumpDir, &utils.CacheFileInfo{Encoding: utils.MSGPACK, LoadInfo: loadHist})
}
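
Example #6 repeats the same key-listing idiom for nearly every collection: project the key field, iterate the cursor, and prefix each value. A sketch of that step as a standalone function; listPrefixedKeys is hypothetical and assumes the mgo types already used above (collections keyed by an id field would need the analogous projection on "id"):

// listPrefixedKeys returns every value of the "key" field in col, prefixed
// for use as a cache key.
func listPrefixedKeys(db *mgo.Database, col, prefix string) ([]string, error) {
	var keyResult struct{ Key string }
	iter := db.C(col).Find(nil).Select(bson.M{"key": 1}).Iter()
	keys := make([]string, 0)
	for iter.Next(&keyResult) {
		keys = append(keys, prefix+keyResult.Key)
	}
	if err := iter.Close(); err != nil {
		return nil, err
	}
	return keys, nil
}

With it, the rating-plan branch would shrink to rpKeys, err = listPrefixedKeys(db, colRpl, utils.RATING_PLAN_PREFIX) followed by CacheRemPrefixKey(utils.RATING_PLAN_PREFIX).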