func (apl Actions) Clone() (interface{}, error) {
	var cln Actions
	if err := utils.Clone(apl, &cln); err != nil {
		return nil, err
	}
	return cln, nil
}
func (apl *ActionPlan) Clone() (interface{}, error) {
	cln := new(ActionPlan)
	if err := utils.Clone(*apl, cln); err != nil {
		return nil, err
	}
	return cln, nil
}
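// Every snippet in this section delegates the deep copy to utils.Clone. A
// minimal sketch of such a helper, assuming an encoding/gob round-trip
// (illustrative only; the real utils.Clone implementation may differ,
// requires "bytes" and "encoding/gob"):
func Clone(a, b interface{}) error {
	buff := new(bytes.Buffer)
	if err := gob.NewEncoder(buff).Encode(a); err != nil {
		return err
	}
	// b must be a pointer gob can decode into; note gob copies exported fields only
	return gob.NewDecoder(buff).Decode(b)
}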
// replicateSessions will replicate session based on configuration
func (smg *SMGeneric) replicateSessions(cgrID string) (err error) {
	if smg.cgrCfg.SmGenericConfig.DebitInterval != 0 {
		return
	}
	smg.aSessionsMux.RLock()
	var aSessions []*SMGSession
	// clone the sessions under the read lock so the replication goroutines work on their own copy
	if err = utils.Clone(smg.activeSessions[cgrID], &aSessions); err != nil {
		smg.aSessionsMux.RUnlock() // release the lock on the error path too
		return
	}
	smg.aSessionsMux.RUnlock()
	var wg sync.WaitGroup
	for _, rplConn := range smg.smgReplConns {
		if rplConn.Synchronous {
			wg.Add(1)
		}
		go func(conn rpcclient.RpcClientConnection, synchronous bool, ss []*SMGSession) {
			var reply string
			argSet := ArgsSetPassiveSessions{CGRID: cgrID, Sessions: ss}
			conn.Call("SMGenericV1.SetPassiveSessions", argSet, &reply) // best-effort: replication errors are ignored
			if synchronous {
				wg.Done()
			}
		}(rplConn.Connection, rplConn.Synchronous, aSessions)
	}
	wg.Wait() // wait for synchronous replication to finish
	return
}
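// The mixed sync/async fan-out above is worth isolating: synchronous peers
// are counted in the WaitGroup while asynchronous ones are fire-and-forget.
// A self-contained sketch of the same pattern (peer and fanOut are
// hypothetical, standing in for smgReplConns; requires "sync"; send must be
// safe for concurrent use):
type peer struct {
	name        string
	synchronous bool
}

func fanOut(peers []peer, send func(name string)) {
	var wg sync.WaitGroup
	for _, p := range peers {
		if p.synchronous {
			wg.Add(1)
		}
		go func(p peer) {
			send(p.name) // stands in for the SMGenericV1.SetPassiveSessions RPC
			if p.synchronous {
				wg.Done()
			}
		}(p)
	}
	wg.Wait() // returns once every synchronous peer has been served
}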
// AsBareDiameterMessage converts the CCA into a bare DiameterMessage
func (self *CCA) AsBareDiameterMessage() *diam.Message {
	var m diam.Message
	utils.Clone(self.diamMessage, &m) // clone so adding AVPs does not modify the original message
	m.NewAVP(avp.SessionID, avp.Mbit, 0, datatype.UTF8String(self.SessionId))
	m.NewAVP(avp.OriginHost, avp.Mbit, 0, datatype.DiameterIdentity(self.OriginHost))
	m.NewAVP(avp.OriginRealm, avp.Mbit, 0, datatype.DiameterIdentity(self.OriginRealm))
	m.NewAVP(avp.AuthApplicationID, avp.Mbit, 0, datatype.Unsigned32(self.AuthApplicationId))
	m.NewAVP(avp.CCRequestType, avp.Mbit, 0, datatype.Enumerated(self.CCRequestType))
	m.NewAVP(avp.CCRequestNumber, avp.Mbit, 0, datatype.Enumerated(self.CCRequestNumber))
	m.NewAVP(avp.ResultCode, avp.Mbit, 0, datatype.Unsigned32(self.ResultCode))
	return &m
}
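// Usage sketch (writeCCA is hypothetical; it assumes the go-diameter
// *diam.Message implements io.WriterTo, and requires "io"):
func writeCCA(cca *CCA, w io.Writer) error {
	m := cca.AsBareDiameterMessage()
	_, err := m.WriteTo(w) // serialize the bare message onto the transport
	return err
}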
// SharingSignature estimates whether the increments are the same, ignoring
// the CompressFactor
func (incs Increments) SharingSignature(other Increments) bool {
	var otherCloned Increments // clone so Compress does not affect the original structure
	if err := utils.Clone(other, &otherCloned); err != nil {
		return false
	}
	var thisCloned Increments
	if err := utils.Clone(incs, &thisCloned); err != nil {
		return false
	}
	otherCloned.Compress()
	thisCloned.Compress()
	if len(otherCloned) < len(thisCloned) { // protect the index below in case the slices differ in size
		return false
	}
	for index, i := range thisCloned {
		if !i.Equal(otherCloned[index]) {
			return false
		}
	}
	return true
}
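// Note the asymmetry above: after compression the receiver is only checked
// to be a prefix of other, which is why the doc comment calls it an
// estimate. A hypothetical reduction of the same shape over plain ints:
func isPrefix(this, other []int) bool {
	if len(other) < len(this) {
		return false
	}
	for i, v := range this {
		if v != other[i] {
			return false
		}
	}
	return true // every element of this matched; other may still be longer
}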
// Merge merges the two timespans if they share the same charging signature;
// useful to run in the SM before compressing
func (ts *TimeSpan) Merge(other *TimeSpan) bool {
	if !ts.SharingSignature(other) {
		return false
	} else if !ts.TimeEnd.Equal(other.TimeStart) { // other needs to continue ts for a merge to be possible
		return false
	}
	var otherCloned TimeSpan // clone so the decompress does not affect the original structure
	if err := utils.Clone(*other, &otherCloned); err != nil {
		return false
	}
	otherCloned.Increments.Decompress()
	ts.Increments.Decompress()
	ts.TimeEnd = otherCloned.TimeEnd
	ts.Cost += otherCloned.Cost
	ts.DurationIndex = otherCloned.DurationIndex
	ts.Increments = append(ts.Increments, otherCloned.Increments...)
	ts.Increments.Compress()
	return true
}
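// Usage sketch for Merge: collapse a time-ordered slice of timespans,
// keeping those that refuse to merge (mergeRun is hypothetical):
func mergeRun(spans []*TimeSpan) []*TimeSpan {
	if len(spans) == 0 {
		return spans
	}
	out := make([]*TimeSpan, 0, len(spans))
	out = append(out, spans[0])
	for _, ts := range spans[1:] {
		if !out[len(out)-1].Merge(ts) { // false when signatures differ or the spans are not contiguous
			out = append(out, ts)
		}
	}
	return out
}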
// saveOperations merges the sum of costs and sends it to CDRS for storage.
// originID could have been changed from the original event, hence passing it as argument here;
// cc is passed as a clone of the original to avoid concurrency issues
func (self *SMGSession) saveOperations(originID string) error {
	if len(self.CallCosts) == 0 {
		return nil // there are no costs to save, ignore the operation
	}
	cc := self.CallCosts[0] // was merged in the close method
	cc.Round()
	roundIncrements := cc.GetRoundIncrements()
	if len(roundIncrements) != 0 {
		cd := cc.CreateCallDescriptor()
		cd.CgrID = self.CD.CgrID
		cd.RunID = self.CD.RunID
		cd.Increments = roundIncrements
		var response float64
		if err := self.rater.Call("Responder.RefundRounding", cd, &response); err != nil {
			return err
		}
	}
	smCost := &engine.SMCost{
		CGRID:       self.EventStart.GetCgrId(self.Timezone),
		CostSource:  utils.SESSION_MANAGER_SOURCE,
		RunID:       self.RunID,
		OriginHost:  self.EventStart.GetOriginatorIP(utils.META_DEFAULT),
		OriginID:    originID,
		Usage:       self.TotalUsage.Seconds(),
		CostDetails: cc,
	}
	if len(smCost.CostDetails.Timespans) > MaxTimespansInCost { // the callCost is too big to store inline, store it in the background
		if err := utils.Clone(cc, &smCost.CostDetails); err != nil { // clone to avoid concurrency on cc
			utils.Logger.Err(fmt.Sprintf("<SMGeneric> Could not clone callcost for sessionID: %s, RunID: %s, error: %s",
				originID, self.RunID, err.Error()))
		}
		go func(smCost *engine.SMCost) { // could take longer than the locked stage
			if err := self.storeSMCost(smCost); err != nil {
				utils.Logger.Err(fmt.Sprintf("<SMGeneric> Could not store callcost for sessionID: %s, RunID: %s, error: %s",
					originID, self.RunID, err.Error()))
			}
		}(smCost)
	} else {
		return self.storeSMCost(smCost)
	}
	return nil
}
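// The Clone call before the goroutine above is the general defense against
// races when a structure outlives its critical section. A minimal
// illustration, with deepCopy and storeInBackground as hypothetical
// stand-ins (requires "encoding/json" and "fmt"):
type cost struct{ Amount float64 }

func deepCopy(src, dst interface{}) error {
	b, err := json.Marshal(src) // JSON round-trip: copies exported fields only
	if err != nil {
		return err
	}
	return json.Unmarshal(b, dst)
}

func storeInBackground(shared *cost) {
	var snapshot cost
	if err := deepCopy(shared, &snapshot); err != nil {
		return
	}
	go func() {
		fmt.Println(snapshot.Amount) // the goroutine owns snapshot outright
	}()
	shared.Amount += 1 // later mutations of shared cannot race with the goroutine
}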
// Clone returns a deep copy of the CDR
func (cdr *CDR) Clone() *CDR {
	var clnedCDR CDR
	utils.Clone(cdr, &clnedCDR) // error ignored; a failed clone yields a zero-valued CDR
	return &clnedCDR
}
// Clone returns a deep copy of the Action
func (a *Action) Clone() *Action {
	var clonedAction Action
	utils.Clone(a, &clonedAction) // error ignored; a failed clone yields a zero-valued Action
	return &clonedAction
}
// GetQueue returns a deep copy of the scheduler queue, so callers can use it
// without holding the lock
func (s *Scheduler) GetQueue() (queue engine.ActionTimingPriorityList) {
	s.RLock()
	defer s.RUnlock()
	utils.Clone(s.queue, &queue) // error ignored; a failed clone yields an empty queue
	return queue
}
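// Usage sketch: because GetQueue hands back a deep copy, a caller may
// iterate or even mutate the result without holding the scheduler lock
// (dumpQueue is hypothetical; requires "fmt"):
func dumpQueue(s *Scheduler) {
	for i, at := range s.GetQueue() {
		fmt.Printf("queue[%d]: %+v\n", i, at)
	}
}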