func New(r *rand.Rand) *Simplex {
	var s Simplex
	perm := r.Perm(256)
	copy(s[:256], perm)
	copy(s[256:], perm)
	return &s
}
// Shuffle a slice of strings.
func shuffleStrings(strings []string, rng *rand.Rand) []string {
	var shuffled = make([]string, len(strings))
	for i, j := range rng.Perm(len(strings)) {
		shuffled[j] = strings[i]
	}
	return shuffled
}
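A minimal usage sketch for the shuffleStrings helper above. It assumes shuffleStrings is in scope and that fmt and math/rand are imported; the fixed seed and input values are illustrative only.

func exampleShuffleStrings() {
	rng := rand.New(rand.NewSource(1)) // fixed seed: reproducible for this example only
	shuffled := shuffleStrings([]string{"a", "b", "c", "d"}, rng)
	fmt.Println(shuffled) // the same four strings, in a pseudo-random order
}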
// DepthFirstRandom traverses a graph depth first, but following arcs in
// random order among arcs from a single node.
//
// If Rand r is nil, the method creates a new source and generator for
// one-time use.
//
// Usage is otherwise like the DepthFirst method. See DepthFirst.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g AdjacencyList) DepthFirstRandom(start NI, bm *Bits, v OkNodeVisitor, r *rand.Rand) (ok bool) {
	if bm == nil {
		if v == nil {
			return false
		}
		bm = &Bits{}
	}
	if r == nil {
		r = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	var df func(n NI) bool
	df = func(n NI) bool {
		if bm.Bit(n) == 1 {
			return true
		}
		bm.SetBit(n, 1)
		if v != nil && !v(n) {
			return false
		}
		to := g[n]
		for _, i := range r.Perm(len(to)) {
			if !df(to[i]) {
				return false
			}
		}
		return true
	}
	return df(start)
}
func generateInput(rnd *rand.Rand, size int) []byte {
	permutations := rnd.Perm(size)
	data := make([]byte, size)
	for i, p := range permutations {
		data[i] = letters[p%len(letters)]
	}
	return data
}
func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
	p := r.Perm(len(eps))
	neps := make([]url.URL, len(eps))
	for i, k := range p {
		neps[i] = eps[k]
	}
	return neps
}
func (collection *exampleCollection) shuffle(r *rand.Rand) {
	sort.Sort(collection)
	permutation := r.Perm(len(collection.examples))
	shuffledExamples := make([]*example, len(collection.examples))
	for i, j := range permutation {
		shuffledExamples[i] = collection.examples[j]
	}
	collection.examples = shuffledExamples
}
func (e *Specs) Shuffle(r *rand.Rand) {
	sort.Sort(e)
	permutation := r.Perm(len(e.specs))
	shuffledSpecs := make([]*Spec, len(e.specs))
	for i, j := range permutation {
		shuffledSpecs[i] = e.specs[j]
	}
	e.specs = shuffledSpecs
}
func (container *containerNode) shuffle(r *rand.Rand) {
	sort.Sort(container)
	permutation := r.Perm(len(container.subjectAndContainerNodes))
	shuffledNodes := make([]node, len(container.subjectAndContainerNodes))
	for i, j := range permutation {
		shuffledNodes[i] = container.subjectAndContainerNodes[j]
	}
	container.subjectAndContainerNodes = shuffledNodes
}
// Sample n unique individuals from a slice of individuals.
func (indis Individuals) sample(n int, generator *rand.Rand) ([]int, Individuals) {
	var (
		sample  = make(Individuals, n)
		indexes = generator.Perm(len(indis))[:n]
	)
	for i, j := range indexes {
		sample[i] = indis[j]
	}
	return indexes, sample
}
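The sample method above uses the common rand.Perm idiom for sampling without replacement: take the first n entries of a random permutation of the indexes. A standard-library-only sketch of that idiom (the name sampleIndexes is illustrative):

// sampleIndexes returns k distinct indexes in [0, n), drawn uniformly
// without replacement.
func sampleIndexes(k, n int, rng *rand.Rand) []int {
	if k > n {
		k = n
	}
	// The first k elements of a random permutation of 0..n-1 are exactly
	// k distinct indexes chosen uniformly at random.
	return rng.Perm(n)[:k]
}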
func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) {
	if rnd == nil {
		rnd = NewRand()
	}
	for x := 0; x < round; x++ {
		for _, i := range rnd.Perm(n) {
			fn(i)
		}
	}
}
// BreadthFirst traverses a directed or undirected graph in breadth first order.
//
// Argument start is the start node for the traversal. If r is nil, nodes are
// visited in deterministic order. If a random number generator is supplied,
// nodes at each level are visited in random order.
//
// Argument f can be nil if you have no interest in the FromList path result.
// If FromList f is non-nil, the method populates f.Paths and sets f.MaxLen.
// It does not set f.Leaves. For convenience argument f can be a zero value
// FromList. If f.Paths is nil, the FromList is initialized first. If f.Paths
// is non-nil however, the FromList is used as is. The method uses a value of
// PathEnd.Len == 0 to identify unvisited nodes. Existing non-zero values
// will limit the traversal.
//
// Traversal calls the visitor function v for each node starting with node
// start. If v returns true, traversal continues. If v returns false, the
// traversal terminates immediately. PathEnd Len and From values are updated
// before calling the visitor function.
//
// On return f.Paths and f.MaxLen are set but not f.Leaves.
//
// Returned is the number of nodes visited and ok = true if the traversal
// ran to completion or ok = false if it was terminated by the visitor
// function returning false.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g AdjacencyList) BreadthFirst(start NI, r *rand.Rand, f *FromList, v OkNodeVisitor) (visited int, ok bool) {
	switch {
	case f == nil:
		e := NewFromList(len(g))
		f = &e
	case f.Paths == nil:
		*f = NewFromList(len(g))
	}
	rp := f.Paths
	// the frontier consists of nodes all at the same level
	frontier := []NI{start}
	level := 1
	// assign path when node is put on frontier
	rp[start] = PathEnd{Len: level, From: -1}
	for {
		f.MaxLen = level
		level++
		var next []NI
		if r == nil {
			for _, n := range frontier {
				visited++
				if !v(n) { // visit nodes as they come off frontier
					return
				}
				for _, nb := range g[n] {
					if rp[nb].Len == 0 {
						next = append(next, nb)
						rp[nb] = PathEnd{From: n, Len: level}
					}
				}
			}
		} else { // take nodes off frontier at random
			for _, i := range r.Perm(len(frontier)) {
				n := frontier[i]
				// remainder of block same as above
				visited++
				if !v(n) {
					return
				}
				for _, nb := range g[n] {
					if rp[nb].Len == 0 {
						next = append(next, nb)
						rp[nb] = PathEnd{From: n, Len: level}
					}
				}
			}
		}
		if len(next) == 0 {
			break
		}
		frontier = next
	}
	return visited, true
}
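A hedged usage sketch for BreadthFirst, based only on what the snippet shows: it assumes AdjacencyList is a slice of per-node neighbor lists, that OkNodeVisitor is a func(NI) bool type, and that a zero-value FromList is acceptable as the doc comment states; none of this is confirmed beyond the snippet.

func exampleBreadthFirst() {
	// A small directed graph: 0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3.
	g := AdjacencyList{
		0: {1, 2},
		1: {3},
		2: {3},
		3: {},
	}
	var f FromList // zero value; BreadthFirst initializes f.Paths
	visited, ok := g.BreadthFirst(0, nil, &f, func(n NI) bool {
		fmt.Println("visited node", n)
		return true // keep traversing
	})
	fmt.Println(visited, ok, f.MaxLen)
}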
// Apply permutation mutation.
func (mut MutPermute) Apply(indi *Individual, generator *rand.Rand) {
	for i := 0; i <= generator.Intn(mut.Max); i++ {
		// Choose two points on the genome
		var (
			points = generator.Perm(len(indi.Genome))[:2]
			i      = points[0]
			j      = points[1]
		)
		// Permute the genes
		indi.Genome[i], indi.Genome[j] = indi.Genome[j], indi.Genome[i]
	}
}
// genValues creates a slice containing a random number of random values
// that when scaled by adding minValue will fall into [min, max].
func (w *WeightedDist) genValues(rng *rand.Rand) {
	nValues := (w.maxValue + 1) - w.minValue
	values := rng.Perm(nValues)
	if nValues < minValues {
		nValues = minValues
	}
	if nValues > maxValues {
		nValues = maxValues
	}
	nValues = rng.Intn(nValues) + 1
	w.values = values[:nValues]
}
// selectRandom chooses up to count random store descriptors from the given
// store list.
func selectRandom(randGen *rand.Rand, count int, sl StoreList) []*roachpb.StoreDescriptor {
	var descs []*roachpb.StoreDescriptor
	// Randomly permute available stores matching the required attributes.
	for _, idx := range randGen.Perm(len(sl.stores)) {
		// Add this store; exit loop if we've satisfied count.
		descs = append(descs, sl.stores[idx])
		if len(descs) >= count {
			break
		}
	}
	if len(descs) == 0 {
		return nil
	}
	return descs
}
func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) {
	p := &h.dbHarness
	t := p.t
	db := p.db

	batch := new(Batch)
	// Range over the permuted values (not just the indexes) so the keys are
	// actually written in shuffled order.
	for _, i := range rnd.Perm(n) {
		batch.Reset()
		batch.Put(tkey(i), tval(i, ctValSize))
		err := db.Write(batch, p.wo)
		if err != nil {
			t.Fatal("write error: ", err)
		}
	}
}
// LatinHypercube generates len(batch) samples using Latin hypercube sampling
// from the given distribution. If src != nil, it will be used to generate
// random numbers, otherwise rand.Float64 will be used.
//
// Latin hypercube sampling divides the cumulative distribution function into equally
// spaced bins and guarantees that one sample is generated per bin. Within each bin,
// the location is randomly sampled. The distuv.UnitNormal variable can be used
// for easy generation from the unit interval.
func LatinHypercube(batch []float64, q distuv.Quantiler, src *rand.Rand) {
	n := len(batch)
	var perm []int
	var f64 func() float64
	if src != nil {
		f64 = src.Float64
		perm = src.Perm(n)
	} else {
		f64 = rand.Float64
		perm = rand.Perm(n)
	}
	for i := range batch {
		v := f64()/float64(n) + float64(i)/float64(n)
		batch[perm[i]] = q.Quantile(v)
	}
}
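To make the stratification in the doc comment concrete, here is a standard-library-only sketch (the name latinHypercube1D is illustrative): each of the n equally spaced bins of [0, 1) receives exactly one uniform draw, and rand.Perm scatters the bins across the output positions, just as the perm slice does above.

func latinHypercube1D(n int, rng *rand.Rand) []float64 {
	out := make([]float64, n)
	for i, j := range rng.Perm(n) {
		// One draw from bin i, i.e. uniform on [i/n, (i+1)/n), written to a
		// random output position j so the result is not ordered by bin.
		out[j] = (float64(i) + rng.Float64()) / float64(n)
	}
	return out
}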
// LatinHypercube generates len(samples) samples using Latin hypercube sampling
// from the given distribution. If src != nil, it will be used to generate
// random numbers, otherwise rand.Float64 will be used.
//
// Latin hypercube sampling divides the cumulative distribution function into equally
// spaced bins and guarantees that one sample is generated per bin. Within each bin,
// the location is randomly sampled. The dist.UnitNormal variable can be used
// for easy generation from the unit interval.
func LatinHypercube(samples []float64, q dist.Quantiler, src *rand.Rand) {
	n := len(samples)
	var perm []int
	var f64 func() float64
	if src != nil {
		f64 = src.Float64
		perm = src.Perm(n)
	} else {
		f64 = rand.Float64
		perm = rand.Perm(n)
	}
	for i := range samples {
		v := f64()/float64(n) + float64(i)/float64(n)
		samples[perm[i]] = q.Quantile(v)
	}
}
func (d *Director) randomPartition(rng *rand.Rand) []*network.Link {
	count := state.NodeCount()

	// If there are fewer than three nodes, netsplits are pretty
	// boring.
	if count < 3 {
		return []*network.Link{d.net.Links()[0]}
	}

	perm := rng.Perm(count)
	splitPoint := rng.Intn(count-2) + 1
	split := make([]uint, 0)
	for i := 0; i < splitPoint; i++ {
		split = append(split, uint(perm[i]))
	}
	log.Debugf("Made a netsplit: %v", split)
	return d.net.FindPerimeter(split)
}
func NewRandomSimilarityProvider(r *rand.Rand) *RandomSimilarityProvider {
	sims := make([]Similarity, len(allSims))
	for i, v := range r.Perm(len(allSims)) {
		sims[i] = allSims[v]
		assert(sims[i] != nil)
	}
	ans := &RandomSimilarityProvider{
		Locker:           &sync.Mutex{},
		defaultSim:       NewDefaultSimilarity(),
		previousMappings: make(map[string]Similarity),
		perFieldSeed:     r.Int(),
		coordType:        r.Intn(3),
		shouldQueryNorm:  r.Intn(2) == 0,
		knownSims:        sims,
	}
	ans.PerFieldSimilarityWrapper = NewPerFieldSimilarityWrapper(ans)
	return ans
}
func NewRandomSimilarityProvider(r *rand.Rand) *RandomSimilarityProvider {
	sims := make([]Similarity, len(allSims))
	for i, v := range r.Perm(len(allSims)) {
		sims[i] = allSims[v]
	}
	return &RandomSimilarityProvider{
		PerFieldSimilarityWrapper: NewPerFieldSimilarityWrapper(func(name string) Similarity {
			panic("not implemented yet")
		}),
		Locker:           &sync.Mutex{},
		defaultSim:       NewDefaultSimilarity(),
		previousMappings: make(map[string]Similarity),
		perFieldSeed:     r.Int(),
		coordType:        r.Intn(3),
		shouldQueryNorm:  r.Intn(2) == 0,
		knownSims:        sims,
	}
}
// SpofMonkey detects single points of failure by netsplitting one node at a
// time away from the rest of the cluster and ensuring that the cluster
// continues to make progress.
func (d *Director) SpofMonkey(rng *rand.Rand, intensity float64) bool {
	if spofIndex >= state.NodeCount() {
		return false
	} else if len(spofOrder) != state.NodeCount() {
		// Unfortunately we can't do this in an init() because we defer
		// the parsing of flag arguments until later.
		spofOrder = rng.Perm(state.NodeCount())
	}

	i := spofOrder[spofIndex]
	log.Printf("[monkey] Testing if %v is a single point of failure", d.agents[i])

	netsplit := d.net.FindPerimeter([]uint{uint(i)})
	spofIndex++

	d.agents[i].Freeze()
	for _, target := range netsplit {
		target.GoodbyeForever()
	}

	// We need to make sure that the cluster is capable of servicing
	// requests that no member of the cluster has ever seen before in order
	// to ensure that the cluster is making progress. To do this, we
	// determine the request ID of the last request that has been created,
	// and make sure that we see a request that was created *after* that
	// (any requests generated after targetRequestId were necessarily
	// generated after the actions above).
	targetRequestId := state.LastGeneratedRequest()
	for <-state.GotRequest() <= targetRequestId {
	}

	log.Printf("[monkey] %v is (probably) not a single point of failure!", d.agents[i])

	for _, target := range netsplit {
		target.WhyHelloThere()
	}
	d.agents[i].Thaw()

	return true
}
// Choose random connections such that each node has a connection in the
// returned list.
func (d *Director) randomNeighborLinks(rng *rand.Rand) []*network.Link {
	links := d.net.Links()
	perm := rng.Perm(len(links))

	cover := make(map[uint]bool, state.NodeCount())
	for i := 0; i < state.NodeCount(); i++ {
		cover[uint(i)] = false
	}

	out := make([]*network.Link, 0, (state.NodeCount()+1)/2)
	for _, i := range perm {
		link := links[i]
		a1, a2 := link.Agents()
		if !cover[a1] || !cover[a2] {
			out = append(out, link)
			cover[a1] = true
			cover[a2] = true
		}
	}
	return out
}
func makeRoutes(ps []place, r *rand.Rand) {
	for i := range ps {
		p := &ps[i]
		others := r.Perm(len(ps))
		percentageToConnectTo := r.Float32()
		next := i + 1
		if next == len(ps) {
			next = 0
		}
		p.neighbours = append(p.neighbours, dist{to: int32(next), cost: calcDist(p, &ps[next])})
		for _, o := range others {
			var newNeighbour dist
			if r.Float32() < percentageToConnectTo && o != next && o != i {
				newNeighbour = dist{to: int32(o), cost: calcDist(p, &ps[o])}
			} else {
				newNeighbour = dist{-1, -1}
			}
			p.neighbours = append(p.neighbours, newNeighbour)
		}
	}
}
// selectRandom chooses up to count random store descriptors from the given
// store list.
func selectRandom(randGen *rand.Rand, count int, sl StoreList, excluded nodeIDSet) []*roachpb.StoreDescriptor {
	var descs []*roachpb.StoreDescriptor
	// Randomly permute available stores matching the required attributes.
	for _, idx := range randGen.Perm(len(sl.stores)) {
		desc := sl.stores[idx]
		// Skip if store is in excluded set.
		if _, ok := excluded[desc.Node.NodeID]; ok {
			continue
		}
		// Add this store; exit loop if we've satisfied count.
		descs = append(descs, desc)
		if len(descs) >= count {
			break
		}
	}
	if len(descs) == 0 {
		return nil
	}
	return descs
}
// SampleGen has a parametric type:
//
//	func SampleGen(population []A, n int, rng *rand.Rand) []A
//
// SampleGen returns a random sample of size `n` from a list
// `population` using a given random number generator `rng`.
// All elements in `population` have an equal chance of being selected.
// If `n` is greater than the size of `population`, then `n` is set to
// the size of the population.
func SampleGen(population interface{}, n int, rng *rand.Rand) interface{} {
	chk := ty.Check(
		new(func([]ty.A, int, *rand.Rand) []ty.A),
		population, n, rng)
	rpop, tsamp := chk.Args[0], chk.Returns[0]

	popLen := rpop.Len()
	if n == 0 {
		return reflect.MakeSlice(tsamp, 0, 0).Interface()
	}
	if n > popLen {
		n = popLen
	}

	// TODO(burntsushi): Implement an algorithm that doesn't depend on
	// the size of the population.
	rsamp := reflect.MakeSlice(tsamp, n, n)
	choices := rng.Perm(popLen)
	for i := 0; i < n; i++ {
		rsamp.Index(i).Set(rpop.Index(choices[i]))
	}
	return rsamp.Interface()
}
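A hedged usage sketch for SampleGen, relying only on the parametric type documented above; it assumes SampleGen (and the ty package it depends on) is imported from its home package, whose import path is not shown in the snippet.

func exampleSampleGen() {
	rng := rand.New(rand.NewSource(7)) // fixed seed, illustrative only
	pop := []string{"a", "b", "c", "d", "e"}
	// The generic result comes back as interface{} and must be asserted
	// back to the concrete slice type.
	picked := SampleGen(pop, 3, rng).([]string)
	fmt.Println(picked) // three distinct elements of pop, in random order
}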
// Styled after the Graph500 example code. Not well tested currently.
// Graph500 example generates undirected only. No idea if the directed variant
// here is meaningful or not.
//
// note mma returns arc size ma for dir=true, but returns size m for dir=false
func kronecker(scale uint, edgeFactor float64, dir bool, r *rand.Rand) (g AdjacencyList, mma int) {
	if r == nil {
		r = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	N := NI(1 << scale)                  // node extent
	M := int(edgeFactor*float64(N) + .5) // number of arcs/edges to generate
	a, b, c := 0.57, 0.19, 0.19          // initiator probabilities
	ab := a + b
	cNorm := c / (1 - ab)
	aNorm := a / ab
	ij := make([][2]NI, M)
	var bm Bits
	var nNodes int
	for k := range ij {
		var i, j NI
		for b := NI(1); b < N; b <<= 1 {
			if r.Float64() > ab {
				i |= b
				if r.Float64() > cNorm {
					j |= b
				}
			} else if r.Float64() > aNorm {
				j |= b
			}
		}
		if bm.Bit(i) == 0 {
			bm.SetBit(i, 1)
			nNodes++
		}
		if bm.Bit(j) == 0 {
			bm.SetBit(j, 1)
			nNodes++
		}
		r := r.Intn(k + 1) // shuffle edges as they are generated
		ij[k] = ij[r]
		ij[r] = [2]NI{i, j}
	}
	p := r.Perm(nNodes) // mapping to shuffle IDs of non-isolated nodes
	px := 0
	rn := make([]NI, N)
	for i := range rn {
		if bm.Bit(NI(i)) == 1 {
			rn[i] = NI(p[px]) // fill lookup table
			px++
		}
	}
	g = make(AdjacencyList, nNodes)
ij:
	for _, e := range ij {
		if e[0] == e[1] {
			continue // skip loops
		}
		ri, rj := rn[e[0]], rn[e[1]]
		for _, nb := range g[ri] {
			if nb == rj {
				continue ij // skip parallel edges
			}
		}
		g[ri] = append(g[ri], rj)
		mma++
		if !dir {
			g[rj] = append(g[rj], ri)
		}
	}
	return
}
func RandPortMap(r *rand.Rand) PortMap {
	return PortMap(r.Perm(Nhost))
}