func cache(z *big.Int, filename string) (err error) {
	var b []byte
	if b, err = z.GobEncode(); err == nil {
		err = ioutil.WriteFile(filename, b, 0644)
	}
	return
}
// Just like Split, but return an error when receiving a kill signal on the quit channel.
func splitOrQuit(z *big.Int, quit <-chan time.Time) (p, q *big.Int, err error) {
	q, r := big.NewInt(0), big.NewInt(0)
	if z.Sign() == 0 {
		return
	}
	max := roughSqrt(z)
	primes := sieve.BigSieve()
	for {
		select {
		case <-quit:
			err = ErrTimeout
			return
		case p = <-primes:
			if q.DivMod(z, p, r); r.Sign() == 0 {
				return
			}
			if max.Cmp(p) == -1 {
				q.SetInt64(1)
				p.Set(z)
				return
			}
		}
	}
}
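// Illustrative sketch (not part of the original package): one way to drive splitOrQuit with a
// deadline. time.After yields exactly the <-chan time.Time that splitOrQuit expects as its quit
// channel. The wrapper name and the 10-second duration are hypothetical.
func splitWithTimeoutSketch(z *big.Int) (p, q *big.Int, err error) {
	return splitOrQuit(z, time.After(10*time.Second))
}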
// For each input modulus 'x' and remainderTree parent 'y', compute z = (y%(x*x))/x; gcd(z, x)
func lowmemRemainderTreeFinal(input, moduli chan *gmp.Int, output chan<- Collision) {
	defer close(output)
	tmp := new(gmp.Int)
	for y := range input {
		for i := 0; i < 2; i++ {
			modulus, ok := <-moduli
			if !ok {
				log.Print("Odd number of moduli? (should only see this once)")
				continue
			}
			tmp.Mul(modulus, modulus)
			tmp.Rem(y, tmp)
			tmp.Quo(tmp, modulus)
			if tmp.GCD(nil, nil, tmp, modulus).BitLen() != 1 {
				q := new(gmp.Int).Quo(modulus, tmp)
				output <- Collision{
					Modulus: modulus,
					P:       tmp,
					Q:       q,
				}
				tmp = new(gmp.Int)
			}
		}
		y.Clear()
	}
}
// For each productTree node 'x', and remainderTree parent 'y', compute y%(x*x)
func remainderTreeLevel(tree [][]*gmp.Int, level int, wg *sync.WaitGroup, start, step int) {
	prevLevel := tree[level+1]
	thisLevel := tree[level]
	tmp := new(gmp.Int)
	for i := start; i < len(thisLevel); i += step {
		x := thisLevel[i]
		y := prevLevel[i/2]
		tmp.Mul(x, x)
		x.Rem(y, tmp)
	}
	wg.Done()
}
func tmpfileReadWriter(ch fileChannels) {
	tmpFile, err := ioutil.TempFile(".", "product")
	if err != nil {
		log.Panic(err)
	}
	length := make([]byte, 8)
	var writeCount uint64
	for inData := range ch.writeChan {
		buf := inData.Bytes()
		encodeLength(length, len(buf))
		if _, err := tmpFile.Write(length); err != nil {
			log.Panic(err)
		}
		if _, err := tmpFile.Write(buf); err != nil {
			log.Panic(err)
		}
		inData.Clear()
		writeCount++
	}
	if newOffset, e := tmpFile.Seek(0, 0); e != nil || newOffset != 0 {
		log.Panic(e)
	}
	var readCount uint64
	m := new(gmp.Int)
	for {
		if _, e := io.ReadFull(tmpFile, length); e != nil {
			if e == io.EOF {
				break
			}
			log.Panic(e)
		}
		buf := make([]byte, decodeLength(length))
		if _, e := io.ReadFull(tmpFile, buf); e != nil {
			log.Panic(e)
		}
		readCount++
		ch.readChan <- m.SetBytes(buf)
		m = new(gmp.Int)
	}
	if writeCount != readCount {
		log.Panicf("Read/write count mismatch: write=%v read=%v", writeCount, readCount)
	}
	close(ch.readChan)
	syscall.Unlink(tmpFile.Name())
	// tmpFile.Truncate(0);
}
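// Illustrative sketch (not part of the original package): encodeLength and decodeLength are
// defined elsewhere in the package. One plausible shape, assuming the 8-byte buffer above holds
// a big-endian uint64 length prefix (via encoding/binary), is shown purely for illustration;
// the real helpers may differ.
func encodeLengthSketch(dst []byte, n int) {
	binary.BigEndian.PutUint64(dst, uint64(n))
}

func decodeLengthSketch(src []byte) int {
	return int(binary.BigEndian.Uint64(src))
}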
// For each productTree node 'x', and remainderTree parent 'y', compute y%(x*x)
func lowmemRemainderTreeLevel(input chan *gmp.Int, productTree []fileChannels, finalOutput chan<- Collision) {
	tmp := new(gmp.Int)
	ch := productTree[len(productTree)-1]
	productTree = productTree[:len(productTree)-1]
	// We close the fileWriteChannel here so it kicks off reading now, instead of starting too early
	products := ch.readChan
	output := make(chan *gmp.Int, 2)
	defer close(output)
	if len(productTree) == 0 {
		ch.StartProducing()
		lowmemRemainderTreeFinal(input, products, finalOutput)
		return
	} else {
		go lowmemRemainderTreeLevel(output, productTree, finalOutput)
	}
	for y := range input {
		ch.StartProducing()
		x, ok := <-products
		if !ok {
			log.Panicf("Expecting more products")
		}
		tmp.Mul(x, x)
		x.Rem(y, tmp)
		output <- x
		x, ok = <-products
		if ok {
			tmp.Mul(x, x)
			x.Rem(y, tmp)
			output <- x
		}
		y.Clear()
	}
}
// For each input modulus 'x' and remainderTree parent 'y', compute z = (y%(x*x))/x; gcd(z, x)
func remainderTreeFinal(lastLevel, moduli []*gmp.Int, output chan<- Collision, wg *sync.WaitGroup, start, step int) {
	tmp := new(gmp.Int)
	for i := start; i < len(moduli); i += step {
		modulus := moduli[i]
		y := lastLevel[i/2]
		tmp.Mul(modulus, modulus)
		tmp.Rem(y, tmp)
		tmp.Quo(tmp, modulus)
		if tmp.GCD(nil, nil, tmp, modulus).BitLen() != 1 {
			q := new(gmp.Int).Quo(modulus, tmp)
			output <- Collision{
				Modulus: modulus,
				P:       tmp,
				Q:       q,
			}
			tmp = new(gmp.Int)
		}
	}
	wg.Done()
}
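// Illustrative sketch (not part of the original package): a tiny worked example of the final
// remainder-tree step above, written with math/big instead of gmp. Two toy "moduli" share the
// factor 5; with y = n1*n2 standing in for the remainder-tree parent, (y % (x*x)) / x followed
// by a GCD against x recovers the shared factor. The function name and values are hypothetical.
func Example_remainderTreeStep() {
	n1 := big.NewInt(15)          // 3 * 5
	n2 := big.NewInt(35)          // 5 * 7
	y := new(big.Int).Mul(n1, n2) // remainder-tree parent: 525

	z := new(big.Int).Mul(n1, n1) // x*x = 225
	z.Rem(y, z)                   // y % (x*x) = 75
	z.Quo(z, n1)                  // / x = 5
	z.GCD(nil, nil, z, n1)        // gcd(5, 15) = 5, the shared prime
	fmt.Println(z)
	// Output: 5
}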
// This performs the GCD of the product of all previous moduli with the current one.
// This uses around double the memory (minus quite a lot of overhead), and identifies
// problematic input in O(n) time, but has to do another O(n) scan for each collision
// to get the private key back.
// If there are no collisions, this algorithm isn't parallel at all.
// If we get a GCD that is the same as the modulus, we do a manual scan for either a colliding Q or identical moduli.
// If we get a GCD lower than the modulus, we have one private key, then do a manual scan for others.
func MulAccumGCD(moduli []*gmp.Int, collisions chan<- Collision) {
	accum := gmp.NewInt(1)
	gcd := new(gmp.Int)
	var wg sync.WaitGroup
	for i, modulus := range moduli {
		gcd.GCD(nil, nil, accum, modulus)
		if gcd.BitLen() != 1 {
			wg.Add(1)
			if gcd.Cmp(modulus) == 0 {
				go findGCD(&wg, moduli, i, collisions)
				continue
			} else {
				go findDivisors(&wg, moduli, i, gcd, collisions)
				gcd = new(gmp.Int)
			}
		}
		accum.Mul(accum, modulus)
	}
	wg.Wait()
	close(collisions)
}
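// Illustrative sketch (not part of the original package): the accumulate-and-GCD idea that
// MulAccumGCD implements, shown with math/big and toy values. findGCD/findDivisors live
// elsewhere in the package; here a non-trivial GCD is simply printed. Because the GCD below
// equals the whole modulus (35), this is the case where a follow-up scan of earlier moduli
// would be needed to split it. Names and values are hypothetical.
func Example_mulAccumGCD() {
	moduli := []*big.Int{
		big.NewInt(15), // 3 * 5
		big.NewInt(77), // 7 * 11
		big.NewInt(35), // 5 * 7 -- shares a factor with each earlier modulus
	}
	accum := big.NewInt(1)
	gcd := new(big.Int)
	for _, modulus := range moduli {
		gcd.GCD(nil, nil, accum, modulus)
		if gcd.BitLen() != 1 { // gcd > 1: this modulus shares a factor with an earlier one
			fmt.Println(gcd)
		}
		accum.Mul(accum, modulus)
	}
	// Output: 35
}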
func ExampleInt_SetString() {
	i := new(big.Int)
	i.SetString("644", 8) // octal
	fmt.Println(i)
	// Output: 420
}
// Rough square root of z: 2^ceil(BitLen/2), an upper bound on the true square root.
func roughSqrt(z *big.Int) *big.Int {
	return big.NewInt(0).Exp(big.NewInt(2), big.NewInt(int64((z.BitLen()+1)/2)), nil)
}
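// Illustrative sketch (not part of the original package): for a 10-bit z such as 1000,
// roughSqrt returns 2^((10+1)/2) = 2^5 = 32, which is at least floor(sqrt(1000)) = 31, so it is
// a safe cutoff for the trial division in splitOrQuit. The example function name is hypothetical.
func Example_roughSqrt() {
	fmt.Println(roughSqrt(big.NewInt(1000)))
	// Output: 32
}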