Example #1
// Sqrt returns the square root of n, computed with Newton's method at n's precision.
func Sqrt(n *big.Float) *big.Float {
	prec := n.Prec()

	x := new(big.Float).SetPrec(prec).SetInt64(1)
	z := new(big.Float).SetPrec(prec).SetInt64(1)

	half := new(big.Float).SetPrec(prec).SetFloat64(0.5)
	t := new(big.Float).SetPrec(prec)

	for {
		z.Copy(x)

		t.Mul(x, x)
		t.Sub(t, n)
		t.Quo(t, x)
		t.Mul(t, half)
		x.Sub(x, t)

		if x.Cmp(z) == 0 {
			break
		}
	}

	return x
}
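A minimal usage sketch (not part of the snippet above, and assuming the Sqrt function above is compiled into the same package): computing √2 at 200 bits of precision and comparing with the float64 constant.

package main

import (
	"fmt"
	"math"
	"math/big"
)

func main() {
	two := new(big.Float).SetPrec(200).SetInt64(2)
	fmt.Println(Sqrt(two).Text('g', 40)) // 1.4142135623730950488...
	fmt.Println(math.Sqrt2)              // float64 reference value
}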
Example #2
func mandelbrotFloat(a, b *big.Float) color.Color {
	var x, y, nx, ny, x2, y2, f2, f4, r2, tmp big.Float
	f2.SetInt64(2)
	f4.SetInt64(4)
	x.SetInt64(0)
	y.SetInt64(0)

	// Some big.Float operations panic with ErrNaN (e.g. ∞−∞ once the orbit
	// overflows); recover so a single bad pixel doesn't abort the render.
	defer func() { recover() }()

	for n := uint8(0); n < iterations; n++ {
		// No need to update x2 and y2 here:
		// they were already computed at the end of the previous iteration.
		nx.Sub(&x2, &y2)
		nx.Add(&nx, a)

		tmp.Mul(&x, &y)
		ny.Mul(&f2, &tmp)
		ny.Add(&ny, b)

		x.Set(&nx)
		y.Set(&ny)

		x2.Mul(&x, &x)
		y2.Mul(&y, &y)
		r2.Add(&x2, &y2)

		if r2.Cmp(&f4) > 0 {
			return color.Gray{255 - contrast*n}
		}
	}
	return color.Black
}
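A hypothetical driver sketch (not from the source) showing how mandelbrotFloat might be invoked over an image grid. The constants iterations and contrast are assumptions standing in for whatever the surrounding project defines.

package main

import (
	"image"
	"image/png"
	"math/big"
	"os"
)

const (
	iterations = 200 // assumed value
	contrast   = 1   // assumed value
	width      = 256
	height     = 256
)

func main() {
	img := image.NewGray(image.Rect(0, 0, width, height))
	for py := 0; py < height; py++ {
		b := big.NewFloat(float64(py)/height*4 - 2) // imaginary part in [-2, 2)
		for px := 0; px < width; px++ {
			a := big.NewFloat(float64(px)/width*4 - 2) // real part in [-2, 2)
			img.Set(px, py, mandelbrotFloat(a, b))
		}
	}
	png.Encode(os.Stdout, img)
}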
Example #3
func quo(x, y *complexFloat) *complexFloat {
	z := newComplexFloat()
	denominator := new(big.Float).SetPrec(prec)
	c2 := new(big.Float).SetPrec(prec)
	d2 := new(big.Float).SetPrec(prec)
	c2.Mul(y.r, y.r)
	d2.Mul(y.i, y.i)
	denominator.Add(c2, d2)

	if denominator.Cmp(zero) == 0 || denominator.IsInf() {
		return newComplexFloat()
	}

	ac := new(big.Float).SetPrec(prec)
	bd := new(big.Float).SetPrec(prec)
	ac.Mul(x.r, y.r)
	bd.Mul(x.i, y.i)

	bc := new(big.Float).SetPrec(prec)
	ad := new(big.Float).SetPrec(prec)
	bc.Mul(x.i, y.r)
	ad.Mul(x.r, y.i)

	z.r.Add(ac, bd)
	z.r.Quo(z.r, denominator)

	z.i.Add(bc, ad.Neg(ad))
	z.i.Quo(z.i, denominator)

	return z
}
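The quo snippet refers to identifiers that are not shown (complexFloat, newComplexFloat, prec, zero). A hypothetical reconstruction of those declarations, plus a small division check, might look like this; the real project's definitions may differ. The function implements (a+bi)/(c+di) = ((ac+bd) + (bc−ad)i) / (c²+d²), assuming quo above is compiled into the same package.

package main

import (
	"fmt"
	"math/big"
)

const prec = 200 // assumed working precision

var zero = new(big.Float).SetPrec(prec)

type complexFloat struct {
	r, i *big.Float // real and imaginary parts
}

func newComplexFloat() *complexFloat {
	return &complexFloat{
		r: new(big.Float).SetPrec(prec),
		i: new(big.Float).SetPrec(prec),
	}
}

func main() {
	x := &complexFloat{r: big.NewFloat(3), i: big.NewFloat(4)} // 3 + 4i
	y := &complexFloat{r: big.NewFloat(1), i: big.NewFloat(2)} // 1 + 2i
	z := quo(x, y)
	fmt.Println(z.r, z.i) // 2.2 -0.4, i.e. 2.2 − 0.4i
}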
Example #4
File: pslq.go Project: ncw/pslq
// Compute the square root of n using Newton's Method. We start with
// an initial estimate for sqrt(n), and then iterate
//     x_{i+1} = 1/2 * ( x_i + (n / x_i) )
// Result is returned in x
func (e *Pslq) Sqrt(n, x *big.Float) {
	if n == x {
		panic("need distinct input and output")
	}
	if n.Sign() == 0 {
		x.Set(n)
		return
	} else if n.Sign() < 0 {
		panic("Sqrt of negative number")
	}
	prec := n.Prec()

	// Use the floating point square root as initial estimate
	nFloat64, _ := n.Float64()
	x.SetPrec(prec).SetFloat64(math.Sqrt(nFloat64))

	// We use t as a temporary variable. There's no need to set its precision
	// since big.Float values with unset (== 0) precision automatically assume
	// the largest precision of the arguments when used as the result (receiver)
	// of a big.Float operation.
	var t big.Float

	// Iterate.
	for {
		t.Quo(n, x)        // t = n / x_i
		t.Add(x, &t)       // t = x_i + (n / x_i)
		t.Mul(&e.half, &t) // x_{i+1} = 0.5 * t
		if x.Cmp(&t) == 0 {
			// Exit loop if no change to result
			break
		}
		x.Set(&t)
	}
}
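As a point of reference (not part of the project above): Go 1.10 and later expose (*big.Float).Sqrt in the standard library, which can be used to cross-check Newton-based implementations like this one.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	n := new(big.Float).SetPrec(256).SetInt64(2)
	fmt.Println(new(big.Float).Sqrt(n).Text('g', 50)) // √2 to 50 significant digits
}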
Example #5
// floatLog computes natural log(x) using the Maclaurin series for log(1-x).
func floatLog(x *big.Float) *big.Float {
	if x.Sign() <= 0 {
		Errorf("log of non-positive value")
	}
	// The series wants x < 1, and log 1/x == -log x, so exploit that.
	invert := false
	if x.Cmp(floatOne) > 0 {
		invert = true
		xx := newF()
		xx.Quo(floatOne, x)
		x = xx
	}

	// x = mantissa * 2**exp, and 0.5 <= mantissa < 1.
	// So log(x) is log(mantissa)+exp*log(2), and 1-x will be
	// between 0 and 0.5, so the series for 1-x will converge well.
	// (The series converges slowly in general.)
	mantissa := newF()
	exp2 := x.MantExp(mantissa)
	exp := newF().SetInt64(int64(exp2))
	exp.Mul(exp, floatLog2)
	if invert {
		exp.Neg(exp)
	}

	// y = 1-x (whereupon x = 1-y and we use that in the series).
	y := newF().SetInt64(1)
	y.Sub(y, mantissa)

	// The Maclaurin series for log(1-y) == log(x) is: -y - y²/2 - y³/3 ...

	yN := newF().Set(y)
	term := newF()
	n := newF().Set(floatOne)
	z := newF()

	// This is the slowest-converging series, so we add a factor of ten to the cutoff.
	// Only necessary when FloatPrec is at or beyond constPrecisionInBits.
	loop := newLoop("log", y, 40)
	for {
		term.Set(yN)
		term.Quo(term, n)
		z.Sub(z, term)
		if loop.terminate(z) {
			break
		}
		// Advance y**index (multiply by y).
		yN.Mul(yN, y)
		n.Add(n, floatOne)
	}

	if invert {
		z.Neg(z)
	}
	z.Add(z, exp)

	return z
}
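A float64 sanity check (not from the source) of the decomposition the snippet relies on, log(x) = log(mantissa) + exp·log 2. math.Frexp uses the same 0.5 ≤ mantissa < 1 convention as big.Float's MantExp.

package main

import (
	"fmt"
	"math"
)

func main() {
	x := 123.456
	m, e := math.Frexp(x) // x == m * 2**e, with 0.5 <= m < 1
	fmt.Println(math.Log(x))                       // direct
	fmt.Println(math.Log(m) + float64(e)*math.Ln2) // via the decomposition
}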
Example #6
func compareJSONNumber(a, b json.Number) int {
	bigA, ok := new(big.Float).SetString(string(a))
	if !ok {
		panic("illegal value")
	}
	bigB, ok := new(big.Float).SetString(string(b))
	if !ok {
		panic("illegal value")
	}
	return bigA.Cmp(bigB)
}
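A small usage sketch (not from the source): the comparison is numeric, so it can disagree with plain string ordering of the json.Number values.

package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

func main() {
	a, b := json.Number("9"), json.Number("10")
	bigA, _ := new(big.Float).SetString(string(a))
	bigB, _ := new(big.Float).SetString(string(b))
	fmt.Println(bigA.Cmp(bigB)) // -1: numerically 9 < 10, although "9" > "10" as strings
}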
Example #7
func TestPow(t *testing.T) {
	x := big.NewFloat(0.12381245613960218386)
	n := 3
	res := Pow(x, n)
	exp := big.NewFloat(0.00189798605)
	diff := new(big.Float).Sub(res, exp)
	diff = diff.Abs(diff)
	if diff.Cmp(big.NewFloat(0.00000001)) >= 0 {
		log.Fatal("Pow failed:", exp, res)
	}
}
Example #8
func TestRoot(t *testing.T) {
	x := big.NewFloat(0.12381245613960218386)
	n := 16
	res := Root(x, n)
	exp := big.NewFloat(0.8776023372475015)
	diff := new(big.Float).Sub(res, exp)
	diff = diff.Abs(diff)
	if diff.Cmp(big.NewFloat(0.00000001)) >= 0 {
		log.Fatal("Exp failed:", exp, res)
	}
}
Example #9
// twoPiReduce guarantees x < 2𝛑; x is known to be >= 0 coming in.
func twoPiReduce(x *big.Float) {
	// TODO: Is there an easy better algorithm?
	twoPi := newF().Set(floatTwo)
	twoPi.Mul(twoPi, floatPi)
	// Do something clever(er) if it's large.
	if x.Cmp(newF().SetInt64(1000)) > 0 {
		multiples := make([]*big.Float, 0, 100)
		sixteen := newF().SetInt64(16)
		multiple := newF().Set(twoPi)
		for {
			multiple.Mul(multiple, sixteen)
			if x.Cmp(multiple) < 0 {
				break
			}
			multiples = append(multiples, newF().Set(multiple))
		}
		// From the right, subtract big multiples.
		for i := len(multiples) - 1; i >= 0; i-- {
			multiple := multiples[i]
			for x.Cmp(multiple) >= 0 {
				x.Sub(x, multiple)
			}
		}
	}
	for x.Cmp(twoPi) >= 0 {
		x.Sub(x, twoPi)
	}
}
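For ordinary float64 values the same reduction is a one-liner with math.Mod (shown below for contrast, not from the source); the loop above exists because math/big provides no modulo operation for big.Float.

package main

import (
	"fmt"
	"math"
)

func main() {
	x := 1000.25
	fmt.Println(math.Mod(x, 2*math.Pi)) // the float64 equivalent of twoPiReduce, in [0, 2π) for x >= 0
}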
Example #10
// hypot for big.Float
func hypot(p, q *big.Float) *big.Float {
	// special cases
	switch {
	case p.IsInf() || q.IsInf():
		return big.NewFloat(math.Inf(1))
	}
	p = p.Abs(p)
	q = q.Abs(q)
	if p.Cmp(q) < 0 {
		p, q = q, p
	}
	if p.Cmp(big.NewFloat(0)) == 0 {
		return big.NewFloat(0)
	}
	q.Quo(q, p)               // q = q/p, now q <= 1
	q.Mul(q, q)               // q = (q/p)²
	q.Add(q, big.NewFloat(1)) // q = 1 + (q/p)²
	s := sqrt(q)              // s = √(1 + (q/p)²)
	return s.Mul(s, p)        // hypot = p · √(1 + (q/p)²)
}
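The rearrangement hypot(p, q) = p·√(1 + (q/p)²) matters mainly for fixed-range floats; a float64 illustration (not from the source) of why the naive formula is avoided:

package main

import (
	"fmt"
	"math"
)

func main() {
	p, q := 3e200, 4e200
	fmt.Println(math.Sqrt(p*p + q*q)) // +Inf: p*p overflows float64
	fmt.Println(math.Hypot(p, q))     // 5e+200: the scaled formula avoids overflow
}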
Example #11
// Implements the nth root algorithm from
// https://en.wikipedia.org/wiki/Nth_root_algorithm
// return: nth root of x within some epsilon
func Root(x *big.Float, n int64) *big.Float {
	guess := new(big.Float).Quo(x, big.NewFloat(float64(n)))
	diff := big.NewFloat(1)
	ep := big.NewFloat(0.00000001)
	abs := new(big.Float).Abs(diff)
	for abs.Cmp(ep) >= 0 {
		//fmt.Println(guess, abs)
		prev := Pow(guess, n-1)
		diff = new(big.Float).Quo(x, prev)
		diff = diff.Sub(diff, guess)
		diff = diff.Quo(diff, big.NewFloat(float64(n)))

		guess = guess.Add(guess, diff)
		abs = new(big.Float).Abs(diff)
	}
	return guess
}
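A float64 sketch (not from the source) of the same Newton update Root applies, x_{k+1} = x_k + (A/x_k^(n−1) − x_k)/n, checked against math.Pow:

package main

import (
	"fmt"
	"math"
)

func main() {
	A, n := 0.12381245613960218386, 16.0
	x, diff := A/n, 1.0 // same crude initial guess as Root
	for math.Abs(diff) >= 1e-8 {
		diff = (A/math.Pow(x, n-1) - x) / n
		x += diff
	}
	fmt.Println(x, math.Pow(A, 1/n)) // both ≈ 0.8776023372475015
}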
Example #12
func sqrtFloat(x *big.Float) *big.Float {
	t1 := new(big.Float).SetPrec(prec)
	t2 := new(big.Float).SetPrec(prec)
	t1.Copy(x)

	// Iterate.
	// x{n} = (x{n-1}+x{0}/x{n-1}) / 2
	for i := 0; i <= steps; i++ {
		if t1.Cmp(zero) == 0 || t1.IsInf() {
			return t1
		}
		t2.Quo(x, t1)
		t2.Add(t2, t1)
		t1.Mul(half, t2)
	}

	return t1
}
Example #13
func runTest(encoderDecoder EncoderDecoder, n *big.Float) (nBytes uint64) {
	y := newBigFloat(0)

	buf := new(bytes.Buffer)
	err := encoderDecoder.Encode(buf, n)
	if err != nil {
		panic(err)
	}
	nBytes += uint64(buf.Len())
	buf = bytes.NewBuffer(buf.Bytes())

	err = encoderDecoder.Decode(buf, y)
	nBytes += uint64(buf.Len())
	if n.Cmp(y) != 0 {
		panic(fmt.Sprintf("write and read are not the same: %v, %v - %d - %d, %d", stringOfBigFloat(n), stringOfBigFloat(y), nBytes, n.Prec(), y.Prec()))
	}
	return
}
Example #14
// floatAsin computes asin(x) using the formula asin(x) = atan(x/sqrt(1-x²)).
func floatAsin(c Context, x *big.Float) *big.Float {
	// The asin Taylor series converges very slowly near ±1, but our
	// atan implementation converges well for all values, so we use
	// the formula above to compute asin. But be careful when |x|=1.
	if x.Cmp(floatOne) == 0 {
		z := newFloat(c).Set(floatPi)
		return z.Quo(z, floatTwo)
	}
	if x.Cmp(floatMinusOne) == 0 {
		z := newFloat(c).Set(floatPi)
		z.Quo(z, floatTwo)
		return z.Neg(z)
	}
	z := newFloat(c)
	z.Mul(x, x)
	z.Sub(floatOne, z)
	z = floatSqrt(c, z)
	z.Quo(x, z)
	return floatAtan(c, z)
}
Example #15
// Pow returns a big.Float representation of z**w. The precision of the
// result is the same as that of the first argument. The function panics
// when z is negative.
func Pow(z *big.Float, w *big.Float) *big.Float {

	if z.Sign() < 0 {
		panic("Pow: negative base")
	}

	// Pow(z, 0) = 1.0
	if w.Sign() == 0 {
		return big.NewFloat(1).SetPrec(z.Prec())
	}

	// Pow(z, 1) = z
	// Pow(+Inf, n) = +Inf
	if w.Cmp(big.NewFloat(1)) == 0 || z.IsInf() {
		return new(big.Float).Copy(z)
	}

	// Pow(z, -w) = 1 / Pow(z, w)
	if w.Sign() < 0 {
		x := new(big.Float)
		zExt := new(big.Float).Copy(z).SetPrec(z.Prec() + 64)
		wNeg := new(big.Float).Neg(w)
		return x.Quo(big.NewFloat(1), Pow(zExt, wNeg)).SetPrec(z.Prec())
	}

	// w integer fast path
	if w.IsInt() {
		wi, _ := w.Int64()
		return powInt(z, int(wi))
	}

	// compute z**w as exp(w · log(z))
	x := new(big.Float).SetPrec(z.Prec() + 64)
	logZ := Log(new(big.Float).Copy(z).SetPrec(z.Prec() + 64))
	x.Mul(w, logZ)
	x = Exp(x)
	return x.SetPrec(z.Prec())

}
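Usage sketch (not from the source, assuming Pow and the helpers it calls — Log, Exp, powInt — are compiled into the same package):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewFloat(2).SetPrec(128)
	fmt.Println(Pow(x, big.NewFloat(10)))  // 1024 (integer fast path)
	fmt.Println(Pow(x, big.NewFloat(0.5))) // √2, via the exp(w·log z) path
}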
Example #16
// Evaluates a BBP term
//
// sum(k=0->inf)( 1/base**k * 1/(a*k + b) )
func bbp(prec uint, base, a, b int64, result *big.Float) {
	var term, power, aFp, bFp, _1, k, _base, oldresult big.Float
	power.SetPrec(prec).SetInt64(1)
	result.SetPrec(prec).SetInt64(0)
	aFp.SetPrec(prec).SetInt64(a)
	bFp.SetPrec(prec).SetInt64(b)
	_1.SetPrec(prec).SetInt64(1)
	k.SetPrec(prec).SetInt64(0)
	_base.SetPrec(prec).SetInt64(base)
	for {
		oldresult.Set(result)
		term.Mul(&aFp, &k)
		term.Add(&term, &bFp)
		term.Quo(&_1, &term)
		term.Mul(&term, &power)
		result.Add(result, &term)
		if oldresult.Cmp(result) == 0 {
			break
		}
		power.Quo(&power, &_base)
		k.Add(&k, &_1)
	}
}
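A sketch (not from the source) of how bbp can be combined to produce π via the classic BBP formula π = Σ_{k≥0} 16^(−k)·(4/(8k+1) − 2/(8k+4) − 1/(8k+5) − 1/(8k+6)), given that bbp(prec, base, a, b, r) computes Σ_{k≥0} base^(−k)·1/(a·k+b):

func piBBP(prec uint) *big.Float {
	var t1, t2, t3, t4 big.Float
	bbp(prec, 16, 8, 1, &t1)
	bbp(prec, 16, 8, 4, &t2)
	bbp(prec, 16, 8, 5, &t3)
	bbp(prec, 16, 8, 6, &t4)

	pi := new(big.Float).SetPrec(prec)
	pi.Mul(&t1, big.NewFloat(4)) // 4·Σ 1/16^k · 1/(8k+1)
	t2.Mul(&t2, big.NewFloat(2))
	pi.Sub(pi, &t2) // − 2·Σ 1/16^k · 1/(8k+4)
	pi.Sub(pi, &t3) // − Σ 1/16^k · 1/(8k+5)
	pi.Sub(pi, &t4) // − Σ 1/16^k · 1/(8k+6)
	return pi
}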
Example #17
// Returns acot(x) in result
func acot(prec uint, x int64, result *big.Float) {
	var term, power, _x, _kp, x2, oldresult big.Float
	_x.SetPrec(prec).SetInt64(x)
	power.SetPrec(prec).SetInt64(1)
	power.Quo(&power, &_x) // 1/x
	x2.Mul(&_x, &_x)
	result.SetPrec(prec).SetInt64(0)
	positive := true
	for k := int64(1); ; k += 2 {
		oldresult.Set(result)
		kp := k
		if !positive {
			kp = -k
		}
		positive = !positive
		_kp.SetPrec(prec).SetInt64(kp)
		term.Quo(&power, &_kp)
		result.Add(result, &term)
		if oldresult.Cmp(result) == 0 {
			break
		}
		power.Quo(&power, &x2)
	}
}
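A sketch (not from the source) combining acot with Machin's formula, π = 16·acot(5) − 4·acot(239):

func piMachin(prec uint) *big.Float {
	var a5, a239 big.Float
	acot(prec, 5, &a5)
	acot(prec, 239, &a239)

	pi := new(big.Float).SetPrec(prec)
	pi.Mul(&a5, big.NewFloat(16))
	a239.Mul(&a239, big.NewFloat(4))
	pi.Sub(pi, &a239)
	return pi
}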
Example #18
// Log returns a big.Float representation of the natural logarithm of
// z. The precision of the result is the same as that of the argument.
// The function panics if z is negative, returns -Inf when z = 0, and
// +Inf when z = +Inf.
func Log(z *big.Float) *big.Float {

	// panic on negative z
	if z.Sign() == -1 {
		panic("Log: argument is negative")
	}

	// Log(0) = -Inf
	if z.Sign() == 0 {
		return big.NewFloat(math.Inf(-1)).SetPrec(z.Prec())
	}

	prec := z.Prec() + 64 // guard digits

	one := big.NewFloat(1).SetPrec(prec)
	two := big.NewFloat(2).SetPrec(prec)
	four := big.NewFloat(4).SetPrec(prec)

	// Log(1) = 0
	if z.Cmp(one) == 0 {
		return big.NewFloat(0).SetPrec(z.Prec())
	}

	// Log(+Inf) = +Inf
	if z.IsInf() {
		return big.NewFloat(math.Inf(+1)).SetPrec(z.Prec())
	}

	x := new(big.Float).SetPrec(prec)

	// if 0 < z < 1 we compute log(z) as -log(1/z)
	var neg bool
	if z.Cmp(one) < 0 {
		x.Quo(one, z)
		neg = true
	} else {
		x.Set(z)
	}

	// We scale up x until x >= 2**(prec/2), and then we'll be allowed
	// to use the AGM formula for Log(x).
	//
	// Square x until the condition is met, and keep track of the
	// number of squarings we did: since log(x^(2^k)) = 2^k * log(x),
	// the result must be scaled back by 2**-k afterwards.

	lim := new(big.Float)
	lim.SetMantExp(two, int(prec/2))

	k := 0
	for x.Cmp(lim) < 0 {
		x.Mul(x, x)
		k++
	}

	// Compute the natural log of x using the fact that
	//     log(x) = π / (2 * AGM(1, 4/x))
	// if
	//     x >= 2**(prec/2),
	// where prec is the desired precision (in bits)
	pi := pi(prec)
	agm := agm(one, x.Quo(four, x)) // agm = AGM(1, 4/x)

	x.Quo(pi, x.Mul(two, agm)) // reuse x, we don't need it

	if neg {
		x.Neg(x)
	}

	// scale the result back multiplying by 2**-k
	// reuse lim to reduce allocations.
	x.Mul(x, lim.SetMantExp(one, -k))

	return x.SetPrec(z.Prec())
}
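Usage sketch (not from the source, assuming Log and its pi/agm helpers are compiled into the same package), cross-checked against math.Log:

package main

import (
	"fmt"
	"math"
	"math/big"
)

func main() {
	z := new(big.Float).SetPrec(128).SetFloat64(123.456)
	fmt.Println(Log(z))            // ≈ 4.81588, matching the float64 value below
	fmt.Println(math.Log(123.456)) // float64 reference
}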
Example #19
// floatAtan computes atan(x) using a Taylor series. There are two series,
// one for |x| < 1 and one for larger values.
func floatAtan(c Context, x *big.Float) *big.Float {
	// atan(-x) == -atan(x). Do this up top to simplify the Euler crossover calculation.
	if x.Sign() < 0 {
		z := newFloat(c).Set(x)
		z = floatAtan(c, z.Neg(z))
		return z.Neg(z)
	}

	// The series converge very slowly near 1. atan 1.00001 takes over a million
	// iterations at the default precision. But there is hope, an Euler identity:
	//	atan(x) = atan(y) + atan((x-y)/(1+xy))
	// Note that y is a free variable. If x is near 1, we can use this formula
	// to push the computation to values that converge faster. Because
	//	tan(π/8) = √2 - 1, or equivalently atan(√2 - 1) == π/8
	// we choose y = √2 - 1 and then we only need to calculate one atan:
	//	atan(x) = π/8 + atan((x-y)/(1+xy))
	// Where do we cross over? This version converges significantly faster
	// even at 0.5, but we must be careful that (x-y)/(1+xy) never approaches 1.
	// At x = 0.5, (x-y)/(1+xy) is 0.07; at x=1 it is 0.414214; at x=1.5 it is
	// 0.66, which is as big as we dare go. With 256 bits of precision and a
	// crossover at 0.5, here are the number of iterations done by
	//	atan .1*iota 20
	// 0.1 39, 0.2 55, 0.3 73, 0.4 96, 0.5 126, 0.6 47, 0.7 59, 0.8 71, 0.9 85, 1.0 99, 1.1 116, 1.2 38, 1.3 44, 1.4 50, 1.5 213, 1.6 183, 1.7 163, 1.8 147, 1.9 135, 2.0 125
	tmp := newFloat(c).Set(floatOne)
	tmp.Sub(tmp, x)
	tmp.Abs(tmp)
	if tmp.Cmp(newFloat(c).SetFloat64(0.5)) < 0 {
		z := newFloat(c).Set(floatPi)
		z.Quo(z, newFloat(c).SetInt64(8))
		y := floatSqrt(c, floatTwo)
		y.Sub(y, floatOne)
		num := newFloat(c).Set(x)
		num.Sub(num, y)
		den := newFloat(c).Set(x)
		den = den.Mul(den, y)
		den = den.Add(den, floatOne)
		z = z.Add(z, floatAtan(c, num.Quo(num, den)))
		return z
	}

	if x.Cmp(floatOne) > 0 {
		return floatAtanLarge(c, x)
	}

	// This is the series for small values |x| < 1.
	// atan(x) = x - x³/3 + x⁵/5 - x⁷/7 + ...
	// First term to compute in loop will be x

	n := newFloat(c)
	term := newFloat(c)
	xN := newFloat(c).Set(x)
	xSquared := newFloat(c).Set(x)
	xSquared.Mul(x, x)
	z := newFloat(c)

	// n goes up by two each loop.
	for loop := newLoop(c.Config(), "atan", x, 4); ; {
		term.Set(xN)
		term.Quo(term, n.SetUint64(2*loop.i+1))
		z.Add(z, term)
		xN.Neg(xN)

		if loop.done(z) {
			break
		}
		// xN *= x², becoming x**(n+2).
		xN.Mul(xN, xSquared)
	}

	return z
}
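A float64 check (not from the source) of the Euler identity the crossover relies on, atan(x) = atan(y) + atan((x−y)/(1+xy)) with y = √2 − 1, so that atan(y) = π/8:

package main

import (
	"fmt"
	"math"
)

func main() {
	x := 0.9
	y := math.Sqrt2 - 1
	fmt.Println(math.Atan(x))                         // direct
	fmt.Println(math.Pi/8 + math.Atan((x-y)/(1+x*y))) // via the identity
}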
Example #20
func opmax(a, b *big.Float) *big.Float {
	if a != nil && (b == nil || a.Cmp(b) >= 0) {
		return a
	}
	return b
}
Example #21
File: pslq.go Project: ncw/pslq
// Given a vector of real numbers x = [x_0, x_1, ..., x_n], this
// uses the PSLQ algorithm to find a list of integers
// [c_0, c_1, ..., c_n] such that
//
//     |c_1 * x_1 + c_2 * x_2 + ... + c_n * x_n| < tolerance
//
// and such that max |c_k| < maxcoeff. If no such vector exists, Pslq
// returns one of the errors in this package, depending on whether it
// has run out of iterations, run out of precision, or explored up to
// maxcoeff without success. The tolerance defaults to 3/4 of the precision.
//
// This is a fairly direct translation of the pseudocode given by
// David Bailey, "The PSLQ Integer Relation Algorithm":
// http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html
//
// If a result is returned, the first non-zero element will be positive
func (e *Pslq) Run(x []big.Float) ([]big.Int, error) {
	n := len(x)
	if n <= 1 {
		return nil, ErrorBadArguments
	}

	// At too low precision, the algorithm becomes meaningless
	if e.prec < 64 {
		return nil, ErrorPrecisionTooLow
	}

	if e.verbose && int(e.prec)/max(2, int(n)) < 5 {
		log.Printf("Warning: precision for PSLQ may be too low")
	}

	if e.verbose {
		log.Printf("PSLQ using prec %d and tol %g", e.prec, e.tol)
	}

	if e.tol.Sign() == 0 {
		return nil, ErrorToleranceRoundsToZero
	}

	// Temporary variables
	tmp0 := new(big.Float).SetPrec(e.prec)
	tmp1 := new(big.Float).SetPrec(e.prec)
	bigTmp := new(big.Int)

	// Convert to use 1-based indexing to allow us to be
	// consistent with Bailey's indexing.
	xNew := make([]big.Float, len(x)+1)
	minx := new(big.Float).SetPrec(e.prec)
	minxFirst := true
	for i, xk := range x {
		p := &xNew[i+1]
		p.Set(&xk)
		tmp0.Abs(p)
		if minxFirst || tmp0.Cmp(minx) < 0 {
			minxFirst = false
			minx.Set(tmp0)
		}
	}
	x = xNew
	if debug {
		printVector("x", x)
	}

	// Sanity check on magnitudes
	if minx.Sign() == 0 {
		return nil, ErrorZeroArguments
	}
	tmp1.SetInt64(128)
	tmp0.Quo(&e.tol, tmp1)
	if minx.Cmp(tmp0) < 0 { //  minx < tol/128
		return nil, ErrorArgumentTooSmall
	}

	tmp0.SetInt64(4)
	tmp1.SetInt64(3)
	tmp0.Quo(tmp0, tmp1)
	var γ big.Float
	e.Sqrt(tmp0, &γ) // γ = sqrt(4/3)
	if debug {
		fmt.Printf("γ = %f\n", &γ)
	}
	A := newBigIntMatrix(n+1, n+1)
	B := newBigIntMatrix(n+1, n+1)
	H := newMatrix(n+1, n+1)
	// Initialization Step 1
	//
	// Set the n×n matrices A and B to the identity.
	for i := 1; i <= n; i++ {
		for j := 1; j <= n; j++ {
			if i == j {
				A[i][j].SetInt64(1)
				B[i][j].SetInt64(1)
			} else {
				A[i][j].SetInt64(0)
				B[i][j].SetInt64(0)
			}
			H[i][j].SetInt64(0)
		}
	}
	if debug {
		printBigIntMatrix("A", A)
		printBigIntMatrix("B", B)
		printMatrix("H", H)
	}
	// Initialization Step 2
	//
	// For k := 1 to n
	//     compute s_k := sqrt( sum_j=k^n x_j^2 )
	// endfor.
	// Set t = 1/s1.
	// For k := 1 to n:
	//     y_k := t * x_k
	//     s_k := t * s_k
	// endfor.
	s := make([]big.Float, n+1)
	for i := 1; i <= n; i++ {
		s[i].SetInt64(0)
	}
	for k := 1; k <= n; k++ {
		var t big.Float
		t.SetInt64(0)
		for j := k; j <= n; j++ {
			tmp0.Mul(&x[j], &x[j])
			t.Add(&t, tmp0)
		}
		e.Sqrt(&t, &s[k])
	}
	if debug {
		fmt.Println("Init Step 2")
		printVector("s", s)
	}
	var t big.Float
	t.Set(&s[1])
	y := make([]big.Float, len(x))
	copy(y, x)
	for k := 1; k <= n; k++ {
		// y[k] = (x[k] << prec) / t
		y[k].Quo(&x[k], &t)
		// s[k] = (s[k] << prec) / t
		s[k].Quo(&s[k], &t)
	}
	if debug {
		printVector("y", y)
		printVector("s", s)
	}
	// Init Step 3
	//
	// Compute the n×(n−1) matrix H as follows:
	// For i := 1 to n:
	//     for j := i + 1 to n − 1:
	//         set Hij := 0
	//     endfor
	//     if i ≤ n − 1 then set Hii := s_(i+1)/s_i
	//     for j := 1 to i−1:
	//         set Hij := −y_i * y_j / (s_j * s_(j+1))
	//     endfor
	// endfor
	for i := 1; i <= n; i++ {
		for j := i + 1; j < n; j++ {
			H[i][j].SetInt64(0)
		}
		if i <= n-1 {
			if s[i].Sign() == 0 {
				// Precision probably exhausted
				return nil, ErrorPrecisionExhausted
			}
			// H[i][i] = (s[i+1] << prec) / s[i]
			H[i][i].Quo(&s[i+1], &s[i])
		}
		for j := 1; j < i; j++ {
			var sjj1 big.Float
			sjj1.Mul(&s[j], &s[j+1])
			if debug {
				fmt.Printf("sjj1 = %f\n", &sjj1)
			}
			if sjj1.Sign() == 0 {
				// Precision probably exhausted
				return nil, ErrorPrecisionExhausted
			}
			// H[i][j] = ((-y[i] * y[j]) << prec) / sjj1
			tmp0.Mul(&y[i], &y[j])
			tmp0.Neg(tmp0)
			H[i][j].Quo(tmp0, &sjj1)
		}
	}
	if debug {
		fmt.Println("Init Step 3")
		printMatrix("H", H)
	}
	// Init Step 4
	//
	// Perform full reduction on H, simultaneously updating y, A and B:
	//
	// For i := 2 to n:
	//     for j := i−1 to 1 step−1:
	//         t := nint(Hij/Hjj)
	//         y_j := y_j + t * y_i
	//         for k := 1 to j:
	//             Hik := Hik − t * Hjk
	//         endfor
	//         for k := 1 to n:
	//             Aik := Aik − t * Ajk
	//             Bkj := Bkj + t * Bki
	//         endfor
	//     endfor
	// endfor
	for i := 2; i <= n; i++ {
		for j := i - 1; j > 0; j-- {
			// t = floor(H[i][j]/H[j][j] + 0.5), i.e. the nearest integer
			var t big.Int
			var tFloat big.Float
			if H[j][j].Sign() == 0 {
				// Precision probably exhausted
				return nil, ErrorPrecisionExhausted
			}
			tmp0.Quo(&H[i][j], &H[j][j])
			e.NearestInt(tmp0, &t)
			tFloat.SetInt(&t).SetPrec(e.prec)
			if debug {
				fmt.Printf("H[i][j]=%f\n", &H[i][j])
				fmt.Printf("H[j][j]=%f\n", &H[j][j])
				fmt.Printf("tmp=%f\n", tmp0)
				fmt.Printf("t=%d\n", &t)
			}
			// y[j] = y[j] + (t * y[i] >> prec)
			tmp0.Mul(&y[i], &tFloat)
			y[j].Add(&y[j], tmp0)
			for k := 1; k <= j; k++ {
				// H[i][k] = H[i][k] - (t * H[j][k] >> prec)
				tmp0.Mul(&H[j][k], &tFloat)
				H[i][k].Sub(&H[i][k], tmp0)
			}
			for k := 1; k <= n; k++ {
				bigTmp.Mul(&t, &A[j][k])
				A[i][k].Sub(&A[i][k], bigTmp)
				bigTmp.Mul(&t, &B[k][i])
				B[k][j].Add(&B[k][j], bigTmp)
			}
		}
	}
	if debug {
		fmt.Println("Init Step 4")
		printBigIntMatrix("A", A)
		printBigIntMatrix("B", B)
		printMatrix("H", H)
	}
	// Main algorithm
	var REP int
	var norm big.Int
	vec := make([]big.Int, n)
	for REP = 0; REP < e.maxsteps; REP++ {
		// Step 1
		//
		// Select m such that γ^i * |Hii| is maximal when i = m.
		m := -1
		var szmax big.Float
		szmax.SetInt64(-1)
		var γPower big.Float
		γPower.Set(&γ)
		for i := 1; i < n; i++ {
			var absH big.Float
			absH.Abs(&H[i][i])
			var sz big.Float
			sz.Mul(&γPower, &absH)
			// sz = γ**i * |H[i][i]|
			if sz.Cmp(&szmax) > 0 {
				m = i
				szmax.Set(&sz)
			}
			γPower.Mul(&γPower, &γ)
		}
		if debug {
			fmt.Println("Step 1")
			fmt.Printf("szmax=%f\n", &szmax)
			fmt.Printf("m=%d\n", m)
		}
		// Step 2
		//
		// Exchange entries m and m+1 of y, corresponding rows
		// of A and H, and corresponding columns of B.
		y[m], y[m+1] = y[m+1], y[m]
		for i := 1; i < n+1; i++ {
			H[m][i], H[m+1][i] = H[m+1][i], H[m][i]
		}
		for i := 1; i < n+1; i++ {
			A[m][i], A[m+1][i] = A[m+1][i], A[m][i]
		}
		for i := 1; i < n+1; i++ {
			B[i][m], B[i][m+1] = B[i][m+1], B[i][m]
		}
		if debug {
			fmt.Println("Step 2")
			printVector("y", y)
			printBigIntMatrix("A", A)
			printBigIntMatrix("B", B)
			printMatrix("H", H)
		}
		// Step 3
		//
		// If m ≤ n−2 then update H as follows:
		//
		// t0 := sqrt( Hmm^2 + H(m,m+1)^2 )
		// t1 := Hmm/t0
		// t2 := H(m,m+1)/t0.
		// for i := m to n:
		//     t3 := Him
		//     t4 := Hi,m+1
		//     Him := t1t3 +t2t4
		//     Hi,m+1 := −t2t3 +t1t4
		// endfor.
		if m <= n-2 {
			tmp0.Mul(&H[m][m], &H[m][m])
			tmp1.Mul(&H[m][m+1], &H[m][m+1])
			tmp0.Add(tmp0, tmp1)
			var t0 big.Float
			e.Sqrt(tmp0, &t0)
			// Precision probably exhausted
			if t0.Sign() == 0 {
				return nil, ErrorPrecisionExhausted
			}
			var t1, t2 big.Float
			t1.Quo(&H[m][m], &t0)
			t2.Quo(&H[m][m+1], &t0)
			for i := m; i <= n; i++ {
				var t3, t4 big.Float
				t3.Set(&H[i][m])
				t4.Set(&H[i][m+1])
				// H[i][m] = (t1*t3 + t2*t4) >> prec
				tmp0.Mul(&t1, &t3)
				tmp1.Mul(&t2, &t4)
				H[i][m].Add(tmp0, tmp1)
				// H[i][m+1] = (-t2*t3 + t1*t4) >> prec
				tmp0.Mul(&t2, &t3)
				tmp1.Mul(&t1, &t4)
				H[i][m+1].Sub(tmp1, tmp0)
			}
		}
		if debug {
			fmt.Println("Step 3")
			printMatrix("H", H)
		}
		// Step 4
		// Perform block reduction on H, simultaneously updating y, A and B:
		//
		// For i := m+1 to n:
		//     for j := min(i−1, m+1) to 1 step −1:
		//         t := nint(Hij/Hjj)
		//         yj := yj + t * yi
		//         for k := 1 to j:
		//             Hik := Hik − tHjk
		//         endfor
		//         for k := 1 to n:
		//             Aik := Aik −tAjk
		//             Bkj := Bkj +tBki
		//         endfor
		//     endfor
		// endfor.
		for i := m + 1; i <= n; i++ {
			var t big.Int
			var tFloat big.Float
			for j := min(i-1, m+1); j > 0; j-- {
				if H[j][j].Sign() == 0 {
					// Precision probably exhausted
					return nil, ErrorPrecisionExhausted
				}
				tmp0.Quo(&H[i][j], &H[j][j])
				e.NearestInt(tmp0, &t)
				tFloat.SetInt(&t).SetPrec(e.prec)
				// y[j] = y[j] + ((t * y[i]) >> prec)
				tmp0.Mul(&y[i], &tFloat)
				y[j].Add(&y[j], tmp0)
				for k := 1; k <= j; k++ {
					// H[i][k] = H[i][k] - (t * H[j][k] >> prec)
					tmp0.Mul(&H[j][k], &tFloat)
					H[i][k].Sub(&H[i][k], tmp0)
				}
				for k := 1; k <= n; k++ {
					bigTmp.Mul(&t, &A[j][k])
					A[i][k].Sub(&A[i][k], bigTmp)
					bigTmp.Mul(&t, &B[k][i])
					B[k][j].Add(&B[k][j], bigTmp)
				}
			}
		}
		if debug {
			fmt.Println("Step 4")
			printBigIntMatrix("A", A)
			printBigIntMatrix("B", B)
			printMatrix("H", H)
		}

		// Step 6
		//
		// Termination test: If the largest entry of A exceeds
		// the level of numeric precision used, then precision
		// is exhausted. If the smallest entry of the y vector
		// is less than the detection threshold, a relation
		// has been detected and is given in the corresponding
		// column of B.
		//
		// Until a relation is found, the error typically decreases
		// slowly (e.g. a factor 1-10) with each step TODO: we could
		// compare err from two successive iterations. If there is a
		// large drop (several orders of magnitude), that indicates a
		// "high quality" relation was detected. Reporting this to
		// the user somehow might be useful.

		maxAPrecision := 0
		for i := 1; i <= n; i++ {
			for j := 1; j <= n; j++ {
				precision := A[i][j].BitLen()
				if precision > maxAPrecision {
					maxAPrecision = precision
				}
			}
		}
		if debug {
			log.Printf("Max A precision = %d, precision = %d, tolerance %d, ratio = %.3f\n", maxAPrecision, e.prec, e.target, float64(maxAPrecision)/float64(e.target))
		}
		if float64(maxAPrecision)/float64(e.target) > 0.85 {
			if e.verbose {
				log.Printf("CANCELLING after step %d/%d.", REP, e.maxsteps)
			}
			return nil, ErrorPrecisionExhausted
		}

		var best_err big.Float
		best_err.Set(&e.maxcoeff_fp)
		for i := 1; i <= n; i++ {
			var err big.Float
			err.Abs(&y[i])
			// Maybe we are done?
			if err.Cmp(&e.tol) < 0 {
				// We are done if the coefficients are acceptable
				var maxc big.Int
				for j := 1; j <= n; j++ {
					if debug {
						fmt.Printf("vec[%d]=%d\n", j-1, &B[j][i])
					}
					t := B[j][i]
					if debug {
						fmt.Printf("vec[%d]=%d\n", j-1, t)
					}
					vec[j-1] = t
					if t.Sign() < 0 {
						t.Neg(&t)
					}
					if t.Cmp(&maxc) > 0 {
						maxc.Set(&t)
					}
				}
				if debug {
					fmt.Printf("maxc = %d, maxcoeff = %d\n", maxc, e.maxcoeff)
				}
				if maxc.Cmp(&e.maxcoeff) < 0 {
					if e.verbose {
						log.Printf("FOUND relation at iter %d/%d, error: %g", REP, e.maxsteps, &err)
					}
					// Find sign of first non zero item
					sign := 0
					for i := range vec {
						sign = vec[i].Sign()
						if sign != 0 {
							break
						}
					}
					// Normalise vec making first non-zero argument positive
					if sign < 0 {
						for i := range vec {
							vec[i].Neg(&vec[i])
						}
					}
					return vec, nil
				}
			}
			if err.Cmp(&best_err) < 0 {
				best_err = err
			}
		}
		// Step 5
		//
		// Norm bound: Compute M := 1/maxj |Hj|, where Hj
		// denotes the j-th row of H.
		//
		// Then there can exist no relation vector whose
		// Euclidean norm is less than M.
		//
		// Calculate a lower bound for the norm. We could do this
		// more exactly (using the Euclidean norm) but there is probably
		// no practical benefit.
		var recnorm big.Float
		recnorm.SetInt64(0)
		for i := 1; i <= n; i++ {
			for j := 1; j <= n; j++ {
				tmp0.Abs(&H[i][j])
				if tmp0.Cmp(&recnorm) > 0 {
					recnorm.Set(tmp0)
				}
			}
		}
		norm.Set(&e.maxcoeff)
		if recnorm.Sign() != 0 {
			// norm = 1/recnorm, truncated to an integer
			tmp0.Quo(&e.one, &recnorm)
			tmp0.Int(&norm)
		}
		if e.verbose {
			log.Printf("%2d/%2d:  Error: %g   Norm: %d", REP, e.maxsteps, &best_err, &norm)
		}
		if norm.Cmp(&e.maxcoeff) >= 0 {
			if e.verbose {
				log.Printf("CANCELLING after step %d/%d.", REP, e.maxsteps)
				log.Printf("Could not find an integer relation. Norm bound: %d", &norm)
			}
			return nil, ErrorNoRelationFound
		}
	}
	if e.verbose {
		log.Printf("CANCELLING after step %d/%d.", REP, e.maxsteps)
		log.Printf("Could not find an integer relation. Norm bound: %d", &norm)
	}
	return nil, ErrorIterationsExceeded
}
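For intuition about what Run detects, here is a standard-library-only sketch (not using this package) that verifies the integer relation 1 + φ − φ² = 0 for the golden ratio at 256 bits; this is exactly the kind of relation PSLQ would recover as the coefficient vector [1, 1, −1].

package main

import (
	"fmt"
	"math/big"
)

func main() {
	const prec = 256
	phi := new(big.Float).SetPrec(prec).SetInt64(5)
	phi.Sqrt(phi)                 // √5
	phi.Add(phi, big.NewFloat(1)) // 1 + √5
	phi.Quo(phi, big.NewFloat(2)) // φ = (1 + √5)/2

	phi2 := new(big.Float).SetPrec(prec).Mul(phi, phi) // φ²

	r := new(big.Float).SetPrec(prec).SetInt64(1)
	r.Add(r, phi)  // 1 + φ
	r.Sub(r, phi2) // 1 + φ − φ²
	fmt.Println(r) // ≈ 0 (zero up to rounding at 256 bits)
}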