Example #1
File: pslq.go Project: ncw/pslq
// Compute the square root of n using Newton's Method. We start with
// an initial estimate for sqrt(n), and then iterate
//     x_{i+1} = 1/2 * ( x_i + (n / x_i) )
// Result is returned in x
func (e *Pslq) Sqrt(n, x *big.Float) {
	if n == x {
		panic("need distinct input and output")
	}
	if n.Sign() == 0 {
		x.Set(n)
		return
	} else if n.Sign() < 0 {
		panic("Sqrt of negative number")
	}
	prec := n.Prec()

	// Use the floating point square root as initial estimate
	nFloat64, _ := n.Float64()
	x.SetPrec(prec).SetFloat64(math.Sqrt(nFloat64))

	// We use t as a temporary variable. There's no need to set its precision
	// since big.Float values with unset (== 0) precision automatically assume
	// the largest precision of the arguments when used as the result (receiver)
	// of a big.Float operation.
	var t big.Float

	// Iterate.
	for {
		t.Quo(n, x)        // t = n / x_i
		t.Add(x, &t)       // t = x_i + (n / x_i)
		t.Mul(&e.half, &t) // x_{i+1} = 0.5 * t
		if x.Cmp(&t) == 0 {
			// Exit loop if no change to result
			break
		}
		x.Set(&t)
	}
}
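A minimal, self-contained sketch of the same Newton iteration using only the standard math and math/big packages (the Pslq receiver and its precomputed half field are specific to this project and are not reproduced here):

package main

import (
	"fmt"
	"math"
	"math/big"
)

// newtonSqrt returns sqrt(n) at the given precision using the iteration
// x_{i+1} = 1/2 * (x_i + n/x_i), seeded with the float64 square root.
func newtonSqrt(n *big.Float, prec uint) *big.Float {
	f, _ := n.Float64()
	x := new(big.Float).SetPrec(prec).SetFloat64(math.Sqrt(f))
	half := big.NewFloat(0.5)
	t := new(big.Float).SetPrec(prec)
	for i := 0; i < 100; i++ { // converges in a handful of steps; 100 is a safety cap
		t.Quo(n, x)    // n / x_i
		t.Add(x, t)    // x_i + n/x_i
		t.Mul(half, t) // halve
		if x.Cmp(t) == 0 {
			break // no change at this precision: done
		}
		x.Set(t)
	}
	return x
}

func main() {
	n := big.NewFloat(2).SetPrec(200)
	fmt.Printf("%.50f\n", newtonSqrt(n, 200)) // sqrt(2) ≈ 1.4142135623730950488...
}

Since Go 1.10, big.Float also ships a built-in Sqrt method, so a hand-rolled iteration like this is mostly of interest as an illustration or for older toolchains.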
Example #2
func makeFloat(x *big.Float) Value {
	// convert -0
	if x.Sign() == 0 {
		return floatVal0
	}
	return floatVal{x}
}
Example #3
// BigCommaf produces a string form of the given big.Float in base 10
// with commas after every three orders of magnitude.
func BigCommaf(v *big.Float) string {
	buf := &bytes.Buffer{}
	if v.Sign() < 0 {
		buf.Write([]byte{'-'})
		v.Abs(v)
	}

	comma := []byte{','}

	parts := strings.Split(v.Text('f', -1), ".")
	pos := 0
	if len(parts[0])%3 != 0 {
		pos += len(parts[0]) % 3
		buf.WriteString(parts[0][:pos])
		buf.Write(comma)
	}
	for ; pos < len(parts[0]); pos += 3 {
		buf.WriteString(parts[0][pos : pos+3])
		buf.Write(comma)
	}
	buf.Truncate(buf.Len() - 1)

	if len(parts) > 1 {
		buf.Write([]byte{'.'})
		buf.WriteString(parts[1])
	}
	return buf.String()
}
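A quick usage sketch, written as a Go example test assumed to live in the same package as BigCommaf, with "fmt" and "math/big" imported (note that the function, as written, calls v.Abs(v) and so modifies a negative argument in place):

// In a _test.go file of the same package.
func ExampleBigCommaf() {
	fmt.Println(BigCommaf(big.NewFloat(1234567.891)))
	fmt.Println(BigCommaf(big.NewFloat(512)))
	// Output:
	// 1,234,567.891
	// 512
}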
Example #4
File: sqrt.go Project: db47h/ivy
// floatSqrt computes the square root of x using Newton's method.
// TODO: Use a better algorithm such as the one from math/sqrt.go.
func floatSqrt(c Context, x *big.Float) *big.Float {
	switch x.Sign() {
	case -1:
		Errorf("square root of negative number")
	case 0:
		return newFloat(c)
	}

	// Each iteration computes
	// 	z = z - (z²-x)/2z
	// z holds the result so far. A good starting point is to halve the exponent.
	// Experiments show we converge in only a handful of iterations.
	z := newFloat(c)
	exp := x.MantExp(z)
	z.SetMantExp(z, exp/2)

	// Intermediates, allocated once.
	zSquared := newFloat(c)
	num := newFloat(c)
	den := newFloat(c)

	for loop := newLoop(c.Config(), "sqrt", x, 1); ; {
		zSquared.Mul(z, z)
		num.Sub(zSquared, x)
		den.Mul(floatTwo, z)
		num.Quo(num, den)
		z.Sub(z, num)
		if loop.done(z) {
			break
		}
	}
	return z
}
Example #5
// floatLog computes natural log(x) using the Maclaurin series for log(1-x).
func floatLog(x *big.Float) *big.Float {
	if x.Sign() <= 0 {
		Errorf("log of non-positive value")
	}
	// The series wants x < 1, and log 1/x == -log x, so exploit that.
	invert := false
	if x.Cmp(floatOne) > 0 {
		invert = true
		xx := newF()
		xx.Quo(floatOne, x)
		x = xx
	}

	// x = mantissa * 2**exp, and 0.5 <= mantissa < 1.
	// So log(x) is log(mantissa)+exp*log(2), and 1-mantissa will be
	// between 0 and 0.5, so the series at 1-mantissa will converge well.
	// (The series converges slowly in general.)
	mantissa := newF()
	exp2 := x.MantExp(mantissa)
	exp := newF().SetInt64(int64(exp2))
	exp.Mul(exp, floatLog2)
	if invert {
		exp.Neg(exp)
	}

	// y = 1-x (whereupon x = 1-y and we use that in the series).
	y := newF().SetInt64(1)
	y.Sub(y, mantissa)

	// The Maclaurin series for log(1-y) == log(x) is: -y - y²/2 - y³/3 ...

	yN := newF().Set(y)
	term := newF()
	n := newF().Set(floatOne)
	z := newF()

	// This is the slowest-converging series, so we add a factor of ten to the cutoff.
	// Only necessary when FloatPrec is at or beyond constPrecisionInBits.
	loop := newLoop("log", y, 40)
	for {
		term.Set(yN)
		term.Quo(term, n)
		z.Sub(z, term)
		if loop.terminate(z) {
			break
		}
		// Advance y**index (multiply by y).
		yN.Mul(yN, y)
		n.Add(n, floatOne)
	}

	if invert {
		z.Neg(z)
	}
	z.Add(z, exp)

	return z
}
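A standalone sketch of just the series step, for an argument already reduced to the mantissa range [0.5, 1]; the exponent handling and the 1/x inversion from the snippet above are omitted, and the project helpers (newF, newLoop, floatOne) are replaced by plain math/big code:

package main

import (
	"fmt"
	"math/big"
)

// seriesLog computes log(x) for 0.5 <= x <= 1 from the Maclaurin series
// log(1-y) = -y - y²/2 - y³/3 - ..., with y = 1-x.
func seriesLog(x *big.Float, prec uint) *big.Float {
	y := new(big.Float).SetPrec(prec).Sub(big.NewFloat(1), x)
	yN := new(big.Float).Set(y) // y**n
	term := new(big.Float).SetPrec(prec)
	sum := new(big.Float).SetPrec(prec)
	prev := new(big.Float)
	for n := int64(1); ; n++ {
		term.Quo(yN, new(big.Float).SetInt64(n)) // y**n / n
		sum.Sub(sum, term)
		if sum.Cmp(prev) == 0 {
			return sum // partial sum stopped changing at this precision
		}
		prev.Set(sum)
		yN.Mul(yN, y)
	}
}

func main() {
	x := big.NewFloat(0.75).SetPrec(128)
	fmt.Println(seriesLog(x, 128)) // log(0.75) ≈ -0.28768207245...
}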
Example #6
File: pslq.go Project: ncw/pslq
// NearestInt sets res to the nearest integer to x
func (e *Pslq) NearestInt(x *big.Float, res *big.Int) {
	prec := x.Prec()
	var tmp big.Float
	tmp.SetPrec(prec)
	if x.Sign() >= 0 {
		tmp.Add(x, &e.half)
	} else {
		tmp.Sub(x, &e.half)
	}
	tmp.Int(res)
}
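A standalone variant of the same add-half-then-truncate rounding, without the Pslq receiver (whose half field is assumed to hold 0.5 at the working precision):

package main

import (
	"fmt"
	"math/big"
)

// nearestInt rounds x to the nearest integer by adding (or subtracting)
// 0.5 and then truncating toward zero.
func nearestInt(x *big.Float) *big.Int {
	half := big.NewFloat(0.5)
	tmp := new(big.Float).SetPrec(x.Prec())
	if x.Sign() >= 0 {
		tmp.Add(x, half)
	} else {
		tmp.Sub(x, half)
	}
	res, _ := tmp.Int(nil) // Int truncates toward zero
	return res
}

func main() {
	fmt.Println(nearestInt(big.NewFloat(2.6)))  // 3
	fmt.Println(nearestInt(big.NewFloat(-2.6))) // -3
}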
Example #7
// floatSin computes sin(x) using argument reduction and a Taylor series.
func floatSin(c Context, x *big.Float) *big.Float {
	negate := false
	if x.Sign() < 0 {
		x.Neg(x)
		negate = true
	}
	twoPiReduce(c, x)

	// sin(x) = x - x³/3! + x⁵/5! - ...
	// First term to compute in loop will be -x³/3!
	factorial := newFloat(c).SetInt64(6)

	result := sincos("sin", c, 3, x, newFloat(c).Set(x), 3, factorial)

	if negate {
		result.Neg(result)
	}

	return result
}
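The sincos helper above is project-specific. Below is a standalone sketch of the same Taylor series without any argument reduction, so it is only meant for small |x|; the term recurrence multiplies by x² and divides by the next two factorial factors, as in the comment above:

package main

import (
	"fmt"
	"math/big"
)

// taylorSin sums x - x³/3! + x⁵/5! - ... until the partial sum stops changing.
func taylorSin(x *big.Float, prec uint) *big.Float {
	term := new(big.Float).SetPrec(prec).Set(x) // current term x**n / n!
	sum := new(big.Float).SetPrec(prec).Set(x)
	prev := new(big.Float)
	xx := new(big.Float).SetPrec(prec).Mul(x, x)
	for n := int64(1); ; n += 2 {
		// next term = -term * x² / ((n+1)(n+2))
		term.Mul(term, xx)
		term.Quo(term, new(big.Float).SetInt64((n+1)*(n+2)))
		term.Neg(term)
		sum.Add(sum, term)
		if sum.Cmp(prev) == 0 {
			return sum // partial sum stopped changing at this precision
		}
		prev.Set(sum)
	}
}

func main() {
	x := big.NewFloat(0.5).SetPrec(128)
	fmt.Println(taylorSin(x, 128)) // sin(0.5) ≈ 0.4794255386
}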
Example #8
// floatSqrt computes the square root of x using Newton's method.
// TODO: Use a better algorithm such as the one from math/sqrt.go.
func floatSqrt(x *big.Float) *big.Float {
	switch x.Sign() {
	case -1:
		Errorf("square root of negative number")
	case 0:
		return newF()
	}

	// Each iteration computes
	// 	z = z - (z²-x)/2z
	// The loop stops when the difference between successive
	// results reaches zero (that check is handled by loop.terminate).

	// z holds the result so far. A good starting point is to halve the exponent.
	// Experiments show we converge in only a handful of iterations.
	z := newF()
	exp := x.MantExp(z)
	z.SetMantExp(z, exp/2)

	// Intermediates, allocated once.
	zSquared := newF()
	num := newF()
	den := newF()

	loop := newLoop("sqrt", x, 1)
	for {
		zSquared.Mul(z, z)
		num.Sub(zSquared, x)
		den.Mul(floatTwo, z)
		num.Quo(num, den)
		z.Sub(z, num)
		if loop.terminate(z) {
			break
		}
	}
	return z
}
Example #9
// Sqrt returns a big.Float representation of the square root of
// z. Precision is the same as the one of the argument. The function
// panics if z is negative, returns ±0 when z = ±0, and +Inf when z =
// +Inf.
func Sqrt(z *big.Float) *big.Float {

	// panic on negative z
	if z.Sign() == -1 {
		panic("Sqrt: argument is negative")
	}

	// √±0 = ±0
	if z.Sign() == 0 {
		return big.NewFloat(float64(z.Sign()))
	}

	// √+Inf  = +Inf
	if z.IsInf() {
		return big.NewFloat(math.Inf(+1))
	}

	// Compute √(a·2**b) as
	//   √(a)·2**b/2       if b is even
	//   √(2a)·2**b/2      if b > 0 is odd
	//   √(0.5a)·2**b/2    if b < 0 is odd
	//
	// The difference in the odd exponent case is due to the fact that
	// exp/2 is rounded in different directions when exp is negative.
	mant := new(big.Float)
	exp := z.MantExp(mant)
	switch exp % 2 {
	case 1:
		mant.Mul(big.NewFloat(2), mant)
	case -1:
		mant.Mul(big.NewFloat(0.5), mant)
	}

	// Solving x² - z = 0 directly requires a Quo call, but it's
	// faster for small precisions.
	//
	// Solving 1/x² - z = 0 avoids the Quo call and is much faster for
	// high precisions.
	//
	// Use sqrtDirect for prec <= 128 and sqrtInverse for prec > 128.
	var x *big.Float
	if z.Prec() <= 128 {
		x = sqrtDirect(mant)
	} else {
		x = sqrtInverse(mant)
	}

	// re-attach the exponent and return
	return x.SetMantExp(x, exp/2)

}
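A minimal illustration of the exponent-halving idea, with the mantissa square root delegated to float64 math.Sqrt instead of the sqrtDirect/sqrtInverse helpers (which are not shown above), so the result is only float64-accurate:

package main

import (
	"fmt"
	"math"
	"math/big"
)

// roughSqrt computes √(mant·2**exp) as √(mant')·2**(exp/2), doubling or
// halving the mantissa when the exponent is odd. Assumes z > 0 and finite.
func roughSqrt(z *big.Float) *big.Float {
	mant := new(big.Float)
	exp := z.MantExp(mant) // z = mant · 2**exp, 0.5 <= mant < 1
	switch exp % 2 {
	case 1:
		mant.Mul(big.NewFloat(2), mant) // odd positive exponent: √(2a)·2**(b/2)
	case -1:
		mant.Mul(big.NewFloat(0.5), mant) // odd negative exponent: √(0.5a)·2**(b/2)
	}
	m, _ := mant.Float64()
	x := big.NewFloat(math.Sqrt(m))
	return x.SetMantExp(x, exp/2)
}

func main() {
	fmt.Println(roughSqrt(big.NewFloat(2)))    // ≈ 1.414213562
	fmt.Println(roughSqrt(big.NewFloat(1e30))) // ≈ 1e+15
}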
Example #10
// Exp returns a big.Float representation of exp(z). Precision is
// the same as the one of the argument. The function returns +Inf
// when z = +Inf, and 0 when z = -Inf.
func Exp(z *big.Float) *big.Float {

	// exp(0) == 1
	if z.Sign() == 0 {
		return big.NewFloat(1).SetPrec(z.Prec())
	}

	// Exp(+Inf) = +Inf
	if z.IsInf() && z.Sign() > 0 {
		return big.NewFloat(math.Inf(+1)).SetPrec(z.Prec())
	}

	// Exp(-Inf) = 0
	if z.IsInf() && z.Sign() < 0 {
		return big.NewFloat(0).SetPrec(z.Prec())
	}

	guess := new(big.Float)

	// try to get initial estimate using IEEE-754 math
	zf, _ := z.Float64()
	if zfs := math.Exp(zf); zfs == math.Inf(+1) || zfs == 0 {
		// too big or too small for IEEE-754 math,
		// perform argument reduction using
		//     e^{2z} = (e^z)²
		halfZ := new(big.Float).Mul(z, big.NewFloat(0.5))
		halfExp := Exp(halfZ.SetPrec(z.Prec() + 64))
		return new(big.Float).Mul(halfExp, halfExp).SetPrec(z.Prec())
	} else {
		// we got a nice IEEE-754 estimate
		guess.SetFloat64(zfs)
	}

	// f(t)/f'(t) = t*(log(t) - z)
	f := func(t *big.Float) *big.Float {
		x := new(big.Float)
		x.Sub(Log(t), z)
		return x.Mul(x, t)
	}

	x := newton(f, guess, z.Prec())

	return x
}
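A standalone sketch of the argument-reduction identity used above, exp(z) = exp(z/2)², applied k times by hand so the remaining argument fits float64 (the snippet instead recurses once with extra guard bits and then polishes with Newton's method):

package main

import (
	"fmt"
	"math"
	"math/big"
)

// expByHalving halves z k times, exponentiates the small remainder with
// float64 math, then squares the result k times: exp(z) = exp(z/2**k)**(2**k).
// Accuracy is limited by the float64 seed; this is only an illustration.
func expByHalving(z *big.Float, k int) *big.Float {
	small := new(big.Float).SetPrec(z.Prec() + 64).Set(z)
	small.SetMantExp(small, -k) // divide by 2**k
	f, _ := small.Float64()
	x := new(big.Float).SetPrec(z.Prec() + 64).SetFloat64(math.Exp(f))
	for i := 0; i < k; i++ {
		x.Mul(x, x) // exp(2t) = exp(t)²
	}
	return x.SetPrec(z.Prec())
}

func main() {
	z := big.NewFloat(800).SetPrec(64) // math.Exp(800) overflows float64
	fmt.Println(expByHalving(z, 4))    // exp(800) ≈ 2.726e+347
}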
Example #11
// Pow returns a big.Float representation of z**w. Precision is the same as the one
// of the first argument. The function panics when z is negative.
func Pow(z *big.Float, w *big.Float) *big.Float {

	if z.Sign() < 0 {
		panic("Pow: negative base")
	}

	// Pow(z, 0) = 1.0
	if w.Sign() == 0 {
		return big.NewFloat(1).SetPrec(z.Prec())
	}

	// Pow(z, 1) = z
	// Pow(+Inf, n) = +Inf
	if w.Cmp(big.NewFloat(1)) == 0 || z.IsInf() {
		return new(big.Float).Copy(z)
	}

	// Pow(z, -w) = 1 / Pow(z, w)
	if w.Sign() < 0 {
		x := new(big.Float)
		zExt := new(big.Float).Copy(z).SetPrec(z.Prec() + 64)
		wNeg := new(big.Float).Neg(w)
		return x.Quo(big.NewFloat(1), Pow(zExt, wNeg)).SetPrec(z.Prec())
	}

	// w integer fast path
	if w.IsInt() {
		wi, _ := w.Int64()
		return powInt(z, int(wi))
	}

	// compute z**w as exp(w log(z))
	x := new(big.Float).SetPrec(z.Prec() + 64)
	logZ := Log(new(big.Float).Copy(z).SetPrec(z.Prec() + 64))
	x.Mul(w, logZ)
	x = Exp(x)
	return x.SetPrec(z.Prec())

}
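The powInt fast path called above is not shown in this snippet. Below is a guess at what such a helper could look like, as square-and-multiply for a non-negative integer exponent (the name, signature, and behavior here are assumptions, not the package's actual code):

package main

import (
	"fmt"
	"math/big"
)

// powInt raises z to a non-negative integer power by binary
// exponentiation (square-and-multiply).
func powInt(z *big.Float, n uint) *big.Float {
	result := big.NewFloat(1).SetPrec(z.Prec())
	base := new(big.Float).Copy(z)
	for ; n > 0; n >>= 1 {
		if n&1 == 1 {
			result.Mul(result, base)
		}
		base.Mul(base, base)
	}
	return result
}

func main() {
	fmt.Println(powInt(big.NewFloat(1.5), 10)) // 1.5**10 = 57.6650390625
}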
Example #12
// Log returns a big.Float representation of the natural logarithm of
// z. Precision is the same as the one of the argument. The function
// panics if z is negative, returns -Inf when z = 0, and +Inf when z =
// +Inf
func Log(z *big.Float) *big.Float {

	// panic on negative z
	if z.Sign() == -1 {
		panic("Log: argument is negative")
	}

	// Log(0) = -Inf
	if z.Sign() == 0 {
		return big.NewFloat(math.Inf(-1)).SetPrec(z.Prec())
	}

	prec := z.Prec() + 64 // guard digits

	one := big.NewFloat(1).SetPrec(prec)
	two := big.NewFloat(2).SetPrec(prec)
	four := big.NewFloat(4).SetPrec(prec)

	// Log(1) = 0
	if z.Cmp(one) == 0 {
		return big.NewFloat(0).SetPrec(z.Prec())
	}

	// Log(+Inf) = +Inf
	if z.IsInf() {
		return big.NewFloat(math.Inf(+1)).SetPrec(z.Prec())
	}

	x := new(big.Float).SetPrec(prec)

	// if 0 < z < 1 we compute log(z) as -log(1/z)
	var neg bool
	if z.Cmp(one) < 0 {
		x.Quo(one, z)
		neg = true
	} else {
		x.Set(z)
	}

	// We scale up x until x >= 2**(prec/2), and then we'll be allowed
	// to use the AGM formula for Log(x).
	//
	// Square x repeatedly until the condition is met, and keep track
	// of the number of squarings we did (needed to scale the result back later).

	lim := new(big.Float)
	lim.SetMantExp(two, int(prec/2))

	k := 0
	for x.Cmp(lim) < 0 {
		x.Mul(x, x)
		k++
	}

	// Compute the natural log of x using the fact that
	//     log(x) = π / (2 * AGM(1, 4/x))
	// if
	//     x >= 2**(prec/2),
	// where prec is the desired precision (in bits)
	pi := pi(prec)
	agm := agm(one, x.Quo(four, x)) // agm = AGM(1, 4/x)

	x.Quo(pi, x.Mul(two, agm)) // reuse x, we don't need it

	if neg {
		x.Neg(x)
	}

	// scale the result back multiplying by 2**-k
	// reuse lim to reduce allocations.
	x.Mul(x, lim.SetMantExp(one, -k))

	return x.SetPrec(z.Prec())
}
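The agm helper used above is not shown in this snippet. Below is a minimal sketch of the arithmetic-geometric mean iteration it presumably implements (an assumption, not the package's actual code), together with the log identity from the comment, run at float64-ish precision:

package main

import (
	"fmt"
	"math"
	"math/big"
)

// agm returns the arithmetic-geometric mean of a and b:
// repeat a ← (a+b)/2, b ← √(a·b) until the two values agree
// (with an iteration cap in case the last ulp keeps oscillating).
func agm(a, b *big.Float) *big.Float {
	prec := a.Prec()
	x := new(big.Float).SetPrec(prec).Set(a)
	y := new(big.Float).SetPrec(prec).Set(b)
	t := new(big.Float).SetPrec(prec)
	for i := 0; i < 100 && x.Cmp(y) != 0; i++ {
		t.Add(x, y)
		t.Quo(t, big.NewFloat(2)) // arithmetic mean
		y.Mul(x, y)
		y.Sqrt(y) // geometric mean (big.Float.Sqrt, Go ≥ 1.10)
		x.Set(t)
	}
	return x
}

func main() {
	// log(x) ≈ π / (2·AGM(1, 4/x)) for x >= 2**(prec/2); here prec is only
	// 53 bits and π is taken from math.Pi, so expect float64-level accuracy.
	x := big.NewFloat(1e10)
	m := agm(big.NewFloat(1), new(big.Float).Quo(big.NewFloat(4), x))
	ln := new(big.Float).Quo(
		big.NewFloat(math.Pi),
		new(big.Float).Mul(big.NewFloat(2), m),
	)
	fmt.Println(ln) // ≈ 23.02585093 = log(1e10)
}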
Example #13
// floatAtan computes atan(x) using a Taylor series. There are two series,
// one for |x| < 1 and one for larger values.
func floatAtan(c Context, x *big.Float) *big.Float {
	// atan(-x) == -atan(x). Do this up top to simplify the Euler crossover calculation.
	if x.Sign() < 0 {
		z := newFloat(c).Set(x)
		z = floatAtan(c, z.Neg(z))
		return z.Neg(z)
	}

	// The series converge very slowly near 1. atan 1.00001 takes over a million
	// iterations at the default precision. But there is hope, an Euler identity:
	//	atan(x) = atan(y) + atan((x-y)/(1+xy))
	// Note that y is a free variable. If x is near 1, we can use this formula
	// to push the computation to values that converge faster. Because
	//	tan(π/8) = √2 - 1, or equivalently atan(√2 - 1) == π/8
	// we choose y = √2 - 1 and then we only need to calculate one atan:
	//	atan(x) = π/8 + atan((x-y)/(1+xy))
	// Where do we cross over? This version converges significantly faster
	// even at 0.5, but we must be careful that (x-y)/(1+xy) never approaches 1.
	// At x = 0.5, (x-y)/(1+xy) is 0.07; at x=1 it is 0.414214; at x=1.5 it is
	// 0.66, which is as big as we dare go. With 256 bits of precision and a
	// crossover at 0.5, here are the number of iterations done by
	//	atan .1*iota 20
	// 0.1 39, 0.2 55, 0.3 73, 0.4 96, 0.5 126, 0.6 47, 0.7 59, 0.8 71, 0.9 85, 1.0 99, 1.1 116, 1.2 38, 1.3 44, 1.4 50, 1.5 213, 1.6 183, 1.7 163, 1.8 147, 1.9 135, 2.0 125
	tmp := newFloat(c).Set(floatOne)
	tmp.Sub(tmp, x)
	tmp.Abs(tmp)
	if tmp.Cmp(newFloat(c).SetFloat64(0.5)) < 0 {
		z := newFloat(c).Set(floatPi)
		z.Quo(z, newFloat(c).SetInt64(8))
		y := floatSqrt(c, floatTwo)
		y.Sub(y, floatOne)
		num := newFloat(c).Set(x)
		num.Sub(num, y)
		den := newFloat(c).Set(x)
		den = den.Mul(den, y)
		den = den.Add(den, floatOne)
		z = z.Add(z, floatAtan(c, num.Quo(num, den)))
		return z
	}

	if x.Cmp(floatOne) > 0 {
		return floatAtanLarge(c, x)
	}

	// This is the series for small values |x| <  1.
	// atan(x) = x - x³/3 + x⁵/5 - x⁷/7 + ...
	// First term to compute in loop will be x

	n := newFloat(c)
	term := newFloat(c)
	xN := newFloat(c).Set(x)
	xSquared := newFloat(c).Set(x)
	xSquared.Mul(x, x)
	z := newFloat(c)

	// n goes up by two each loop.
	for loop := newLoop(c.Config(), "atan", x, 4); ; {
		term.Set(xN)
		term.Quo(term, n.SetUint64(2*loop.i+1))
		z.Add(z, term)
		xN.Neg(xN)

		if loop.done(z) {
			break
		}
		// xN *= x², becoming x**(n+2).
		xN.Mul(xN, xSquared)
	}

	return z
}
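The Euler-identity crossover described in the comment can be checked numerically with plain float64 math:

package main

import (
	"fmt"
	"math"
)

func main() {
	// atan(x) = π/8 + atan((x-y)/(1+x·y)) with y = √2 - 1 = tan(π/8).
	x := 0.8
	y := math.Sqrt2 - 1
	lhs := math.Atan(x)
	rhs := math.Pi/8 + math.Atan((x-y)/(1+x*y))
	fmt.Println(lhs, rhs) // both ≈ 0.674740942223553
}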
Example #14
func (f BigFloat) String() string {
	var mant big.Float
	exp := f.Float.MantExp(&mant)
	positive := 1
	if exp < 0 {
		positive = 0
		exp = -exp
	}
	verb, prec := byte('g'), 12
	format := conf.Format()
	if format != "" {
		v, p, ok := conf.FloatFormat()
		if ok {
			verb, prec = v, p
		}
	}
	// Printing huge floats can be very slow using
	// big.Float's native methods; see issue #11068.
	// For example 1e5000000 takes a minute of CPU time just
	// to print. The code below is instantaneous, by rescaling
	// first. It is however less feature-complete.
	// (Big ints are problematic too, but if you print 1e50000000
	// as an integer you probably won't be surprised it's slow.)

	if fastFloatPrint && exp > 10000 {
		// We always use %g to print the fraction, and it will
		// never have an exponent, but if the format is %E we
		// need to use a capital E.
		eChar := 'e'
		if verb == 'E' || verb == 'G' {
			eChar = 'E'
		}
		fexp := newF().SetInt64(int64(exp))
		fexp.Mul(fexp, floatLog2)
		fexp.Quo(fexp, floatLog10)
		// We now have a floating-point base 10 exponent.
		// Break into the integer part and the fractional part.
		// The integer part is what we will show.
		// The 10**(fractional part) will be multiplied back in.
		iexp, _ := fexp.Int(nil)
		fraction := fexp.Sub(fexp, newF().SetInt(iexp))
		// Now compute 10**(fractional part).
		// Fraction is in base 10. Move it to base e.
		fraction.Mul(fraction, floatLog10)
		scale := exponential(fraction)
		if positive > 0 {
			mant.Mul(&mant, scale)
		} else {
			mant.Quo(&mant, scale)
		}
		ten := newF().SetInt64(10)
		i64exp := iexp.Int64()
		// For numbers not too far from one, print without the E notation.
		// Shouldn't happen (exp must be large to get here) but just
		// in case, we keep this around.
		if -4 <= i64exp && i64exp <= 11 {
			if i64exp > 0 {
				for i := 0; i < int(i64exp); i++ {
					mant.Mul(&mant, ten)
				}
			} else {
				for i := 0; i < int(-i64exp); i++ {
					mant.Quo(&mant, ten)
				}
			}
			return fmt.Sprintf("%g\n", &mant)
		} else {
			sign := ""
			if mant.Sign() < 0 {
				sign = "-"
				mant.Neg(&mant)
			}
			// If it has a leading zero, rescale.
			digits := mant.Text('g', prec)
			for digits[0] == '0' {
				mant.Mul(&mant, ten)
				if positive > 0 {
					i64exp--
				} else {
					i64exp++
				}
				digits = mant.Text('g', prec)
			}
			return fmt.Sprintf("%s%s%c%c%d", sign, digits, eChar, "-+"[positive], i64exp)
		}
	}
	return f.Float.Text(verb, prec)
}
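The core of the rescaling trick is converting the binary exponent to a decimal one via log10(2). A minimal standalone illustration follows (the snippet above additionally folds the fractional part of the decimal exponent back into the mantissa before formatting):

package main

import (
	"fmt"
	"math"
	"math/big"
)

// decimalExponent returns floor(log10 x) for finite x > 0, computed from the
// binary exponent and mantissa rather than from the decimal digits of x, so
// it is fast even for astronomically large values (accuracy is limited by
// float64 rounding very close to a power of ten).
func decimalExponent(x *big.Float) int64 {
	mant := new(big.Float)
	exp2 := x.MantExp(mant) // x = mant · 2**exp2, with 0.5 <= mant < 1
	m, _ := mant.Float64()
	return int64(math.Floor(float64(exp2)*math.Log10(2) + math.Log10(m)))
}

func main() {
	// 2**33219281 has about ten million decimal digits; formatting it with
	// Text would be slow, but the exponent estimate is instantaneous.
	x := new(big.Float).SetMantExp(big.NewFloat(1), 33219281)
	fmt.Println(decimalExponent(x)) // 10000000
}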
Example #15
File: pslq.go Project: ncw/pslq
// Given a vector of real numbers x = [x_0, x_1, ..., x_n], this
// uses the PSLQ algorithm to find a list of integers
// [c_0, c_1, ..., c_n] such that
//
//     |c_0 * x_0 + c_1 * x_1 + ... + c_n * x_n| < tolerance
//
// and such that max |c_k| < maxcoeff. If no such vector exists, Pslq
// returns one of the errors in this package, depending on whether it
// ran out of iterations or precision, or explored coefficients up to
// maxcoeff. The tolerance defaults to 3/4 of the precision.
//
// This is a fairly direct translation of the pseudocode given by
// David Bailey, "The PSLQ Integer Relation Algorithm":
// http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html
//
// If a result is returned, the first non-zero element will be positive
func (e *Pslq) Run(x []big.Float) ([]big.Int, error) {
	n := len(x)
	if n <= 1 {
		return nil, ErrorBadArguments
	}

	// At too low precision, the algorithm becomes meaningless
	if e.prec < 64 {
		return nil, ErrorPrecisionTooLow
	}

	if e.verbose && int(e.prec)/max(2, int(n)) < 5 {
		log.Printf("Warning: precision for PSLQ may be too low")
	}

	if e.verbose {
		log.Printf("PSLQ using prec %d and tol %g", e.prec, e.tol)
	}

	if e.tol.Sign() == 0 {
		return nil, ErrorToleranceRoundsToZero
	}

	// Temporary variables
	tmp0 := new(big.Float).SetPrec(e.prec)
	tmp1 := new(big.Float).SetPrec(e.prec)
	bigTmp := new(big.Int)

	// Convert to use 1-based indexing to allow us to be
	// consistent with Bailey's indexing.
	xNew := make([]big.Float, len(x)+1)
	minx := new(big.Float).SetPrec(e.prec)
	minxFirst := true
	for i, xk := range x {
		p := &xNew[i+1]
		p.Set(&xk)
		tmp0.Abs(p)
		if minxFirst || tmp0.Cmp(minx) < 0 {
			minxFirst = false
			minx.Set(tmp0)
		}
	}
	x = xNew
	if debug {
		printVector("x", x)
	}

	// Sanity check on magnitudes
	if minx.Sign() == 0 {
		return nil, ErrorZeroArguments
	}
	tmp1.SetInt64(128)
	tmp0.Quo(&e.tol, tmp1)
	if minx.Cmp(tmp0) < 0 { //  minx < tol/128
		return nil, ErrorArgumentTooSmall
	}

	tmp0.SetInt64(4)
	tmp1.SetInt64(3)
	tmp0.Quo(tmp0, tmp1)
	var γ big.Float
	e.Sqrt(tmp0, &γ) // γ = sqrt(4/3)
	if debug {
		fmt.Printf("γ = %f\n", &γ)
	}
	A := newBigIntMatrix(n+1, n+1)
	B := newBigIntMatrix(n+1, n+1)
	H := newMatrix(n+1, n+1)
	// Initialization Step 1
	//
	// Set the n×n matrices A and B to the identity.
	for i := 1; i <= n; i++ {
		for j := 1; j <= n; j++ {
			if i == j {
				A[i][j].SetInt64(1)
				B[i][j].SetInt64(1)
			} else {
				A[i][j].SetInt64(0)
				B[i][j].SetInt64(0)
			}
			H[i][j].SetInt64(0)
		}
	}
	if debug {
		printBigIntMatrix("A", A)
		printBigIntMatrix("B", B)
		printMatrix("H", H)
	}
	// Initialization Step 2
	//
	// For k := 1 to n
	//     compute s_k := sqrt( sum_j=k^n x_j^2 )
	// endfor.
	// Set t = 1/s1.
	// For k := 1 to n:
	//     y_k := t * x_k
	//     s_k := t * s_k
	// endfor.
	s := make([]big.Float, n+1)
	for i := 1; i <= n; i++ {
		s[i].SetInt64(0)
	}
	for k := 1; k <= n; k++ {
		var t big.Float
		t.SetInt64(0)
		for j := k; j <= n; j++ {
			tmp0.Mul(&x[j], &x[j])
			t.Add(&t, tmp0)
		}
		e.Sqrt(&t, &s[k])
	}
	if debug {
		fmt.Println("Init Step 2")
		printVector("s", s)
	}
	var t big.Float
	t.Set(&s[1])
	y := make([]big.Float, len(x))
	copy(y, x)
	for k := 1; k <= n; k++ {
		// y[k] = (x[k] << prec) / t
		y[k].Quo(&x[k], &t)
		// s[k] = (s[k] << prec) / t
		s[k].Quo(&s[k], &t)
	}
	if debug {
		printVector("y", y)
		printVector("s", s)
	}
	// Init Step 3
	//
	// Compute the n×(n−1) matrix H as follows:
	// For i := 1 to n:
	//     for j := i + 1 to n − 1:
	//         set Hij := 0
	//     endfor
	//     if i ≤ n − 1 then set Hii := s_(i+1)/s_i
	//     for j := 1 to i−1:
	//         set Hij := −y_i * y_j / (s_j * s_(j+1))
	//     endfor
	// endfor
	for i := 1; i <= n; i++ {
		for j := i + 1; j < n; j++ {
			H[i][j].SetInt64(0)
		}
		if i <= n-1 {
			if s[i].Sign() == 0 {
				// Precision probably exhausted
				return nil, ErrorPrecisionExhausted
			}
			// H[i][i] = (s[i+1] << prec) / s[i]
			H[i][i].Quo(&s[i+1], &s[i])
		}
		for j := 1; j < i; j++ {
			var sjj1 big.Float
			sjj1.Mul(&s[j], &s[j+1])
			if debug {
				fmt.Printf("sjj1 = %f\n", &sjj1)
			}
			if sjj1.Sign() == 0 {
				// Precision probably exhausted
				return nil, ErrorPrecisionExhausted
			}
			// H[i][j] = ((-y[i] * y[j]) << prec) / sjj1
			tmp0.Mul(&y[i], &y[j])
			tmp0.Neg(tmp0)
			H[i][j].Quo(tmp0, &sjj1)
		}
	}
	if debug {
		fmt.Println("Init Step 3")
		printMatrix("H", H)
	}
	// Init Step 4
	//
	// Perform full reduction on H, simultaneously updating y, A and B:
	//
	// For i := 2 to n:
	//     for j := i−1 to 1 step−1:
	//         t := nint(Hij/Hjj)
	//         y_j := y_j + t * y_i
	//         for k := 1 to j:
	//             Hik := Hik − t * Hjk
	//         endfor
	//         for k := 1 to n:
	//             Aik := Aik − t * Ajk
	//             Bkj := Bkj + t * Bki
	//         endfor
	//     endfor
	// endfor
	for i := 2; i <= n; i++ {
		for j := i - 1; j > 0; j-- {
			//t = floor(H[i][j]/H[j,j] + 0.5)
			var t big.Int
			var tFloat big.Float
			if H[j][j].Sign() == 0 {
				// Precision probably exhausted
				return nil, ErrorPrecisionExhausted
			}
			tmp0.Quo(&H[i][j], &H[j][j])
			e.NearestInt(tmp0, &t)
			tFloat.SetInt(&t).SetPrec(e.prec)
			if debug {
				fmt.Printf("H[i][j]=%f\n", &H[i][j])
				fmt.Printf("H[j][j]=%f\n", &H[j][j])
				fmt.Printf("tmp=%f\n", tmp0)
				fmt.Printf("t=%d\n", &t)
			}
			// y[j] = y[j] + (t * y[i] >> prec)
			tmp0.Mul(&y[i], &tFloat)
			y[j].Add(&y[j], tmp0)
			for k := 1; k <= j; k++ {
				// H[i][k] = H[i][k] - (t * H[j][k] >> prec)
				tmp0.Mul(&H[j][k], &tFloat)
				H[i][k].Sub(&H[i][k], tmp0)
			}
			for k := 1; k <= n; k++ {
				bigTmp.Mul(&t, &A[j][k])
				A[i][k].Sub(&A[i][k], bigTmp)
				bigTmp.Mul(&t, &B[k][i])
				B[k][j].Add(&B[k][j], bigTmp)
			}
		}
	}
	if debug {
		fmt.Println("Init Step 4")
		printBigIntMatrix("A", A)
		printBigIntMatrix("B", B)
		printMatrix("H", H)
	}
	// Main algorithm
	var REP int
	var norm big.Int
	vec := make([]big.Int, n)
	for REP = 0; REP < e.maxsteps; REP++ {
		// Step 1
		//
		// Select m such that γ^i * |Hii| is maximal when i = m.
		m := -1
		var szmax big.Float
		szmax.SetInt64(-1)
		var γPower big.Float
		γPower.Set(&γ)
		for i := 1; i < n; i++ {
			var absH big.Float
			absH.Abs(&H[i][i])
			var sz big.Float
			sz.Mul(&γPower, &absH)
			// sz := (g**i * abs(h)) >> (prec * (i - 1))
			if sz.Cmp(&szmax) > 0 {
				m = i
				szmax.Set(&sz)
			}
			γPower.Mul(&γPower, &γ)
		}
		if debug {
			fmt.Println("Step 1")
			fmt.Printf("szmax=%f\n", &szmax)
			fmt.Printf("m=%d\n", m)
		}
		// Step 2
		//
		// Exchange entries m and m+1 of y, corresponding rows
		// of A and H, and corresponding columns of B.
		y[m], y[m+1] = y[m+1], y[m]
		for i := 1; i < n+1; i++ {
			H[m][i], H[m+1][i] = H[m+1][i], H[m][i]
		}
		for i := 1; i < n+1; i++ {
			A[m][i], A[m+1][i] = A[m+1][i], A[m][i]
		}
		for i := 1; i < n+1; i++ {
			B[i][m], B[i][m+1] = B[i][m+1], B[i][m]
		}
		if debug {
			fmt.Println("Step 2")
			printVector("y", y)
			printBigIntMatrix("A", A)
			printBigIntMatrix("B", B)
			printMatrix("H", H)
		}
		// Step 3
		//
		// If m ≤ n−2 then update H as follows:
		//
		// t0 := sqrt( Hmm^2 + H(m,m+1)^2 )
		// t1 := Hmm/t0
		// t2 := H(m,m+1)/t0.
		// for i := m to n:
		//     t3 := Him
		//     t4 := Hi,m+1
		//     Him := t1t3 +t2t4
		//     Hi,m+1 := −t2t3 +t1t4
		// endfor.
		if m <= n-2 {
			tmp0.Mul(&H[m][m], &H[m][m])
			tmp1.Mul(&H[m][m+1], &H[m][m+1])
			tmp0.Add(tmp0, tmp1)
			var t0 big.Float
			e.Sqrt(tmp0, &t0)
			// Precision probably exhausted
			if t0.Sign() == 0 {
				return nil, ErrorPrecisionExhausted
			}
			var t1, t2 big.Float
			t1.Quo(&H[m][m], &t0)
			t2.Quo(&H[m][m+1], &t0)
			for i := m; i <= n; i++ {
				var t3, t4 big.Float
				t3.Set(&H[i][m])
				t4.Set(&H[i][m+1])
				// H[i][m] = (t1*t3 + t2*t4) >> prec
				tmp0.Mul(&t1, &t3)
				tmp1.Mul(&t2, &t4)
				H[i][m].Add(tmp0, tmp1)
				// H[i][m+1] = (-t2*t3 + t1*t4) >> prec
				tmp0.Mul(&t2, &t3)
				tmp1.Mul(&t1, &t4)
				H[i][m+1].Sub(tmp1, tmp0)
			}
		}
		if debug {
			fmt.Println("Step 3")
			printMatrix("H", H)
		}
		// Step 4
		// Perform block reduction on H, simultaneously updating y, A and B:
		//
		// For i := m+1 to n:
		//     for j := min(i−1, m+1) to 1 step −1:
		//         t := nint(Hij/Hjj)
		//         yj := yj + t * yi
		//         for k := 1 to j:
		//             Hik := Hik − tHjk
		//         endfor
		//         for k := 1 to n:
		//             Aik := Aik −tAjk
		//             Bkj := Bkj +tBki
		//         endfor
		//     endfor
		// endfor.
		for i := m + 1; i <= n; i++ {
			var t big.Int
			var tFloat big.Float
			for j := min(i-1, m+1); j > 0; j-- {
				if H[j][j].Sign() == 0 {
					// Precision probably exhausted
					return nil, ErrorPrecisionExhausted
				}
				tmp0.Quo(&H[i][j], &H[j][j])
				e.NearestInt(tmp0, &t)
				tFloat.SetInt(&t).SetPrec(e.prec)
				// y[j] = y[j] + ((t * y[i]) >> prec)
				tmp0.Mul(&y[i], &tFloat)
				y[j].Add(&y[j], tmp0)
				for k := 1; k <= j; k++ {
					// H[i][k] = H[i][k] - (t * H[j][k] >> prec)
					tmp0.Mul(&H[j][k], &tFloat)
					H[i][k].Sub(&H[i][k], tmp0)
				}
				for k := 1; k <= n; k++ {
					bigTmp.Mul(&t, &A[j][k])
					A[i][k].Sub(&A[i][k], bigTmp)
					bigTmp.Mul(&t, &B[k][i])
					B[k][j].Add(&B[k][j], bigTmp)
				}
			}
		}
		if debug {
			fmt.Println("Step 4")
			printBigIntMatrix("A", A)
			printBigIntMatrix("B", B)
			printMatrix("H", H)
		}

		// Step 6
		//
		// Termination test: If the largest entry of A exceeds
		// the level of numeric precision used, then precision
		// is exhausted. If the smallest entry of the y vector
		// is less than the detection threshold, a relation
		// has been detected and is given in the corresponding
		// column of B.
		//
		// Until a relation is found, the error typically decreases
		// slowly (e.g. a factor 1-10) with each step TODO: we could
		// compare err from two successive iterations. If there is a
		// large drop (several orders of magnitude), that indicates a
		// "high quality" relation was detected. Reporting this to
		// the user somehow might be useful.

		maxAPrecision := 0
		for i := 1; i <= n; i++ {
			for j := 1; j <= n; j++ {
				precision := A[i][j].BitLen()
				if precision > maxAPrecision {
					maxAPrecision = precision
				}
			}
		}
		if debug {
			log.Printf("Max A precision = %d, precision = %d, tolerance %d, ratio = %.3f\n", maxAPrecision, e.prec, e.target, float64(maxAPrecision)/float64(e.target))
		}
		if float64(maxAPrecision)/float64(e.target) > 0.85 {
			if e.verbose {
				log.Printf("CANCELLING after step %d/%d.", REP, e.maxsteps)
			}
			return nil, ErrorPrecisionExhausted
		}

		var best_err big.Float
		best_err.Set(&e.maxcoeff_fp)
		for i := 1; i <= n; i++ {
			var err big.Float
			err.Abs(&y[i])
			// Maybe we are done?
			if err.Cmp(&e.tol) < 0 {
				// We are done if the coefficients are acceptable
				var maxc big.Int
				for j := 1; j <= n; j++ {
					if debug {
						fmt.Printf("vec[%d]=%d\n", j-1, &B[j][i])
					}
					t := B[j][i]
					if debug {
						fmt.Printf("vec[%d]=%d\n", j-1, t)
					}
					vec[j-1] = t
					if t.Sign() < 0 {
						t.Neg(&t)
					}
					if t.Cmp(&maxc) > 0 {
						maxc.Set(&t)
					}
				}
				if debug {
					fmt.Printf("maxc = %d, maxcoeff = %d\n", maxc, e.maxcoeff)
				}
				if maxc.Cmp(&e.maxcoeff) < 0 {
					if e.verbose {
						log.Printf("FOUND relation at iter %d/%d, error: %g", REP, e.maxsteps, &err)
					}
					// Find sign of first non zero item
					sign := 0
					for i := range vec {
						sign = vec[i].Sign()
						if sign != 0 {
							break
						}
					}
					// Normalise vec making first non-zero argument positive
					if sign < 0 {
						for i := range vec {
							vec[i].Neg(&vec[i])
						}
					}
					return vec, nil
				}
			}
			if err.Cmp(&best_err) < 0 {
				best_err = err
			}
		}
		// Step 5
		//
		// Norm bound: Compute M := 1/maxj |Hj|, where Hj
		// denotes the j-th row of H.
		//
		// Then there can exist no relation vector whose
		// Euclidean norm is less than M.
		//
		// Calculate a lower bound for the norm. We could do this
		// more exactly (using the Euclidean norm) but there is probably
		// no practical benefit.
		var recnorm big.Float
		recnorm.SetInt64(0)
		for i := 1; i <= n; i++ {
			for j := 1; j <= n; j++ {
				tmp0.Abs(&H[i][j])
				if tmp0.Cmp(&recnorm) > 0 {
					recnorm.Set(tmp0)
				}
			}
		}
		norm.Set(&e.maxcoeff)
		if recnorm.Sign() != 0 {
			// norm = ((1 << (2 * prec)) / recnorm) >> prec
			tmp0.Quo(&e.one, &recnorm)
			tmp0.Int(&norm)
		}
		if e.verbose {
			log.Printf("%2d/%2d:  Error: %g   Norm: %d", REP, e.maxsteps, &best_err, &norm)
		}
		if norm.Cmp(&e.maxcoeff) >= 0 {
			if e.verbose {
				log.Printf("CANCELLING after step %d/%d.", REP, e.maxsteps)
				log.Printf("Could not find an integer relation. Norm bound: %d", &norm)
			}
			return nil, ErrorNoRelationFound
		}
	}
	if e.verbose {
		log.Printf("CANCELLING after step %d/%d.", REP, e.maxsteps)
		log.Printf("Could not find an integer relation. Norm bound: %d", &norm)
	}
	return nil, ErrorIterationsExceeded
}
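For a concrete sense of what Run looks for: the vector [log 2, log 3, log 6] has the integer relation [1, 1, -1], since log 6 = log 2 + log 3. The sketch below only verifies that defining inequality with float64-derived inputs; it does not construct a Pslq instance or run the algorithm:

package main

import (
	"fmt"
	"math"
	"math/big"
)

func main() {
	x := []*big.Float{
		big.NewFloat(math.Log(2)),
		big.NewFloat(math.Log(3)),
		big.NewFloat(math.Log(6)),
	}
	c := []int64{1, 1, -1}

	// |c_0*x_0 + c_1*x_1 + c_2*x_2| should be below any reasonable tolerance.
	sum := new(big.Float)
	for i, xi := range x {
		term := new(big.Float).Mul(big.NewFloat(float64(c[i])), xi)
		sum.Add(sum, term)
	}
	fmt.Println(sum.Abs(sum)) // ≈ 0 (at most a few float64 ulps, well under 1e-15)
}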