// StdDevBatch predicts the standard deviation of the function at each of the
// locations specified by the rows of x.
func (g *GP) StdDevBatch(std []float64, x mat64.Matrix) []float64 {
	r, c := x.Dims()
	if c != g.inputDim {
		panic(badInputLength)
	}
	if std == nil {
		std = make([]float64, r)
	}
	if len(std) != r {
		panic(badStorage)
	}
	// For a single point, the predictive variance is
	//  sigma^2 = k(x_*, x_*) - k_*^T * K^-1 * k_*
	// where k_* is the vector of kernel values between the training inputs and
	// the prediction location x_*.
	// For many points, the formula is:
	//  nu_* = k(x_*, x_*) - k_*^T * K^-1 * k_*
	// This creates the full covariance matrix, which is an r x r matrix. However,
	// the standard deviations are just the diagonal of this matrix. Instead, be
	// smart about it and compute the diagonal terms one at a time.
	kStar := g.formKStar(x)
	var tmp mat64.Dense
	tmp.SolveCholesky(g.cholK, kStar)

	// Set k(x_*, x_*) into std, then subtract k_*^T * K^-1 * k_*, computed one
	// test point at a time.
	var tmp2 mat64.Vector
	row := make([]float64, c)
	for i := range std {
		for k := 0; k < c; k++ {
			row[k] = x.At(i, k)
		}
		std[i] = g.kernel.Distance(row, row)
		tmp2.MulVec(kStar.ColView(i).T(), tmp.ColView(i))
		rt, ct := tmp2.Dims()
		if rt != 1 || ct != 1 {
			panic("bad size")
		}
		std[i] -= tmp2.At(0, 0)
		std[i] = math.Sqrt(std[i])
	}
	// Need to scale the standard deviation to be in the same units as y.
	floats.Scale(g.std, std)
	return std
}
// StdDev predicts the standard deviation of the function at x.
func (g *GP) StdDev(x []float64) float64 {
	if len(x) != g.inputDim {
		panic(badInputLength)
	}
	// nu_* = k(x_*, x_*) - k_*^T * K^-1 * k_*
	n := len(g.outputs)
	kstar := mat64.NewVector(n, nil)
	for i := 0; i < n; i++ {
		v := g.kernel.Distance(g.inputs.RawRowView(i), x)
		kstar.SetVec(i, v)
	}
	self := g.kernel.Distance(x, x)
	var tmp mat64.Vector
	tmp.SolveCholeskyVec(g.cholK, kstar)
	var tmp2 mat64.Vector
	tmp2.MulVec(kstar.T(), &tmp)
	rt, ct := tmp2.Dims()
	if rt != 1 || ct != 1 {
		panic("bad size")
	}
	return math.Sqrt(self-tmp2.At(0, 0)) * g.std
}
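// The predictive-variance formula used by both StdDev and StdDevBatch can be
// exercised in isolation with plain mat64 primitives. The sketch below is
// illustrative only: the kernel values are hard-coded placeholders standing in
// for g.kernel.Distance evaluations, the function name varianceSketch is
// hypothetical, and it assumes the math and mat64 imports already used in this
// file.
func varianceSketch() float64 {
	// K is the kernel matrix over two training points and kStar is the kernel
	// vector between those points and a single test point x_*.
	K := mat64.NewSymDense(2, []float64{
		1.0, 0.5,
		0.5, 1.0,
	})
	kStar := mat64.NewVector(2, []float64{0.3, 0.8})
	self := 1.0 // k(x_*, x_*)

	var chol mat64.Cholesky
	if ok := chol.Factorize(K); !ok {
		panic("kernel matrix is not positive definite")
	}

	// tmp = K^-1 * k_*
	var tmp mat64.Vector
	if err := tmp.SolveCholeskyVec(&chol, kStar); err != nil {
		panic(err)
	}

	// quad = k_*^T * K^-1 * k_*
	var quad mat64.Vector
	quad.MulVec(kStar.T(), &tmp)

	// stddev = sqrt(k(x_*, x_*) - k_*^T * K^-1 * k_*)
	return math.Sqrt(self - quad.At(0, 0))
}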
// ConditionNormal returns the Normal distribution that is the receiver conditioned
// on the input evidence. The returned multivariate normal has dimension
// n - len(observed), where n is the dimension of the original receiver. The updated
// mean and covariance are
//  mu = mu_un + sigma_{ob,un}^T * sigma_{ob,ob}^-1 * (v - mu_ob)
//  sigma = sigma_{un,un} - sigma_{ob,un}^T * sigma_{ob,ob}^-1 * sigma_{ob,un}
// where mu_un and mu_ob are the original means of the unobserved and observed
// variables respectively, sigma_{un,un} is the unobserved subset of the covariance
// matrix, sigma_{ob,ob} is the observed subset of the covariance matrix, and
// sigma_{ob,un} are the cross terms. The values v are the observed values of the
// dimensions listed in observed. The dimension order is preserved during
// conditioning, so if the value of dimension 1 is observed, the returned normal
// represents dimensions {0, 2, ...} of the original Normal distribution.
//
// ConditionNormal returns {nil, false} if there is a failure during the update.
// Mathematically this is impossible, but can occur with finite precision arithmetic.
func (n *Normal) ConditionNormal(observed []int, values []float64, src *rand.Rand) (*Normal, bool) {
	if len(observed) == 0 {
		panic("normal: no observed value")
	}
	if len(observed) != len(values) {
		panic("normal: input slice length mismatch")
	}
	for _, v := range observed {
		if v < 0 || v >= n.Dim() {
			panic("normal: observed value out of bounds")
		}
	}

	ob := len(observed)
	unob := n.Dim() - ob
	obMap := make(map[int]struct{})
	for _, v := range observed {
		if _, ok := obMap[v]; ok {
			panic("normal: observed dimension occurs twice")
		}
		obMap[v] = struct{}{}
	}
	if len(observed) == n.Dim() {
		panic("normal: all dimensions observed")
	}
	unobserved := make([]int, 0, unob)
	for i := 0; i < n.Dim(); i++ {
		if _, ok := obMap[i]; !ok {
			unobserved = append(unobserved, i)
		}
	}
	mu1 := make([]float64, unob)
	for i, v := range unobserved {
		mu1[i] = n.mu[v]
	}
	mu2 := make([]float64, ob) // really v - mu2
	for i, v := range observed {
		mu2[i] = values[i] - n.mu[v]
	}

	n.setSigma()

	var sigma11, sigma22 mat64.SymDense
	sigma11.SubsetSym(n.sigma, unobserved)
	sigma22.SubsetSym(n.sigma, observed)

	sigma21 := mat64.NewDense(ob, unob, nil)
	for i, r := range observed {
		for j, c := range unobserved {
			v := n.sigma.At(r, c)
			sigma21.Set(i, j, v)
		}
	}

	var chol mat64.Cholesky
	ok := chol.Factorize(&sigma22)
	if !ok {
		return nil, ok
	}

	// Compute sigma_{2,1}^T * sigma_{2,2}^-1 * (v - mu_2).
	v := mat64.NewVector(ob, mu2)
	var tmp, tmp2 mat64.Vector
	err := tmp.SolveCholeskyVec(&chol, v)
	if err != nil {
		return nil, false
	}
	tmp2.MulVec(sigma21.T(), &tmp)

	// Compute sigma_{2,1}^T * sigma_{2,2}^-1 * sigma_{2,1}.
	// TODO(btracey): Should this be a method of SymDense?
	var tmp3, tmp4 mat64.Dense
	err = tmp3.SolveCholesky(&chol, sigma21)
	if err != nil {
		return nil, false
	}
	tmp4.Mul(sigma21.T(), &tmp3)

	for i := range mu1 {
		mu1[i] += tmp2.At(i, 0)
	}

	// TODO(btracey): If tmp2 can be constructed with a method, then this can be
	// replaced with SubSym.
	for i := 0; i < len(unobserved); i++ {
		for j := i; j < len(unobserved); j++ {
			v := sigma11.At(i, j)
			sigma11.SetSym(i, j, v-tmp4.At(i, j))
		}
	}
	return NewNormal(mu1, &sigma11, src)
}
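// As a concrete illustration of the update above, the hypothetical helper
// below conditions a bivariate normal on its second dimension. It is a sketch,
// assuming it compiles in the same package as Normal and NewNormal, that the
// fmt and mat64 imports are available, and that a nil random source is
// accepted as the default; the numbers are illustrative.
func conditionNormalSketch() {
	mu := []float64{0, 0}
	sigma := mat64.NewSymDense(2, []float64{
		1.0, 0.8,
		0.8, 2.0,
	})
	n, ok := NewNormal(mu, sigma, nil)
	if !ok {
		panic("covariance matrix is not positive definite")
	}
	// Observe dimension 1 with value 1.5. By the formulas above, the result is
	// a 1-dimensional normal over dimension 0 with
	//  mu    = 0 + (0.8/2.0)*(1.5 - 0) = 0.6
	//  sigma = 1.0 - 0.8*0.8/2.0       = 0.68
	cond, ok := n.ConditionNormal([]int{1}, []float64{1.5}, nil)
	if !ok {
		panic("conditioning failed")
	}
	fmt.Println("conditioned dimension:", cond.Dim())
}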
func testSimplex(t *testing.T, initialBasic []int, c []float64, a mat64.Matrix, b []float64, convergenceTol float64) error {
	primalOpt, primalX, _, errPrimal := simplex(initialBasic, c, a, b, convergenceTol)
	if errPrimal == nil {
		// No error solving the simplex, check that the solution is feasible.
		var bCheck mat64.Vector
		bCheck.MulVec(a, mat64.NewVector(len(primalX), primalX))
		if !mat64.EqualApprox(&bCheck, mat64.NewVector(len(b), b), 1e-10) {
			t.Errorf("No error in primal but solution infeasible")
		}
	}

	primalInfeasible := errPrimal == ErrInfeasible
	primalUnbounded := errPrimal == ErrUnbounded
	primalBounded := errPrimal == nil
	primalASingular := errPrimal == ErrSingular
	primalZeroRow := errPrimal == ErrZeroRow
	primalZeroCol := errPrimal == ErrZeroColumn

	primalBad := !primalInfeasible && !primalUnbounded && !primalBounded && !primalASingular && !primalZeroRow && !primalZeroCol

	// It's an error if it's not one of the known returned errors. If it's
	// singular the problem is undefined and so the result cannot be compared
	// to the dual.
	if errPrimal == ErrSingular || primalBad {
		if primalBad {
			t.Errorf("non-known error returned: %s", errPrimal)
		}
		return errPrimal
	}

	// Compare the result to the answer found from solving the dual LP.

	// Construct and solve the dual LP.
	// Standard Form:
	//  minimize c^T * x
	//    subject to A * x = b, x >= 0
	// The dual of this problem is
	//  maximize -b^T * nu
	//    subject to A^T * nu + c >= 0
	// Which is
	//  minimize b^T * nu
	//    subject to -A^T * nu <= c
	negAT := &mat64.Dense{}
	negAT.Clone(a.T())
	negAT.Scale(-1, negAT)
	cNew, aNew, bNew := Convert(b, negAT, c, nil, nil)

	dualOpt, dualX, _, errDual := simplex(nil, cNew, aNew, bNew, convergenceTol)
	if errDual == nil {
		// Check that the dual is feasible.
		var bCheck mat64.Vector
		bCheck.MulVec(aNew, mat64.NewVector(len(dualX), dualX))
		if !mat64.EqualApprox(&bCheck, mat64.NewVector(len(bNew), bNew), 1e-10) {
			t.Errorf("No error in dual but solution infeasible")
		}
	}

	// Check the zero status.
	if errPrimal == ErrZeroRow || errPrimal == ErrZeroColumn {
		return errPrimal
	}

	// If the primal problem is feasible, then the primal and the dual should
	// have the same answer. We have flipped the sign in the dual (minimizing
	// b^T * nu instead of maximizing -b^T * nu), so flip it back.
	if errPrimal == nil {
		if errDual != nil {
			t.Errorf("Primal feasible but dual errored: %s", errDual)
		}
		dualOpt *= -1
		if !floats.EqualWithinAbsOrRel(dualOpt, primalOpt, convergenceTol, convergenceTol) {
			t.Errorf("Primal and dual value mismatch. Primal %v, dual %v.", primalOpt, dualOpt)
		}
	}
	// If the primal problem is unbounded, then the dual should be infeasible.
	if errPrimal == ErrUnbounded && errDual != ErrInfeasible {
		t.Errorf("Primal unbounded but dual not infeasible. ErrDual = %s", errDual)
	}

	// If the dual is unbounded, then the primal should be infeasible.
	if errDual == ErrUnbounded && errPrimal != ErrInfeasible {
		t.Errorf("Dual unbounded but primal not infeasible. ErrPrimal = %s", errPrimal)
	}

	// If the primal is infeasible, then the dual should be either infeasible
	// or unbounded.
	if errPrimal == ErrInfeasible {
		if errDual != ErrUnbounded && errDual != ErrInfeasible && errDual != ErrZeroColumn {
			t.Errorf("Primal infeasible but dual not infeasible or unbounded: %s", errDual)
		}
	}
	return errPrimal
}
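// A hedged sketch of how the helper above might be driven from a test. The LP
// below (minimize -x_0 - 2*x_1 subject to x_0 + x_1 + x_2 = 4,
// x_0 + 3*x_1 + x_3 = 6, x >= 0) is bounded and feasible, so testSimplex is
// expected to return nil. The test name and the specific problem are
// illustrative, not taken from the real test suite.
func TestSimplexSmokeSketch(t *testing.T) {
	c := []float64{-1, -2, 0, 0}
	a := mat64.NewDense(2, 4, []float64{
		1, 1, 1, 0,
		1, 3, 0, 1,
	})
	b := []float64{4, 6}
	if err := testSimplex(t, nil, c, a, b, 1e-10); err != nil {
		t.Errorf("unexpected error for a bounded feasible LP: %v", err)
	}
}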