Example #1
func MakeFitLinScale(targetImage *imgut.Image) func(*imgut.Image) float64 {
	// Pre-compute image to slice of floats
	dataTarg := imgut.ToSlice(targetImage)
	// Pre-compute average
	avgt := floats.Sum(dataTarg) / float64(len(dataTarg))
	return func(indImage *imgut.Image) float64 {
		// Images to vector
		dataInd := imgut.ToSlice(indImage)
		// Compute the average pixel value
		avgy := floats.Sum(dataInd) / float64(len(dataInd))
		// Difference y - avgy
		y_avgy := make([]float64, len(dataInd))
		copy(y_avgy, dataInd)
		floats.AddConst(-avgy, y_avgy)
		// Difference t - avgt
		t_avgt := make([]float64, len(dataTarg))
		copy(t_avgt, dataTarg)
		floats.AddConst(-avgt, t_avgt)
		// Multiplication (t - avgt)(y - avgy)
		floats.Mul(t_avgt, y_avgy)
		// Summation
		numerator := floats.Sum(t_avgt)
		// Square (y - avgy)^2
		floats.Mul(y_avgy, y_avgy)
		denomin := floats.Sum(y_avgy)
		// Compute b-value
		b := numerator / denomin
		// Compute a-value
		a := avgt - b*avgy

		// Now compute the scaled RMSE, using y' = a + b*y
		floats.Scale(b, dataInd)      // b*y
		floats.AddConst(a, dataInd)   // a + b*y
		floats.Sub(dataInd, dataTarg) // (a + b * y - t)
		floats.Mul(dataInd, dataInd)  // (a + b * y - t)^2
		total := floats.Sum(dataInd)  // Sum(...)
		return math.Sqrt(total / float64(len(dataInd)))
	}
}
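The closure computes the least-squares slope b = Σ(t - avgt)(y - avgy) / Σ(y - avgy)² and intercept a = avgt - b·avgy, then returns the RMSE between a + b·y and the target t. A minimal usage sketch (hypothetical: target and candidate are assumed to be *imgut.Image values obtained elsewhere, since imgut's loading API is not shown here):

	fitness := MakeFitLinScale(target)
	score := fitness(candidate) // scaled RMSE: lower is better
	fmt.Println("scaled RMSE:", score)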
Example #2
// CovarianceMatrix calculates a covariance matrix (also known as a
// variance-covariance matrix) from a matrix of data, using a two-pass
// algorithm.
//
// If weights is nil, each observation is given equal weight; otherwise weights
// must have length equal to the number of rows in the input data matrix x, and
// must not be negative. If cov is nil, a new matrix of the appropriate size is
// constructed. If cov is not nil, it must have the same number of columns as the
// input data matrix x, and it is used as the destination for the covariance data.
func CovarianceMatrix(cov *mat64.SymDense, x mat64.Matrix, weights []float64) *mat64.SymDense {
	// This is the matrix version of the two-pass algorithm. It doesn't use the
	// additional floating point error correction that the Covariance function uses
	// to reduce the impact of rounding during centering.

	r, c := x.Dims()

	if cov == nil {
		cov = mat64.NewSymDense(c, nil)
	} else if n := cov.Symmetric(); n != c {
		panic(matrix.ErrShape)
	}

	var xt mat64.Dense
	xt.Clone(x.T())
	// Subtract the mean of each of the columns.
	for i := 0; i < c; i++ {
		v := xt.RawRowView(i)
		// This will panic with ErrShape if len(weights) != len(v), so
		// we don't have to check the size later.
		mean := Mean(v, weights)
		floats.AddConst(-mean, v)
	}

	if weights == nil {
		// Calculate the normalization factor
		// scaled by the sample size.
		cov.SymOuterK(1/(float64(r)-1), &xt)
		return cov
	}

	// Multiply by the sqrt of the weights, so that multiplication is symmetric.
	sqrtwts := make([]float64, r)
	for i, w := range weights {
		if w < 0 {
			panic("stat: negative covariance matrix weights")
		}
		sqrtwts[i] = math.Sqrt(w)
	}
	// Weight the rows.
	for i := 0; i < c; i++ {
		v := xt.RawRowView(i)
		floats.Mul(v, sqrtwts)
	}

	// Calculate the normalization factor
	// scaled by the weighted sample size.
	cov.SymOuterK(1/(floats.Sum(weights)-1), &xt)
	return cov
}
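A hedged usage sketch (data values invented for illustration): three observations of two variables, unweighted, letting the function allocate the destination matrix.

	x := mat64.NewDense(3, 2, []float64{
		1, 2,
		3, 5,
		4, 9,
	})
	// A nil cov lets CovarianceMatrix allocate a 2×2 SymDense;
	// nil weights gives every observation equal weight.
	cov := CovarianceMatrix(nil, x, nil)
	fmt.Printf("cov = %v\n", mat64.Formatted(cov))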
Example #3
// PrincipalComponents returns the principal component direction vectors and
// the column variances of the principal component scores, a * vecs, computed
// using the singular value decomposition of the input. The input a is an n×d
// matrix where each row is an observation and each column represents a variable.
//
// PrincipalComponents centers the variables but does not scale the variance.
//
// The slice weights is used to weight the observations. If weights is nil,
// each weight is considered to have a value of one, otherwise the length of
// weights must match the number of observations or PrincipalComponents will
// panic.
//
// On successful completion, the principal component direction vectors are
// returned in vecs as a d×min(n, d) matrix, and the variances are returned in
// vars as a min(n, d)-long slice in descending sort order.
//
// If no singular value decomposition is possible, vecs and vars are returned
// nil and ok is returned false.
func PrincipalComponents(a mat64.Matrix, weights []float64) (vecs *mat64.Dense, vars []float64, ok bool) {
	n, d := a.Dims()
	if weights != nil && len(weights) != n {
		panic("stat: len(weights) != observations")
	}

	centered := mat64.NewDense(n, d, nil)
	col := make([]float64, n)
	for j := 0; j < d; j++ {
		mat64.Col(col, j, a)
		floats.AddConst(-Mean(col, weights), col)
		centered.SetCol(j, col)
	}
	for i, w := range weights {
		floats.Scale(math.Sqrt(w), centered.RawRowView(i))
	}

	kind := matrix.SVDFull
	if n > d {
		kind = matrix.SVDThin
	}
	var svd mat64.SVD
	ok = svd.Factorize(centered, kind)
	if !ok {
		return nil, nil, false
	}

	vecs = &mat64.Dense{}
	vecs.VFromSVD(&svd)
	if n < d {
		// Don't retain columns that are not valid direction vectors.
		vecs.Clone(vecs.View(0, 0, d, n))
	}
	vars = svd.Values(nil)
	var f float64
	if weights == nil {
		f = 1 / float64(n-1)
	} else {
		f = 1 / (floats.Sum(weights) - 1)
	}
	for i, v := range vars {
		vars[i] = f * v * v
	}
	return vecs, vars, true
}
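A hedged sketch of a call (data values invented): factor the observations, then multiply by the first two direction vectors to obtain two-dimensional principal component scores.

	// a is an n×d matrix of observations (here n = 4, d = 3).
	a := mat64.NewDense(4, 3, []float64{
		2.1, 3.4, 0.5,
		1.9, 3.6, 0.7,
		2.5, 2.9, 0.1,
		2.0, 3.1, 0.4,
	})
	vecs, vars, ok := PrincipalComponents(a, nil)
	if !ok {
		return // no SVD possible for this input
	}
	// Project onto the first two principal directions.
	var scores mat64.Dense
	scores.Mul(a, vecs.View(0, 0, 3, 2))
	fmt.Println("score variances:", vars)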
Example #4
File: norm.go Project: cjslep/stat
// ConjugateUpdate updates the parameters of the distribution from the sufficient
// statistics of a set of samples. The sufficient statistics, suffStat, have been
// observed with nSamples observations. The prior values of the distribution are those
// currently in the distribution, and have been observed with priorStrength samples.
//
// For the normal distribution, the sufficient statistics are the mean and
// uncorrected standard deviation of the samples.
// The prior corresponds to having seen priorStrength[0] samples with mean Normal.Mu
// and priorStrength[1] samples with standard deviation Normal.Sigma. As a result of
// this function, Normal.Mu and Normal.Sigma are updated based on the weighted
// samples, and priorStrength is increased by the number of new samples observed.
//
// This function panics if len(suffStat) != 2 or len(priorStrength) != 2.
func (n *Normal) ConjugateUpdate(suffStat []float64, nSamples float64, priorStrength []float64) {
	// TODO: Support prior strength with math.Inf(1) to allow updating with
	// a known mean/standard deviation

	totalMeanSamples := nSamples + priorStrength[0]
	totalSum := suffStat[0]*nSamples + n.Mu*priorStrength[0]

	totalVarianceSamples := nSamples + priorStrength[1]
	// Sum of squared deviations of the new samples.
	totalVariance := nSamples * suffStat[1] * suffStat[1]
	// Add the prior's sum of squared deviations.
	totalVariance += priorStrength[1] * n.Sigma * n.Sigma
	// Add the cross term arising from the difference between the two means.
	meanDiff := suffStat[0] - n.Mu
	totalVariance += priorStrength[0] * nSamples * meanDiff * meanDiff / totalMeanSamples

	n.Mu = totalSum / totalMeanSamples
	n.Sigma = math.Sqrt(totalVariance / totalVarianceSamples)
	floats.AddConst(nSamples, priorStrength)
}
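A hedged usage sketch (numbers invented; it assumes Normal is a struct whose Mu and Sigma fields can be set directly, as the method body suggests): start from a prior belief, then fold in a batch of samples summarized by its mean and uncorrected standard deviation.

	// Prior: standard normal, with each parameter treated as having
	// been observed over 10 samples.
	n := &Normal{Mu: 0, Sigma: 1}
	strength := []float64{10, 10}

	// New batch: 5 samples with mean 2.0 and uncorrected std dev 0.5.
	n.ConjugateUpdate([]float64{2.0, 0.5}, 5, strength)

	// n.Mu and n.Sigma now reflect the pooled evidence, and strength
	// has been advanced to {15, 15}.
	fmt.Println(n.Mu, n.Sigma, strength)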
Example #5
// CovarianceMatrix calculates a covariance matrix (also known as a
// variance-covariance matrix) from a matrix of data, using a two-pass
// algorithm. The matrix returned will be symmetric and square.
//
// The weights wts must have length equal to the number of rows in the
// input data matrix x and must not be negative; if wts is nil, each
// observation is given equal weight. If cov is nil, a new matrix of the
// appropriate size is constructed. If cov is not nil, it must be a square
// matrix with the same number of columns as the input data matrix x, and
// it is used as the destination for the covariance data.
func CovarianceMatrix(cov *mat64.Dense, x mat64.Matrix, wts []float64) *mat64.Dense {
	// This is the matrix version of the two-pass algorithm. It doesn't use the
	// additional floating point error correction that the Covariance function uses
	// to reduce the impact of rounding during centering.

	// TODO(jonlawlor): indicate that the resulting matrix is symmetric, and change
	// the returned type from a *mat.Dense to a *mat.Symmetric.

	r, c := x.Dims()

	if cov == nil {
		cov = mat64.NewDense(c, c, nil)
	} else if covr, covc := cov.Dims(); covr != covc || covc != c {
		panic(mat64.ErrShape)
	}

	var xt mat64.Dense
	xt.TCopy(x)
	// Subtract the mean of each of the columns.
	for i := 0; i < c; i++ {
		v := xt.RawRowView(i)
		// This will panic with ErrShape if len(wts) != len(v), so
		// we don't have to check the size later.
		mean := Mean(v, wts)
		floats.AddConst(-mean, v)
	}

	var n float64
	if wts == nil {
		n = float64(r)
		cov.MulTrans(&xt, false, &xt, true)
		// Scale by the sample size.
		cov.Scale(1/(n-1), cov)
		return cov
	}

	// Multiply by the sqrt of the weights, so that multiplication is symmetric.
	sqrtwts := make([]float64, r)
	for i, w := range wts {
		if w < 0 {
			panic("stat: negative covariance matrix weights")
		}
		sqrtwts[i] = math.Sqrt(w)
	}
	// Weight the rows.
	for i := 0; i < c; i++ {
		v := xt.RawRowView(i)
		floats.Mul(v, sqrtwts)
	}

	// Calculate the normalization factor.
	n = floats.Sum(wts)
	cov.MulTrans(&xt, false, &xt, true)

	// Scale by the sample size.
	cov.Scale(1/(n-1), cov)
	return cov
}
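The sqrt-of-weights trick above works because scaling each row by √wᵢ makes the plain product reproduce the weighted one: Σ wᵢ·xᵢ·yᵢ = Σ (√wᵢ·xᵢ)(√wᵢ·yᵢ). A minimal numeric check of that identity (values invented):

	x := []float64{1, 2, 3}
	y := []float64{4, 5, 6}
	w := []float64{0.5, 1, 2}

	// Weighted dot product computed directly.
	direct := 0.0
	for i := range x {
		direct += w[i] * x[i] * y[i]
	}

	// The same value via pre-scaling both vectors by sqrt(w).
	sx := make([]float64, len(x))
	sy := make([]float64, len(y))
	for i, wi := range w {
		sx[i] = math.Sqrt(wi) * x[i]
		sy[i] = math.Sqrt(wi) * y[i]
	}
	viaSqrt := floats.Dot(sx, sy)

	fmt.Println(direct, viaSqrt) // both print 48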
Example #6
// PageRankSparse returns the PageRank weights for nodes of the sparse directed
// graph g using the given damping factor and terminating when the 2-norm of the
// vector difference between iterations is below tol. The returned map is
// keyed on the graph node IDs.
func PageRankSparse(g graph.Directed, damp, tol float64) map[int]float64 {
	// PageRankSparse is implemented according to "How Google Finds Your Needle
	// in the Web's Haystack".
	//
	// G.I^k = alpha.H.I^k + alpha.A.I^k + (1-alpha).1/n.1.I^k
	//
	// http://www.ams.org/samplings/feature-column/fcarc-pagerank

	nodes := g.Nodes()
	indexOf := make(map[int]int, len(nodes))
	for i, n := range nodes {
		indexOf[n.ID()] = i
	}

	m := make(rowCompressedMatrix, len(nodes))
	var dangling compressedRow
	df := damp / float64(len(nodes))
	for j, u := range nodes {
		to := g.From(u)
		f := damp / float64(len(to))
		for _, v := range to {
			m.addTo(indexOf[v.ID()], j, f)
		}
		if len(to) == 0 {
			dangling.addTo(j, df)
		}
	}

	last := make([]float64, len(nodes))
	for i := range last {
		last[i] = 1
	}
	lastV := mat64.NewVector(len(nodes), last)

	vec := make([]float64, len(nodes))
	var sum float64
	for i := range vec {
		r := rand.NormFloat64()
		sum += r
		vec[i] = r
	}
	f := 1 / sum
	for i := range vec {
		vec[i] *= f
	}
	v := mat64.NewVector(len(nodes), vec)

	dt := (1 - damp) / float64(len(nodes))
	for {
		lastV, v = v, lastV

		m.mulVecUnitary(v, lastV)          // First term of the G matrix equation;
		with := dangling.dotUnitary(lastV) // Second term;
		away := onesDotUnitary(dt, lastV)  // Last term.

		floats.AddConst(with+away, v.RawVector().Data)
		if normDiff(vec, last) < tol {
			break
		}
	}

	ranks := make(map[int]float64, len(nodes))
	for i, r := range v.RawVector().Data {
		ranks[nodes[i].ID()] = r
	}

	return ranks
}
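A hedged call sketch (it assumes g is an already-populated graph.Directed value; building the graph is outside this snippet). The damping factor 0.85 is the value conventionally used in the PageRank literature.

	ranks := PageRankSparse(g, 0.85, 1e-6)
	for id, r := range ranks {
		fmt.Printf("node %d: rank %.4f\n", id, r)
	}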
Example #7
// OptLoc returns the location of the Rosenbrock function's global minimum,
// the all-ones vector (1, ..., 1).
func (r *Rosenbrock) OptLoc() []float64 {
	ans := make([]float64, r.nDim)
	floats.AddConst(1, ans)
	return ans
}
Example #8
File: scale.go Project: reggo/reggo
// Linear returns the identity linear transformation for n.Dim dimensions:
// an all-zero shift and an all-one scale.
func (n None) Linear() (shift, scale []float64) {
	shift = make([]float64, n.Dim)
	scale = make([]float64, n.Dim)
	floats.AddConst(1, scale)
	return shift, scale
}