Example #1
func Grad(x *mat64.Dense, y, w []float64, b float64, s int) (w_grad, b_grad []float64) {
	errs := []float64{}

	yhat := P_y_given_x(x, w, b, s)

	for i := 0; i < len(y); i++ {
		errs = append(errs, y[i]-yhat[i]) // error = label - pred
	}
	// Wrap the per-example errors in a vector so they can be passed to mat64.Dot.
	e := mat64.NewVector(s, errs)

	// Gradient components: -(x_j · e) for each weight and -mean(e) for the bias.
	w_grad = append(w_grad, -1*mat64.Dot(x.ColView(0), e))
	w_grad = append(w_grad, -1*mat64.Dot(x.ColView(1), e))
	b_grad = append(b_grad, -1*Mean(errs))

	return w_grad, b_grad
}
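For context, a minimal sketch of one parameter update using the gradients returned above. It assumes the Grad function from this example (and therefore its P_y_given_x and Mean helpers) plus a hypothetical learning rate lr; it simply steps against each gradient component.

func Step(x *mat64.Dense, y, w []float64, b float64, s int, lr float64) ([]float64, float64) {
	// Compute the gradients with Grad above, then take one descent step.
	wGrad, bGrad := Grad(x, y, w, b, s)
	for i := range w {
		w[i] -= lr * wGrad[i]
	}
	b -= lr * bGrad[0]
	return w, b
}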
Example #2
func (lr *LinearRegression) Fit() {
	h := *mat64.NewVector(lr.m, nil)
	partials := mat64.NewVector(lr.n, nil)
	alpha_m := lr.alpha / float64(lr.m)

Descent:
	for i := 0; i < lr.maxIters; i++ {
		// Calculate partial derivatives
		h.MulVec(lr.x, lr.Theta)
		for x := 0; x < lr.m; x++ {
			h.SetVec(x, h.At(x, 0)-lr.y.At(x, 0))
		}
		partials.MulVec(lr.x.T(), &h)

		// Update theta values with the precalculated partials
		for x := 0; x < lr.n; x++ {
			theta_j := lr.Theta.At(x, 0) - alpha_m*partials.At(x, 0)
			lr.Theta.SetVec(x, theta_j)
		}

		// Check the "distance" to the local minimum
		dist := math.Sqrt(mat64.Dot(partials, partials))

		if dist <= lr.tolerance {
			break Descent
		}
	}
}
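The loop above is batch gradient descent on the least-squares cost: h holds the residual Xθ − y, partials holds Xᵀ(Xθ − y), and each parameter is then moved against its partial derivative,

\theta_j \leftarrow \theta_j - \frac{\alpha}{m} \sum_{i=1}^{m} \bigl(h_\theta(x^{(i)}) - y^{(i)}\bigr)\, x_j^{(i)},

i.e. θ ← θ − (α/m)·Xᵀ(Xθ − y) in vector form. The loop exits early once the Euclidean norm of partials falls below tolerance.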
Example #3
func GradientDescent(X *mat64.Dense, y *mat64.Vector, alpha, tolerance float64, maxIters int) *mat64.Vector {
	// m = Number of Training Examples
	// n = Number of Features
	m, n := X.Dims()
	h := mat64.NewVector(m, nil)
	partials := mat64.NewVector(n, nil)
	new_theta := mat64.NewVector(n, nil)

Regression:
	for i := 0; i < maxIters; i++ {
		// Calculate partial derivatives
		h.MulVec(X, new_theta)
		for el := 0; el < m; el++ {
			val := (h.At(el, 0) - y.At(el, 0)) / float64(m)
			h.SetVec(el, val)
		}
		partials.MulVec(X.T(), h)

		// Update theta values
		for el := 0; el < n; el++ {
			new_val := new_theta.At(el, 0) - (alpha * partials.At(el, 0))
			new_theta.SetVec(el, new_val)
		}

		// Check the "distance" to the local minimum
		dist := math.Sqrt(mat64.Dot(partials, partials))

		if dist <= tolerance {
			break Regression
		}
	}
	return new_theta
}
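A minimal usage sketch for GradientDescent above, with illustrative data and hyperparameters; the import path assumes the gonum/matrix mat64 package used throughout these examples.

package main

import (
	"fmt"

	"github.com/gonum/matrix/mat64"
)

func main() {
	// Three training examples: a bias column of ones and one feature.
	// Labels follow y = 1 + x, so the fitted theta should approach (1, 1).
	X := mat64.NewDense(3, 2, []float64{
		1, 1,
		1, 2,
		1, 3,
	})
	y := mat64.NewVector(3, []float64{2, 3, 4})

	theta := GradientDescent(X, y, 0.1, 1e-6, 100000)
	fmt.Printf("theta =\n%v\n", mat64.Formatted(theta))
}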
Example #4
// InnerProduct computes the inner product through a kernel trick
// K(x, y) = (x^T y + 1)^d
func (p *PolyKernel) InnerProduct(vectorX *mat64.Dense, vectorY *mat64.Dense) float64 {
	subVectorX := vectorX.ColView(0)
	subVectorY := vectorY.ColView(0)
	result := mat64.Dot(subVectorX, subVectorY)
	result = math.Pow(result+1, float64(p.degree))

	return result
}
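For reference, the same kernel value can be computed directly with mat64 and math.Pow; a small self-contained sketch with illustrative vectors and degree:

package main

import (
	"fmt"
	"math"

	"github.com/gonum/matrix/mat64"
)

func main() {
	// Column vectors stored as 3×1 matrices, as the kernel above expects.
	x := mat64.NewDense(3, 1, []float64{1, 2, 3})
	y := mat64.NewDense(3, 1, []float64{4, 5, 6})
	degree := 2

	// K(x, y) = (x^T y + 1)^d = (32 + 1)^2 = 1089
	k := math.Pow(mat64.Dot(x.ColView(0), y.ColView(0))+1, float64(degree))
	fmt.Println(k)
}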
Example #5
func (bicg *BiCG) Iterate(ctx *Context) Operation {
	switch bicg.resume {
	case 1:
		bicg.resume = 2
		return SolvePreconditioner
		// Solve M z = r_{i-1}
	case 2:
		// ρ_i = r_{i-1} · z
		bicg.rho = mat64.Dot(ctx.Residual, ctx.Z)
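		// (the remaining steps of this BiCG iteration are omitted in this excerpt;
		// see the full CG state machine in Example #6 for the overall pattern)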
	default:
		panic("unreachable")
	}
}
Example #6
File: cg.go  Project: vladimir-ch/sparse
func (cg *CG) Iterate(ctx *Context) Operation {
	switch cg.resume {
	case 1:
		cg.resume = 2
		return SolvePreconditioner
		// Solve M z = r_{i-1}
	case 2:
		// ρ_i = r_{i-1} · z
		cg.rho = mat64.Dot(ctx.Residual, ctx.Z)
		if !cg.first {
			// β = ρ_i / ρ_{i-1}
			beta := cg.rho / cg.rho1
			// z = z + β p_{i-1}
			ctx.Z.AddScaledVec(ctx.Z, beta, ctx.P)
		}
		cg.first = false
		// p_i = z
		ctx.P.CopyVec(ctx.Z)

		cg.resume = 3
		return ComputeAp
		// Compute Ap
	case 3:
		// α = ρ_i / (p_i · Ap_i)
		alpha := cg.rho / mat64.Dot(ctx.P, ctx.Ap)
		// x_i = x_{i-1} + α p_i
		ctx.X.AddScaledVec(ctx.X, alpha, ctx.P)
		// r_i = r_{i-1} - α Ap_i
		ctx.Residual.AddScaledVec(ctx.Residual, -alpha, ctx.Ap)

		cg.rho1 = cg.rho

		cg.resume = 1
		return CheckConvergence
	default:
		panic("unreachable")
	}
}
Example #7
File: bfgs.go  Project: jgcarvalho/zdd
func (b *BFGS) NextDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := b.dim
	if len(loc.X) != dim {
		panic("bfgs: unexpected size mismatch")
	}
	if len(loc.Gradient) != dim {
		panic("bfgs: unexpected size mismatch")
	}
	if len(dir) != dim {
		panic("bfgs: unexpected size mismatch")
	}

	x := mat64.NewVector(dim, loc.X)
	grad := mat64.NewVector(dim, loc.Gradient)

	// s = x_{k+1} - x_{k}
	b.s.SubVec(x, &b.x)
	// y = g_{k+1} - g_{k}
	b.y.SubVec(grad, &b.grad)

	sDotY := mat64.Dot(&b.s, &b.y)

	if b.first {
		// Rescale the initial Hessian.
		// From: Nocedal, J., Wright, S.: Numerical Optimization (2nd ed).
		//       Springer (2006), page 143, eq. 6.20.
		yDotY := mat64.Dot(&b.y, &b.y)
		scale := sDotY / yDotY
		for i := 0; i < dim; i++ {
			for j := i; j < dim; j++ {
				if i == j {
					b.invHess.SetSym(i, i, scale)
				} else {
					b.invHess.SetSym(i, j, 0)
				}
			}
		}
		b.first = false
	}

	if math.Abs(sDotY) != 0 {
		// Update the inverse Hessian according to the formula
		//
		//  B_{k+1}^-1 = B_k^-1
		//             + (s_k^T y_k + y_k^T B_k^-1 y_k) / (s_k^T y_k)^2 * (s_k s_k^T)
		//             - (B_k^-1 y_k s_k^T + s_k y_k^T B_k^-1) / (s_k^T y_k).
		//
		// Note that y_k^T B_k^-1 y_k is a scalar, and that the third term is a
		// rank-two update where B_k^-1 y_k is one vector and s_k is the other.
		yBy := mat64.Inner(&b.y, b.invHess, &b.y)
		b.tmp.MulVec(b.invHess, &b.y)
		scale := (1 + yBy/sDotY) / sDotY
		b.invHess.SymRankOne(b.invHess, scale, &b.s)
		b.invHess.RankTwo(b.invHess, -1/sDotY, &b.tmp, &b.s)
	}

	// Update the stored BFGS data.
	b.x.CopyVec(x)
	b.grad.CopyVec(grad)

	// New direction is stored in dir.
	d := mat64.NewVector(dim, dir)
	d.MulVec(b.invHess, grad)
	d.ScaleVec(-1, d)

	return 1
}
Example #8
func (lr *LinearRegression) Predict(x *mat64.Vector) float64 {
	return mat64.Dot(x, lr.Theta)
}
Example #9
func Hypothesis(x, theta *mat64.Vector) float64 {
	//	var res mat64.Dense
	//	res.Mul(x.T(), theta)
	//	return res.At(0, 0)
	return mat64.Dot(x, theta)
}
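Both this Hypothesis function and the Predict method in Example #8 reduce to the same dot product θᵀx; a minimal self-contained sketch with illustrative numbers:

package main

import (
	"fmt"

	"github.com/gonum/matrix/mat64"
)

func main() {
	// Feature vector with a leading bias term, and fitted parameters theta.
	x := mat64.NewVector(3, []float64{1, 2, 3.5})
	theta := mat64.NewVector(3, []float64{0.5, 1.25, -0.75})

	// h_theta(x) = theta^T x = 0.5 + 2.5 - 2.625 = 0.375
	fmt.Println(Hypothesis(x, theta))
}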
Example #10
// InnerProduct computes the Euclidean inner product.
func (e *Euclidean) InnerProduct(vectorX *mat64.Dense, vectorY *mat64.Dense) float64 {
	result := mat64.Dot(vectorX, vectorY)

	return result
}
Example #11
// InnerProduct computes the inner product through a kernel trick
// K(x, y) = (x^T y + 1)^d
func (p *PolyKernel) InnerProduct(vectorX *mat64.Dense, vectorY *mat64.Dense) float64 {
	result := mat64.Dot(vectorX, vectorY)
	result = math.Pow(result+1, float64(p.degree))

	return result
}