Example #1
// GcvInitCameraMatrix2D takes one 3-by-N matrix and one 2-by-N matrix as input.
// Each column of the input matrices represents a point in the real world
// (objPts) or in the image (imgPts).
// Return: the camera matrix.
func GcvInitCameraMatrix2D(objPts, imgPts *mat64.Dense, dims [2]int,
	aspectRatio float64) (camMat *mat64.Dense) {

	objDim, nObjPts := objPts.Dims()
	imgDim, nImgPts := imgPts.Dims()

	if objDim != 3 || imgDim != 2 || nObjPts != nImgPts {
		panic("Invalid dimensions for objPts and imgPts")
	}

	objPtsVec := NewGcvPoint3f32Vector(int64(nObjPts))
	imgPtsVec := NewGcvPoint2f32Vector(int64(nObjPts))

	// Copy each column (one point per column) into the OpenCV point vectors.
	for j := 0; j < nObjPts; j++ {
		objPtsVec.Set(j, NewGcvPoint3f32(mat64.Col(nil, 0, objPts.ColView(j))...))
	}

	for j := 0; j < nObjPts; j++ {
		imgPtsVec.Set(j, NewGcvPoint2f32(mat64.Col(nil, 0, imgPts.ColView(j))...))
	}

	_imgSize := NewGcvSize2i(dims[0], dims[1])

	camMat = GcvMatToMat64(GcvInitCameraMatrix2D_(
		objPtsVec, imgPtsVec, _imgSize, aspectRatio))
	return camMat
}
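A minimal usage sketch for GcvInitCameraMatrix2D with made-up planar points; it assumes the snippet lives in the same package as the function above and imports fmt and github.com/gonum/matrix/mat64:

func exampleInitCameraMatrix() {
	// Four world points as columns (X, Y, Z), all lying in the Z=0 plane.
	objPts := mat64.NewDense(3, 4, []float64{
		0, 1, 0, 1, // X
		0, 0, 1, 1, // Y
		0, 0, 0, 0, // Z
	})
	// The same four points observed in the image; columns are (u, v) pixels.
	imgPts := mat64.NewDense(2, 4, []float64{
		100, 300, 100, 300, // u
		100, 100, 300, 300, // v
	})
	camMat := GcvInitCameraMatrix2D(objPts, imgPts, [2]int{640, 480}, 1.0)
	fmt.Println(mat64.Formatted(camMat))
}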
Example #2
// InnerProduct computes the inner product in the feature space induced by
// the polynomial kernel K(x, y) = (x^T y + 1)^d.
func (p *PolyKernel) InnerProduct(vectorX *mat64.Dense, vectorY *mat64.Dense) float64 {
	subVectorX := vectorX.ColView(0)
	subVectorY := vectorY.ColView(0)
	result := mat64.Dot(subVectorX, subVectorY)
	result = math.Pow(result+1, float64(p.degree))

	return result
}
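The polynomial kernel above can be checked by hand with mat64 alone; a minimal sketch (the PolyKernel constructor is not shown here, so the degree is written out directly, and math and fmt are assumed to be imported):

func examplePolyKernel() {
	// Two 3-dimensional points stored as 3x1 column matrices.
	x := mat64.NewDense(3, 1, []float64{1, 2, 3})
	y := mat64.NewDense(3, 1, []float64{4, 5, 6})

	// K(x, y) = (x^T y + 1)^d with d = 2:
	// x^T y = 1*4 + 2*5 + 3*6 = 32, so K = 33^2 = 1089.
	k := math.Pow(mat64.Dot(x.ColView(0), y.ColView(0))+1, 2)
	fmt.Println(k) // 1089
}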
Example #3
// DivideData slices out size consecutive samples starting at row start,
// returning the two feature columns as a size-by-2 matrix and the label
// column as a slice.
func DivideData(data *mat64.Dense, start, size int) (*mat64.Dense, []float64) {
	_x := []float64{}
	y := []float64{}

	for i := start; i < (start + size); i++ {
		_x = append(_x, data.ColView(0).At(i, 0)) // x1
		_x = append(_x, data.ColView(1).At(i, 0)) // x2
		y = append(y, data.ColView(2).At(i, 0))   // label
	}
	x := mat64.NewDense(size, 2, _x) // x1 and x2

	return x, y
}
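A short usage sketch for DivideData, with a made-up dataset of six rows laid out as (x1, x2, label):

func exampleDivideData() {
	data := mat64.NewDense(6, 3, []float64{
		1, 2, 0,
		2, 1, 0,
		3, 3, 1,
		4, 5, 1,
		5, 4, 1,
		6, 6, 1,
	})
	// Take a batch of 3 samples starting at row 2.
	x, y := DivideData(data, 2, 3)
	fmt.Println(mat64.Formatted(x)) // 3x2 feature matrix
	fmt.Println(y)                  // [1 1 1]
}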
Example #4
// Accuracy returns the fraction of rows on which y_true and y_pred agree.
// Both arguments are expected to be single-column matrices, so that the raw
// data of their column views is exactly the slice of labels.
func Accuracy(y_true, y_pred *mat64.Dense) float64 {
	y1 := y_true.ColView(0).RawVector().Data
	y2 := y_pred.ColView(0).RawVector().Data

	total := 0.0
	correct := 0.0

	for i := 0; i < len(y1); i++ {
		if y1[i] == y2[i] {
			correct++
		}
		total++
	}
	return correct / total
}
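Since ColView(0).RawVector().Data is the plain label slice only when the argument has a single column, a usage sketch builds both inputs as n-by-1 matrices (values made up):

func exampleAccuracy() {
	// One of the five predictions is wrong.
	yTrue := mat64.NewDense(5, 1, []float64{1, 0, 1, 1, 0})
	yPred := mat64.NewDense(5, 1, []float64{1, 0, 0, 1, 0})
	fmt.Println(Accuracy(yTrue, yPred)) // 0.8
}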
Example #5
// Grad computes the gradients of the loss with respect to the two weights
// and the bias from the prediction errors (label - prediction) over a batch
// of s samples.
func Grad(x *mat64.Dense, y, w []float64, b float64, s int) (w_grad, b_grad []float64) {
	errs := []float64{}

	yhat := P_y_given_x(x, w, b, s)

	for i := 0; i < len(y); i++ {
		errs = append(errs, y[i]-yhat[i]) // error = label - pred
	}
	e := mat64.NewDense(s, 1, errs)

	w_grad = append(w_grad, -1*mat64.Dot(x.ColView(0), e.ColView(0)))
	w_grad = append(w_grad, -1*mat64.Dot(x.ColView(1), e.ColView(0)))
	b_grad = append(b_grad, -1*Mean(errs))

	return w_grad, b_grad
}
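A usage sketch for Grad with made-up data; it assumes P_y_given_x and Mean, which Grad calls, are defined in the same package:

func exampleGrad() {
	// Four samples with two features each, plus their labels.
	x := mat64.NewDense(4, 2, []float64{
		1, 2,
		2, 1,
		3, 4,
		4, 3,
	})
	y := []float64{0, 0, 1, 1}
	w := []float64{0.1, -0.1} // current weights
	b := 0.0                  // current bias

	wGrad, bGrad := Grad(x, y, w, b, 4)
	fmt.Println(wGrad, bGrad)
}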
Example #6
// StdDevBatch predicts the standard deviation at each of the locations given
// by the rows of x, storing the result in std.
func (g *GP) StdDevBatch(std []float64, x mat64.Matrix) []float64 {
	r, c := x.Dims()
	if c != g.inputDim {
		panic(badInputLength)
	}
	if std == nil {
		std = make([]float64, r)
	}
	if len(std) != r {
		panic(badStorage)
	}
	// For a single point, the standard deviation is
	// 		sigma = sqrt(k(x,x) - k_*^T * K^-1 * k_*)
	// where k_* is the vector of kernel values between the training points and
	// the query point. For many points, the formula is:
	// 		nu_* = k(x_*, x_*) - k_*^T * K^-1 * k_*
	// This creates the full covariance matrix, which is an r-by-r matrix. However,
	// the standard deviations are just the diagonal of this matrix. Instead, be
	// smart about it and compute the diagonal terms one at a time.
	kStar := g.formKStar(x)
	var tmp mat64.Dense
	tmp.SolveCholesky(g.cholK, kStar)

	// set k(x_*, x_*) into std then subtract k_*^T K^-1 k_* , computed one row at a time
	var tmp2 mat64.Vector
	row := make([]float64, c)
	for i := range std {
		for k := 0; k < c; k++ {
			row[k] = x.At(i, k)
		}
		std[i] = g.kernel.Distance(row, row)
		tmp2.MulVec(kStar.ColView(i).T(), tmp.ColView(i))
		rt, ct := tmp2.Dims()
		if rt != 1 || ct != 1 {
			panic("bad size")
		}
		std[i] -= tmp2.At(0, 0)
		std[i] = math.Sqrt(std[i])
	}
	// Need to scale the standard deviation to be in the same units as y.
	floats.Scale(g.std, std)
	return std
}
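The comment in StdDevBatch is the key point: only the diagonal of k_*^T K^-1 k_* is needed, and each diagonal entry is the dot product of matching columns. A standalone sketch of that trick (diagOfProduct is a hypothetical helper, not part of the package):

// diagOfProduct returns the diagonal of a^T * b without forming the full
// product: entry i is the dot product of column i of a with column i of b.
func diagOfProduct(a, b *mat64.Dense) []float64 {
	ra, ca := a.Dims()
	rb, cb := b.Dims()
	if ra != rb || ca != cb {
		panic("dimension mismatch")
	}
	d := make([]float64, ca)
	for i := 0; i < ca; i++ {
		d[i] = mat64.Dot(a.ColView(i), b.ColView(i))
	}
	return d
}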
Example #7
// mulMulti multiplies the two-column matrix a by the length-2 vector b,
// returning the result as a slice of length rows.
func mulMulti(a *mat64.Dense, b []float64, rows int) (r []float64) {
	var m, m2 mat64.Dense

	b1 := mat64.NewDense(1, 1, []float64{b[0]})
	b2 := mat64.NewDense(1, 1, []float64{b[1]})

	m.Mul(a.ColView(0), b1)
	m2.Mul(a.ColView(1), b2)

	for i := 0; i < rows; i++ {
		r = append(r, m.ColView(0).At(i, 0)+m2.ColView(0).At(i, 0))
	}
	return r
}
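mulMulti is just the product of the two-column matrix a with the length-2 vector b; a quick check of that equivalence with made-up values:

func exampleMulMulti() {
	a := mat64.NewDense(3, 2, []float64{
		1, 2,
		3, 4,
		5, 6,
	})
	b := []float64{10, 100}

	fmt.Println(mulMulti(a, b, 3)) // [210 430 650]

	// The same result as a single matrix-vector product.
	var prod mat64.Dense
	prod.Mul(a, mat64.NewDense(2, 1, b))
	fmt.Println(mat64.Formatted(&prod))
}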
Example #8
// GcvCalibrateCamera refines an initial camera matrix from matching 3-by-N
// world points (objPts) and 2-by-N image points (imgPts), returning the
// calibrated camera matrix together with the rotation and translation vectors.
func GcvCalibrateCamera(objPts, imgPts, camMat, distCoeffs *mat64.Dense,
	dims [2]int, flags int) (calCamMat, rvec, tvec *mat64.Dense) {

	objDim, nObjPts := objPts.Dims()
	imgDim, nImgPts := imgPts.Dims()

	if objDim != 3 || imgDim != 2 || nObjPts != nImgPts {
		panic("Invalid dimensions for objPts and imgPts")
	}

	objPtsVec := NewGcvPoint3f32Vector(int64(nObjPts))
	imgPtsVec := NewGcvPoint2f32Vector(int64(nObjPts))

	// Copy each column (one point per column) into the OpenCV point vectors.
	for j := 0; j < nObjPts; j++ {
		objPtsVec.Set(j, NewGcvPoint3f32(mat64.Col(nil, 0, objPts.ColView(j))...))
	}

	for j := 0; j < nObjPts; j++ {
		imgPtsVec.Set(j, NewGcvPoint2f32(mat64.Col(nil, 0, imgPts.ColView(j))...))
	}

	_camMat := Mat64ToGcvMat(camMat)
	_distCoeffs := Mat64ToGcvMat(distCoeffs)
	_rvec := NewGcvMat()
	_tvec := NewGcvMat()
	_imgSize := NewGcvSize2i(dims[0], dims[1])

	GcvCalibrateCamera_(
		objPtsVec, imgPtsVec,
		_imgSize, _camMat, _distCoeffs,
		_rvec, _tvec, flags)

	calCamMat = GcvMatToMat64(_camMat)
	rvec = GcvMatToMat64(_rvec)
	tvec = GcvMatToMat64(_tvec)

	return calCamMat, rvec, tvec
}
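A usage sketch that chains the two gcv calls: the output of GcvInitCameraMatrix2D seeds GcvCalibrateCamera. The 5-by-1 zero distortion vector and flags=0 are assumptions about what the underlying OpenCV call accepts, so treat the shapes as illustrative:

func exampleCalibrateCamera() {
	// Same point layout as in the GcvInitCameraMatrix2D example above.
	objPts := mat64.NewDense(3, 4, []float64{
		0, 1, 0, 1,
		0, 0, 1, 1,
		0, 0, 0, 0,
	})
	imgPts := mat64.NewDense(2, 4, []float64{
		100, 300, 100, 300,
		100, 100, 300, 300,
	})
	dims := [2]int{640, 480}

	camMat := GcvInitCameraMatrix2D(objPts, imgPts, dims, 1.0)
	distCoeffs := mat64.NewDense(5, 1, nil) // start with zero distortion

	calCamMat, rvec, tvec := GcvCalibrateCamera(objPts, imgPts, camMat, distCoeffs, dims, 0)
	fmt.Println(mat64.Formatted(calCamMat))
	fmt.Println(mat64.Formatted(rvec))
	fmt.Println(mat64.Formatted(tvec))
}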
Example #9
// Wrapper for the `train` function in liblinear.
//
// `model* train(const struct problem *prob, const struct parameter *param);`
//
// The parameters are as follows:
//
// solverType:
//
//   for multi-class classification
//          0 -- L2-regularized logistic regression (primal)
//          1 -- L2-regularized L2-loss support vector classification (dual)
//          2 -- L2-regularized L2-loss support vector classification (primal)
//          3 -- L2-regularized L1-loss support vector classification (dual)
//          4 -- support vector classification by Crammer and Singer
//          5 -- L1-regularized L2-loss support vector classification
//          6 -- L1-regularized logistic regression
//          7 -- L2-regularized logistic regression (dual)
//   for regression
//         11 -- L2-regularized L2-loss support vector regression (primal)
//         12 -- L2-regularized L2-loss support vector regression (dual)
//         13 -- L2-regularized L1-loss support vector regression (dual)
//
// eps is the stopping criterion.
//
// C_ is the cost of constraint violation.
//
// p is the epsilon (sensitivity) in the loss function of support vector
// regression.
//
// classWeights is a map from int to float64, with the key being the class and
// the value being the weight. For example, {1: 10, -1: 0.5} gives weight=10 to
// class=1 and weight=0.5 to class=-1.
//
// If you do not want to change the penalty for any class, set classWeights
// to nil.
func Train(X, y *mat64.Dense, bias float64, solverType int, c_, p, eps float64, classWeights map[int]float64) *Model {
	var weightLabelPtr *C.int
	var weightPtr *C.double

	nRows, nCols := X.Dims()

	cX := mapCDouble(X.RawMatrix().Data)
	cY := mapCDouble(y.ColView(0).RawVector().Data)

	nrWeight := len(classWeights)
	weightLabel := []C.int{}
	weight := []C.double{}

	for key, val := range classWeights {
		weightLabel = append(weightLabel, (C.int)(key))
		weight = append(weight, (C.double)(val))
	}

	if nrWeight > 0 {
		weightLabelPtr = &weightLabel[0]
		weightPtr = &weight[0]
	} else {
		weightLabelPtr = nil
		weightPtr = nil
	}

	model := C.call_train(
		&cX[0], &cY[0],
		C.int(nRows), C.int(nCols), C.double(bias),
		C.int(solverType), C.double(c_), C.double(p), C.double(eps),
		C.int(nrWeight), weightLabelPtr, weightPtr)

	return &Model{
		cModel: model,
	}
}
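A minimal usage sketch for Train with made-up data: X is samples-by-features, y is a single-column label matrix, solverType 0 picks L2-regularized logistic regression from the table above, and classWeights is nil so no class is re-weighted:

func exampleTrain() {
	X := mat64.NewDense(4, 2, []float64{
		0.0, 0.1,
		0.1, 0.0,
		0.9, 1.0,
		1.0, 0.9,
	})
	y := mat64.NewDense(4, 1, []float64{-1, -1, 1, 1})

	// bias=1, solver 0, C=1, p=0.1, eps=0.01, no class weights.
	model := Train(X, y, 1.0, 0, 1.0, 0.1, 0.01, nil)
	_ = model // model wraps the trained liblinear model
}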