Example #1
// GcvInitCameraMatrix2D takes one 3-by-N matrix and one 2-by-N matrix as
// input. Each column of the input matrices represents a point in the real
// world (objPts) or in the image (imgPts).
// It returns the initial camera matrix.
func GcvInitCameraMatrix2D(objPts, imgPts *mat64.Dense, dims [2]int,
	aspectRatio float64) (camMat *mat64.Dense) {

	objDim, nObjPts := objPts.Dims()
	imgDim, nImgPts := imgPts.Dims()

	if objDim != 3 || imgDim != 2 || nObjPts != nImgPts {
		panic("Invalid dimensions for objPts and imgPts")
	}

	objPtsVec := NewGcvPoint3f32Vector(int64(nObjPts))
	imgPtsVec := NewGcvPoint2f32Vector(int64(nObjPts))

	for j := 0; j < nObjPts; j++ {
		objPtsVec.Set(j, NewGcvPoint3f32(objPts.Col(nil, j)...))
	}

	for j := 0; j < nObjPts; j++ {
		imgPtsVec.Set(j, NewGcvPoint2f32(imgPts.Col(nil, j)...))
	}

	_imgSize := NewGcvSize2i(dims[0], dims[1])

	camMat = GcvMatToMat64(GcvInitCameraMatrix2D_(
		objPtsVec, imgPtsVec, _imgSize, aspectRatio))
	return camMat
}
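A minimal usage sketch for the function above; the coordinates and the (width, height) interpretation of dims are illustrative assumptions, and mat64/fmt are assumed to be imported as in the surrounding code:

	// Four corners of a planar target (Z = 0) and their observed pixel
	// positions; columns are points, as the function expects.
	objPts := mat64.NewDense(3, 4, []float64{
		0, 1, 1, 0, // X
		0, 0, 1, 1, // Y
		0, 0, 0, 0, // Z
	})
	imgPts := mat64.NewDense(2, 4, []float64{
		120, 340, 345, 118, // u (pixels)
		200, 198, 420, 425, // v (pixels)
	})

	// dims is assumed to be the image size as (width, height);
	// an aspectRatio of 1 ties fx to fy.
	camMat := GcvInitCameraMatrix2D(objPts, imgPts, [2]int{640, 480}, 1.0)
	fmt.Println(mat64.Formatted(camMat))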
Example #2
// Fit computes the Naive Bayes counting statistics from the token-count
// matrix X (one row per sample, one column per token) and the label column y
// (one language index per sample in column 0), and stores them in nb.params.
func (nb *NaiveBayes) Fit(X, y *mat64.Dense) {
	nSamples, nFeatures := X.Dims()

	tokensTotal := 0
	langsTotal, _ := y.Dims()

	langsCount := histogram(y.Col(nil, 0))

	tokensTotalPerLang := map[int]int{}
	tokenCountPerLang := map[int](map[int]int){}

	for i := 0; i < nSamples; i++ {
		langIdx := int(y.At(i, 0))

		for j := 0; j < nFeatures; j++ {
			tokensTotal += int(X.At(i, j))
			tokensTotalPerLang[langIdx] += int(X.At(i, j))

			if _, ok := tokenCountPerLang[langIdx]; !ok {
				tokenCountPerLang[langIdx] = map[int]int{}
			}
			tokenCountPerLang[langIdx][j] += int(X.At(i, j))
		}
	}

	params := nbParams{
		TokensTotal:        tokensTotal,
		LangsTotal:         langsTotal,
		LangsCount:         langsCount,
		TokensTotalPerLang: tokensTotalPerLang,
		TokenCountPerLang:  tokenCountPerLang,
	}

	nb.params = params
}
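A self-contained sketch of the data layout Fit expects, mirroring its counting loop on a tiny matrix; the sample values are made up, and the import path assumes the pre-gonum.org mat64 package used throughout these examples:

	package main

	import (
		"fmt"

		"github.com/gonum/matrix/mat64"
	)

	func main() {
		// X: 3 samples x 4 token counts; y: language index per sample in column 0.
		X := mat64.NewDense(3, 4, []float64{
			2, 0, 1, 0,
			0, 3, 0, 1,
			1, 0, 2, 0,
		})
		y := mat64.NewDense(3, 1, []float64{0, 1, 0})

		nSamples, nFeatures := X.Dims()
		tokensTotalPerLang := map[int]int{}
		for i := 0; i < nSamples; i++ {
			langIdx := int(y.At(i, 0))
			for j := 0; j < nFeatures; j++ {
				tokensTotalPerLang[langIdx] += int(X.At(i, j))
			}
		}
		fmt.Println(tokensTotalPerLang) // map[0:6 1:4]: 6 tokens for language 0, 4 for language 1
	}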
Example #3
File: scale.go Project: reggo/reggo
func (n *InnerNormal) SetScale(data *mat64.Dense) error {
	rows, dim := data.Dims()
	if rows < 2 {
		return errors.New("scale: less than two inputs")
	}
	means := make([]float64, dim)
	stds := make([]float64, dim)
	for i := 0; i < dim; i++ {
		// Filter out the extremes
		r := data.Col(nil, i)
		if len(r) != rows {
			panic("bad lengths")
		}
		sort.Float64s(r)

		lowerIdx := int(math.Floor(float64(rows) * n.LowerQuantile))
		upperIdx := int(math.Ceil(float64(rows) * n.UpperQuantile))

		trimmed := r[lowerIdx:upperIdx]

		mean, std := stat.MeanStdDev(trimmed, nil)
		means[i] = mean
		stds[i] = std
	}
	n.Mu = means
	n.Sigma = stds
	fmt.Println(n.Mu, n.Sigma)
	n.Dim = dim
	n.Scaled = true
	return nil
}
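A self-contained sketch of the quantile trimming that SetScale performs per column, assuming LowerQuantile = 0.25 and UpperQuantile = 0.75; the numbers are arbitrary and the import paths are the pre-gonum.org ones:

	package main

	import (
		"fmt"
		"math"
		"sort"

		"github.com/gonum/stat"
	)

	func main() {
		col := []float64{9.0, 1.2, 1.0, 1.1, 0.9, 1.3, -7.0, 1.05}
		sort.Float64s(col)

		lower, upper := 0.25, 0.75 // drop the lowest and highest quarter
		rows := len(col)
		lowerIdx := int(math.Floor(float64(rows) * lower))
		upperIdx := int(math.Ceil(float64(rows) * upper))

		trimmed := col[lowerIdx:upperIdx] // [1.0 1.05 1.1 1.2]: extremes removed
		mean, std := stat.MeanStdDev(trimmed, nil)
		fmt.Println(mean, std)
	}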
Example #4
// Accuracy returns the fraction of rows where the first column of y_true
// matches the first column of y_pred.
func Accuracy(y_true, y_pred *mat64.Dense) float64 {
	y1 := y_true.Col(nil, 0)
	y2 := y_pred.Col(nil, 0)

	total := 0.0
	correct := 0.0

	for i := 0; i < len(y1); i++ {
		if y1[i] == y2[i] {
			correct++
		}
		total++
	}
	return correct / total
}
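A short usage sketch, assuming the Accuracy function above is in scope and mat64/fmt are imported; the label vectors are single-column matrices:

	yTrue := mat64.NewDense(4, 1, []float64{1, 0, 1, 1})
	yPred := mat64.NewDense(4, 1, []float64{1, 0, 0, 1})

	fmt.Println(Accuracy(yTrue, yPred)) // 0.75: three of the four labels match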
Example #5
// Wrapper for the `train` function in liblinear.
//
// `model* train(const struct problem *prob, const struct parameter *param);`
//
// The parameters are explained below:
//
// solverType:
//
//   for multi-class classification
//          0 -- L2-regularized logistic regression (primal)
//          1 -- L2-regularized L2-loss support vector classification (dual)
//          2 -- L2-regularized L2-loss support vector classification (primal)
//          3 -- L2-regularized L1-loss support vector classification (dual)
//          4 -- support vector classification by Crammer and Singer
//          5 -- L1-regularized L2-loss support vector classification
//          6 -- L1-regularized logistic regression
//          7 -- L2-regularized logistic regression (dual)
//   for regression
//         11 -- L2-regularized L2-loss support vector regression (primal)
//         12 -- L2-regularized L2-loss support vector regression (dual)
//         13 -- L2-regularized L1-loss support vector regression (dual)
//
// eps is the stopping criterion.
//
// C_ is the cost of constraint violation.
//
// p is the sensitivity of the loss function for support vector regression.
//
// classWeights is a map from int to float64, where the key is the class and
// the value is its weight. For example, {1: 10, -1: 0.5} gives weight=10 to
// class=1 and weight=0.5 to class=-1.
//
// If you do not want to change the penalty for any class, set classWeights
// to nil.
func Train(X, y *mat64.Dense, bias float64, pm *Parameter) *Model {

	var problem C.struct_problem

	nRows, nCols := X.Dims()

	cY := mapCDouble(y.Col(nil, 0))
	cX := toFeatureNodes(X)
	problem.x = &cX[0]
	problem.y = &cY[0]
	problem.n = C.int(nCols)
	problem.l = C.int(nRows)
	problem.bias = C.double(bias)

	model := C.train(&problem, pm.GetPtr())
	return &Model{
		cModel: model,
	}
}
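A sketch of how the inputs to Train might be laid out; the feature values, labels, and weights are placeholders, and building the *Parameter (from solverType, eps, C_, p, and classWeights) is not shown because its constructor is outside this snippet:

	// 4 samples x 3 features, one label per sample in column 0 of y.
	X := mat64.NewDense(4, 3, []float64{
		1, 0, 2,
		0, 1, 0,
		3, 0, 1,
		0, 2, 0,
	})
	y := mat64.NewDense(4, 1, []float64{1, -1, 1, -1})

	// Penalize mistakes on class 1 twenty times as heavily as on class -1.
	classWeights := map[int]float64{1: 10, -1: 0.5}
	_ = classWeights

	// model := Train(X, y, 1.0, pm) // pm: a *Parameter built from eps, C_, p and classWeights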
Example #6
// removeCol returns a copy of df with column col removed.
func removeCol(df *mat64.Dense, col int) *mat64.Dense {
	r, c := df.Dims()
	if col >= c || col < 0 {
		panic("column index out of range")
	}

	cop := mat64.NewDense(r, c-1, nil)

	m := 0

	for i := 0; i < c; i++ {
		if i != col {
			cop.SetCol(m, df.Col(nil, i))
			m++
		}
	}

	return cop
}
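A small usage sketch for removeCol, assuming mat64 and fmt are imported; the matrix values are arbitrary:

	df := mat64.NewDense(2, 3, []float64{
		1, 2, 3,
		4, 5, 6,
	})

	trimmed := removeCol(df, 1) // drop the middle column
	r, c := trimmed.Dims()
	fmt.Println(r, c)                     // 2 2
	fmt.Println(mat64.Formatted(trimmed)) // rows [1 3] and [4 6]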
Example #7
// GcvCalibrateCamera calibrates a camera from one 3-by-N matrix of object
// points and one 2-by-N matrix of image points, returning the refined camera
// matrix together with the rotation and translation vectors.
func GcvCalibrateCamera(objPts, imgPts, camMat, distCoeffs *mat64.Dense,
	dims [2]int, flags int) (calCamMat, rvec, tvec *mat64.Dense) {

	objDim, nObjPts := objPts.Dims()
	imgDim, nImgPts := imgPts.Dims()

	if objDim != 3 || imgDim != 2 || nObjPts != nImgPts {
		panic("Invalid dimensions for objPts and imgPts")
	}

	objPtsVec := NewGcvPoint3f32Vector(int64(nObjPts))
	imgPtsVec := NewGcvPoint2f32Vector(int64(nObjPts))

	for j := 0; j < nObjPts; j++ {
		objPtsVec.Set(j, NewGcvPoint3f32(objPts.Col(nil, j)...))
	}

	for j := 0; j < nObjPts; j++ {
		imgPtsVec.Set(j, NewGcvPoint2f32(imgPts.Col(nil, j)...))
	}

	_camMat := Mat64ToGcvMat(camMat)
	_distCoeffs := Mat64ToGcvMat(distCoeffs)
	_rvec := NewGcvMat()
	_tvec := NewGcvMat()
	_imgSize := NewGcvSize2i(dims[0], dims[1])

	GcvCalibrateCamera_(
		objPtsVec, imgPtsVec,
		_imgSize, _camMat, _distCoeffs,
		_rvec, _tvec, flags)

	calCamMat = GcvMatToMat64(_camMat)
	rvec = GcvMatToMat64(_rvec)
	tvec = GcvMatToMat64(_tvec)

	return calCamMat, rvec, tvec
}
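A hedged usage sketch that chains Example #1 and Example #7; the point matrices, zero distortion coefficients, image size, and flags=0 are illustrative assumptions rather than a working calibration setup:

	// objPts (3xN) and imgPts (2xN) are built column-per-point as in Example #1.
	initCamMat := GcvInitCameraMatrix2D(objPts, imgPts, [2]int{640, 480}, 1.0)
	distCoeffs := mat64.NewDense(1, 5, nil) // start from zero distortion

	camMat, rvec, tvec := GcvCalibrateCamera(objPts, imgPts, initCamMat, distCoeffs,
		[2]int{640, 480}, 0)

	fmt.Println(mat64.Formatted(camMat))
	fmt.Println(mat64.Formatted(rvec))
	fmt.Println(mat64.Formatted(tvec))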