Example #1
// Print prints coefficients
func (o *DynCoefs) Print() {
	io.Pfgrey("θ=%v, θ1=%v, θ2=%v, α=%v\n", o.θ, o.θ1, o.θ2, o.α)
	io.Pfgrey("HHT=%v\n", o.HHT)
	io.Pfgrey("β1=%v, β2=%v\n", o.β1, o.β2)
	io.Pfgrey("α1=%v, α2=%v, α3=%v, α4=%v, α5=%v, α6=%v\n", o.α1, o.α2, o.α3, o.α4, o.α5, o.α6)
	io.Pfgrey("α7=%v, α8=%v\n", o.α7, o.α8)
}
Example #2
// msg prints information on residuals
func (o *NlSolver) msg(typ string, it int, Ldx, fx_max float64, first, last bool) {
	if first {
		io.Pfpink("\n%4s%23s%23s\n", "it", "Ldx", "fx_max")
		io.Pfpink("%4s%23s%23s\n", "", io.Sf("(%7.1e)", o.fnewt), io.Sf("(%7.1e)", o.ftol))
		return
	}
	io.Pfyel("%4d%23.15e%23.15e\n", it, Ldx, fx_max)
	if last {
		io.Pfgrey(". . . converged with %s. nit=%d, nFeval=%d, nJeval=%d\n", typ, it, o.NFeval, o.NJeval)
	}
}
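For reference, the column layout produced by msg can be reproduced with the standard library alone. The sketch below is purely illustrative (plain fmt instead of gosl's io package, made-up values); it only shows how the %4s/%23s and %4d/%23.15e verbs keep the header and value rows aligned.

package main

import "fmt"

func main() {
	// header rows followed by one value row, using the same fixed-width verbs as msg
	fmt.Printf("%4s%23s%23s\n", "it", "Ldx", "fx_max")
	fmt.Printf("%4s%23s%23s\n", "", fmt.Sprintf("(%7.1e)", 1e-10), fmt.Sprintf("(%7.1e)", 1e-9))
	fmt.Printf("%4d%23.15e%23.15e\n", 3, 1.234e-11, 5.678e-12)
}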
Example #3
// KrefineN returns a new Nurbs with each knot span divided into ndiv parts (ndiv = 2, 3, ...)
func (o *Nurbs) KrefineN(ndiv int, hughesEtAlPaper bool) *Nurbs {
	X := make([][]float64, o.gnd)
	if hughesEtAlPaper {
		elems := o.Elements()
		switch o.gnd {
		case 2:
			for _, e := range elems {
				umin, umax := o.b[0].T[e[0]], o.b[0].T[e[1]]
				vmin, vmax := o.b[1].T[e[2]], o.b[1].T[e[3]]
				xa := o.Point([]float64{umin, vmin})
				xb := o.Point([]float64{umax, vmin})
				xc := []float64{(xa[0] + xb[0]) / 2.0, (xa[1] + xb[1]) / 2.0}

				io.Pf("xa = %v\n", xa)
				io.Pf("xb = %v\n", xb)
				io.Pf("xc = %v\n", xc)

				xa = o.Point([]float64{umin, vmax})
				xb = o.Point([]float64{umax, vmax})
				xc = []float64{(xa[0] + xb[0]) / 2.0, (xa[1] + xb[1]) / 2.0}

				io.Pfpink("xa, xb, xc = %v, %v, %v\n", xa, xb, xc)
				chk.Panic("KrefineN with hughesEtAlPaper==true is not implemented in 2D yet")
			}
		case 3:
			chk.Panic("KrefineN with hughesEtAlPaper==true is not implemented in 3D yet")
		}
		io.Pfgrey("KrefineN with hughesEtAlPaper==true => not implemented yet\n")
		return nil
	} else {
		for d := 0; d < o.gnd; d++ {
			nspans := o.b[d].m - 2*o.p[d] - 1
			nnewk := nspans * (ndiv - 1)
			X[d] = make([]float64, nnewk)
			k := 0
			for i := 0; i < nspans; i++ {
				umin, umax := o.b[d].T[o.p[d]+i], o.b[d].T[o.p[d]+i+1]
				du := (umax - umin) / float64(ndiv)
				for j := 1; j < ndiv; j++ {
					X[d][k] = umin + du*float64(j)
					k += 1
				}
			}
		}
	}
	return o.Krefine(X)
}
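The inner loop of the non-paper branch simply places ndiv-1 equally spaced knots inside every non-degenerate span. A minimal standalone sketch of that step (plain Go, hypothetical helper spanSubdivide, no gosl/Nurbs types) could look like this:

package main

import "fmt"

// spanSubdivide returns the knots to insert so that each span [T[i], T[i+1])
// with T[i] < T[i+1] is split into ndiv equal sub-spans.
func spanSubdivide(T []float64, ndiv int) (X []float64) {
	for i := 0; i < len(T)-1; i++ {
		umin, umax := T[i], T[i+1]
		if umax <= umin {
			continue // skip repeated (degenerate) knots
		}
		du := (umax - umin) / float64(ndiv)
		for j := 1; j < ndiv; j++ {
			X = append(X, umin+du*float64(j))
		}
	}
	return
}

func main() {
	T := []float64{0, 0, 0, 1, 2, 2, 2} // open knot vector with two spans: [0,1) and [1,2)
	fmt.Println(spanSubdivide(T, 2))    // [0.5 1.5]
	fmt.Println(spanSubdivide(T, 3))    // [0.333... 0.666... 1.333... 1.666...]
}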
Example #4
// bweuler_step performs one backward-Euler step
func bweuler_step(o *ODE, y []float64, x float64, args ...interface{}) (rerr float64, err error) {

	// new x
	x += o.h

	// previous y
	la.VecCopy(o.v[0], 1, y) // v := y_old

	// iterations
	var rmsnr float64 // rms norm of residual
	var it int
	for it = 0; it < o.NmaxIt; it++ {

		// max iterations ?
		o.nit = it + 1
		if o.nit > o.nitmax {
			o.nitmax = o.nit
		}

		// calculate f @ current y
		o.nfeval += 1
		err = o.fcn(o.f[0], x, y, args...)
		if err != nil {
			return
		}

		// calculate residual
		rmsnr = 0.0
		for i := 0; i < o.ndim; i++ {
			o.w[0][i] = y[i] - o.v[0][i] - o.h*o.f[0][i] // w := residual
			if o.UseRmsNorm {
				rmsnr += math.Pow(o.w[0][i]/o.scal[i], 2.0)
			} else {
				rmsnr += o.w[0][i] * o.w[0][i]
			}
		}
		if o.UseRmsNorm {
			rmsnr = math.Sqrt(rmsnr / float64(o.ndim))
		} else {
			rmsnr = math.Sqrt(rmsnr)
		}
		if o.Verbose {
			io.Pfgrey("    residual = %10.5e    (tol = %10.5e)\n", rmsnr, o.fnewt)
		}

		// converged
		if rmsnr < o.fnewt {
			break
		}

		// Jacobian matrix
		if o.doinit || !o.CteTg {
			o.njeval += 1

			// calculate Jacobian
			if o.jac == nil { // numerical
				err = num.Jacobian(&o.dfdyT, func(fy, yy []float64) (e error) {
					e = o.fcn(fy, x, yy, args...)
					return
				}, y, o.f[0], o.δw[0], o.Distr) // δw works here as workspace variable
			} else { // analytical
				err = o.jac(&o.dfdyT, x, y, args...)
			}
			if err != nil {
				return
			}
			// debug
			//if true {
			//io.Pfblue2("J = %v\n", o.dfdyT.ToMatrix(nil).ToDense()[0])
			//}
			if o.doinit {
				o.rctriR = new(la.Triplet)
				o.rctriR.Init(o.ndim, o.ndim, o.mTri.Len()+o.dfdyT.Len())
			}

			// calculate drdy matrix
			la.SpTriAdd(o.rctriR, 1, o.mTri, -o.h, &o.dfdyT) // rctriR := I - h * dfdy
			//la.PrintMat("rcmat", o.rctriR.ToMatrix(nil).ToDense(), "%8.3f", false)

			// initialise linear solver
			if o.doinit {
				err = o.lsolR.InitR(o.rctriR, false, false, false)
				if err != nil {
					return
				}
			}

			// perform factorisation
			o.ndecomp += 1
			o.lsolR.Fact()
		}

		// solve linear system
		o.nlinsol += 1
		o.lsolR.SolveR(o.δw[0], o.w[0], false) // δw := inv(rcmat) * residual

		// update y
		for i := 0; i < o.ndim; i++ {
			y[i] -= o.δw[0][i]
		}
	}

	// did not converge
	if it == o.NmaxIt {
		chk.Panic("bweuler_step failed with it = %d", it)
	}

	return 1e+20, err // must not be used with automatic substepping
}
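For a scalar ODE the same scheme collapses to a few lines: one backward-Euler step solves r(y1) = y1 - y0 - h·f(x1, y1) = 0 by Newton's method, which is the residual and Jacobian 1 - h·∂f/∂y assembled above. A minimal sketch (hypothetical names, no gosl types):

package main

import (
	"fmt"
	"math"
)

// bweulerScalar performs one backward-Euler step for dy/dx = f(x, y),
// solving y1 - y0 - h*f(x0+h, y1) = 0 with Newton's method.
func bweulerScalar(f, dfdy func(x, y float64) float64, x0, y0, h, tol float64, nmaxIt int) (float64, error) {
	x1 := x0 + h
	y1 := y0 // initial guess: previous value
	for it := 0; it < nmaxIt; it++ {
		r := y1 - y0 - h*f(x1, y1)
		if math.Abs(r) < tol {
			return y1, nil
		}
		J := 1.0 - h*dfdy(x1, y1) // dr/dy1
		y1 -= r / J
	}
	return y1, fmt.Errorf("backward-Euler step did not converge")
}

func main() {
	// dy/dx = -y  =>  exact solution y(x) = exp(-x)
	f := func(x, y float64) float64 { return -y }
	dfdy := func(x, y float64) float64 { return -1 }
	y, x, h := 1.0, 0.0, 0.1
	for i := 0; i < 10; i++ {
		y, _ = bweulerScalar(f, dfdy, x, y, h, 1e-12, 20)
		x += h
	}
	fmt.Printf("y(1) ≈ %.6f (exact %.6f)\n", y, math.Exp(-1))
}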
Example #5
// Run computes β starting with an initial guess
func (o *ReliabFORM) Run(βtrial float64, verbose bool, args ...interface{}) (β float64, μ, σ, x []float64) {

	// initial random variables
	β = βtrial
	nx := len(o.μ)
	μ = make([]float64, nx) // mean values (equivalent normal value)
	σ = make([]float64, nx) // deviation values (equivalent normal value)
	x = make([]float64, nx) // current vector of random variables defining min(β)
	for i := 0; i < nx; i++ {
		μ[i] = o.μ[i]
		σ[i] = o.σ[i]
		x[i] = o.μ[i]
	}

	// lognormal distribution structure
	var lnd DistLogNormal

	// has lognormal random variable?
	haslrv := false
	for _, found := range o.lrv {
		if found {
			haslrv = true
			break
		}
	}

	// function to compute β with x-constant
	//  gβ(β) = g(μ - β・A・σ) = 0
	var err error
	gβfcn := func(fy, y []float64) error {
		βtmp := y[0]
		for i := 0; i < nx; i++ {
			o.xtmp[i] = μ[i] - βtmp*o.α[i]*σ[i]
		}
		fy[0], err = o.gfcn(o.xtmp, args)
		if err != nil {
			chk.Panic("cannot compute gfcn(%v):\n%v", o.xtmp, err)
		}
		return nil
	}

	// derivative of gβ w.r.t β
	hβfcn := func(dfdy [][]float64, y []float64) error {
		βtmp := y[0]
		for i := 0; i < nx; i++ {
			o.xtmp[i] = μ[i] - βtmp*o.α[i]*σ[i]
		}
		err = o.hfcn(o.dgdx, o.xtmp, args)
		if err != nil {
			chk.Panic("cannot compute hfcn(%v):\n%v", o.xtmp, err)
		}
		dfdy[0][0] = 0
		for i := 0; i < nx; i++ {
			dfdy[0][0] -= o.dgdx[i] * o.α[i] * σ[i]
		}
		return nil
	}

	// nonlinear solver with y[0] = β
	// solving:  gβ(β) = g(μ - β・A・σ) = 0
	var nls num.NlSolver
	nls.Init(1, gβfcn, nil, hβfcn, true, false, nil)
	defer nls.Clean()

	// message
	if verbose {
		io.Pf("\n%s", io.StrThickLine(60))
	}

	// plotting
	plot := o.PlotFnk != ""
	if nx != 2 {
		plot = false
	}
	if plot {
		if o.PlotNp < 3 {
			o.PlotNp = 41
		}
		var umin, umax, vmin, vmax float64
		if o.PlotCf < 1 {
			o.PlotCf = 2
		}
		if len(o.PlotUrange) == 0 {
			umin, umax = μ[0]-o.PlotCf*μ[0], μ[0]+o.PlotCf*μ[0]
			vmin, vmax = μ[1]-o.PlotCf*μ[1], μ[1]+o.PlotCf*μ[1]
		} else {
			chk.IntAssert(len(o.PlotUrange), 2)
			chk.IntAssert(len(o.PlotVrange), 2)
			umin, umax = o.PlotUrange[0], o.PlotUrange[1]
			vmin, vmax = o.PlotVrange[0], o.PlotVrange[1]
		}
		o.PlotU, o.PlotV = utl.MeshGrid2D(umin, umax, vmin, vmax, o.PlotNp, o.PlotNp)
		o.PlotZ = la.MatAlloc(o.PlotNp, o.PlotNp)
		plt.SetForEps(0.8, 300)
		for i := 0; i < o.PlotNp; i++ {
			for j := 0; j < o.PlotNp; j++ {
				o.xtmp[0] = o.PlotU[i][j]
				o.xtmp[1] = o.PlotV[i][j]
				o.PlotZ[i][j], err = o.gfcn(o.xtmp, args)
				if err != nil {
					chk.Panic("cannot compute gfcn(%v):\n%v", x, err)
				}
			}
		}
		plt.Contour(o.PlotU, o.PlotV, o.PlotZ, "")
		plt.ContourSimple(o.PlotU, o.PlotV, o.PlotZ, true, 8, "levels=[0], colors=['yellow']")
		plt.PlotOne(x[0], x[1], "'ro', label='initial'")
	}

	// iterations to find β
	var dat VarData
	B := []float64{β}
	itB := 0
	for itB = 0; itB < o.NmaxItB; itB++ {

		// message
		if verbose {
			gx, err := o.gfcn(x, args)
			if err != nil {
				chk.Panic("cannot compute gfcn(%v):\n%v", x, err)
			}
			io.Pf("%s itB=%d β=%g g=%g\n", io.StrThinLine(60), itB, β, gx)
		}

		// plot
		if plot {
			plt.PlotOne(x[0], x[1], "'r.'")
		}

		// compute direction cosines
		itA := 0
		for itA = 0; itA < o.NmaxItA; itA++ {

			// has lognormal random variable (lrv)
			if haslrv {

				// find equivalent normal mean and std deviation for lognormal variables
				for i := 0; i < nx; i++ {
					if o.lrv[i] {

						// set distribution
						dat.M, dat.S = o.μ[i], o.σ[i]
						lnd.Init(&dat)

						// update μ and σ
						fx := lnd.Pdf(x[i])
						Φinvx := (math.Log(x[i]) - lnd.M) / lnd.S
						φx := math.Exp(-Φinvx*Φinvx/2.0) / math.Sqrt2 / math.SqrtPi
						σ[i] = φx / fx
						μ[i] = x[i] - Φinvx*σ[i]
					}
				}
			}

			// compute direction cosines
			err = o.hfcn(o.dgdx, x, args)
			if err != nil {
				chk.Panic("cannot compute hfcn(%v):\n%v", x, err)
			}
			den := 0.0
			for i := 0; i < nx; i++ {
				den += math.Pow(o.dgdx[i]*σ[i], 2.0)
			}
			den = math.Sqrt(den)
			αerr := 0.0 // difference on α
			for i := 0; i < nx; i++ {
				αnew := o.dgdx[i] * σ[i] / den
				αerr += math.Pow(αnew-o.α[i], 2.0)
				o.α[i] = αnew
			}
			αerr = math.Sqrt(αerr)

			// message
			if verbose {
				io.Pf(" itA=%d\n", itA)
				io.Pf("%12s%12s%12s%12s\n", "x", "μ", "σ", "α")
				for i := 0; i < nx; i++ {
					io.Pf("%12.3f%12.3f%12.3f%12.3f\n", x[i], μ[i], σ[i], o.α[i])
				}
			}

			// update x-star
			for i := 0; i < nx; i++ {
				x[i] = μ[i] - β*o.α[i]*σ[i]
			}

			// check convergence on α
			if itA > 1 && αerr < o.TolA {
				if verbose {
					io.Pfgrey(". . . converged on α with αerr=%g . . .\n", αerr)
				}
				break
			}
		}

		// failed to converge on α
		if itA == o.NmaxItA {
			chk.Panic("failed to convege on α")
		}

		// compute new β
		B[0] = β
		nls.Solve(B, o.NlsSilent)
		βerr := math.Abs(B[0] - β)
		β = B[0]
		if o.NlsCheckJ {
			nls.CheckJ(B, o.NlsCheckJtol, true, false)
		}

		// update x-star
		for i := 0; i < nx; i++ {
			x[i] = μ[i] - β*o.α[i]*σ[i]
		}

		// check convergence on β
		if βerr < o.TolB {
			if verbose {
				io.Pfgrey2(". . . converged on β with βerr=%g . . .\n", βerr)
			}
			break
		}
	}

	// failed to converge on β
	if itB == o.NmaxItB {
		chk.Panic("failed to converge on β")
	}

	// message
	if verbose {
		gx, err := o.gfcn(x, args)
		if err != nil {
			chk.Panic("cannot compute gfcn(%v):\n%v", x, err)
		}
		io.Pfgreen("x = %v\n", x)
		io.Pfgreen("g = %v\n", gx)
		io.PfGreen("β = %v\n", β)
	}

	// plot
	if plot {
		plt.Gll("$x_0$", "$x_1$", "")
		plt.Cross("")
		plt.SaveD("/tmp/gosl", "fig_form_"+o.PlotFnk+".eps")
	}
	return
}
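When the limit-state function is linear in independent normal variables, the iteration above has a closed-form fixed point that is handy for sanity checks: β = (a0 + Σ ai·μi)/√(Σ (ai·σi)²), αi = ai·σi/√(Σ (aj·σj)²) and x*i = μi - β·αi·σi. A minimal sketch (hypothetical helper formLinear, arbitrary example data, not part of the library):

package main

import (
	"fmt"
	"math"
)

// formLinear computes the Hasofer-Lind index for g(x) = a0 + Σ a[i]*x[i]
// with independent normal variables x[i] ~ N(μ[i], σ[i]).
func formLinear(a0 float64, a, μ, σ []float64) (β float64, α, xstar []float64) {
	num, den := a0, 0.0
	for i := range a {
		num += a[i] * μ[i]
		den += math.Pow(a[i]*σ[i], 2)
	}
	den = math.Sqrt(den)
	β = num / den
	α = make([]float64, len(a))
	xstar = make([]float64, len(a))
	for i := range a {
		α[i] = a[i] * σ[i] / den      // direction cosines, as in Run
		xstar[i] = μ[i] - β*α[i]*σ[i] // design point: x* = μ - β·α·σ
	}
	return
}

func main() {
	// g(x) = 3·x0 - 2·x1  with x0 ~ N(20, 2) and x1 ~ N(10, 3)
	β, α, x := formLinear(0, []float64{3, -2}, []float64{20, 10}, []float64{2, 3})
	fmt.Printf("β = %.4f\nα = %v\nx* = %v\n", β, α, x)
}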
Example #6
func Test_nurbs01(tst *testing.T) {

	/*  4 (1,2)             (2,2) 6
	    5  2@o--------------o@3   7
	         |              |
	         |              |      @     -- control point
	         |              |      o     -- node
	         |              |      (a,b) -- span
	         |              |
	         |              |
	    0  0@o--------------o@1   2
	    1 (1,1)             (2,1) 3
	*/

	//verbose()
	chk.PrintTitle("nurb01. square with initial stress")

	// fem
	analysis := NewFEM("data/nurbs01.sim", "", true, false, false, false, chk.Verbose, 0)

	// set stage
	err := analysis.SetStage(0)
	if err != nil {
		tst.Errorf("SetStage failed:\n%v", err)
		return
	}

	// initialise solution vectors
	err = analysis.ZeroStage(0, true)
	if err != nil {
		tst.Errorf("ZeroStage failed:\n%v", err)
		return
	}

	// domain
	dom := analysis.Domains[0]

	// draw NURBS
	if false {
		nurbs := dom.Msh.Cells[0].Shp.Nurbs
		gm.PlotNurbs("/tmp/gofem", "test_nurbs01", nurbs, 21, false, nil)
	}

	// nodes and elements
	chk.IntAssert(len(dom.Nodes), 4)
	chk.IntAssert(len(dom.Elems), 1)

	// check dofs
	for _, nod := range dom.Nodes {
		chk.IntAssert(len(nod.Dofs), 2)
		chk.StrAssert(nod.Dofs[0].Key, "ux")
		chk.StrAssert(nod.Dofs[1].Key, "uy")
	}

	// check equations
	nids, eqs := get_nids_eqs(dom)
	chk.Ints(tst, "eqs", eqs, utl.IntRange(4*2))
	chk.Ints(tst, "nids", nids, []int{0, 1, 2, 3})

	// check Umap
	Umaps := [][]int{
		{0, 1, 2, 3, 4, 5, 6, 7},
	}
	for i, ele := range dom.Elems {
		e := ele.(*ElemU)
		io.Pfpink("%2d : Umap = %v\n", e.Id(), e.Umap)
		chk.Ints(tst, "Umap", e.Umap, Umaps[i])
	}

	// constraints
	chk.IntAssert(len(dom.EssenBcs.Bcs), 4)
	var ct_ux_eqs []int // equations with ux prescribed [sorted]
	var ct_uy_eqs []int // equations with uy prescribed [sorted]
	for _, c := range dom.EssenBcs.Bcs {
		chk.IntAssert(len(c.Eqs), 1)
		eq := c.Eqs[0]
		io.Pfgrey("key=%v eq=%v\n", c.Key, eq)
		switch c.Key {
		case "ux":
			ct_ux_eqs = append(ct_ux_eqs, eq)
		case "uy":
			ct_uy_eqs = append(ct_uy_eqs, eq)
		default:
			tst.Errorf("key %s is incorrect", c.Key)
		}
	}
	sort.Ints(ct_ux_eqs)
	sort.Ints(ct_uy_eqs)
	chk.Ints(tst, "equations with ux prescribed", ct_ux_eqs, []int{0, 4})
	chk.Ints(tst, "equations with uy prescribed", ct_uy_eqs, []int{1, 3})

	// check displacements
	tolu := 1e-16
	for _, n := range dom.Nodes {
		eqx := n.GetEq("ux")
		eqy := n.GetEq("uy")
		u := []float64{dom.Sol.Y[eqx], dom.Sol.Y[eqy]}
		chk.Vector(tst, "u", tolu, u, nil)
	}

	// analytical solution
	qnV, qnH := -100.0, -50.0
	ν := 0.25
	σx, σy := qnH, qnV
	σz := ν * (σx + σy) // plane strain: εz = 0 ⇒ σz = ν(σx + σy)
	σref := []float64{σx, σy, σz, 0}

	// check stresses
	e := dom.Elems[0].(*ElemU)
	tols := 1e-13
	for idx := range e.IpsElem {
		σ := e.States[idx].Sig
		io.Pforan("σ = %v\n", σ)
		chk.Vector(tst, "σ", tols, σ, σref)
	}
}
Example #7
func Test_up01a(tst *testing.T) {

	/* this test simulates seepage flow along a column
	 * by reducing the initial hydrostatic pressure
	 * at the bottom of the column
	 *
	 *   using mesh from col104elay.msh
	 *
	 *      Nodes / Tags                       Equations
	 *                              ux uy pl               ux uy pl
	 *     8 o----o----o 9 (-5)     53 54 55  o----o----o  50 51 52
	 *       |   14    |             .  .  .  |  58 59  |   .  .  .
	 *       |  (-1)   |             .  .  .  |         |   .  .  .
	 *    21 o    o    o 22 (-6)    60 61  .  o    o    o  56 57  .
	 *       |   26    |             .  .  .  |  62 63  |   .  .  .
	 *       |         |             .  .  .  |         |   .  .  .
	 *     6 o----o----o 7 (-4)     39 40 41  o----o----o  36 37 38
	 *       |   13    |             .  .  .  |  44 45  |   .  .  .
	 *       |  (-1)   |             .  .  .  |         |   .  .  .
	 *    19 |    o    o 20 (-6)    46 47  .  |    o    o  42 43  .
	 *       |   25    |             .  .  .  |  48 49  |   .  .  .
	 *       |         |             .  .  .  |         |   .  .  .
	 *     4 o----o----o 5 (-3)     25 26 27  o----o----o  22 23 24
	 *       |   12    |             .  .  .  |  30 31  |   .  .  .
	 *       |  (-2)   |             .  .  .  |         |   .  .  .
	 *    17 o    o    o 18 (-6)    32 33  .  o    o    o  28 29  .
	 *       |   24    |             .  .  .  |  34 35  |   .  .  .
	 *       |         |             .  .  .  |         |   .  .  .
	 *     2 o----o----o 3 (-2)      9 10 11  o----o----o   6  7  8
	 *       |   11    |             .  .  .  |  16 17  |   .  .  .
	 *       |  (-2)   |             .  .  .  |         |   .  .  .
	 *    15 o    o    o 16 (-6)    18 19     o    o    o  14 15
	 *       |   23    |             .  .  .  |  20 21  |   .  .  .
	 *       |         |             .  .  .  |         |   .  .  .
	 *     0 o----o----o 1 (-1)      0  1  2  o----o----o   3  4  5
	 *           10                              12 13
	 */

	// capture errors and flush log
	defer End()

	//verbose()
	chk.PrintTitle("up01a")

	// start simulation
	if !Start("data/up01.sim", true, chk.Verbose) {
		chk.Panic("cannot start simulation")
	}

	// domain
	distr := false
	dom := NewDomain(Global.Sim.Regions[0], distr)
	if dom == nil {
		chk.Panic("cannot allocate new domain")
	}

	// set stage
	if !dom.SetStage(0, Global.Sim.Stages[0], distr) {
		chk.Panic("cannot set stage")
	}

	// nodes and elements
	chk.IntAssert(len(dom.Nodes), 27)
	chk.IntAssert(len(dom.Elems), 4)

	if true {

		// nodes with pl
		nods_with_pl := map[int]bool{0: true, 2: true, 4: true, 6: true, 8: true, 1: true, 3: true, 5: true, 7: true, 9: true}

		// check dofs
		for _, nod := range dom.Nodes {
			if nods_with_pl[nod.Vert.Id] {
				chk.IntAssert(len(nod.Dofs), 3)
				chk.StrAssert(nod.Dofs[0].Key, "ux")
				chk.StrAssert(nod.Dofs[1].Key, "uy")
				chk.StrAssert(nod.Dofs[2].Key, "pl")
			} else {
				chk.IntAssert(len(nod.Dofs), 2)
				chk.StrAssert(nod.Dofs[0].Key, "ux")
				chk.StrAssert(nod.Dofs[1].Key, "uy")
			}
		}

		// check equations
		nids, eqs := get_nids_eqs(dom)
		chk.Ints(tst, "eqs", eqs, utl.IntRange(10*3+17*2))
		chk.Ints(tst, "nids", nids, []int{
			0, 1, 3, 2, 10, 16, 11, 15, 23,
			5, 4, 18, 12, 17, 24,
			7, 6, 20, 13, 19, 25,
			9, 8, 22, 14, 21, 26,
		})

		// check pmap
		Pmaps := [][]int{
			{2, 5, 8, 11},
			{11, 8, 24, 27},
			{27, 24, 38, 41},
			{41, 38, 52, 55},
		}
		Umaps := [][]int{
			{0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21},
			{9, 10, 6, 7, 22, 23, 25, 26, 16, 17, 28, 29, 30, 31, 32, 33, 34, 35},
			{25, 26, 22, 23, 36, 37, 39, 40, 30, 31, 42, 43, 44, 45, 46, 47, 48, 49},
			{39, 40, 36, 37, 50, 51, 53, 54, 44, 45, 56, 57, 58, 59, 60, 61, 62, 63},
		}
		for i, ele := range dom.Elems {
			e := ele.(*ElemUP)
			io.Pfpink("%2d : Pmap = %v\n", e.Id(), e.P.Pmap)
			io.Pfpink("%2d : Umap = %v\n", e.Id(), e.U.Umap)
			chk.Ints(tst, "Pmap", e.P.Pmap, Pmaps[i])
			chk.Ints(tst, "Umap", e.U.Umap, Umaps[i])
		}

		// constraints
		chk.IntAssert(len(dom.EssenBcs.Bcs), 9*2+2+3)
		var ct_ux_eqs []int // equations with ux prescribed [sorted]
		var ct_uy_eqs []int // equations with uy prescribed [sorted]
		var ct_pl_eqs []int // equations with pl prescribed [sorted]
		for _, c := range dom.EssenBcs.Bcs {
			chk.IntAssert(len(c.Eqs), 1)
			eq := c.Eqs[0]
			io.Pfgrey("key=%v eq=%v\n", c.Key, eq)
			switch c.Key {
			case "ux":
				ct_ux_eqs = append(ct_ux_eqs, eq)
			case "uy":
				ct_uy_eqs = append(ct_uy_eqs, eq)
			case "pl":
				ct_pl_eqs = append(ct_pl_eqs, eq)
			default:
				tst.Errorf("key %s is incorrect", c.Key)
			}
		}
		sort.Ints(ct_ux_eqs)
		sort.Ints(ct_uy_eqs)
		sort.Ints(ct_pl_eqs)
		chk.Ints(tst, "equations with ux prescribed", ct_ux_eqs, []int{0, 3, 6, 9, 14, 18, 22, 25, 28, 32, 36, 39, 42, 46, 50, 53, 56, 60})
		chk.Ints(tst, "equations with uy prescribed", ct_uy_eqs, []int{1, 4, 13})
		chk.Ints(tst, "equations with pl prescribed", ct_pl_eqs, []int{2, 5})

	}

	// initial values @ nodes
	io.Pforan("initial values @ nodes\n")
	for _, nod := range dom.Nodes {
		z := nod.Vert.C[1]
		for _, dof := range nod.Dofs {
			u := dom.Sol.Y[dof.Eq]
			switch dof.Key {
			case "ux":
				chk.Scalar(tst, io.Sf("nod %3d : ux(@ %4g)= %6g", nod.Vert.Id, z, u), 1e-17, u, 0)
			case "uy":
				chk.Scalar(tst, io.Sf("nod %3d : uy(@ %4g)= %6g", nod.Vert.Id, z, u), 1e-17, u, 0)
			case "pl":
				plC, _, _ := Global.HydroSt.Calc(z)
				chk.Scalar(tst, io.Sf("nod %3d : pl(@ %4g)= %6g", nod.Vert.Id, z, u), 1e-13, u, plC)
			}
		}
	}

	// initial values @ integration points
	io.Pforan("initial values @ integration points\n")
	for _, ele := range dom.Elems {
		e := ele.(*ElemUP)
		for idx, ip := range e.P.IpsElem {
			s := e.P.States[idx]
			z := e.P.Shp.IpRealCoords(e.P.X, ip)[1]
			chk.AnaNum(tst, io.Sf("sl(z=%11.8f)", z), 1e-17, s.A_sl, 1, chk.Verbose)
		}
	}

	// parameters
	ν := 0.2            // Poisson's coefficient
	K0 := ν / (1.0 - ν) // earth pressure at rest
	nf := 0.3           // porosity
	sl := 1.0           // saturation
	ρL := 1.0           // intrinsic (real) density of liquid
	ρS_top := 2.0       // intrinsic (real) density of solids in top layer
	ρS_bot := 3.0       // intrinsic (real) density of solids in bottom layer
	h := 5.0            // height of each layer
	g := 10.0           // gravity

	// densities
	nl := nf * sl         // volume fraction of liquid
	ns := 1.0 - nf        // volume fraction of solid
	ρl := nl * ρL         // partial density of liquid
	ρs_top := ns * ρS_top // partial density of solids in top layer
	ρs_bot := ns * ρS_bot // partial density of solids in bottom layer
	ρ_top := ρl + ρs_top  // density of mixture in top layer
	ρ_bot := ρl + ρs_bot  // density of mixture in bottom layer

	// absolute values of stresses
	σV_z5 := ρ_top * g * h     // total vertical stress @ elevation z = 5 m (absolute value)
	σV_z0 := σV_z5 + ρ_bot*g*h // total vertical stress @ elevation z = 0 m (absolute value)
	io.Pfyel("ρ_top       = %g\n", ρ_top)
	io.Pfyel("ρ_bot       = %g\n", ρ_bot)
	io.Pfyel("|ΔσV_top|   = %g\n", ρ_top*g*h)
	io.Pfyel("|ΔσV_bot|   = %g\n", ρ_bot*g*h)
	io.PfYel("|σV|(@ z=0) = %g\n", σV_z0)
	io.PfYel("|σV|(@ z=5) = %g\n", σV_z5)

	// stress functions
	var sig fun.Pts
	var pres fun.Pts
	sig.Init(fun.Prms{
		&fun.Prm{N: "t0", V: 0.00}, {N: "y0", V: -σV_z0},
		&fun.Prm{N: "t1", V: 5.00}, {N: "y1", V: -σV_z5},
		&fun.Prm{N: "t2", V: 10.00}, {N: "y2", V: 0.0},
	})
	pres.Init(fun.Prms{
		&fun.Prm{N: "t0", V: 0.00}, {N: "y0", V: 100},
		&fun.Prm{N: "t1", V: 10.00}, {N: "y1", V: 0},
	})

	// check stresses
	io.Pforan("initial stresses @ integration points\n")
	for _, ele := range dom.Elems {
		e := ele.(*ElemUP)
		for idx, ip := range e.U.IpsElem {
			z := e.U.Shp.IpRealCoords(e.U.X, ip)[1]
			σe := e.U.States[idx].Sig
			sv := sig.F(z, nil)
			sve := sv + pres.F(z, nil)
			she := sve * K0
			if math.Abs(σe[2]-σe[0]) > 1e-17 {
				tst.Errorf("σx is not equal to σz: %g != %g\n", σe[2], σe[0])
				return
			}
			if math.Abs(σe[3]) > 1e-17 {
				tst.Errorf("σxy is not equal to zero: %g != 0\n", σe[3])
				return
			}
			chk.AnaNum(tst, io.Sf("sx(z=%11.8f)", z), 0.0003792, σe[0], she, chk.Verbose)
			chk.AnaNum(tst, io.Sf("sy(z=%11.8f)", z), 0.001517, σe[1], sve, chk.Verbose)
		}
	}
	return
}
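The expected stresses in this test follow from simple statics of the two-layer column: per layer, the mixture density is ρ = nf·sl·ρL + (1 - nf)·ρS, and the total vertical stress accumulates downwards from the free surface. A minimal sketch reproducing |σV| at z = 5 m and z = 0 m (plain Go, hypothetical helper sigV, not part of the test):

package main

import "fmt"

// sigV returns the absolute total vertical stress at elevation z for a column of
// height 2h: a top layer of mixture density ρtop over a bottom layer of density ρbot.
func sigV(z, h, g, ρtop, ρbot float64) float64 {
	if z >= h { // within the top layer
		return ρtop * g * (2*h - z)
	}
	return ρtop*g*h + ρbot*g*(h-z)
}

func main() {
	nf, sl := 0.3, 1.0                // porosity, saturation
	ρL, ρStop, ρSbot := 1.0, 2.0, 3.0 // intrinsic densities (liquid, top solids, bottom solids)
	ρtop := nf*sl*ρL + (1-nf)*ρStop   // mixture density, top layer (= 1.7)
	ρbot := nf*sl*ρL + (1-nf)*ρSbot   // mixture density, bottom layer (= 2.4)
	h, g := 5.0, 10.0
	fmt.Println(sigV(5, h, g, ρtop, ρbot)) // 85  (= ρtop·g·h)
	fmt.Println(sigV(0, h, g, ρtop, ρbot)) // 205 (= 85 + ρbot·g·h)
}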
Example #8
// Update updates state
//  pl and pg are updated (new) values
func (o Model) Update(s *State, Δpl, Δpg, pl, pg float64) (err error) {

	// auxiliary variables
	slmin := o.Lrm.SlMin()
	Δpc := Δpg - Δpl
	wet := Δpc < 0
	pl0 := pl - Δpl
	pg0 := pg - Δpg
	pc0 := pg0 - pl0
	sl0 := s.A_sl
	pc := pc0 + Δpc
	sl := sl0

	// update liquid saturation
	if pc <= 0.0 {
		sl = 1 // full liquid saturation if capillary pressure is ineffective

	} else if o.nonrateLrm != nil && !o.AllBE {
		sl = o.nonrateLrm.Sl(pc) // handle simple retention models

	} else { // unsaturated case with rate-type model

		// trial liquid saturation update
		fA, e := o.Lrm.Cc(pc0, sl0, wet)
		if e != nil {
			return e
		}
		if o.MEtrial {
			slFE := sl0 + Δpc*fA
			fB, e := o.Lrm.Cc(pc, slFE, wet)
			if e != nil {
				return e
			}
			sl += 0.5 * Δpc * (fA + fB)
		} else {
			sl += Δpc * fA
		}

		// fix trial sl out-of-range values
		if sl < slmin {
			sl = slmin
		}
		if sl > 1 {
			sl = 1
		}

		// message
		if o.ShowR {
			io.PfYel("%6s%18s%18s%18s%18s%8s\n", "it", "Cc", "sl", "δsl", "r", "ex(r)")
		}

		// backward-Euler update
		var f, r, J, δsl float64
		var it int
		for it = 0; it < o.NmaxIt; it++ {
			f, err = o.Lrm.Cc(pc, sl, wet)
			if err != nil {
				return
			}
			r = sl - sl0 - Δpc*f
			if o.ShowR {
				io.Pfyel("%6d%18.14f%18.14f%18.14f%18.10e%8d\n", it, f, sl, δsl, r, utl.Expon(r))
			}
			if math.Abs(r) < o.Itol {
				break
			}
			J, err = o.Lrm.J(pc, sl, wet)
			if err != nil {
				return
			}
			δsl = -r / (1.0 - Δpc*J)
			sl += δsl
			if math.IsNaN(sl) {
				return chk.Err("NaN found: Δpc=%v f=%v r=%v J=%v sl=%v\n", Δpc, f, r, J, sl)
			}
		}

		// message
		if o.ShowR {
			io.Pfgrey("  pc0=%.6f  sl0=%.6f  Δpl=%.6f  Δpg=%.6f  Δpc=%.6f\n", pc0, sl0, Δpl, Δpg, Δpc)
			io.Pfgrey("  converged with %d iterations\n", it)
		}

		// check convergence
		if it == o.NmaxIt {
			return chk.Err("saturation update failed after %d iterations\n", it)
		}
	}

	// check results
	if pc < 0 && sl < 1 {
		return chk.Err("inconsistent results: saturation must be equal to one when the capillary pressure is ineffective. pc = %g < 0 and sl = %g < 1 is incorrect", pc, sl)
	}
	if sl < slmin {
		return chk.Err("inconsistent results: saturation must be greater than minimum saturation. sl = %g < %g is incorrect", sl, slmin)
	}

	// set state
	s.A_sl = sl          // 2
	s.A_ρL += o.Cl * Δpl // 3
	s.A_ρG += o.Cg * Δpg // 4
	s.A_Δpc = Δpc        // 5
	s.A_wet = wet        // 6
	return
}
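The backward-Euler block above is a one-dimensional Newton iteration on r = sl - sl0 - Δpc·Cc(pc, sl) with Jacobian 1 - Δpc·∂Cc/∂sl. The sketch below repeats that iteration for a toy retention model Cc = -λ·sl (an assumption made only for illustration; all names are hypothetical), for which the fixed point sl0/(1 + λ·Δpc) is known:

package main

import (
	"fmt"
	"math"
)

// updateSl solves sl - sl0 - Δpc*Cc(pc, sl) = 0 by Newton's method.
func updateSl(λ, pc, Δpc, sl0, tol float64, nmaxIt int) (float64, error) {
	Cc := func(pc, sl float64) float64 { return -λ * sl } // toy model: dsl/dpc
	J := func(pc, sl float64) float64 { return -λ }       // ∂Cc/∂sl
	sl := sl0
	for it := 0; it < nmaxIt; it++ {
		r := sl - sl0 - Δpc*Cc(pc, sl)
		if math.Abs(r) < tol {
			return sl, nil
		}
		sl -= r / (1.0 - Δpc*J(pc, sl)) // δsl = -r / (1 - Δpc·J)
	}
	return sl, fmt.Errorf("saturation update failed to converge")
}

func main() {
	// drying step (Δpc > 0) decreases sl; exact root is sl0/(1 + λ·Δpc)
	sl, err := updateSl(0.5, 1.0, 0.2, 0.95, 1e-12, 20)
	fmt.Println(sl, err) // ≈ 0.95/1.1 = 0.8636..., <nil>
}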
Example #9
func main() {

	mpi.Start(false)
	defer func() {
		mpi.Stop(false)
	}()

	if mpi.Rank() == 0 {
		io.PfYel("\nTest MPI 01\n")
	}
	if mpi.Size() != 3 {
		chk.Panic("this test needs 3 processors")
	}
	n := 11
	x := make([]float64, n)
	id, sz := mpi.Rank(), mpi.Size()
	start, endp1 := (id*n)/sz, ((id+1)*n)/sz
	for i := start; i < endp1; i++ {
		x[i] = float64(i)
	}

	// Barrier
	mpi.Barrier()

	io.Pfgrey("x @ proc # %d = %v\n", id, x)

	// SumToRoot
	r := make([]float64, n)
	mpi.SumToRoot(r, x)
	var tst testing.T
	if id == 0 {
		chk.Vector(&tst, fmt.Sprintf("SumToRoot:       r @ proc # %d", id), 1e-17, r, []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
	} else {
		chk.Vector(&tst, fmt.Sprintf("SumToRoot:       r @ proc # %d", id), 1e-17, r, make([]float64, n))
	}

	// BcastFromRoot
	r[0] = 666
	mpi.BcastFromRoot(r)
	chk.Vector(&tst, fmt.Sprintf("BcastFromRoot:   r @ proc # %d", id), 1e-17, r, []float64{666, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10})

	// AllReduceSum
	setslice(x)
	w := make([]float64, n)
	mpi.AllReduceSum(x, w)
	chk.Vector(&tst, fmt.Sprintf("AllReduceSum:    w @ proc # %d", id), 1e-17, w, []float64{110, 110, 110, 1021, 1021, 1021, 2032, 2032, 2032, 3043, 3043})

	// AllReduceSumAdd
	setslice(x)
	y := []float64{-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000}
	mpi.AllReduceSumAdd(y, x, w)
	chk.Vector(&tst, fmt.Sprintf("AllReduceSumAdd: y @ proc # %d", id), 1e-17, y, []float64{-890, -890, -890, 21, 21, 21, 1032, 1032, 1032, 2043, 2043})

	// AllReduceMin
	setslice(x)
	mpi.AllReduceMin(x, w)
	chk.Vector(&tst, fmt.Sprintf("AllReduceMin:    x @ proc # %d", id), 1e-17, x, []float64{0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3})

	// AllReduceMax
	setslice(x)
	mpi.AllReduceMax(x, w)
	chk.Vector(&tst, fmt.Sprintf("AllReduceMax:    x @ proc # %d", id), 1e-17, x, []float64{100, 100, 100, 1000, 1000, 1000, 2000, 2000, 2000, 3000, 3000})
}
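setslice is not shown here, so only the index arithmetic can be illustrated: each rank owns the block [id·n/sz, (id+1)·n/sz) of the vector. A minimal sketch without MPI (plain Go) prints the partition used above for n = 11 and 3 processors:

package main

import "fmt"

func main() {
	n, sz := 11, 3
	for id := 0; id < sz; id++ {
		start, endp1 := (id*n)/sz, ((id+1)*n)/sz
		fmt.Printf("rank %d owns indices [%d, %d)\n", id, start, endp1)
	}
	// rank 0 owns indices [0, 3)
	// rank 1 owns indices [3, 7)
	// rank 2 owns indices [7, 11)
}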