Example #1
// Render on an existing image buffer; resize it if needed.
func On(img *image.RGBA, f *data.Slice, fmin, fmax string, arrowSize int, colormap ...color.RGBA) {
	dim := f.NComp()
	switch dim {
	default:
		log.Fatalf("unsupported number of components: %v", dim)
	case 3:
		drawVectors(img, f.Vectors(), arrowSize)
	case 1:
		min, max := extrema(f.Host()[0])
		if fmin != "auto" {
			m, err := strconv.ParseFloat(fmin, 32)
			if err != nil {
				util.Fatal("draw: scale:", err)
			}
			min = float32(m)
		}
		if fmax != "auto" {
			m, err := strconv.ParseFloat(fmax, 32)
			if err != nil {
				util.Fatal("draw: scale:", err)
			}
			max = float32(m)
		}
		if min == max {
			min -= 1
			max += 1 // make it gray instead of black
		}
		drawFloats(img, f.Scalars(), min, max, colormap...)
	}
}
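A minimal sketch of a call site for the function above, assuming it lives in mumax3's draw package and that f is a one-component (scalar) *data.Slice; the buffer size and the "auto" limits are illustrative only:

	// Hypothetical usage: render a scalar field with automatic color scaling
	// and no arrows onto a freshly allocated RGBA buffer.
	img := image.NewRGBA(image.Rect(0, 0, 256, 128))
	draw.On(img, f, "auto", "auto", 0)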
Example #2
func (b *thermField) update() {
	// We need to fix the time step here because the solver will not yet have done it before the first step.
	// FixDt as an lvalue that sets Dt_si on change might be cleaner.
	if FixDt != 0 {
		Dt_si = FixDt
	}

	if b.generator == 0 {
		b.generator = curand.CreateGenerator(curand.PSEUDO_DEFAULT)
		b.generator.SetSeed(b.seed)
	}
	if b.noise == nil {
		b.noise = cuda.NewSlice(b.NComp(), b.Mesh().Size())
		// freshly (re-)allocated noise is certainly invalid
		B_therm.step = -1
		B_therm.dt = -1
	}

	if Temp.isZero() {
		cuda.Memset(b.noise, 0, 0, 0)
		b.step = NSteps
		b.dt = Dt_si
		return
	}

	// keep the noise constant during one time step
	if NSteps == b.step && Dt_si == b.dt {
		return
	}

	if FixDt == 0 {
		util.Fatal("Finite temperature requires fixed time step. Set FixDt != 0.")
	}

	N := Mesh().NCell()
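	// thermal-noise prefactor 2*kB / (gamma_LL * V_cell * dt), passed to cuda.SetTemperature for each component below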
	k2_VgammaDt := 2 * mag.Kb / (GammaLL * cellVolume() * Dt_si)
	noise := cuda.Buffer(1, Mesh().Size())
	defer cuda.Recycle(noise)

	const mean = 0
	const stddev = 1
	dst := b.noise
	ms := Msat.MSlice()
	defer ms.Recycle()
	temp := Temp.MSlice()
	defer temp.Recycle()
	alpha := Alpha.MSlice()
	defer alpha.Recycle()
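	// per component: generate unit-variance Gaussian noise, then have cuda.SetTemperature scale it into dst using the prefactor and the cell-wise Msat, Temp and Alpha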
	for i := 0; i < 3; i++ {
		b.generator.GenerateNormal(uintptr(noise.DevPtr(0)), int64(N), mean, stddev)
		cuda.SetTemperature(dst.Comp(i), noise, k2_VgammaDt, ms, temp, alpha)
	}

	b.step = NSteps
	b.dt = Dt_si
}
Example #3
// checkNaN aborts with util.Fatal if s contains a NaN or Inf value.
func checkNaN(s *data.Slice, name string) {
	host := s.Host()
	for _, comp := range host {
		for _, v := range comp {
			if math.IsNaN(float64(v)) || math.IsInf(float64(v), 0) {
				util.Fatal("NaN or Inf in", name)
			}
		}
	}
}
Example #4
// Cell layers #a (inclusive) up to #b (exclusive).
func Layers(a, b int) Shape {
	Nz := Mesh().Size()[Z]
	if a < 0 || a > Nz || b < 0 || b < a {
		util.Fatal("layers ", a, ":", b, " out of bounds (0 - ", Nz, ")")
	}
	c := Mesh().CellSize()[Z]
	z1 := Index2Coord(0, 0, a)[Z] - c/2
	z2 := Index2Coord(0, 0, b)[Z] - c/2
	return ZRange(z1, z2)
}
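As a usage note, a hypothetical call; DefRegion and the layer indices are illustrative assumptions, not taken from the example above:

	// Mark the two bottom cell layers as region 1.
	DefRegion(1, Layers(0, 2))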
Example #5
func sanityCheck(cellsize [3]float64, pbc [3]int) {
	a3 := cellsize[X] / cellsize[Y]
	a2 := cellsize[Y] / cellsize[Z]
	a1 := cellsize[Z] / cellsize[X]

	aMax := math.Max(a1, math.Max(a2, a3))
	aMin := math.Min(a1, math.Min(a2, a3))

	if aMax > maxAspect || aMin < 1./maxAspect {
		util.Fatal("Unrealistic cell aspect ratio:", cellsize)
	}
}
Example #6
// Adds the current exchange field to dst
func AddExchangeField(dst *data.Slice) {
	inter := !Dind.isZero()
	bulk := !Dbulk.isZero()
	switch {
	case !inter && !bulk:
		cuda.AddExchange(dst, M.Buffer(), lex2.Gpu(), regions.Gpu(), M.Mesh())
	case inter && !bulk:
		cuda.AddDMI(dst, M.Buffer(), lex2.Gpu(), din2.Gpu(), regions.Gpu(), M.Mesh()) // dmi+exchange
	case bulk && !inter:
		util.AssertMsg(allowUnsafe || (Msat.IsUniform() && Aex.IsUniform() && Dbulk.IsUniform()), "DMI: Msat, Aex, Dex must be uniform")
		cuda.AddDMIBulk(dst, M.Buffer(), lex2.Gpu(), dbulk2.Gpu(), regions.Gpu(), M.Mesh()) // dmi+exchange
	case inter && bulk:
		util.Fatal("Cannot have induced and interfacial DMI at the same time")
	}
}
Example #7
// Adds the current exchange field to dst
func AddExchangeField(dst *data.Slice) {
	inter := !Dind.isZero()
	bulk := !Dbulk.isZero()
	switch {
	case !inter && !bulk:
		cuda.AddExchange(dst, M.Buffer(), lex2.Gpu(), regions.Gpu(), M.Mesh())
	case inter && !bulk:
		// DMI kernel has space-dependent parameters, but
		// correct averaging between regions is not yet clear nor tested, so disallow.
		util.AssertMsg(allowUnsafe || (Msat.IsUniform() && Aex.IsUniform() && Dind.IsUniform()), "DMI: Msat, Aex, Dex must be uniform")
		cuda.AddDMI(dst, M.Buffer(), lex2.Gpu(), din2.Gpu(), regions.Gpu(), M.Mesh()) // dmi+exchange
	case bulk && !inter:
		util.AssertMsg(allowUnsafe || (Msat.IsUniform() && Aex.IsUniform() && Dbulk.IsUniform()), "DMI: Msat, Aex, Dex must be uniform")
		cuda.AddDMIBulk(dst, M.Buffer(), lex2.Gpu(), dbulk2.Gpu(), regions.Gpu(), M.Mesh()) // dmi+exchange
	case inter && bulk:
		util.Fatal("Cannot have induced and interfacial DMI at the same time")
	}
}
Example #8
// Compares FFT-accelerated convolution against brute-force on sparse data.
// This is not really needed but very quickly uncovers newly introduced bugs.
func testConvolution(c *DemagConvolution, PBC [3]int, realKern [3][3]*data.Slice) {
	if PBC != [3]int{0, 0, 0} || prod(c.inputSize) > 512*512 {
		// the brute-force method does not work for pbc,
		// and for large simulations it gets just too slow.
		util.Log("skipping convolution self-test")
		return
	}
	//fmt.Print("convolution test ")
	inhost := data.NewSlice(3, c.inputSize)
	initConvTestInput(inhost.Vectors())
	gpu := NewSlice(3, c.inputSize)
	defer gpu.Free()
	data.Copy(gpu, inhost)

	regions := NewBytes(prod(c.inputSize))
	defer regions.Free()
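	// Bsat lookup table with one entry per possible region (256), set to a uniform 1 below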
	Bsat := NewSlice(1, [3]int{1, 1, 256})
	defer Bsat.Free()
	Memset(Bsat, 1)
	BsatLUT := LUTPtr(Bsat.DevPtr(0))

	vol := data.NilSlice(1, c.inputSize)
	c.Exec(gpu, gpu, vol, BsatLUT, regions)

	output := gpu.HostCopy()

	brute := data.NewSlice(3, c.inputSize)
	bruteConv(inhost.Vectors(), brute.Vectors(), realKern)

	a, b := output.Host(), brute.Host()
	err := float32(0)
	for c := range a {
		for i := range a[c] {
			if fabs(a[c][i]-b[c][i]) > err {
				err = fabs(a[c][i] - b[c][i])
			}
		}
	}
	if err > CONV_TOLERANCE {
		util.Fatal("convolution self-test tolerance: ", err, " FAIL")
	}
}
Example #9
// Load regions from ovf file, use first component.
// Regions should be between 0 and 256
func (r *Regions) LoadFile(fname string) {
	inSlice := LoadFile(fname)
	n := r.Mesh().Size()
	inSlice = data.Resample(inSlice, n)
	inArr := inSlice.Tensors()[0]
	l := r.HostList()
	arr := reshapeBytes(l, n)

	for iz := 0; iz < n[Z]; iz++ {
		for iy := 0; iy < n[Y]; iy++ {
			for ix := 0; ix < n[X]; ix++ {
				val := inArr[iz][iy][ix]
				if val < 0 || val > 256 {
					util.Fatal("regions.LoadFile(", fname, "): all values should be between 0 & 256, have: ", val)
				}
				arr[iz][iy][ix] = byte(val)
			}
		}
	}
	r.gpuCache.Upload(l)
}
Example #10
// adapt time step: dt *= corr, but limited to sensible values.
func adaptDt(corr float64) {
	if FixDt != 0 {
		Dt_si = FixDt
		return
	}

	// corner case triggered by err = 0: just keep time step.
	// see test/regression017.mx3
	if math.IsNaN(corr) {
		corr = 1
	}

	util.AssertMsg(corr != 0, "Time step too small, check if parameters are sensible")
	corr *= Headroom
	if corr > 2 {
		corr = 2
	}
	if corr < 1./2. {
		corr = 1. / 2.
	}
	Dt_si *= corr
	if MinDt != 0 && Dt_si < MinDt {
		Dt_si = MinDt
	}
	if MaxDt != 0 && Dt_si > MaxDt {
		Dt_si = MaxDt
	}
	if Dt_si == 0 {
		util.Fatal("time step too small")
	}

	// do not cross alarm time
	if Time < alarm && Time+Dt_si > alarm {
		Dt_si = alarm - Time
	}

	util.AssertMsg(Dt_si > 0, fmt.Sprint("Time step too small: ", Dt_si))
}
Example #11
// Compares FFT-accelerated convolution against brute-force on sparse data.
// This is not really needed but very quickly uncovers newly introduced bugs.
func testConvolution(c *DemagConvolution, PBC [3]int, realKern [3][3]*data.Slice) {
	if PBC != [3]int{0, 0, 0} {
		// the brute-force method does not work for pbc.
		util.Log("skipping convolution self-test for PBC")
		return
	}
	util.Log("//convolution self-test...")
	inhost := data.NewSlice(3, c.inputSize)
	initConvTestInput(inhost.Vectors())
	gpu := NewSlice(3, c.inputSize)
	defer gpu.Free()
	data.Copy(gpu, inhost)

	Msat := NewSlice(1, [3]int{1, 1, 256})
	defer Msat.Free()
	Memset(Msat, 1)

	vol := data.NilSlice(1, c.inputSize)
	c.Exec(gpu, gpu, vol, ToMSlice(Msat))

	output := gpu.HostCopy()

	brute := data.NewSlice(3, c.inputSize)
	bruteConv(inhost.Vectors(), brute.Vectors(), realKern)

	a, b := output.Host(), brute.Host()
	err := float32(0)
	for c := range a {
		for i := range a[c] {
			if fabs(a[c][i]-b[c][i]) > err {
				err = fabs(a[c][i] - b[c][i])
			}
		}
	}
	if err > CONV_TOLERANCE {
		util.Fatal("convolution self-test tolerance: ", err, " FAIL")
	}
}
Example #12
func (t *DataTable) Add(output TableData) {
	if t.inited() {
		util.Fatal("data table add ", output.Name(), ": need to add quantity before table is output the first time")
	}
	t.outputs = append(t.outputs, output)
}
Example #13
func (geometry *geom) setGeom(s Shape) {
	SetBusy(true)
	defer SetBusy(false)

	if s == nil {
		// TODO: would be nice not to save volume if entirely filled
		s = universe
	}

	geometry.shape = s
	if geometry.Gpu().IsNil() {
		geometry.buffer = cuda.NewSlice(1, geometry.Mesh().Size())
	}

	host := data.NewSlice(1, geometry.Gpu().Size())
	array := host.Scalars()
	V := host
	v := array
	n := geometry.Mesh().Size()
	c := geometry.Mesh().CellSize()
	cx, cy, cz := c[X], c[Y], c[Z]

	progress, progmax := 0, n[Y]*n[Z]

	var ok bool
	for iz := 0; iz < n[Z]; iz++ {
		for iy := 0; iy < n[Y]; iy++ {

			progress++
			util.Progress(progress, progmax, "Initializing geometry")

			for ix := 0; ix < n[X]; ix++ {

				r := Index2Coord(ix, iy, iz)
				x0, y0, z0 := r[X], r[Y], r[Z]

				// check if center and all vertices lie inside or all outside
				allIn, allOut := true, true
				if s(x0, y0, z0) {
					allOut = false
				} else {
					allIn = false
				}

				if edgeSmooth != 0 { // center is sufficient if we're not really smoothing
					for _, Δx := range []float64{-cx / 2, cx / 2} {
						for _, Δy := range []float64{-cy / 2, cy / 2} {
							for _, Δz := range []float64{-cz / 2, cz / 2} {
								if s(x0+Δx, y0+Δy, z0+Δz) { // inside
									allOut = false
								} else {
									allIn = false
								}
							}
						}
					}
				}

				switch {
				case allIn:
					v[iz][iy][ix] = 1
					ok = true
				case allOut:
					v[iz][iy][ix] = 0
				default:
					v[iz][iy][ix] = geometry.cellVolume(ix, iy, iz)
					ok = ok || (v[iz][iy][ix] != 0)
				}
			}
		}
	}

	if !ok {
		util.Fatal("SetGeom: geometry completely empty")
	}

	data.Copy(geometry.buffer, V)

	// M inside geom but previously outside needs to be re-inited
	needupload := false
	geomlist := host.Host()[0]
	mhost := M.Buffer().HostCopy()
	m := mhost.Host()
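	// deterministic seed: re-initialised cells get reproducible random directions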
	rng := rand.New(rand.NewSource(0))
	for i := range m[0] {
		if geomlist[i] != 0 {
			mx, my, mz := m[X][i], m[Y][i], m[Z][i]
			if mx == 0 && my == 0 && mz == 0 {
				needupload = true
				rnd := randomDir(rng)
				m[X][i], m[Y][i], m[Z][i] = float32(rnd[X]), float32(rnd[Y]), float32(rnd[Z])
			}
		}
	}
	if needupload {
		data.Copy(M.Buffer(), mhost)
	}

	M.normalize() // removes m outside vol
}
Example #14
// check if mesh is set
func checkMesh() {
	if globalmesh_.Size() == [3]int{0, 0, 0} {
		util.Fatal("need to set mesh first")
	}
}
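For context, a minimal sketch of how such a guard is typically used: an accessor that runs the check before handing out the global mesh. This accessor is an assumption for illustration, not part of the example above, and the concrete return type may differ:

	// Hypothetical accessor: callers obtain the mesh through Mesh(),
	// so checkMesh() aborts early if no mesh has been set yet.
	func Mesh() *data.Mesh {
		checkMesh()
		return &globalmesh_
	}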