Example #1
// Extract real parts, copy them from src to dst.
// In the meantime, check whether the imaginary parts are nearly zero
// and scale the kernel to compensate for unnormalized FFTs.
func scaleRealParts(dst, src *data.Slice, scale float32) {
	util.Argument(2*dst.Len() == src.Len())
	util.Argument(dst.NComp() == 1 && src.NComp() == 1)

	srcList := src.HostCopy().Host()[0]
	dstList := dst.Host()[0]

	// Normally, the FFT'ed kernel is purely real because of symmetry,
	// so we only store the real parts...
	maximg := float32(0.)
	maxreal := float32(0.)
	for i := 0; i < src.Len()/2; i++ {
		dstList[i] = srcList[2*i] * scale
		if fabs(srcList[2*i+0]) > maxreal {
			maxreal = fabs(srcList[2*i+0])
		}
		if fabs(srcList[2*i+1]) > maximg {
			maximg = fabs(srcList[2*i+1])
		}
	}
	// ...however, we check that the imaginary parts are nearly zero,
	// just to be sure we did not make a mistake during kernel creation.
	if maximg/maxreal > FFT_IMAG_TOLERANCE {
		log.Fatalf("Too large FFT kernel imaginary/real part: %v", maximg/maxreal)
	}
}
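Note: the indexing above assumes CUFFT-style interleaved complex storage, src = [Re c0, Im c0, Re c1, Im c1, ...] (an assumption, since the layout is not spelled out in this snippet). Under that assumption the loop computes

$\mathrm{dst}[i] = \mathrm{scale}\cdot\Re(c_i), \quad \mathrm{maxreal} = \max_i|\Re(c_i)|, \quad \mathrm{maximg} = \max_i|\Im(c_i)|,$

and the final check bounds the relative imaginary content maximg/maxreal by FFT_IMAG_TOLERANCE.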
Example #2
func kernMulRSymm2Dx(fftMx, K00 *data.Slice, N1, N2 int, str cu.Stream) {
	util.Argument(K00.Len() == (N1/2+1)*N2)
	util.Argument(fftMx.NComp() == 1 && K00.NComp() == 1)

	cfg := make2DConf(N1, N2)

	k_kernmulRSymm2Dx_async(fftMx.DevPtr(0), K00.DevPtr(0), N1, N2, cfg, str)
}
Example #3
// Does not yet use Y mirror symmetry,
// even though it is partially implemented in the kernel.
func kernMulRSymm3D(fftM [3]*data.Slice, K00, K11, K22, K12, K02, K01 *data.Slice, N0, N1, N2 int, str cu.Stream) {
	util.Argument(K00.Len() == N0*(N1)*N2) // no symmetry yet
	util.Argument(fftM[0].NComp() == 1 && K00.NComp() == 1)

	cfg := make2DConf(N1, N2)

	k_kernmulRSymm3D_async(fftM[0].DevPtr(0), fftM[1].DevPtr(0), fftM[2].DevPtr(0),
		K00.DevPtr(0), K11.DevPtr(0), K22.DevPtr(0), K12.DevPtr(0), K02.DevPtr(0), K01.DevPtr(0),
		N0, N1, N2, cfg, str)
}
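For orientation (an inference from the argument names only, since the CUDA kernel body is not shown here): at every FFT grid point the six kernel components act as a symmetric 3x3 tensor on the transformed magnetization,

$\tilde M_i' = \sum_j K_{ij}\,\tilde M_j, \qquad K_{ij} = K_{ji},$

which is why only the upper-triangular elements K00, K11, K22, K12, K02, K01 need to be passed.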
Example #4
func kernMulRSymm2Dyz(fftMy, fftMz, K11, K22, K12 *data.Slice, N1, N2 int, str cu.Stream) {
	util.Argument(K11.Len() == (N1/2+1)*N2)
	util.Argument(fftMy.NComp() == 1 && K11.NComp() == 1)

	cfg := make2DConf(N1, N2)

	k_kernmulRSymm2Dyz_async(fftMy.DevPtr(0), fftMz.DevPtr(0),
		K11.DevPtr(0), K22.DevPtr(0), K12.DevPtr(0),
		N1, N2, cfg, str)
}
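Read together with kernMulRSymm2Dx in Example #2, the 2D case apparently splits into a scalar x part and a coupled yz part (again inferred from the kernel names; this is consistent with kernel[0][1] and kernel[0][2] being nil for 2D in BruteKernel further down):

$\tilde M_x' = K_{00}\,\tilde M_x, \qquad \tilde M_y' = K_{11}\tilde M_y + K_{12}\tilde M_z, \qquad \tilde M_z' = K_{12}\tilde M_y + K_{22}\tilde M_z.$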
Example #5
// Copies src into dst, which is larger or smaller, and multiplies by vol*Bsat.
// The remainder of dst is not filled with zeros.
func copyPadMul(dst, src *data.Slice, dstsize, srcsize [3]int, vol *data.Slice, Bsat float64, str cu.Stream) {
	util.Argument(dst.NComp() == 1)
	util.Argument(src.NComp() == 1)
	util.Argument(vol.NComp() == 1)
	util.Assert(dst.Len() == prod(dstsize) && src.Len() == prod(srcsize))
	util.Assert(vol.Mesh().Size() == srcsize)

	N0 := iMin(dstsize[1], srcsize[1])
	N1 := iMin(dstsize[2], srcsize[2])
	cfg := make2DConf(N0, N1)

	k_copypadmul_async(dst.DevPtr(0), dstsize[0], dstsize[1], dstsize[2],
		src.DevPtr(0), srcsize[0], srcsize[1], srcsize[2],
		vol.DevPtr(0), float32(Bsat), cfg, str)
}
Example #6
File: reduce.go  Project: shenyp09/mx3
// Maximum of the norms of the difference between all vectors (x1,y1,z1) and (x2,y2,z2)
// 	(dx, dy, dz) = (x1, y1, z1) - (x2, y2, z2)
// 	max_i sqrt( dx[i]*dx[i] + dy[i]*dy[i] + dz[i]*dz[i] )
func MaxVecDiff(x, y *data.Slice) float64 {
	util.Argument(x.Len() == y.Len())
	out := reduceBuf(0)
	k_reducemaxvecdiff2(x.DevPtr(0), x.DevPtr(1), x.DevPtr(2),
		y.DevPtr(0), y.DevPtr(1), y.DevPtr(2),
		out, 0, x.Len(), reducecfg)
	return math.Sqrt(float64(copyback(out)))
}
Example #7
File: slice.go  Project: shenyp09/mx3
// Memset sets the Slice's components to the specified values.
func Memset(s *data.Slice, val ...float32) {
	util.Argument(len(val) == s.NComp())
	str := stream()
	for c, v := range val {
		cu.MemsetD32Async(cu.DevicePtr(s.DevPtr(c)), math.Float32bits(v), int64(s.Len()), str)
	}
	syncAndRecycle(str)
}
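A minimal usage sketch (the helper name setUniformZ is hypothetical, and it assumes the same cuda package context so that Memset and data.Slice are in scope): initialize a 3-component magnetization slice to a uniform +z state.

// setUniformZ is a hypothetical helper: it sets a 3-component GPU slice to (0, 0, 1).
func setUniformZ(m *data.Slice) {
	// One constant per component; util.Argument above enforces len(val) == m.NComp().
	Memset(m, 0, 0, 1)
}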
Example #8
File: madd.go  Project: shenyp09/mx3
// Adds a constant to each element of the slice.
// 	dst[comp][index] += cnst[comp]
func AddConst(dst *data.Slice, cnst ...float32) {
	util.Argument(len(cnst) == dst.NComp())
	N := dst.Len()
	cfg := make1DConf(N)
	str := stream()
	for c := 0; c < dst.NComp(); c++ {
		if cnst[c] != 0 {
			k_madd2_async(dst.DevPtr(c), dst.DevPtr(c), 1, nil, cnst[c], N, cfg, str)
		}
	}
	syncAndRecycle(str)
}
Example #9
File: fft3dr2c.go  Project: shenyp09/mx3
// Execute the FFT plan asynchronously.
// src and dst are 3D arrays stored as 1D arrays.
func (p *fft3DR2CPlan) ExecAsync(src, dst *data.Slice) {
	util.Argument(src.NComp() == 1 && dst.NComp() == 1)
	oksrclen := p.InputLen()
	if src.Len() != oksrclen {
		log.Panicf("fft size mismatch: expecting src len %v, got %v", oksrclen, src.Len())
	}
	okdstlen := p.OutputLen()
	if dst.Len() != okdstlen {
		log.Panicf("fft size mismatch: expecting dst len %v, got %v", okdstlen, dst.Len())
	}
	p.handle.ExecR2C(cu.DevicePtr(src.DevPtr(0)), cu.DevicePtr(dst.DevPtr(0)))
}
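The two length checks presumably follow the standard real-to-complex FFT layout, with the contiguous dimension halved as in CUFFT (an assumption about how InputLen and OutputLen are defined for this plan type): for a logical transform of size $N_0 \times N_1 \times N_2$,

$\mathrm{InputLen} = N_0 N_1 N_2$ real values, $\qquad \mathrm{OutputLen} = 2\,N_0 N_1 (N_2/2+1)$ floats, i.e. $N_0 N_1 (N_2/2+1)$ complex values.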
Example #10
// Copies src into dst, which is larger or smaller.
// The remainder of dst is not filled with zeros.
func copyPad(dst, src *data.Slice, dstsize, srcsize [3]int, str cu.Stream) {
	util.Argument(dst.NComp() == 1 && src.NComp() == 1)
	util.Assert(dst.Len() == prod(dstsize))
	util.Assert(src.Len() == prod(srcsize))

	N0 := iMin(dstsize[1], srcsize[1])
	N1 := iMin(dstsize[2], srcsize[2])
	cfg := make2DConf(N0, N1)

	k_copypad_async(dst.DevPtr(0), dstsize[0], dstsize[1], dstsize[2],
		src.DevPtr(0), srcsize[0], srcsize[1], srcsize[2], cfg, str)
}
Example #11
File: config.go  Project: shenyp09/mx3
// Make a vortex magnetization with given circulation and core polarization (+1 or -1)
// Example:
// 	M.Upload(Vortex(1, 1))
func Vortex(circ, pol int) *data.Slice {
	util.Argument(circ == 1 || circ == -1)
	util.Argument(pol == 1 || pol == -1)

	mh := data.NewSlice(3, Mesh())
	v := mh.Vectors()
	cy, cz := len(v[0][0])/2, len(v[0][0][0])/2
	for i := range v[0] {
		for j := range v[0][i] {
			for k := range v[0][0][j] {
				y := j - cy
				x := k - cz
				v[X][i][j][k] = 0
				v[Y][i][j][k] = float32(x * circ)
				v[Z][i][j][k] = float32(-y * circ)
			}
		}
		v[Z][i][cy][cz] = 0.
		v[Y][i][cy][cz] = 0.
		v[X][i][cy][cz] = float32(pol)
	}
	return mh
}
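In equations, the loop sets (before any normalization, which presumably happens when the configuration is uploaded):

$\mathbf m(j,k) \propto \big(0,\; \mathrm{circ}\cdot x,\; -\mathrm{circ}\cdot y\big), \qquad x = k - c_z,\; y = j - c_y,$

so the magnetization circulates around the X axis, and the core cell at (cy, cz) is overwritten with (pol, 0, 0).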
Example #12
File: zhangli.go  Project: shenyp09/mx3
func AddZhangLiTorque(torque, m *data.Slice, j [3]float64, Msat float64, j_MsMap *data.Slice, alpha, xi float64) {
	// TODO: assert...

	util.Argument(j_MsMap == nil) // not yet supported

	c := torque.Mesh().CellSize()
	N := torque.Mesh().Size()
	cfg := make2DConfSize(N[2], N[1], STENCIL_BLOCKSIZE)

	b := MuB / (Qe * Msat * (1 + xi*xi))
	ux := float32((j[0] * b) / (Gamma0 * 2 * c[0]))
	uy := float32((j[1] * b) / (Gamma0 * 2 * c[1]))
	uz := float32((j[2] * b) / (Gamma0 * 2 * c[2]))

	k_addzhanglitorque(torque.DevPtr(0), torque.DevPtr(1), torque.DevPtr(2),
		m.DevPtr(0), m.DevPtr(1), m.DevPtr(2),
		ux, uy, uz,
		j_MsMap.DevPtr(0), j_MsMap.DevPtr(1), j_MsMap.DevPtr(2),
		float32(alpha), float32(xi),
		N[0], N[1], N[2], cfg)
}
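Written out, the prefactors passed to the kernel are (a direct transcription of the three lines above; reading them as the Zhang-Li drift velocity expressed in cell units is our interpretation):

$u_k = \frac{j_k\,\mu_B}{e\,M_\mathrm{sat}\,(1+\xi^2)} \cdot \frac{1}{2\,\gamma_0\,\Delta_k}, \qquad k \in \{x,y,z\},$

with $\Delta_k$ the cell size c[k], and MuB, Qe, Gamma0 standing for $\mu_B$, $e$, $\gamma_0$.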
Example #13
// Calculates the magnetostatic kernel by brute-force integration
// of magnetic charges over the faces and averages over cell volumes.
// Mesh should NOT yet be zero-padded.
func BruteKernel(mesh *data.Mesh, accuracy float64) (kernel [3][3]*data.Slice) {

	{ // Kernel mesh is 2x larger than the input, except in the case of PBC (not supported yet)
		pbc := mesh.PBC()
		util.Argument(pbc == [3]int{0, 0, 0}) // PBC not supported yet
		sz := padSize(mesh.Size(), pbc)
		cs := mesh.CellSize()
		mesh = data.NewMesh(sz[0], sz[1], sz[2], cs[0], cs[1], cs[2], pbc[:]...)
	}

	// Shorthand
	size := mesh.Size()
	cellsize := mesh.CellSize()
	periodic := mesh.PBC()
	log.Println("calculating demag kernel:", "accuracy:", accuracy, ", size:", size[0], "x", size[1], "x", size[2])

	// Sanity check
	{
		util.Assert(size[0] > 0 && size[1] > 1 && size[2] > 1)
		util.Assert(cellsize[0] > 0 && cellsize[1] > 0 && cellsize[2] > 0)
		util.Assert(periodic[0] >= 0 && periodic[1] >= 0 && periodic[2] >= 0)
		util.Assert(accuracy > 0)
		// TODO: in case of PBC, this will not be met:
		util.Assert(size[1]%2 == 0 && size[2]%2 == 0)
		if size[0] > 1 {
			util.Assert(size[0]%2 == 0)
		}
	}

	// Allocate only the upper triangular part. The rest is symmetric due to reciprocity.
	var array [3][3][][][]float32
	for i := 0; i < 3; i++ {
		for j := i; j < 3; j++ {
			kernel[i][j] = data.NewSlice(1, mesh)
			array[i][j] = kernel[i][j].Scalars()
		}
	}

	// Field (destination) loop ranges
	x1, x2 := -(size[X]-1)/2, size[X]/2-1
	y1, y2 := -(size[Y]-1)/2, size[Y]/2-1
	z1, z2 := -(size[Z]-1)/2, size[Z]/2-1
	// support for 2D simulations (thickness 1)
	if size[X] == 1 && periodic[X] == 0 {
		x2 = 0
	}
	{ // Repeat for PBC:
		x1 *= (periodic[X] + 1)
		x2 *= (periodic[X] + 1)
		y1 *= (periodic[Y] + 1)
		y2 *= (periodic[Y] + 1)
		z1 *= (periodic[Z] + 1)
		z2 *= (periodic[Z] + 1)
	}

	// smallest cell dimension is our typical length scale
	L := cellsize[X]
	if cellsize[Y] < L {
		L = cellsize[Y]
	}
	if cellsize[Z] < L {
		L = cellsize[Z]
	}

	// Start brute integration
	// 9 nested loops, does that stress you out?
	// Fortunately, the 5 inner ones usually loop over just one element.
	// It might be nice to get rid of that branching though.
	var (
		R, R2  [3]float64 // field and source cell center positions
		pole   [3]float64 // position of point charge on the surface
		points int        // counts used integration points
	)
	for s := 0; s < 3; s++ { // source index Ksdxyz
		u, v, w := s, (s+1)%3, (s+2)%3 // u = direction of source (s), v & w are the orthogonal directions

		for x := x1; x <= x2; x++ { // in each dimension, go from -(size-1)/2 to size/2 -1, wrapped.
			xw := wrap(x, size[X])
			R[X] = float64(x) * cellsize[X]

			for y := y1; y <= y2; y++ {
				yw := wrap(y, size[Y])
				R[Y] = float64(y) * cellsize[Y]

				for z := z1; z <= z2; z++ {
					zw := wrap(z, size[Z])
					R[Z] = float64(z) * cellsize[Z]

					// choose number of integration points depending on how far we are from source.
					dx, dy, dz := delta(x)*cellsize[X], delta(y)*cellsize[Y], delta(z)*cellsize[Z]
					d := math.Sqrt(dx*dx + dy*dy + dz*dz)
					if d == 0 {
						d = L
					}
					maxSize := d / accuracy // maximum acceptable integration size
					nv := int(math.Max(cellsize[v]/maxSize, 1) + 0.5)
					nw := int(math.Max(cellsize[w]/maxSize, 1) + 0.5)
					nx := int(math.Max(cellsize[X]/maxSize, 1) + 0.5)
					ny := int(math.Max(cellsize[Y]/maxSize, 1) + 0.5)
					nz := int(math.Max(cellsize[Z]/maxSize, 1) + 0.5)
					// Stagger source and destination grids.
					// Massively improves accuracy. Could play with variations.
					// See note.
					nv *= 2
					nw *= 2

					util.Assert(nv > 0 && nw > 0 && nx > 0 && ny > 0 && nz > 0)

					scale := 1 / float64(nv*nw*nx*ny*nz)
					surface := cellsize[v] * cellsize[w] // the two directions perpendicular to direction s
					charge := surface * scale
					pu1 := cellsize[u] / 2. // positive pole center
					pu2 := -pu1             // negative pole center

					// Do surface integral over source cell, accumulate in B
					var B [3]float64
					for i := 0; i < nv; i++ {
						pv := -(cellsize[v] / 2.) + cellsize[v]/float64(2*nv) + float64(i)*(cellsize[v]/float64(nv))
						pole[v] = pv
						for j := 0; j < nw; j++ {
							pw := -(cellsize[w] / 2.) + cellsize[w]/float64(2*nw) + float64(j)*(cellsize[w]/float64(nw))
							pole[w] = pw

							// Do volume integral over destination cell
							for α := 0; α < nx; α++ {
								rx := R[X] - cellsize[X]/2 + cellsize[X]/float64(2*nx) + (cellsize[X]/float64(nx))*float64(α)

								for β := 0; β < ny; β++ {
									ry := R[Y] - cellsize[Y]/2 + cellsize[Y]/float64(2*ny) + (cellsize[Y]/float64(ny))*float64(β)

									for γ := 0; γ < nz; γ++ {
										rz := R[Z] - cellsize[Z]/2 + cellsize[Z]/float64(2*nz) + (cellsize[Z]/float64(nz))*float64(γ)
										points++

										pole[u] = pu1
										R2[X], R2[Y], R2[Z] = rx-pole[X], ry-pole[Y], rz-pole[Z]
										r := math.Sqrt(R2[X]*R2[X] + R2[Y]*R2[Y] + R2[Z]*R2[Z])
										qr := charge / (4 * math.Pi * r * r * r)
										bx := R2[X] * qr
										by := R2[Y] * qr
										bz := R2[Z] * qr

										pole[u] = pu2
										R2[X], R2[Y], R2[Z] = rx-pole[X], ry-pole[Y], rz-pole[Z]
										r = math.Sqrt(R2[X]*R2[X] + R2[Y]*R2[Y] + R2[Z]*R2[Z])
										qr = -charge / (4 * math.Pi * r * r * r)
										B[X] += (bx + R2[X]*qr) // addition ordered for accuracy
										B[Y] += (by + R2[Y]*qr)
										B[Z] += (bz + R2[Z]*qr)

									}
								}
							}
						}
					}
					for d := s; d < 3; d++ { // destination index Ksdxyz
						// TODO: for PBC, need to add here
						array[s][d][xw][yw][zw] = float32(B[d])
					}
				}
			}
		}
	}
	log.Println("kernel used", points, "integration points")
	// for 2D these elements are zero:
	if size[0] == 1 {
		kernel[0][1] = nil
		kernel[0][2] = nil
	}
	// make result symmetric for tools that expect it so.
	kernel[1][0] = kernel[0][1]
	kernel[2][0] = kernel[0][2]
	kernel[2][1] = kernel[1][2]
	return kernel
}
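As a worked equation, the innermost loop accumulates the field of a pair of opposite magnetic point charges $\pm q$ sitting on the two source-cell faces perpendicular to direction u, evaluated at the destination integration point $\mathbf r$:

$\Delta\mathbf B = \frac{q}{4\pi}\frac{\mathbf r - \mathbf r_+}{|\mathbf r - \mathbf r_+|^3} - \frac{q}{4\pi}\frac{\mathbf r - \mathbf r_-}{|\mathbf r - \mathbf r_-|^3}, \qquad q = \frac{\mathrm{cellsize}[v]\,\mathrm{cellsize}[w]}{n_v n_w n_x n_y n_z},$

which is exactly what the qr terms compute; the $n_x n_y n_z$ factor in the denominator implements the average over the destination-cell volume.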
Example #14
File: reduce.go  Project: shenyp09/mx3
// Maximum of absolute values of all elements.
func MaxAbs(in *data.Slice) float32 {
	util.Argument(in.NComp() == 1)
	out := reduceBuf(0)
	k_reducemaxabs(in.DevPtr(0), out, 0, in.Len(), reducecfg)
	return copyback(out)
}
Example #15
File: heun.go  Project: shenyp09/mx3
func NewHeun(y *data.Synced, torqueFn func(bool) *data.Synced, postStep func(*data.Slice), dt, multiplier float64, time *float64) *Heun {
	util.Argument(dt > 0 && multiplier > 0)
	m := y.Mesh()
	dy0 := NewSlice(3, m)
	return &Heun{newSolverCommon(dt, multiplier, time), y, dy0, torqueFn, postStep}
}