// exec3D performs the full 3D demag convolution: forward FFT of the three
// magnetization components, multiplication by the symmetric demag kernel in
// Fourier space, and backward FFT into the output field.
func (c *DemagConvolution) exec3D(outp, inp, vol *data.Slice, Bsat float64) {
	padded := c.kernSize

	// FW FFT
	for i := 0; i < 3; i++ {
		zero1(c.fftRBuf[i], c.stream)
		in := inp.Comp(i)
		copyPadMul(c.fftRBuf[i], in, padded, c.size, vol, Bsat, c.stream)
		c.fwPlan.ExecAsync(c.fftRBuf[i], c.fftCBuf[i])
	}

	// kern mul
	N0, N1, N2 := c.fftKernSize[0], c.fftKernSize[1], c.fftKernSize[2] // TODO: rm these
	kernMulRSymm3D(c.fftCBuf,
		c.gpuFFTKern[0][0], c.gpuFFTKern[1][1], c.gpuFFTKern[2][2],
		c.gpuFFTKern[1][2], c.gpuFFTKern[0][2], c.gpuFFTKern[0][1],
		N0, N1, N2, c.stream)

	// BW FFT
	for i := 0; i < 3; i++ {
		c.bwPlan.ExecAsync(c.fftCBuf[i], c.fftRBuf[i])
		out := outp.Comp(i)
		copyPad(out, c.fftRBuf[i], c.size, padded, c.stream)
	}
	c.stream.Synchronize()
}
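// The scheme above is the standard FFT convolution: with K̂ the Fourier
// transform of the demag tensor and m the magnetization,
//
//	B = IFFT( K̂ · FFT(Bsat·m) )
//
// Because K is symmetric, only the six independent components
// K00, K11, K22, K12, K02, K01 are stored and multiplied in Fourier space.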
// kernMulRSymm2Dx performs the Fourier-space kernel multiplication for the
// first magnetization component in the 2D case, where it couples only to
// itself through K00.
func kernMulRSymm2Dx(fftMx, K00 *data.Slice, N1, N2 int, str cu.Stream) {
	util.Argument(K00.Len() == (N1/2+1)*N2)
	util.Argument(fftMx.NComp() == 1 && K00.NComp() == 1)

	cfg := make2DConf(N1, N2)
	k_kernmulRSymm2Dx_async(fftMx.DevPtr(0), K00.DevPtr(0), N1, N2, cfg, str)
}
func writeVTKHeader(out io.Writer, q *data.Slice) (err error) {
	gridsize := q.Mesh().Size()

	_, err = fmt.Fprintln(out, "<?xml version=\"1.0\"?>")
	_, err = fmt.Fprintln(out, "<VTKFile type=\"StructuredGrid\" version=\"0.1\" byte_order=\"LittleEndian\">")
	_, err = fmt.Fprintf(out, "\t<StructuredGrid WholeExtent=\"0 %d 0 %d 0 %d\">\n", gridsize[Z]-1, gridsize[Y]-1, gridsize[X]-1)
	_, err = fmt.Fprintf(out, "\t\t<Piece Extent=\"0 %d 0 %d 0 %d\">\n", gridsize[Z]-1, gridsize[Y]-1, gridsize[X]-1)
	return
}
// Adds a constant to each element of the slice.
// dst[comp][index] += cnst[comp]
func AddConst(dst *data.Slice, cnst ...float32) {
	util.Argument(len(cnst) == dst.NComp())
	N := dst.Len()
	cfg := make1DConf(N)
	str := stream()
	for c := 0; c < dst.NComp(); c++ {
		if cnst[c] != 0 {
			k_madd2_async(dst.DevPtr(c), dst.DevPtr(c), 1, nil, cnst[c], N, cfg, str)
		}
	}
	syncAndRecycle(str)
}
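// Illustrative usage (a sketch, not part of the original source): apply a
// uniform 10 mT bias along the first (internal) component of an
// effective-field slice. exampleBias is a hypothetical helper.
func exampleBias(Beff *data.Slice) {
	AddConst(Beff, 0.01, 0, 0) // Beff must have exactly 3 components
}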
// scale multiplies all components of a 3-vector field by factor, in place.
func scale(f *data.Slice, factor float32) {
	a := f.Vectors()
	for i := range a[0] {
		for j := range a[0][i] {
			for k := range a[0][i][j] {
				a[0][i][j][k] *= factor
				a[1][i][j][k] *= factor
				a[2][i][j][k] *= factor
			}
		}
	}
}
// preprocess applies the command-line processing flags to the data, in order:
// normalization, peak normalization, component selection and resizing.
func preprocess(f *data.Slice) {
	if *flag_normalize {
		normalize(f, 1)
	}
	if *flag_normpeak {
		normpeak(f)
	}
	if *flag_comp != -1 {
		*f = *f.Comp(swapIndex(*flag_comp, f.NComp()))
	}
	if *flag_resize != "" {
		resize(f, *flag_resize)
	}
	//if *flag_scale != 1{
	//	rescale(f, *flag_scale)
	//}
}
func writeVTKPoints(out io.Writer, q *data.Slice, dataformat string) (err error) {
	_, err = fmt.Fprintln(out, "\t\t\t<Points>")
	fmt.Fprintf(out, "\t\t\t\t<DataArray type=\"Float32\" NumberOfComponents=\"3\" format=\"%s\">\n\t\t\t\t\t", dataformat)
	gridsize := q.Mesh().Size()
	cellsize := q.Mesh().CellSize()
	switch dataformat {
	case "ascii":
		for k := 0; k < gridsize[X]; k++ {
			for j := 0; j < gridsize[Y]; j++ {
				for i := 0; i < gridsize[Z]; i++ {
					x := (float32)(i) * (float32)(cellsize[Z])
					y := (float32)(j) * (float32)(cellsize[Y])
					z := (float32)(k) * (float32)(cellsize[X])
					_, err = fmt.Fprint(out, x, " ", y, " ", z, " ")
				}
			}
		}
	case "binary":
		buffer := new(bytes.Buffer)
		for k := 0; k < gridsize[X]; k++ {
			for j := 0; j < gridsize[Y]; j++ {
				for i := 0; i < gridsize[Z]; i++ {
					x := (float32)(i) * (float32)(cellsize[Z])
					y := (float32)(j) * (float32)(cellsize[Y])
					z := (float32)(k) * (float32)(cellsize[X])
					binary.Write(buffer, binary.LittleEndian, x)
					binary.Write(buffer, binary.LittleEndian, y)
					binary.Write(buffer, binary.LittleEndian, z)
				}
			}
		}
		// VTK inline binary data: base64-encoded byte count, then payload.
		b64len := uint32(len(buffer.Bytes()))
		bufLen := new(bytes.Buffer)
		binary.Write(bufLen, binary.LittleEndian, b64len)
		base64out := base64.NewEncoder(base64.StdEncoding, out)
		base64out.Write(bufLen.Bytes())
		base64out.Write(buffer.Bytes())
		base64out.Close()
	default:
		log.Fatalf("Illegal VTK data format: %v. Options are: ascii, binary", dataformat)
	}
	_, err = fmt.Fprintln(out, "\n\t\t\t\t</DataArray>")
	_, err = fmt.Fprintln(out, "\t\t\t</Points>")
	return
}
func dumpGnuplot(out io.Writer, f *data.Slice) (err error) {
	buf := bufio.NewWriter(out)
	defer buf.Flush()

	data := f.Tensors()
	gridsize := f.Mesh().Size()
	cellsize := f.Mesh().CellSize()
	// If no cell size is set, use generic cell index.
	if cellsize == [3]float64{0, 0, 0} {
		cellsize = [3]float64{1, 1, 1}
	}
	ncomp := f.NComp()

	// Here we loop over X,Y,Z, not Z,Y,X, because
	// internal in C-order == external in Fortran-order
	for i := 0; i < gridsize[0]; i++ {
		x := float64(i) * cellsize[0]
		for j := 0; j < gridsize[1]; j++ {
			y := float64(j) * cellsize[1]
			for k := 0; k < gridsize[2]; k++ {
				z := float64(k) * cellsize[2]
				_, err = fmt.Fprint(buf, z, " ", y, " ", x, "\t")
				for c := 0; c < ncomp; c++ {
					_, err = fmt.Fprint(buf, data[swapIndex(c, ncomp)][i][j][k], " ") // converts to user space.
				}
				_, err = fmt.Fprint(buf, "\n")
			}
			_, err = fmt.Fprint(buf, "\n")
		}
	}
	return
}
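// swapIndex (defined elsewhere in this package) translates component indices
// between internal and user order. A minimal sketch of the idea, assuming the
// 3-component case simply reverses the axes (the actual implementation may
// differ):
//
//	func swapIndexSketch(c, ncomp int) int {
//		if ncomp == 3 {
//			return 2 - c // internal ZYX <-> user XYZ
//		}
//		return c
//	}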
// normpeak rescales the field uniformly so that the longest vector has norm 1.
func normpeak(f *data.Slice) {
	a := f.Vectors()
	maxnorm := 0.
	for i := range a[0] {
		for j := range a[0][i] {
			for k := range a[0][i][j] {
				x, y, z := a[0][i][j][k], a[1][i][j][k], a[2][i][j][k]
				norm := math.Sqrt(float64(x*x + y*y + z*z))
				if norm > maxnorm {
					maxnorm = norm
				}
			}
		}
	}
	scale(f, float32(1/maxnorm))
}
// normalize vector data to given length
func normalize(f *data.Slice, length float64) {
	a := f.Vectors()
	for i := range a[0] {
		for j := range a[0][i] {
			for k := range a[0][i][j] {
				x, y, z := a[0][i][j][k], a[1][i][j][k], a[2][i][j][k]
				norm := math.Sqrt(float64(x*x + y*y + z*z))
				invnorm := float32(1)
				if norm != 0 {
					invnorm = float32(length / norm)
				}
				a[0][i][j][k] *= invnorm
				a[1][i][j][k] *= invnorm
				a[2][i][j][k] *= invnorm
			}
		}
	}
}
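// Note the difference with normpeak above: normalize forces every nonzero
// vector to the same length, while normpeak rescales the whole field by one
// global factor so the longest vector gets length 1, preserving relative
// magnitudes.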
func writeOvf2Binary4(out io.Writer, array *data.Slice) {
	data := array.Tensors()
	gridsize := array.Mesh().Size()

	var bytes []byte

	// OOMMF requires this number to be first to check the format
	var controlnumber float32 = OMF_CONTROL_NUMBER
	// Conversion from float32 to [4]byte in native (little-endian) order.
	// encoding/binary is too slow.
	// Inlined for performance, terabytes of data will pass here...
	bytes = (*[4]byte)(unsafe.Pointer(&controlnumber))[:]
	out.Write(bytes)

	// Here we loop over X,Y,Z, not Z,Y,X, because
	// internal in C-order == external in Fortran-order
	ncomp := array.NComp()
	for i := 0; i < gridsize[X]; i++ {
		for j := 0; j < gridsize[Y]; j++ {
			for k := 0; k < gridsize[Z]; k++ {
				for c := 0; c < ncomp; c++ {
					bytes = (*[4]byte)(unsafe.Pointer(&data[swapIndex(c, ncomp)][i][j][k]))[:]
					out.Write(bytes)
				}
			}
		}
	}
}
// Memset sets the Slice's components to the specified values.
func Memset(s *data.Slice, val ...float32) {
	util.Argument(len(val) == s.NComp())
	str := stream()
	for c, v := range val {
		cu.MemsetD32Async(cu.DevicePtr(s.DevPtr(c)), math.Float32bits(v), int64(s.Len()), str)
	}
	syncAndRecycle(str)
}
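// Illustrative usage (hypothetical helper, not in the original source):
// clear a 3-component GPU slice with a single asynchronous pass.
func zeroVectorSlice(s *data.Slice) {
	Memset(s, 0, 0, 0)
}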
// Writes the OMF header
func writeOmfHeader(out io.Writer, q *data.Slice) (err error) {
	gridsize := q.Mesh().Size()
	cellsize := q.Mesh().CellSize()

	err = hdr(out, "OOMMF", "rectangular mesh v1.0")
	hdr(out, "Segment count", "1")
	hdr(out, "Begin", "Segment")
	hdr(out, "Begin", "Header")

	dsc(out, "Time", 0) //q.Time) // TODO !!
	hdr(out, "Title", q.Tag())
	hdr(out, "meshtype", "rectangular")
	hdr(out, "meshunit", "m")
	hdr(out, "xbase", cellsize[Z]/2)
	hdr(out, "ybase", cellsize[Y]/2)
	hdr(out, "zbase", cellsize[X]/2)
	hdr(out, "xstepsize", cellsize[Z])
	hdr(out, "ystepsize", cellsize[Y])
	hdr(out, "zstepsize", cellsize[X])
	hdr(out, "xmin", 0)
	hdr(out, "ymin", 0)
	hdr(out, "zmin", 0)
	hdr(out, "xmax", cellsize[Z]*float64(gridsize[Z]))
	hdr(out, "ymax", cellsize[Y]*float64(gridsize[Y]))
	hdr(out, "zmax", cellsize[X]*float64(gridsize[X]))
	hdr(out, "xnodes", gridsize[Z])
	hdr(out, "ynodes", gridsize[Y])
	hdr(out, "znodes", gridsize[X])
	hdr(out, "ValueRangeMinMag", 1e-08) // not so "optional" as the OOMMF manual suggests...
	hdr(out, "ValueRangeMaxMag", 1)     // TODO
	hdr(out, "valueunit", "?")
	hdr(out, "valuemultiplier", 1)

	hdr(out, "End", "Header")
	return
}
// Writes data in OMF Text format
func writeOmfText(out io.Writer, tens *data.Slice) (err error) {
	data := tens.Tensors()
	gridsize := tens.Mesh().Size()

	// Here we loop over X,Y,Z, not Z,Y,X, because
	// internal in C-order == external in Fortran-order
	for i := 0; i < gridsize[X]; i++ {
		for j := 0; j < gridsize[Y]; j++ {
			for k := 0; k < gridsize[Z]; k++ {
				for c := 0; c < tens.NComp(); c++ {
					_, err = fmt.Fprint(out, data[swapIndex(c, tens.NComp())][i][j][k], " ") // converts to user space.
				}
				_, err = fmt.Fprint(out, "\n")
			}
		}
	}
	return
}
func (c *DemagConvolution) exec2D(outp, inp, vol *data.Slice, Bsat float64) {
	// Convolution is separated into
	// a 1D convolution for x and a 2D convolution for yz.
	// So only 2 FFT buffers are needed at the same time.

	// FFT x
	zero1(c.fftRBuf[0], c.stream)
	in := inp.Comp(0)
	padded := c.kernSize
	copyPadMul(c.fftRBuf[0], in, padded, c.size, vol, Bsat, c.stream)
	c.fwPlan.ExecAsync(c.fftRBuf[0], c.fftCBuf[0])

	// kern mul X
	N1, N2 := c.fftKernSize[1], c.fftKernSize[2] // TODO: rm these
	kernMulRSymm2Dx(c.fftCBuf[0], c.gpuFFTKern[0][0], N1, N2, c.stream)

	// bw FFT x
	c.bwPlan.ExecAsync(c.fftCBuf[0], c.fftRBuf[0])
	out := outp.Comp(0)
	copyPad(out, c.fftRBuf[0], c.size, padded, c.stream)

	// FW FFT yz
	for i := 1; i < 3; i++ {
		zero1(c.fftRBuf[i], c.stream)
		in := inp.Comp(i)
		copyPadMul(c.fftRBuf[i], in, padded, c.size, vol, Bsat, c.stream)
		c.fwPlan.ExecAsync(c.fftRBuf[i], c.fftCBuf[i])
	}

	// kern mul yz
	kernMulRSymm2Dyz(c.fftCBuf[1], c.fftCBuf[2],
		c.gpuFFTKern[1][1], c.gpuFFTKern[2][2], c.gpuFFTKern[1][2],
		N1, N2, c.stream)

	// BW FFT yz
	for i := 1; i < 3; i++ {
		c.bwPlan.ExecAsync(c.fftCBuf[i], c.fftRBuf[i])
		out := outp.Comp(i)
		copyPad(out, c.fftRBuf[i], c.size, padded, c.stream)
	}

	c.stream.Synchronize()
}
// Image renders a slice as an image: 3-component data as a color-coded
// vector map, 1-component data as a scalar map between fmin and fmax
// ("auto" uses the data extrema).
func Image(f *data.Slice, fmin, fmax string) *image.NRGBA {
	dim := f.NComp()
	switch dim {
	default:
		log.Fatalf("unsupported number of components: %v", dim)
	case 3:
		return drawVectors(f.Vectors())
	case 1:
		min, max := extrema(f.Host()[0])
		if fmin != "auto" {
			m, err := strconv.ParseFloat(fmin, 32)
			util.FatalErr(err)
			min = float32(m)
		}
		if fmax != "auto" {
			m, err := strconv.ParseFloat(fmax, 32)
			util.FatalErr(err)
			max = float32(m)
		}
		return drawFloats(f.Scalars(), min, max)
	}
	panic("unreachable")
}
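// Illustrative usage (a sketch, not part of the original source): render a
// slice to PNG with automatic color scaling. writePNG is a hypothetical
// helper and assumes "image/png" is imported.
func writePNG(w io.Writer, f *data.Slice) error {
	img := Image(f, "auto", "auto")
	return png.Encode(w, img)
}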
// Add exchange field to Beff with different exchange constant for X,Y,Z direction.
// m must be normalized to unit length.
func AddAnisoExchange(Beff *data.Slice, m *data.Slice, AexX, AexY, AexZ, Msat float64) {
	// TODO: size check
	mesh := Beff.Mesh()
	N := mesh.Size()
	c := mesh.CellSize()
	w0 := float32(2 * AexX / (Msat * c[0] * c[0]))
	w1 := float32(2 * AexY / (Msat * c[1] * c[1]))
	w2 := float32(2 * AexZ / (Msat * c[2] * c[2]))
	cfg := make2DConfSize(N[2], N[1], STENCIL_BLOCKSIZE)
	str := [3]cu.Stream{stream(), stream(), stream()}
	for comp := 0; comp < 3; comp++ { // one stream per magnetization component
		k_addexchange1comp_async(Beff.DevPtr(comp), m.DevPtr(comp), w0, w1, w2, N[0], N[1], N[2], cfg, str[comp])
	}
	syncAndRecycle(str[0])
	syncAndRecycle(str[1])
	syncAndRecycle(str[2])
}
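// Illustrative usage: isotropic exchange is the special case
// AexX == AexY == AexZ. For example (hypothetical, permalloy-like values
// in SI units):
//
//	AddAnisoExchange(Beff, m, 13e-12, 13e-12, 13e-12, 800e3)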
func writeOvf2Header(out io.Writer, q *data.Slice, time, tstep float64) {
	gridsize := q.Mesh().Size()
	cellsize := q.Mesh().CellSize()

	fmt.Fprintln(out, "# OOMMF OVF 2.0")
	fmt.Fprintln(out, "#")
	hdr(out, "Segment count", "1")
	fmt.Fprintln(out, "#")
	hdr(out, "Begin", "Segment")
	hdr(out, "Begin", "Header")
	fmt.Fprintln(out, "#")
	hdr(out, "Title", q.Tag()) // TODO
	hdr(out, "meshtype", "rectangular")
	hdr(out, "meshunit", "m")

	hdr(out, "xmin", 0)
	hdr(out, "ymin", 0)
	hdr(out, "zmin", 0)
	hdr(out, "xmax", cellsize[Z]*float64(gridsize[Z]))
	hdr(out, "ymax", cellsize[Y]*float64(gridsize[Y]))
	hdr(out, "zmax", cellsize[X]*float64(gridsize[X]))

	name := q.Tag()
	var labels []interface{}
	if q.NComp() == 1 {
		labels = []interface{}{name}
	} else {
		for i := 0; i < q.NComp(); i++ {
			labels = append(labels, name+"_"+string(rune('x'+i)))
		}
	}
	hdr(out, "valuedim", q.NComp())
	hdr(out, "valuelabels", labels...) // TODO

	unit := q.Unit()
	if unit == "" {
		unit = "1"
	}
	if q.NComp() == 1 {
		hdr(out, "valueunits", unit)
	} else {
		hdr(out, "valueunits", unit, unit, unit)
	}

	// We don't really have stages
	fmt.Fprintln(out, "# Desc: Stage simulation time: ", tstep, " s")
	fmt.Fprintln(out, "# Desc: Total simulation time: ", time, " s")

	hdr(out, "xbase", cellsize[Z]/2)
	hdr(out, "ybase", cellsize[Y]/2)
	hdr(out, "zbase", cellsize[X]/2)
	hdr(out, "xnodes", gridsize[Z])
	hdr(out, "ynodes", gridsize[Y])
	hdr(out, "znodes", gridsize[X])
	hdr(out, "xstepsize", cellsize[Z])
	hdr(out, "ystepsize", cellsize[Y])
	hdr(out, "zstepsize", cellsize[X])
	fmt.Fprintln(out, "#")
	hdr(out, "End", "Header")
	fmt.Fprintln(out, "#")
}
// multiply-add: dst[i] = src1[i] * factor1 + src2[i] * factor2
func Madd2(dst, src1, src2 *data.Slice, factor1, factor2 float32) {
	N := dst.Len()
	nComp := dst.NComp()
	util.Assert(src1.Len() == N && src2.Len() == N)
	util.Assert(src1.NComp() == nComp && src2.NComp() == nComp)
	cfg := make1DConf(N)
	str := stream()
	for c := 0; c < nComp; c++ {
		k_madd2_async(dst.DevPtr(c), src1.DevPtr(c), factor1, src2.DevPtr(c), factor2, N, cfg, str)
	}
	syncAndRecycle(str)
}
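// Illustrative usage (a sketch, not the solver's actual code): a forward
// Euler step m += dt·torque. dst may alias src1, as AddConst above already
// relies on. A real solver would renormalize m afterwards, e.g. with
// Normalize.
func eulerStep(m, torque *data.Slice, dt float32) {
	Madd2(m, m, torque, 1, dt)
}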
// Maximum of the norms of all vectors (x[i], y[i], z[i]).
// max_i sqrt( x[i]*x[i] + y[i]*y[i] + z[i]*z[i] )
func MaxVecNorm(v *data.Slice) float64 {
	out := reduceBuf(0)
	k_reducemaxvecnorm2(v.DevPtr(0), v.DevPtr(1), v.DevPtr(2), out, 0, v.Len(), reducecfg)
	return math.Sqrt(float64(copyback(out)))
}
// Maximum of the norms of the difference between all vectors (x1,y1,z1) and (x2,y2,z2)
// (dx, dy, dz) = (x1, y1, z1) - (x2, y2, z2)
// max_i sqrt( dx[i]*dx[i] + dy[i]*dy[i] + dz[i]*dz[i] )
func MaxVecDiff(x, y *data.Slice) float64 {
	util.Argument(x.Len() == y.Len())
	out := reduceBuf(0)
	k_reducemaxvecdiff2(x.DevPtr(0), x.DevPtr(1), x.DevPtr(2),
		y.DevPtr(0), y.DevPtr(1), y.DevPtr(2),
		out, 0, x.Len(), reducecfg)
	return math.Sqrt(float64(copyback(out)))
}
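// Illustrative usage: these reductions are natural ingredients for adaptive
// time-step control, e.g. (names hypothetical):
//
//	errEst := MaxVecDiff(mFullStep, mTwoHalfSteps) // solution error estimate
//	dm := MaxVecNorm(torque) * dt                  // largest change per step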
// Execute the FFT plan, asynchronous.
// src and dst are 3D arrays stored as 1D arrays.
func (p *fft3DC2RPlan) ExecAsync(src, dst *data.Slice) {
	oksrclen := p.InputLenFloats()
	if src.Len() != oksrclen {
		panic(fmt.Errorf("fft size mismatch: expecting src len %v, got %v", oksrclen, src.Len()))
	}
	okdstlen := p.OutputLenFloats()
	if dst.Len() != okdstlen {
		panic(fmt.Errorf("fft size mismatch: expecting dst len %v, got %v", okdstlen, dst.Len()))
	}
	p.handle.ExecC2R(cu.DevicePtr(src.DevPtr(0)), cu.DevicePtr(dst.DevPtr(0)))
}
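// For a complex-to-real plan of logical size N0×N1×N2, CUFFT stores only the
// half-spectrum, so (assuming the usual CUFFT layout) the two lengths checked
// above relate as:
//
//	InputLenFloats  = N0 * N1 * (N2/2 + 1) * 2 // complex half-spectrum
//	OutputLenFloats = N0 * N1 * N2             // real data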
// Maximum of absolute values of all elements.
func MaxAbs(in *data.Slice) float32 {
	util.Argument(in.NComp() == 1)
	out := reduceBuf(0)
	k_reducemaxabs(in.DevPtr(0), out, 0, in.Len(), reducecfg)
	return copyback(out)
}
// Normalize the vector field to unit length.
// 0-length vectors are unaffected.
func Normalize(vec *data.Slice) {
	N := vec.Len()
	cfg := make1DConf(N)
	k_normalize(vec.DevPtr(0), vec.DevPtr(1), vec.DevPtr(2), N, cfg)
}
// Landau-Lifshitz torque divided by gamma0:
// - 1/(1+α²) [ m x B + α (m/|m|) x (m x B) ]
// torque in Tesla/s
// m normalized
// B in Tesla
func LLGTorque(torque, m, B *data.Slice, alpha float32) {
	// TODO: assert...
	N := torque.Len()
	cfg := make1DConf(N)
	k_llgtorque(torque.DevPtr(0), torque.DevPtr(1), torque.DevPtr(2),
		m.DevPtr(0), m.DevPtr(1), m.DevPtr(2),
		B.DevPtr(0), B.DevPtr(1), B.DevPtr(2),
		alpha, N, cfg)
}
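// Reference sketch (illustrative, CPU-side, not part of the original source):
// the same torque evaluated for a single cell with |m| = 1.
func llTorqueCell(m, B [3]float64, alpha float64) [3]float64 {
	cross := func(a, b [3]float64) [3]float64 {
		return [3]float64{
			a[1]*b[2] - a[2]*b[1],
			a[2]*b[0] - a[0]*b[2],
			a[0]*b[1] - a[1]*b[0],
		}
	}
	mxB := cross(m, B)     // m x B
	mxmxB := cross(m, mxB) // m x (m x B)
	gilb := -1 / (1 + alpha*alpha)
	return [3]float64{
		gilb * (mxB[0] + alpha*mxmxB[0]),
		gilb * (mxB[1] + alpha*mxmxB[1]),
		gilb * (mxB[2] + alpha*mxmxB[2]),
	}
}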
// Add Zhang-Li spin-transfer torque to torque, for current density j (A/m²).
// j_MsMap (space-dependent j/Msat) is not yet supported and must be nil.
func AddZhangLiTorque(torque, m *data.Slice, j [3]float64, Msat float64, j_MsMap *data.Slice, alpha, xi float64) {
	// TODO: assert...
	util.Argument(j_MsMap == nil) // not yet supported
	c := torque.Mesh().CellSize()
	N := torque.Mesh().Size()
	cfg := make2DConfSize(N[2], N[1], STENCIL_BLOCKSIZE)

	b := MuB / (Qe * Msat * (1 + xi*xi))
	ux := float32((j[0] * b) / (Gamma0 * 2 * c[0]))
	uy := float32((j[1] * b) / (Gamma0 * 2 * c[1]))
	uz := float32((j[2] * b) / (Gamma0 * 2 * c[2]))

	k_addzhanglitorque(torque.DevPtr(0), torque.DevPtr(1), torque.DevPtr(2),
		m.DevPtr(0), m.DevPtr(1), m.DevPtr(2),
		ux, uy, uz,
		j_MsMap.DevPtr(0), j_MsMap.DevPtr(1), j_MsMap.DevPtr(2),
		float32(alpha), float32(xi),
		N[0], N[1], N[2], cfg)
}
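// The prefactors above implement, per cell dimension i,
//
//	u_i = j_i · b / (2·γ0·c_i),  with  b = μB / (e·Msat·(1+ξ²))
//
// where MuB is the Bohr magneton, Qe the elementary charge and Gamma0 the
// gyromagnetic ratio, exactly as the code computes ux, uy, uz.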
// Add uniaxial magnetocrystalline anisotropy field to Beff.
// m: normalized magnetization.
// (Kx, Ky, Kz): anisotropy axis scaled by the anisotropy constant, in J/m³.
func AddUniaxialAnisotropy(Beff, m *data.Slice, Kx, Ky, Kz, Msat float64) {
	// TODO: size check
	N := Beff.Len()
	cfg := make1DConf(N)
	k_adduniaxialanisotropy(Beff.DevPtr(0), Beff.DevPtr(1), Beff.DevPtr(2),
		m.DevPtr(0), m.DevPtr(1), m.DevPtr(2),
		float32(Kx/Msat), float32(Ky/Msat), float32(Kz/Msat),
		N, cfg)
}
func writeVTKCellData(out io.Writer, q *data.Slice, dataformat string) (err error) {
	N := q.NComp()
	data := q.Tensors()
	switch N {
	case 1:
		fmt.Fprintf(out, "\t\t\t<PointData Scalars=\"%s\">\n", q.Tag())
		fmt.Fprintf(out, "\t\t\t\t<DataArray type=\"Float32\" Name=\"%s\" NumberOfComponents=\"%d\" format=\"%s\">\n\t\t\t\t\t", q.Tag(), N, dataformat)
	case 3:
		fmt.Fprintf(out, "\t\t\t<PointData Vectors=\"%s\">\n", q.Tag())
		fmt.Fprintf(out, "\t\t\t\t<DataArray type=\"Float32\" Name=\"%s\" NumberOfComponents=\"%d\" format=\"%s\">\n\t\t\t\t\t", q.Tag(), N, dataformat)
	case 6, 9:
		fmt.Fprintf(out, "\t\t\t<PointData Tensors=\"%s\">\n", q.Tag())
		fmt.Fprintf(out, "\t\t\t\t<DataArray type=\"Float32\" Name=\"%s\" NumberOfComponents=\"%d\" format=\"%s\">\n\t\t\t\t\t", q.Tag(), 9, dataformat) // must be 9!
	default:
		log.Fatalf("vtk: cannot handle %v components", N)
	}
	gridsize := q.Mesh().Size()
	switch dataformat {
	case "ascii":
		for i := 0; i < gridsize[X]; i++ {
			for j := 0; j < gridsize[Y]; j++ {
				for k := 0; k < gridsize[Z]; k++ {
					// A symmetric tensor is handled separately: expand its
					// 6 stored components to the full 9 that VTK expects.
					if N == 6 {
						fmt.Fprint(out, data[swapIndex(0, 9)][i][j][k], " ")
						fmt.Fprint(out, data[swapIndex(1, 9)][i][j][k], " ")
						fmt.Fprint(out, data[swapIndex(2, 9)][i][j][k], " ")
						fmt.Fprint(out, data[swapIndex(1, 9)][i][j][k], " ")
						fmt.Fprint(out, data[swapIndex(3, 9)][i][j][k], " ")
						fmt.Fprint(out, data[swapIndex(4, 9)][i][j][k], " ")
						fmt.Fprint(out, data[swapIndex(2, 9)][i][j][k], " ")
						fmt.Fprint(out, data[swapIndex(4, 9)][i][j][k], " ")
						fmt.Fprint(out, data[swapIndex(5, 9)][i][j][k], " ")
					} else {
						for c := 0; c < N; c++ {
							fmt.Fprint(out, data[swapIndex(c, N)][i][j][k], " ")
						}
					}
				}
			}
		}
	case "binary":
		// Inlined for performance, terabytes of data will pass here...
		buffer := new(bytes.Buffer)
		for i := 0; i < gridsize[X]; i++ {
			for j := 0; j < gridsize[Y]; j++ {
				for k := 0; k < gridsize[Z]; k++ {
					// Symmetric tensor: expand 6 components to the full 9.
					if N == 6 {
						binary.Write(buffer, binary.LittleEndian, data[swapIndex(0, 9)][i][j][k])
						binary.Write(buffer, binary.LittleEndian, data[swapIndex(1, 9)][i][j][k])
						binary.Write(buffer, binary.LittleEndian, data[swapIndex(2, 9)][i][j][k])
						binary.Write(buffer, binary.LittleEndian, data[swapIndex(1, 9)][i][j][k])
						binary.Write(buffer, binary.LittleEndian, data[swapIndex(3, 9)][i][j][k])
						binary.Write(buffer, binary.LittleEndian, data[swapIndex(4, 9)][i][j][k])
						binary.Write(buffer, binary.LittleEndian, data[swapIndex(2, 9)][i][j][k])
						binary.Write(buffer, binary.LittleEndian, data[swapIndex(4, 9)][i][j][k])
						binary.Write(buffer, binary.LittleEndian, data[swapIndex(5, 9)][i][j][k])
					} else {
						for c := 0; c < N; c++ {
							binary.Write(buffer, binary.LittleEndian, data[swapIndex(c, N)][i][j][k])
						}
					}
				}
			}
		}
		b64len := uint32(len(buffer.Bytes()))
		bufLen := new(bytes.Buffer)
		binary.Write(bufLen, binary.LittleEndian, b64len)
		base64out := base64.NewEncoder(base64.StdEncoding, out)
		base64out.Write(bufLen.Bytes())
		base64out.Write(buffer.Bytes())
		base64out.Close()
	default:
		panic(fmt.Errorf("vtk: illegal data format %v. Options are: ascii, binary", dataformat))
	}
	fmt.Fprintln(out, "\n\t\t\t\t</DataArray>")
	fmt.Fprintln(out, "\t\t\t</PointData>")
	return
}
// Add effective field of Dzyaloshinskii-Moriya interaction to Beff (Tesla).
// According to Bogdanov and Rößler, PRL 87, 3, 2001. eq.8 (out-of-plane symmetry breaking).
// m: normalized
// D: J/m²
func AddDMI(Beff *data.Slice, m *data.Slice, D, Msat float64) {
	// TODO: size check
	mesh := Beff.Mesh()
	N := mesh.Size()
	c := mesh.CellSize()
	dx := float32(D / (Msat * c[0])) // actually (2*D) / (Msat * 2*c), 2*c disappears in kernel.
	dy := float32(D / (Msat * c[1]))
	dz := float32(D / (Msat * c[2]))
	cfg := make2DConf(N[2], N[1])
	k_adddmi(Beff.DevPtr(0), Beff.DevPtr(1), Beff.DevPtr(2),
		m.DevPtr(0), m.DevPtr(1), m.DevPtr(2),
		dx, dy, dz, N[0], N[1], N[2], cfg)
}
// Only the damping term of LLGTorque, with alpha 1. Useful for relaxation.
func DampingTorque(torque, m, B *data.Slice) {
	N := torque.Len()
	cfg := make1DConf(N)
	k_dampingtorque(torque.DevPtr(0), torque.DevPtr(1), torque.DevPtr(2),
		m.DevPtr(0), m.DevPtr(1), m.DevPtr(2),
		B.DevPtr(0), B.DevPtr(1), B.DevPtr(2),
		N, cfg)
}