Example #1
func main() {
	// PETSc initialization
	if err := petscgo.Initialize(); err != nil {
		petscgo.Fatal(err)
	}
	defer func() {
		if err := petscgo.Finalize(); err != nil {
			petscgo.Fatal(err)
		}
	}()
	rank, size := petscgo.RankSize()

	// Create particles
	var np1 int64 = 1
	if rank == 0 {
		np1 = 2
	}
	pp, err := structvec.NewStructVec(pstruct{}, np1, petscgo.DETERMINE)
	if err != nil {
		petscgo.Fatal(err)
	}
	defer pp.Destroy()

	lpp, _ := pp.GetArray().([]pstruct)
	for i := range lpp {
		for j := 0; j < 3; j++ {
			lpp[i].pos[j] = (float32(i) + 1) * (float32(j + 1 + rank*10))
		}
	}
	pp.RestoreArray()

	lpp, _ = pp.GetArray().([]pstruct)
	dump(lpp, rank)
	pp.RestoreArray()

	// Set up scatters: rank 0 broadcasts its first particle to every rank and
	// sends its second particle to the next rank; every other rank forwards its
	// single particle to the next rank.
	var localndx, mpirank []int64
	if rank == 0 {
		localndx = make([]int64, 1+size)
		mpirank = make([]int64, 1+size)
		for i := 0; i < size; i++ {
			localndx[i] = 0
			mpirank[i] = int64(i)
		}
		localndx[size] = 1
		mpirank[size] = int64((rank + 1) % size)
	} else {
		localndx = make([]int64, 1)
		mpirank = make([]int64, 1)
		localndx[0] = 0
		mpirank[0] = int64((rank + 1) % size)
	}

	petscgo.Printf("\n\n\n")
	pp.Scatter(localndx, mpirank)
	lpp, _ = pp.GetArray().([]pstruct)
	dump(lpp, rank)
	pp.RestoreArray()

}
Example #2
func main() {
	petscgo.Initialize()
	defer petscgo.Finalize()
	fftw3.Initialize()
	defer fftw3.Cleanup()

	rank, _ := petscgo.RankSize()

	dims := []int64{32, 16, 8, 12}

	// Test size routine
	lsize, n0, n1 := fftw3.LocalSizeTransposed(dims)
	petscgo.SyncPrintf("Rank %d : size = %d, n0=(%d,%d), n1=(%d,%d)\n", rank, lsize, n0[0], n0[1], n1[0], n1[1])
	petscgo.SyncFlush()

	// Ok, go create the grids
	dims = []int64{8, 8, 8}
	greal, gcmplx := fftw3.New(dims)
	petscgo.Printf("Considering the real array....\n")
	petscgo.SyncPrintf("Rank %d\n%s", rank, greal)
	petscgo.SyncFlush()

	petscgo.Printf("Considering the complex array....\n")
	petscgo.SyncPrintf("Rank %d\n%s", rank, gcmplx)
	petscgo.SyncFlush()

}
Example #3
// Partition returns the number of elements owned by the calling rank when nglobal
// elements are split as evenly as possible across all MPI ranks; the first
// nglobal % size ranks receive one extra element.
func Partition(nglobal int64) int64 {
	rank, size := petscgo.RankSize()
	retval := nglobal / int64(size)
	rem := nglobal % int64(size)

	if int64(rank) < rem {
		retval += 1
	}

	return retval
}
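A quick way to check the split is to sum each rank's share, which must add back up to nglobal. The sketch below is illustrative only; it reuses calls that already appear in the other examples (petscgo.Initialize, petscgo.Fatal, mpi.AllReduceInt64 over petscgo.WORLD), with 10 as an arbitrary global size.

func main() {
	if err := petscgo.Initialize(); err != nil {
		petscgo.Fatal(err)
	}
	defer func() {
		if err := petscgo.Finalize(); err != nil {
			petscgo.Fatal(err)
		}
	}()

	// With 4 ranks, Partition(10) returns 3 on ranks 0 and 1, and 2 on ranks 2 and 3.
	nlocal := Partition(10)

	// Summing the local shares across all ranks must recover the global size.
	var ntotal int64
	mpi.AllReduceInt64(petscgo.WORLD, &nlocal, &ntotal, 1, mpi.SUM)
	petscgo.Printf("recovered global size = %d (expected 10)\n", ntotal)
}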
Example #4
func main() {
	if err := petscgo.Initialize(); err != nil {
		petscgo.Fatal(err)
	}
	defer func() {
		if err := petscgo.Finalize(); err != nil {
			petscgo.Fatal(err)
		}
	}()
	rank, size := petscgo.RankSize()

	petscgo.Printf("Initialization successful\n")
	petscgo.SyncPrintf("Hello from rank %d of %d\n", rank, size)
	petscgo.SyncFlush()
}
Example #5
func main() {
	// PETSc initialization
	if err := petscgo.Initialize(); err != nil {
		petscgo.Fatal(err)
	}
	defer func() {
		if err := petscgo.Finalize(); err != nil {
			petscgo.Fatal(err)
		}
	}()
	rank, size := petscgo.RankSize()

	pp := PW3D.NewVec(petscgo.DECIDE, 10000)
	defer pp.Destroy()

	lpp := PW3D.GetArray(pp)
	lpp.FillRandom(1, 1)
	pp.RestoreArray()
	petscgo.Printf("Generating random particles....\n")

	slab := particles.Slab{L: 1, N: size, Idim: 0}
	PW3D.DomainDecompose(slab, pp)
	petscgo.Printf("Slab decomposition complete\n")

	lpp = PW3D.GetArray(pp)
	_, mpirank := slab.Domain(lpp)
	rank64 := int64(rank)
	petscgo.SyncPrintf("# Rank %d has %d particles....\n", rank, lpp.Length())
	for ipart, irank := range mpirank {
		if irank != rank64 {
			petscgo.SyncPrintf("ERROR: %d expected, %d placed, %+v\n", rank, irank, lpp[ipart])
		}
	}
	petscgo.SyncFlush()
	pp.RestoreArray()

}
Example #6
func main() {
	if err := petscgo.Initialize(); err != nil {
		petscgo.Fatal(err)
	}
	defer func() {
		if err := petscgo.Finalize(); err != nil {
			petscgo.Fatal(err)
		}
	}()
	rank, size := petscgo.RankSize()

	// Create a vector using the local size
	v, err := petscgo.NewVec(5, petscgo.DETERMINE)
	if err != nil {
		petscgo.Fatal(err)
	}
	n1, err := v.LocalSize()
	if err != nil {
		petscgo.Fatal(err)
	}
	lo, hi, err := v.OwnRange()
	if err != nil {
		petscgo.Fatal(err)
	}
	petscgo.SyncPrintf("%d rank has local size %d [%d, %d]\n", rank, n1, lo, hi)
	petscgo.SyncFlush()
	err = v.Destroy()
	if err != nil {
		petscgo.Fatal(err)
	}

	// Create a vector using the global size
	v, err = petscgo.NewVec(petscgo.DECIDE, 100)
	if err != nil {
		petscgo.Fatal(err)
	}
	n1, err = v.LocalSize()
	if err != nil {
		petscgo.Fatal(err)
	}
	lo, hi, err = v.OwnRange()
	if err != nil {
		petscgo.Fatal(err)
	}
	petscgo.SyncPrintf("%d rank has local size %d [%d, %d]\n", rank, n1, lo, hi)
	petscgo.SyncFlush()

	// Set and then access the array
	if err := v.Set(3.1415926); err != nil {
		petscgo.Fatal(err)
	}

	// Query the ownership ranges and, from rank 0, set a few values owned by other ranks
	if rank == 0 {
		rr, err := v.Ranges()
		if err != nil {
			petscgo.Fatal(err)
		}
		fmt.Println(rr)
		if size > 2 {
			ix := []int64{rr[1], rr[2], rr[3]}
			y := []float64{4.14, 5.14, 6.14}
			v.SetValues(ix, y, true)
		}
	}
	v.AssemblyBegin()
	v.AssemblyEnd()

	if err := v.GetArray(); err != nil {
		petscgo.Fatal(err)
	}
	petscgo.SyncPrintf("%d rank has local size %d \n", rank, len(v.Arr))
	petscgo.SyncFlush()
	fmt.Println(rank, v.Arr[0:2])
	if err := v.RestoreArray(); err != nil {
		petscgo.Fatal(err)
	}

	sum, _ := v.Sum()
	petscgo.Printf("Sum = %f\n", sum)
	max, _, _ := v.Max()
	petscgo.Printf("Max = %f\n", max)
	min, _, _ := v.Min()
	petscgo.Printf("Min = %f\n", min)
	v.Scale(0.3)
	sum, _ = v.Sum()
	petscgo.Printf("Sum = %f\n", sum)

	err = v.Destroy()
	if err != nil {
		petscgo.Fatal(err)
	}

}
Example #7
// Scatter reshuffles the particles across MPI ranks according to localndx and mpirank.
//
// localndx is the local index of a particle, while mpirank is the destination rank for that particle.
// Note that this is completely general, and a particle may be sent to many ranks (useful for ghosts).
// Also note that localndx and mpirank will be modified by this routine.
//
func (s *StructVec) Scatter(localndx, mpirank []int64) {

	// Get the rank and size
	rank, size := petscgo.RankSize()

	// Work out the final number of particles
	var npart_local, npart_final int64
	npart_local = int64(len(mpirank))
	mpi.AllReduceInt64(petscgo.WORLD, &npart_local, &npart_final, 1, mpi.SUM)
	//petscgo.Printf("%d total particles expected after scatter\n", npart_final)

	// Allocate arrays
	// narr[rank] is the number of particles headed for rank from the current rank (hereafter crank)
	// narr1[crank*size + rank] collects all the narr's, allowing every processor to know what index it needs to send objects to.
	// icount keeps track of indices that objects need to go to.
	// icount_check is used for assertion tests, to make sure nothing bad happened.
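	// For example, with size = 2: if rank 0 sends 1 particle to rank 0 and 2 to rank 1,
	// while rank 1 sends 3 to rank 0 and 1 to rank 1, then narr1 = [1 2 3 1] after the
	// all-gather, the loops below give narr = [4 3], and on rank 1 icount = [1 2].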
	narr := make([]int64, size)
	icount := make([]int64, size)
	narr1 := make([]int64, size*size)
	icount_check := make([]int64, size)

	// Loop over mpirank, incrementing narr
	for _, irank := range mpirank {
		narr[irank] += 1
	}
	mpi.AllGatherInt64(petscgo.WORLD, narr, narr1)
	//petscgo.Printf("%v\n", narr1)

	// Reset narr, icount
	for i := range narr {
		narr[i] = 0
		icount[i] = 0
	}

	for i := 0; i < size; i++ {
		// narr now holds the total number of local particles
		for j := 0; j < size; j++ {
			narr[i] += narr1[i+j*size]
		}
		// icount now holds the number of particles from ranks before my rank, on rank i
		for j := 0; j < rank; j++ {
			icount[i] += narr1[i+j*size]
		}
	}

	// Now switch icount to global indices
	// icount_check is the expected final global index
	rtot := int64(0)
	for i := 0; i < size; i++ {
		icount[i] += rtot
		rtot += narr[i]
		icount_check[i] = icount[i] + narr1[i+rank*size]
	}
	// Assertion check
	if rtot != npart_final {
		petscgo.Fatal(errors.New("ASSERTION FAILURE : rtot != npart_final"))
	}

	// Now we start updating localndx and mpirank
	lo, _, _ := s.OwnRange()
	irank := int64(0)
	for i := range mpirank {
		localndx[i] += lo
		irank = mpirank[i]
		mpirank[i] = icount[irank]
		icount[irank]++
	}

	// Assertion check
	for i := range icount {
		if icount[i] != icount_check[i] {
			petscgo.Fatal(errors.New("ASSERTION FAILURE : icount != icount_check"))
		}
	}

	// Create destination
	vecnew, err := petscgo.NewVecBlocked(narr[rank]*s.bs, petscgo.DETERMINE, s.bs)
	if err != nil {
		petscgo.Fatal(err)
	}
	// Create index sets
	isin, err := petscgo.NewBlockedIS(s.bs, npart_local, localndx)
	if err != nil {
		petscgo.Fatal(err)
	}
	isout, err := petscgo.NewBlockedIS(s.bs, npart_local, mpirank)
	if err != nil {
		petscgo.Fatal(err)
	}
	defer isin.Destroy()
	defer isout.Destroy()

	// Create scatter context
	ctx, err := petscgo.NewScatter(s.v, vecnew, isin, isout)
	if err != nil {
		petscgo.Fatal(err)
	}
	defer ctx.Destroy()
	ctx.Begin(s.v, vecnew, false, true)
	ctx.End(s.v, vecnew, false, true)

	// Clean up
	s.v.Destroy()
	s.v = vecnew
	s.Nlocal = narr[rank]
	s.Ntotal = npart_final

}
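Example #1 above drives this routine end to end. The calling pattern is to list, for each local particle, its local index in localndx and its destination rank in mpirank. The fragment below is a minimal, illustrative sketch that rotates every particle to the next rank; the one-particle-per-rank setup is arbitrary and simply reuses the constructor shown in Example #1.

func main() {
	if err := petscgo.Initialize(); err != nil {
		petscgo.Fatal(err)
	}
	defer func() {
		if err := petscgo.Finalize(); err != nil {
			petscgo.Fatal(err)
		}
	}()
	rank, size := petscgo.RankSize()

	// One particle per rank.
	pp, err := structvec.NewStructVec(pstruct{}, 1, petscgo.DETERMINE)
	if err != nil {
		petscgo.Fatal(err)
	}
	defer pp.Destroy()

	// Send every local particle to the next rank.
	localndx := make([]int64, pp.Nlocal)
	mpirank := make([]int64, pp.Nlocal)
	for i := range localndx {
		localndx[i] = int64(i)                // local index of the i-th particle
		mpirank[i] = int64((rank + 1) % size) // destination rank
	}
	pp.Scatter(localndx, mpirank)
}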
Example #8
func main() {
	// PETSc initialization
	if err := petscgo.Initialize(); err != nil {
		petscgo.Fatal(err)
	}
	defer func() {
		if err := petscgo.Finalize(); err != nil {
			petscgo.Fatal(err)
		}
	}()
	rank, _ := petscgo.RankSize()

	v, err := structvec.NewStructVec(pstruct{}, petscgo.DECIDE, 10)
	if err != nil {
		petscgo.Fatal(err)
	}
	defer v.Destroy()
	petscgo.Printf("Type of v : %s\n", v.Type())
	petscgo.Printf("Block size of v : %d\n", v.BlockSize())
	petscgo.SyncPrintf("Local size = %d\n", v.Nlocal)
	petscgo.SyncFlush()
	petscgo.Printf("Global size = %d\n", v.Ntotal)

	// local particle data
	lpp, ok := v.GetArray().([]pstruct)
	if !ok {
		petscgo.Fatal(errors.New("GetArray did not return []pstruct"))
	}
	for i := range lpp {
		lpp[i].FillRandom()
	}
	err = v.RestoreArray()
	if err != nil {
		petscgo.Fatal(err)
	}

	// Print array
	lpp, ok = v.GetArray().([]pstruct)
	if !ok {
		petscgo.Fatal(errors.New("GetArray did not return []pstruct"))
	}
	for i := range lpp {
		petscgo.SyncPrintf("%s\n", lpp[i])
	}
	petscgo.SyncFlush()
	err = v.RestoreArray()
	if err != nil {
		petscgo.Fatal(err)
	}

	petscgo.Printf("----------------\n")

	// Overwrite a couple of entries (global indices 3 and 7) with zero-valued structs from rank 0
	if rank == 0 {
		lpp = make([]pstruct, 2)
		ix := []int64{3, 7}
		err = v.SetValues(ix, lpp)
		if err != nil {
			petscgo.Fatal(err)
		}
	}
	v.AssemblyBegin()
	v.AssemblyEnd()

	// Print array
	lpp, ok = v.GetArray().([]pstruct)
	if !ok {
		petscgo.Fatal(errors.New("GetArray did not return []pstruct"))
	}
	for i := range lpp {
		petscgo.SyncPrintf("%s\n", lpp[i])
	}
	petscgo.SyncFlush()
	err = v.RestoreArray()
	if err != nil {
		petscgo.Fatal(err)
	}

}
Example #9
func main() {
	petscgo.Initialize()
	defer petscgo.Finalize()
	rank, size := petscgo.RankSize()

	// Set up basic elements for the vector
	var bs, nlocal, nlocal1 int64
	bs = 2                // block size
	nlocal1 = 5           // number of local blocks
	nlocal = nlocal1 * bs // local size
	r64 := int64(rank) * nlocal1
	s64 := int64(size) * nlocal1
	// Ghost blocks: the first block of the next rank and the last block of the previous rank
	gndx := []int64{(r64 + nlocal1) % s64, (s64 + r64 - 1) % s64}
	petscgo.SyncPrintf("Ghost indices : %v \n", gndx)
	petscgo.SyncFlush()

	// Create the vector
	v, _ := petscgo.NewGhostVecBlocked(nlocal, petscgo.DETERMINE, bs, gndx)
	defer v.Destroy()

	// Fill in the local versions of the array
	lo, _, _ := v.OwnRange()
	v.GetArray()
	for ii := range v.Arr {
		v.Arr[ii] = float64(int64(ii) + lo)
	}
	v.RestoreArray()

	petscgo.Printf("Filled in vector\n")

	// Update ghost values
	v.GhostUpdateBegin(false, true)
	v.GhostUpdateEnd(false, true)

	// Get the local values and print them
	lv, _ := v.GhostGetLocalForm()
	lv.GetArray()
	petscgo.SyncPrintf("Rank %d : ", rank)
	for _, val := range lv.Arr {
		petscgo.SyncPrintf("%3d ", int(val))
	}
	petscgo.SyncPrintf("\n")
	petscgo.SyncFlush()
	lv.RestoreArray()
	lv.Destroy()

	// Now reset the array to 0
	v.Set(0)
	v.GhostUpdateBegin(false, true)
	v.GhostUpdateEnd(false, true)
	// Fill the array, including the ghosts, with rank+1
	lv, _ = v.GhostGetLocalForm()
	lv.GetArray()
	for ii := range lv.Arr {
		lv.Arr[ii] = float64(rank + 1)
	}
	lv.RestoreArray()
	lv.Destroy()
	v.GhostUpdateBegin(true, false)
	v.GhostUpdateEnd(true, false)

	// Reprint, only with local pieces
	v.GetArray()
	petscgo.SyncPrintf("Rank %d : ", rank)
	for _, val := range v.Arr {
		petscgo.SyncPrintf("%3d ", int(val))
	}
	petscgo.SyncPrintf("\n")
	petscgo.SyncFlush()
	v.RestoreArray()

}