Esempio n. 1
0
func main() {
	// PETSc initialization
	if err := petsc.Initialize(); err != nil {
		petsc.Fatal(err)
	}
	defer func() {
		if err := petsc.Finalize(); err != nil {
			petsc.Fatal(err)
		}
	}()
	rank, size := petsc.RankSize()

	// Rank 0 starts with two particles, every other rank with one.
	nlocal := int64(1)
	if rank == 0 {
		nlocal = 2
	}
	pp, err := structvec.NewStructVec(pstruct{}, nlocal, petsc.DETERMINE)
	if err != nil {
		petsc.Fatal(err)
	}
	defer pp.Destroy()

	// Fill each particle position with a rank-dependent pattern.
	parts, _ := pp.GetArray().([]pstruct)
	for i := range parts {
		for j := 0; j < 3; j++ {
			parts[i].pos[j] = (float32(i) + 1) * (float32(j + 1 + rank*10))
		}
	}
	pp.RestoreArray()

	// Show the layout before scattering.
	parts, _ = pp.GetArray().([]pstruct)
	dump(parts, rank)
	pp.RestoreArray()

	// Build the scatter plan: rank 0 sends its first particle to every rank
	// and its second particle to the next rank; each other rank sends its
	// single particle to the next rank (wrapping around).
	var localndx, mpirank []int64
	if rank != 0 {
		localndx = []int64{0}
		mpirank = []int64{int64((rank + 1) % size)}
	} else {
		localndx = make([]int64, size+1)
		mpirank = make([]int64, size+1)
		for i := 0; i < size; i++ {
			localndx[i] = 0
			mpirank[i] = int64(i)
		}
		localndx[size] = 1
		mpirank[size] = int64((rank + 1) % size)
	}

	petsc.Printf("\n\n\n")
	pp.Scatter(localndx, mpirank)

	// Show the layout after scattering.
	parts, _ = pp.GetArray().([]pstruct)
	dump(parts, rank)
	pp.RestoreArray()

}
Esempio n. 2
0
func main() {
	// PETSc initialization; check errors like the other examples in this file.
	if err := petsc.Initialize(); err != nil {
		petsc.Fatal(err)
	}
	defer func() {
		if err := petsc.Finalize(); err != nil {
			petsc.Fatal(err)
		}
	}()
	fftw3.Initialize()
	defer fftw3.Cleanup()

	rank, _ := petsc.RankSize()

	// 4D layout, used only to exercise the size routine.
	dims := []int64{32, 16, 8, 12}

	// Test size routine: local allocation size plus the (start, count)
	// pairs for the normal and transposed distributions.
	lsize, n0, n1 := fftw3.LocalSizeTransposed(dims)
	petsc.SyncPrintf("Rank %d : size = %d, n0=(%d,%d), n1=(%d,%d)\n", rank, lsize, n0[0], n0[1], n1[0], n1[1])
	petsc.SyncFlush()

	// Ok, go create the grids (real and complex halves of the transform pair).
	dims = []int64{8, 8, 8}
	greal, gcmplx := fftw3.New(dims)
	petsc.Printf("Considering the real array....\n")
	petsc.SyncPrintf("Rank %d\n%s", rank, greal)
	petsc.SyncFlush()

	petsc.Printf("Considering the complex array....\n")
	petsc.SyncPrintf("Rank %d\n%s", rank, gcmplx)
	petsc.SyncFlush()

}
Esempio n. 3
0
// Partition returns the number of the nglobal items owned by the calling
// MPI rank: nglobal/size, plus one extra item on the first nglobal%size ranks.
func Partition(nglobal int64) int64 {
	rank, size := petsc.RankSize()
	base, rem := nglobal/int64(size), nglobal%int64(size)
	if int64(rank) >= rem {
		return base
	}
	return base + 1
}
Esempio n. 4
0
func main() {
	// Bring up PETSc and guarantee a checked shutdown on exit.
	if err := petsc.Initialize(); err != nil {
		petsc.Fatal(err)
	}
	defer func() {
		if err := petsc.Finalize(); err != nil {
			petsc.Fatal(err)
		}
	}()

	petsc.Printf("Initialization successful\n")

	// Per-rank greeting, flushed in rank order.
	rank, size := petsc.RankSize()
	petsc.SyncPrintf("Hello from rank %d of %d\n", rank, size)
	petsc.SyncFlush()
}
Esempio n. 5
0
func main() {
	// PETSc initialization
	if err := petsc.Initialize(); err != nil {
		petsc.Fatal(err)
	}
	defer func() {
		if err := petsc.Finalize(); err != nil {
			petsc.Fatal(err)
		}
	}()
	rank, size := petsc.RankSize()

	// 10000 particles, distribution decided by PETSc.
	pp := PW3D.NewVec(petsc.DECIDE, 10000)
	defer pp.Destroy()

	// Randomize particle positions.
	local := PW3D.GetArray(pp)
	local.FillRandom(1, 1)
	pp.RestoreArray()
	petsc.Printf("Generating random particles....\n")

	// Slab-decompose the box along dimension 0, one slab per rank.
	slab := particles.Slab{L: 1, N: size, Idim: 0}
	PW3D.DomainDecompose(slab, pp)
	petsc.Printf("Slab decomposition complete\n")

	// Verify that every particle now living on this rank is actually
	// assigned here by the slab decomposition.
	local = PW3D.GetArray(pp)
	_, owners := slab.Domain(local)
	myrank := int64(rank)
	petsc.SyncPrintf("# Rank %d has %d particles....\n", rank, local.Length())
	for ip, owner := range owners {
		if owner != myrank {
			petsc.SyncPrintf("ERROR: %d expected, %d placed, %+v\n", rank, owner, local[ip])
		}
	}
	petsc.SyncFlush()
	pp.RestoreArray()

}
Esempio n. 6
0
func main() {
	if err := petsc.Initialize(); err != nil {
		petsc.Fatal(err)
	}
	defer func() {
		if err := petsc.Finalize(); err != nil {
			petsc.Fatal(err)
		}
	}()
	rank, size := petsc.RankSize()

	// Create a vector using the local size (5 per rank, global determined).
	v, err := petsc.NewVec(5, petsc.DETERMINE)
	if err != nil {
		petsc.Fatal(err)
	}
	n1, err := v.LocalSize()
	if err != nil {
		petsc.Fatal(err)
	}
	lo, hi, err := v.OwnRange()
	if err != nil {
		petsc.Fatal(err)
	}
	petsc.SyncPrintf("%d rank has local size %d [%d, %d]\n", rank, n1, lo, hi)
	petsc.SyncFlush()
	err = v.Destroy()
	if err != nil {
		petsc.Fatal(err)
	}

	// Create a vector using the global size (100 total, local decided).
	v, err = petsc.NewVec(petsc.DECIDE, 100)
	if err != nil {
		petsc.Fatal(err)
	}
	n1, err = v.LocalSize()
	if err != nil {
		petsc.Fatal(err)
	}
	lo, hi, err = v.OwnRange()
	if err != nil {
		petsc.Fatal(err)
	}
	petsc.SyncPrintf("%d rank has local size %d [%d, %d]\n", rank, n1, lo, hi)
	petsc.SyncFlush()

	// Set every entry to the same constant, then access the array.
	if err := v.Set(3.1415926); err != nil {
		petsc.Fatal(err)
	}

	// Try running ownershipranges: rank 0 overwrites one entry on each of
	// the next three ranks (off-process SetValues needs assembly below).
	if rank == 0 {
		rr, err := v.Ranges()
		if err != nil {
			petsc.Fatal(err)
		}
		fmt.Println(rr)
		if size > 2 {
			ix := []int64{rr[1], rr[2], rr[3]}
			y := []float64{4.14, 5.14, 6.14}
			v.SetValues(ix, y, true)
		}
	}
	v.AssemblyBegin()
	v.AssemblyEnd()

	if err := v.GetArray(); err != nil {
		petsc.Fatal(err)
	}
	petsc.SyncPrintf("%d rank has local size %d \n", rank, len(v.Arr))
	petsc.SyncFlush()
	fmt.Println(rank, v.Arr[0:2])
	if err := v.RestoreArray(); err != nil {
		petsc.Fatal(err)
	}

	// Global reductions: sum, max, min, then rescale and re-sum.
	sum, _ := v.Sum()
	petsc.Printf("Sum = %f\n", sum)
	max, _, _ := v.Max()
	petsc.Printf("Max = %f\n", max)
	min, _, _ := v.Min()
	// Bug fix: this line printed the minimum with the label "Max".
	petsc.Printf("Min = %f\n", min)
	v.Scale(0.3)
	sum, _ = v.Sum()
	petsc.Printf("Sum = %f\n", sum)

	err = v.Destroy()
	if err != nil {
		petsc.Fatal(err)
	}

}
Esempio n. 7
0
func main() {
	// NOTE(review): Initialize/Finalize errors are ignored here, unlike the
	// other examples in this file — consider checking them.
	petsc.Initialize()
	defer petsc.Finalize()
	rank, size := petsc.RankSize()

	// Set up basic elements for the vector
	var bs, nlocal, nlocal1 int64
	bs = 2                // block size
	nlocal1 = 5           // number of local blocks
	nlocal = nlocal1 * bs // local size
	r64 := int64(rank) * nlocal1
	s64 := int64(size) * nlocal1
	// Ghost blocks: the first block of the next rank and the last block of
	// the previous rank, with indices wrapping modulo the global block count.
	gndx := []int64{(r64 + nlocal1) % s64, (s64 + r64 - 1) % s64}
	petsc.SyncPrintf("Ghost indices : %v \n", gndx)
	petsc.SyncFlush()

	// Create the vector
	v, _ := petsc.NewGhostVecBlocked(nlocal, petsc.DETERMINE, bs, gndx)
	defer v.Destroy()

	// Fill in the local versions of the array: each owned entry gets its
	// global index as its value.
	lo, _, _ := v.OwnRange()
	v.GetArray()
	for ii := range v.Arr {
		v.Arr[ii] = float64(int64(ii) + lo)
	}
	v.RestoreArray()

	petsc.Printf("Filled in vector\n")

	// Update ghost values
	// NOTE(review): the (false, true) flag pair appears to mean a forward
	// (owner -> ghost) update — confirm against the petsc wrapper docs.
	v.GhostUpdateBegin(false, true)
	v.GhostUpdateEnd(false, true)

	// Get the local values (owned entries followed by ghosts) and print them
	lv, _ := v.GhostGetLocalForm()
	lv.GetArray()
	petsc.SyncPrintf("Rank %d : ", rank)
	for _, val := range lv.Arr {
		petsc.SyncPrintf("%3d ", int(val))
	}
	petsc.SyncPrintf("\n")
	petsc.SyncFlush()
	lv.RestoreArray()
	lv.Destroy()

	// Now reset the array to 0
	v.Set(0)
	v.GhostUpdateBegin(false, true)
	v.GhostUpdateEnd(false, true)
	// Fill the array with (rank+1), including the ghost slots, so the
	// reverse update below accumulates contributions from neighbors.
	lv, _ = v.GhostGetLocalForm()
	lv.GetArray()
	for ii := range lv.Arr {
		lv.Arr[ii] = float64(rank + 1)
	}
	lv.RestoreArray()
	lv.Destroy()
	// NOTE(review): the (true, false) flag pair appears to be the reverse
	// (ghost -> owner, additive) update — confirm against the petsc wrapper.
	v.GhostUpdateBegin(true, false)
	v.GhostUpdateEnd(true, false)

	// Reprint, only with local pieces
	v.GetArray()
	petsc.SyncPrintf("Rank %d : ", rank)
	for _, val := range v.Arr {
		petsc.SyncPrintf("%3d ", int(val))
	}
	petsc.SyncPrintf("\n")
	petsc.SyncFlush()
	v.RestoreArray()

}
Esempio n. 8
0
func main() {
	// PETSc initialization
	if err := petsc.Initialize(); err != nil {
		petsc.Fatal(err)
	}
	defer func() {
		if err := petsc.Finalize(); err != nil {
			petsc.Fatal(err)
		}
	}()
	rank, _ := petsc.RankSize()

	// Vector of 10 pstruct elements, local distribution decided by PETSc.
	v, err := structvec.NewStructVec(pstruct{}, petsc.DECIDE, 10)
	if err != nil {
		petsc.Fatal(err)
	}
	defer v.Destroy()
	petsc.Printf("Type of v : %s\n", v.Type())
	petsc.Printf("Size of v : %d\n", v.BlockSize())
	petsc.SyncPrintf("Local size = %d\n", v.Nlocal)
	petsc.SyncFlush()
	petsc.Printf("Global size = %d\n", v.Ntotal)

	// local particle data
	lpp, ok := v.GetArray().([]pstruct)
	if !ok {
		// NOTE(review): err is nil here (it was checked above), so Fatal
		// receives no useful error when the type assertion fails. A
		// dedicated error value would be clearer; left unchanged because
		// this file's import block is not visible.
		petsc.Fatal(err)
	}
	for i := range lpp {
		lpp[i].FillRandom()
	}
	err = v.RestoreArray()
	if err != nil {
		petsc.Fatal(err)
	}

	// Print array
	lpp, ok = v.GetArray().([]pstruct)
	if !ok {
		// NOTE(review): same nil-err issue as above.
		petsc.Fatal(err)
	}
	for i := range lpp {
		petsc.SyncPrintf("%s\n", lpp[i])
	}
	petsc.SyncFlush()
	err = v.RestoreArray()
	if err != nil {
		petsc.Fatal(err)
	}

	petsc.Printf("----------------\n")

	// Fiddle with array: rank 0 overwrites global entries 3 and 7 with
	// zero-valued particles (off-process writes require assembly below).
	if rank == 0 {
		lpp = make([]pstruct, 2)
		ix := []int64{3, 7}
		err = v.SetValues(ix, lpp)
		if err != nil {
			petsc.Fatal(err)
		}
	}
	v.AssemblyBegin()
	v.AssemblyEnd()

	// Print array
	lpp, ok = v.GetArray().([]pstruct)
	if !ok {
		// NOTE(review): same nil-err issue as above.
		petsc.Fatal(err)
	}
	for i := range lpp {
		petsc.SyncPrintf("%s\n", lpp[i])
	}
	petsc.SyncFlush()
	err = v.RestoreArray()
	if err != nil {
		petsc.Fatal(err)
	}

}
Esempio n. 9
0
// Scatter takes the particles and reshuffles them across MPI ranks according to localndx and mpirank.
//
// localndx is the local index of the particle, while mpirank is the destination rank for the particle.
// Note that this is completely general, and a particle may be shunted to many ranks (useful for ghosts).
// Also, note that localndx and mpirank will be modified by this routine.
func (s *StructVec) Scatter(localndx, mpirank []int64) {

	// Get the rank and size
	rank, size := petsc.RankSize()

	// Work out the final number of particles across all ranks.
	var npart_local, npart_final int64
	npart_local = int64(len(mpirank))
	mpi.AllReduceInt64(petsc.WORLD, &npart_local, &npart_final, 1, mpi.SUM)
	//petsc.Printf("%d total particles expected after scatter\n", npart_final)

	// Allocate arrays
	// narr[rank] are the number of particles headed for rank from the current rank (hereafter crank)
	// narr1[crank*size + rank] collects all the narr's, allowing every processor to know what index it needs to send objects to.
	// icount keeps track of indices that objects need to go to.
	// icount_check is used for assertion tests, to make sure nothing bad happened.
	narr := make([]int64, size)
	icount := make([]int64, size)
	narr1 := make([]int64, size*size)
	icount_check := make([]int64, size)

	// Loop over mpirank, incrementing narr
	for _, irank := range mpirank {
		narr[irank] += 1
	}
	mpi.AllGatherInt64(petsc.WORLD, narr, narr1)
	//petsc.Printf("%v\n", narr1)

	// Reset narr, icount
	for i := range narr {
		narr[i] = 0
		icount[i] = 0
	}

	for i := 0; i < size; i++ {
		// narr now holds the total number of particles landing on rank i
		for j := 0; j < size; j++ {
			narr[i] += narr1[i+j*size]
		}
		// icount now holds the number of particles from ranks before my rank, on rank i
		for j := 0; j < rank; j++ {
			icount[i] += narr1[i+j*size]
		}
	}

	// Now switch icount to global indices
	// icount_check is the expected final global index
	rtot := int64(0)
	for i := 0; i < size; i++ {
		icount[i] += rtot
		rtot += narr[i]
		icount_check[i] = icount[i] + narr1[i+rank*size]
	}
	// Assertion check
	if rtot != npart_final {
		petsc.Fatal(errors.New("ASSERTION FAILURE : rtot != npart_final"))
	}

	// Now we start updating localndx and mpirank:
	// localndx becomes a global source index, mpirank a global destination index.
	lo, _, _ := s.OwnRange()
	irank := int64(0)
	for i := range mpirank {
		localndx[i] += lo
		irank = mpirank[i]
		mpirank[i] = icount[irank]
		icount[irank]++
	}

	// Assertion check
	for i := range icount {
		if icount[i] != icount_check[i] {
			petsc.Fatal(errors.New("ASSERTION FAILURE : icount != icount_check"))
		}
	}

	// Create destination vector sized for this rank's incoming particles.
	vecnew, err := petsc.NewVecBlocked(narr[rank]*s.bs, petsc.DETERMINE, s.bs)
	if err != nil {
		petsc.Fatal(err)
	}
	// Create index sets
	isin, err := petsc.NewBlockedIS(s.bs, npart_local, localndx)
	if err != nil {
		petsc.Fatal(err)
	}
	isout, err := petsc.NewBlockedIS(s.bs, npart_local, mpirank)
	if err != nil {
		petsc.Fatal(err)
	}
	defer isin.Destroy()
	defer isout.Destroy()

	// Create scatter context.
	// Bug fix: the error from NewScatter was previously ignored (and
	// ctx.Destroy was deferred before any check could run).
	ctx, err := petsc.NewScatter(s.v, vecnew, isin, isout)
	if err != nil {
		petsc.Fatal(err)
	}
	defer ctx.Destroy()
	ctx.Begin(s.v, vecnew, false, true)
	ctx.End(s.v, vecnew, false, true)

	// Clean up: swap the scattered vector in and update the bookkeeping.
	s.v.Destroy()
	s.v = vecnew
	s.Nlocal = narr[rank]
	s.Ntotal = npart_final

}