Commit 025ae06
"Commit 3"
1 parent ddd2fa1 commit 025ae06

17 files changed: +650, -386 lines changed

README.md

Lines changed: 2 additions & 2 deletions
@@ -25,10 +25,10 @@ Git: https://github.com/raboonik

## Step 2
Run "add2path.sh" to automatically update the Python environment by executing

-    ./add2path.sh
+    . ./add2path.sh

# Uninstallation
To uninstall and update the Python environment, simply run

-    ./uninstall.sh
+    . ./uninstall.sh
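
Once the updated .bashrc has been sourced, a quick sanity check (a minimal sketch, not part of the package; it only assumes the PYTHONPATH entry added by add2path.sh) is to confirm Python can locate the library:

    import importlib.util

    # Report whether AutoParallelizePy is importable after the PYTHONPATH update
    spec = importlib.util.find_spec("AutoParallelizePy")
    print("AutoParallelizePy importable:", spec is not None)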

add2path.sh

Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
#!/bin/bash

: '
Add the AutoParallelizePy library to .bashrc and source it.
'

# Refuse to run as root so that the invoking user's .bashrc is the one updated
if [ "$EUID" -eq 0 ]; then
    echo "Please do not run as root; try again without sudo!"
    exit 1
fi

# Create .bashrc if it does not exist yet
if [ ! -f "$HOME/.bashrc" ]; then
    touch "$HOME/.bashrc"
fi

# PYTHONPATH entry pointing at the installed library (path hardcoded in this script)
addLine="export PYTHONPATH=':/home/abbas/.local/lib/AutoParallelizePy/libs'"
if grep -Fxq "$addLine" "$HOME/.bashrc"; then
    echo ".bashrc file already updated"
else
    echo "Adding the AutoParallelizePy library to PYTHONPATH in .bashrc"
    echo "$addLine" >> "$HOME/.bashrc"
    source "$HOME/.bashrc"
fi
Lines changed: 29 additions & 1 deletion
@@ -1,2 +1,30 @@
+"""
+Summary: A simple MPI program to demonstrate
+the AutoParallelizePy utilities:
+    function gather_scalar
+
+Aims: Create scalar numbers on each proc and
+gather them on the main rank and print the
+results.
+"""
+
+import numpy as np
+from mpi4py import *
+import AutoParallelizePy as APP
+
+# Initialize the MPI environment
+#◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈
+# Init Parallel              #◈
+comm     = MPI.COMM_WORLD    #◈
+size     = comm.Get_size()   #◈
+rank     = comm.Get_rank()   #◈
+mainrank = 0                 #◈
+#◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈
+
+# Define a traceable scalar on each rank
+scalarNum = rank
+
+# Gather on the main rank and print
+out = APP.gather_scalar(comm, size, rank, mainrank, scalarNum, dtype='float')
 if rank == mainrank:
-    out = gather_scalar(comm, size, rank, mainrank, scalarNum, dtype='int')
+    print("Gathered data on the main rank = ", out)
Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
"""
Summary: A simple MPI program to demonstrate
the AutoParallelizePy utilities:
    function get_subarray_ND
    function gather_array_ND
    class domainDecomposeND

Aims: Create a 2D array to serve as our test
input data. Use get_subarray_ND to have each proc
take a chunk of the data according to some
domain decomposition scheme, and use gather_array_ND
to gather all the subarrays back into another
array on the main rank, which recovers the original
array.
"""

import time
start = time.time()

import numpy as np
from mpi4py import *
import AutoParallelizePy as APP

# Initialize the MPI environment
#◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈
# Init Parallel              #◈
comm     = MPI.COMM_WORLD    #◈
size     = comm.Get_size()   #◈
rank     = comm.Get_rank()   #◈
mainrank = 0                 #◈
#◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈

# Create the 2D input array
arrShape = [57,89]
Arr = np.arange(np.prod(arrShape)).reshape(arrShape) * np.pi

# Configure the domain decomposition scheme such that
# both axes of the 2D data are parallelized
parallel_axes = [0,1]
domDecompND = APP.domainDecomposeND(size,arrShape,parallel_axes)

# Have each proc take a chunk of the input data
myArr = APP.get_subarray_ND(rank,domDecompND,Arr)

# Gather the subarrays back on the mainrank and compare
# with the original data
gatheredArrOnMainRank = APP.gather_array_ND(comm, rank, mainrank, domDecompND, myArr, 'float')

if rank == mainrank:
    print("")
    if np.all(gatheredArrOnMainRank == Arr):
        print("The original data was successfully reconstructed!")
    else:
        print("Failed!")

    print("Running example02 took ",time.time() - start, " seconds!")

examples/example02_gather_vector_1D.py

Lines changed: 0 additions & 1 deletion
This file was deleted.
Lines changed: 72 additions & 0 deletions
@@ -0,0 +1,72 @@
"""
Summary: A simple MPI program to demonstrate
the AutoParallelizePy utilities:
    function scatter_array_ND
    class domainDecomposeND

Aims: Create a 2D array ON THE MAIN RANK to work
as our test input data. Use scatter_array_ND to
SCATTER chunks of this data according to some
domain decomposition scheme to all the other cores.
Finally, use gather_array_ND to gather all the sub-
arrays back into another array on the main rank,
which recovers the original array.

Tip1: Note that, as opposed to example02 where we
assumed the input data was available on all the
procs, here the input data only exists on the main
rank, which makes this approach more memory efficient.
However, scattering the data potentially takes more
time than slicing it in situ, which makes it slightly
less time efficient.

Tip2: Scattering only works for sending subarrays of
an array on a source proc to other procs. Use broadcasting
to send a scalar to other procs.
"""

import time
start = time.time()

import numpy as np
from mpi4py import *
import AutoParallelizePy as APP

# Initialize the MPI environment
#◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈
# Init Parallel              #◈
comm     = MPI.COMM_WORLD    #◈
size     = comm.Get_size()   #◈
rank     = comm.Get_rank()   #◈
mainrank = 0                 #◈
#◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈

# Create the same 2D input array as example02_gather_array_ND,
# this time only on the main rank, and set it to None on other procs
arrShape = [57,89]
if rank == mainrank:
    Arr = np.arange(np.prod(arrShape)).reshape(arrShape) * np.pi
else:
    Arr = None

# Configure the domain decomposition scheme such that
# both axes of the 2D data are parallelized
parallel_axes = [0,1]
domDecompND = APP.domainDecomposeND(size,arrShape,parallel_axes)

# Use scatter_array_ND to scatter chunks of the input data to
# the other procs
myArr = APP.scatter_array_ND(comm,rank,mainrank,domDecompND,Arr,dtype='float')

# Gather the subarrays back on the mainrank and compare
# with the original data
gatheredArrOnMainRank = APP.gather_array_ND(comm, rank, mainrank, domDecompND, myArr, 'float')

if rank == mainrank:
    print("")
    if np.all(gatheredArrOnMainRank == Arr):
        print("The original data was successfully reconstructed!")
    else:
        print("Failed!")

    print("Running example03 took ",time.time() - start, " seconds!")

examples/example04_bcast.py

Lines changed: 65 additions & 0 deletions
@@ -0,0 +1,65 @@
"""
Summary: A simple MPI program to demonstrate
the AutoParallelizePy utilities:
    function bcast

Aims: Create a scalar, a 1D array, and a 3D
array on the main rank and use bcast to copy
them onto other procs.

Tip: Broadcasting is the copying of the same
scalar or array onto all the other procs.
"""

import time
start = time.time()

import numpy as np
from mpi4py import *
import AutoParallelizePy as APP

# Initialize the MPI environment
#◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈
# Init Parallel              #◈
comm     = MPI.COMM_WORLD    #◈
size     = comm.Get_size()   #◈
rank     = comm.Get_rank()   #◈
mainrank = 0                 #◈
#◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈

# Create a scalar integer on the main rank and bcast to other procs
if rank == mainrank:
    intScalar = 5
else:
    intScalar = None

bcastIntScalar = APP.bcast(comm, rank, mainrank, intScalar, dtype='int')

# Create a 1D array of length 127 of random real numbers on the main
# rank and bcast to other procs
if rank == mainrank:
    arr1D = np.random.uniform(low=-20, high=20, size=([127]))
else:
    arr1D = None

bcastArr1D = APP.bcast(comm, rank, mainrank, arr1D, dtype='float')

# Create a 3D array of shape [23,34,67] of random real numbers on
# the mainrank and bcast to other procs
if rank == mainrank:
    arr3D = np.random.uniform(low=-20, high=20, size=([23,34,67]))
else:
    arr3D = None

bcastArr3D = APP.bcast(comm, rank, mainrank, arr3D, dtype='float')

if rank == mainrank:
    print("")
    if (intScalar == bcastIntScalar and
        np.all(arr1D == bcastArr1D) and
        np.all(arr3D == bcastArr3D)):
        print("Broadcasting successful!")
    else:
        print("Failed!")

    print("Running example04 took ",time.time() - start, " seconds!")
Lines changed: 66 additions & 0 deletions
@@ -0,0 +1,66 @@
"""
Summary: A simple MPI program to demonstrate
the AutoParallelizePy utilities:
    function reshape_array_ND
    function get_subarray_ND
    function gather_array_ND
    class domainDecomposeND

Aims: Create a 3D array as the input data
and use get_subarray_ND to have each proc
take a slice of it according to some domain
decomposition rule. Then use reshape_array_ND
to reshape the subarrays according to a new
domain decomposition rule.
"""

import time
start = time.time()

import numpy as np
from mpi4py import *
import AutoParallelizePy as APP

# Initialize the MPI environment
#◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈
# Init Parallel              #◈
comm     = MPI.COMM_WORLD    #◈
size     = comm.Get_size()   #◈
rank     = comm.Get_rank()   #◈
mainrank = 0                 #◈
#◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈◈

# Create the 3D input array
arrShape = [57,89,62]
Arr = np.arange(np.prod(arrShape)).reshape(arrShape) * np.pi

# Configure the domain decomposition scheme such that
# all axes of the 3D data are parallelized
parallel_axes = [0,1,2]
domDecompND = APP.domainDecomposeND(size,arrShape,parallel_axes)

# Have each proc take a chunk of the input data
myArr = APP.get_subarray_ND(rank,domDecompND,Arr)

# Reconfigure the domain decomposition scheme, this time
# parallelizing only the first and third axes
new_parallel_axes = [0,2]
new_DomDecompND = APP.domainDecomposeND(size,arrShape,new_parallel_axes)

# Reshape the subarrays according to the new domain decomposition rule
new_myArr = APP.reshape_array_ND(comm, rank, mainrank, domDecompND, new_DomDecompND, myArr, dtype='float')

# Gather both decompositions back on the main rank
gatheredArrOnMainRank = APP.gather_array_ND(comm, rank, mainrank, domDecompND, myArr, 'float')
new_gatheredArrOnMainRank = APP.gather_array_ND(comm, rank, mainrank, new_DomDecompND, new_myArr, 'float')

if rank == mainrank:
    print("")
    if (np.all(Arr == gatheredArrOnMainRank) and
        np.all(Arr == new_gatheredArrOnMainRank)):
        print("Reshaping successful!")
    else:
        print("Failed!")

    print("Running example05 took ",time.time() - start, " seconds!")

examples/example_bcast_vector_ND.py

Lines changed: 0 additions & 1 deletion
This file was deleted.

examples/example_create_randoms_acorss_cores.py

Lines changed: 0 additions & 1 deletion
This file was deleted.
