Mpich

In this exercise the user should obtain the prepared MPICH job, extract the archive, list the contents of the files, submit the job on the PARADOX cluster, monitor its progress using information from the queue and, when the job is done, list the results file.

1. Log in to ui.ipb.ac.rs:

$ ssh ngrkic@ui.ipb.ac.rs
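
(In this and the following steps, replace ngrkic with your own username and home folder.)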

2. Navigate to your folder on the NFS filesystem:

$ cd /nfs/ngrkic

3. Download the tgz archive with the example files:

$ wget http://wiki.ipb.ac.rs/images/c/cc/Mpich.tgz

4. Extract the archive:

$ tar xvzf Mpich.tgz

5. Enter the Mpich folder:

$ cd Mpich

6. List the contents of the folder:

$ ll
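
(ll is usually a shell alias for ls -l; if it is not defined on the UI machine, ls -l gives the same listing.)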

7. List the contents of the job.pbs and job.c files:

$ cat job.pbs
#!/bin/bash
#PBS -q hpsee
#PBS -l nodes=3:ppn=2
#PBS -l walltime=10:00:00
#PBS -e ${PBS_JOBID}.err
#PBS -o ${PBS_JOBID}.out

cd $PBS_O_WORKDIR
chmod +x job
cat $PBS_NODEFILE
${MPI_MPICH_MPIEXEC} ./job
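
Here nodes=3:ppn=2 requests two slots on each of three nodes, i.e. six MPI processes in total (this matches the six hostnames and the "Number of nodes: 6" line in the output of step 10), and the walltime limit is 10 hours. ${MPI_MPICH_MPIEXEC} is presumably a PARADOX-provided environment variable that expands to the path of MPICH's mpiexec launcher.
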
$ cat job.c
# include <stdio.h>
# include <math.h>
# include "mpi.h"

/* Evaluation of the integration function
   f(x)=4/(1+x*x) (0<=x<=1) */
double f(double x)
{
   double value;
   value=4.0/(1.0+x*x);
   return value;
}

int main (int argc, char *argv[])
{
   int counter;
   double walltime_start,walltime_end,walltime_diff;

   int master_node = 0;
   int number_of_nodes;
   int node_id;

   int number_of_intervals=300000000;
   double node_integral;
   double step;

   double x;

   double pi_estimate;
   double pi_diff;
   double pi_exact = 3.141592653589793238462643;

/* Establish the MPI environment */
   MPI_Init (&argc, &argv);
/* Get the number of processes */
   MPI_Comm_size (MPI_COMM_WORLD,&number_of_nodes);
/* Determine this process's rank */
   MPI_Comm_rank (MPI_COMM_WORLD,&node_id);

   if (node_id==master_node)
   {
      printf("Compiled on %s at %s\n",__DATE__, __TIME__);
      printf("Number of nodes: %d\n",number_of_nodes);
      printf ( "Number of intervals: %d\n",number_of_intervals);
      walltime_start=MPI_Wtime();
   }

   MPI_Bcast (&number_of_nodes,1,MPI_INT,master_node,MPI_COMM_WORLD );

   step=1.0/(double)number_of_intervals;
   node_integral=0.0;
   for (counter=node_id+1;counter<=number_of_intervals;counter=counter+number_of_nodes)
   {
      x=step*((double)counter-0.5);
      node_integral=node_integral+f(x);
   }

   printf("\nNode: %d\n",node_id);
   printf("Node integral: %f\n",node_integral*step);

   MPI_Reduce (&node_integral,&pi_estimate,1,MPI_DOUBLE,MPI_SUM,master_node,MPI_COMM_WORLD);

/* The master node prints the answer */
   if (node_id==master_node)
   {
      pi_estimate=pi_estimate*step;
      pi_diff=fabs(pi_estimate-pi_exact);
      printf("PI exact   : %24.16f\n", pi_exact);
      printf("PI estimate: %24.16f\n", pi_estimate);
      printf("PI diff    : %24.16f\n", pi_diff);

      walltime_end=MPI_Wtime();
      walltime_diff=walltime_end-walltime_start;
      printf ("Wall clock: %f\n", walltime_diff );
   }

/* Shut down MPI */
   MPI_Finalize();
}
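
The program estimates π by applying the midpoint rule to the integral of f(x)=4/(1+x*x) over [0,1], which is exactly π; process r sums every number_of_nodes-th interval starting from interval r+1, and MPI_Reduce adds the partial sums on the master node. For reference, a minimal serial sketch of the same quadrature (illustrative only, not part of the archive) looks like this:

# include <stdio.h>
# include <math.h>

/* Serial midpoint-rule estimate of pi: the integral of 4/(1+x*x) on [0,1].
   The MPI version above splits this loop across processes by starting
   at node_id+1 and striding by number_of_nodes. */
int main (void)
{
   int counter;
   int number_of_intervals=300000000;
   double step=1.0/(double)number_of_intervals;
   double sum=0.0;
   double x;

   for (counter=1;counter<=number_of_intervals;counter++)
   {
      x=step*((double)counter-0.5);   /* midpoint of interval number counter */
      sum=sum+4.0/(1.0+x*x);
   }

   printf("PI estimate: %24.16f\n",sum*step);
   printf("PI diff    : %24.16f\n",fabs(sum*step-3.141592653589793238462643));
   return 0;
}

Note that job.pbs only marks an existing executable named job (chmod +x job), so the archive is expected to already contain a compiled binary; if you modify job.c, you would need to recompile it with MPICH's mpicc compiler wrapper before resubmitting.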

8. Submit the job:

$ qsub job.pbs

qsub will print the job ID as output:

<jobID>.ce64.ipb.ac.rs

9. Monitor your job:

$ qstat <jobID>.ce64.ipb.ac.rs
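
(qstat with no arguments lists all jobs currently in the queue; the state column typically shows Q while the job is waiting, R while it is running and C once it has completed.)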

10. When the job is done, list the content of the <jobID>.ce64.ipb.ac.rs.out file:

$ cat <jobID>.ce64.ipb.ac.rs.out
n06.ipb.ac.rs
n06.ipb.ac.rs
n08.ipb.ac.rs
n08.ipb.ac.rs
n13.ipb.ac.rs
n13.ipb.ac.rs

Node: 2
Node integral: 0.523599

Node: 1
Node integral: 0.523599

Node: 3
Node integral: 0.523599

Node: 4
Node integral: 0.523599

Node: 5
Node integral: 0.523599
Compiled on Oct 12 2011 at 11:34:11
Number of nodes: 6
Number of intervals: 300000000

Node: 0
Node integral: 0.523599
PI exact   :       3.1415926535897931
PI estimate:       3.1415926535899650
PI diff    :       0.0000000000001719
Wall clock: 1.894531
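
The first six lines of the output are the contents of $PBS_NODEFILE printed by the job script (three nodes with two slots each). The per-process "Node:"/"Node integral:" lines appear in an arbitrary order because the six processes run concurrently, and each partial integral is roughly π/6 ≈ 0.5236, since the 300000000 intervals are divided evenly among the six processes. The master process (Node: 0) prints the header lines and the final reduced result.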