Mpich

In this exercise the user should obtain the prepared MPICH job, extract the archive, list the content of the files, submit the job on the PARADOX cluster, monitor its progress using information from the queue and, when the job is done, list the results file.

1. Log in to ui.ipb.ac.rs:

$ ssh ngrkic@ui.ipb.ac.rs

2. Navigate to your folder on the NFS filesystem.

$ cd /nfs/ngrkic

3. Download the tgz archive with the example files.

wget http://wiki.ipb.ac.rs/images/c/cc/Mpich.tgz

4. Extract the archive:

$ tar xvzf Mpich.tgz

5. Enter the Mpich folder:

$ cd Mpich

6. List the content of the folder:

$ ll

7. List the content of the job.pbs and job.c files:

$ cat job.pbs
#!/bin/bash
#PBS -q hpsee
#PBS -l nodes=3:ppn=2
#PBS -l walltime=10:00:00
#PBS -e ${PBS_JOBID}.err
#PBS -o ${PBS_JOBID}.out

cd $PBS_O_WORKDIR
chmod +x job
cat $PBS_NODEFILE
${MPI_MPICH_MPIEXEC} ./job
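
The script requests 3 nodes with 2 processors each (6 MPI processes in total), prints the allocated node list from $PBS_NODEFILE, and starts the binary job through ${MPI_MPICH_MPIEXEC}, an environment variable the cluster provides for the MPICH mpiexec launcher. The script assumes that the compiled binary job already ships in the archive; the two lines below are only a sketch of how job.c could be rebuilt with the MPICH compiler wrapper on the UI and of what the launcher line roughly corresponds to inside the job (the exact wrapper name and launcher options on PARADOX may differ):

$ mpicc -o job job.c -lm
mpiexec -machinefile $PBS_NODEFILE -np 6 ./job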


$ cat job.c
# include <stdio.h>
# include <math.h>
# include "mpi.h"

/* Evaluation of the integrand
   f(x)=4/(1+x*x) (0<=x<=1), whose integral equals pi */
double f(double x)
{
   double value;
   value=4.0/(1.0+x*x);
   return value;
}

int main (int argc, char *argv[])
{
   int counter;
   double walltime_start,walltime_end,walltime_diff;

   int master_node = 0;
   int number_of_nodes;
   int node_id;

   int number_of_intervals=300000000;
   double node_integral;
   double step;

   double x;

   double pi_estimate;
   double pi_diff;
   double pi_exact = 3.141592653589793238462643;

/* Establish the MPI environment */
   MPI_Init (&argc, &argv);
/* Get the number of processes */
   MPI_Comm_size (MPI_COMM_WORLD,&number_of_nodes);
/* Determine this process's rank */
   MPI_Comm_rank (MPI_COMM_WORLD,&node_id);

   if (node_id==master_node)
   {
      printf("Compiled on %s at %s\n",__DATE__, __TIME__);
      printf("Number of nodes: %d\n",number_of_nodes);
      printf ( "Number of intervals: %d\n",number_of_intervals);
      walltime_start=MPI_Wtime();
   }

   MPI_Bcast (&number_of_nodes,1,MPI_INT,master_node,MPI_COMM_WORLD );

   step=1.0/(double)number_of_intervals;
   node_integral=0.0;
   for (counter=node_id+1;counter<=number_of_intervals;counter=counter+number_of_nodes)
   {
      x=step*((double)counter-0.5);
      node_integral=node_integral+f(x);
   }

   printf("\nNode: %d\n",node_id);
   printf("Node integral: %f\n",node_integral*step);

   MPI_Reduce (&node_integral,&pi_estimate,1,MPI_DOUBLE,MPI_SUM,master_node,MPI_COMM_WORLD);

/* The master node prints the answer */
   if (node_id==master_node)
   {
      pi_estimate=pi_estimate*step;
      pi_diff=fabs(pi_estimate-pi_exact);
      printf("PI exact   : %24.16f\n", pi_exact);
      printf("PI estimate: %24.16f\n", pi_estimate);
      printf("PI diff    : %24.16f\n", pi_diff);

      walltime_end=MPI_Wtime();
      walltime_diff=walltime_end-walltime_start;
      printf ("Wall clock: %f\n", walltime_diff );
   }

/* Shut down MPI */
   MPI_Finalize();
}
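
For reference (this summary is inferred from the code above rather than stated in the original exercise), the program estimates pi with the midpoint rule applied to the integral

\[
\pi = \int_0^1 \frac{4}{1+x^2}\,dx \approx \frac{1}{N}\sum_{i=1}^{N} \frac{4}{1+x_i^2},
\qquad x_i = \frac{i-1/2}{N},
\]

where N is number_of_intervals. Each MPI rank sums only the terms i = node_id+1, node_id+1+number_of_nodes, ..., MPI_Reduce adds the partial sums on the master node, and the final multiplication by step = 1/N gives pi_estimate.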

8. Submit the job:

qsub job.pbs

qsub will print the job ID:

<jobID>.ce64.ipb.ac.rs

9. Monitor your job:

qstat <jobID>.ce64.ipb.ac.rs
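
While the job is in the queue, all of your jobs can also be listed at once with the standard PBS/Torque user filter (the user name from step 1 is used here only as an example):

qstat -u ngrkic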

10. When the job is done, list the content of the <jobID>.ce64.ipb.ac.rs.out file:

cat <jobID>.ce64.ipb.ac.rs.out

The stdout file first shows the list of allocated nodes (the content of $PBS_NODEFILE), followed by the program output: the compilation date and time, the number of nodes and intervals reported by the master node, each rank's node number and partial integral, and finally the exact and estimated values of PI, their difference, and the wall-clock time.