ID | Natural Language | CPP | Fortran
---|---|---|---|
0 | openFile:Attempts to open the file passed as an argument.
| int openFile(char *fileName){\n\tprintf(\"Attempting to open %s ....\",fileName);\n\tinputFile = fopen(fileName, \"r\");\n\t/* Check that file opened successfully */\n\tif (inputFile == NULL){\n\t\tprintf(\"ERROR.\\n\");\n\t}\n\telse{\n\t\tprintf(\"Success.\\n\");\n\t}\n\treturn 0;\n}
| SUBROUTINE openFile()\n\tinteger :: argStatus\n\tcharacter (len = MAXSTRING) :: fileName\n\n\t!Read input file name from command line\n\t!CALL getarg(1,fileName)\n\tCALL GET_COMMAND_ARGUMENT(1,fileName,STATUS=argStatus)\n\tIF(argStatus > 0) THEN\n\t print *, 'ERROR Reading input file from command line.'\n\t print *, 'Usage: mpiexec -np mixedModeBenchmark <fileName>'\n\tEND IF\n\n\twrite(*,fmt='(2x,A,A,A)',advance='no') 'Attempting to open '''&\n\t ,trim(fileName),'''''....'\n\n\t!Open fileName; assign it to unit 10\n\tOPEN(UNIT=10, FILE=fileName, STATUS='OLD', &\n\t ACTION='READ', FORM='FORMATTED', IOSTAT=ioStatus)\n\n\t!Check that file opened successfully\n\tIF(ioStatus > 0) THEN\n\t write(*,*) 'ERROR. IOSTAT =', ioStatus\n\tELSE\n\t write(*,*) 'Success.'\n\tEND IF\n\nEND SUBROUTINE openFile
|
1 | closeFile:Closes the input file.
| int closeFile(){\n\t/* close the input file */\n\tfclose(inputFile);\n\treturn 0;\n}
| SUBROUTINE closeFile()\n\n\t!Close file\n\tCLOSE(10)\n\nEND SUBROUTINE closeFile
|
2 | setupBenchmarkList:Subroutine to setup the benchmarkList array with the list of all possible benchmarks.
| int setupBenchmarkList(){\n\t/* Pingpong benchmarks */\n\tstrcpy (benchmarkList[0], "masteronlypingpong");\n\tstrcpy (benchmarkList[1], "funnelledpingpong");\n\tstrcpy (benchmarkList[2], "multiplepingpong");\n\t/* Pingping benchmarks */\n\tstrcpy (benchmarkList[3], "masteronlypingping");\n\tstrcpy (benchmarkList[4], "funnelledpingping");\n\tstrcpy (benchmarkList[5], "multiplepingping");\n\t/* Haloexchange benchmarks */\n\tstrcpy (benchmarkList[6], "masteronlyhaloexchange");\n\tstrcpy (benchmarkList[7], "funnelledhaloexchange");\n\tstrcpy (benchmarkList[8], "multiplehaloexchange");\n\t/* Multi-pingpong benchmarks */\n\tstrcpy (benchmarkList[9], "masteronlymultipingpong");\n\tstrcpy (benchmarkList[10], "funnelledmultipingpong");\n\tstrcpy (benchmarkList[11], "multiplemultipingpong");\n\t/* Multi-pingpong benchmarks */\n\tstrcpy (benchmarkList[12], "masteronlymultipingping");\n\tstrcpy (benchmarkList[13], "funnelledmultipingping");\n\tstrcpy (benchmarkList[14], "multiplemultipingping");\n\t/* Collective benchmarks */\n\tstrcpy (benchmarkList[15], "barrier");\n\tstrcpy (benchmarkList[16], "reduce");\n\tstrcpy (benchmarkList[17], "allreduce");\n\tstrcpy (benchmarkList[18], "broadcast");\n\tstrcpy (benchmarkList[19], "scatter");\n\tstrcpy (benchmarkList[20], "gather");\n\tstrcpy (benchmarkList[21], "alltoall");\n\treturn 0;\n }
| SUBROUTINE setupBenchmarkList()\n\n\t!Pingpong benchmarks\n\tbenchmarkList(1) = 'masteronlypingpong'\n\tbenchmarkList(2) = 'funnelledpingpong'\n\tbenchmarkList(3) = 'multiplepingpong'\n\t!Pingping benchmarks\n\tbenchmarkList(4) = 'masteronlypingping'\n\tbenchmarkList(5) = 'funnelledpingping'\n\tbenchmarkList(6) = 'multiplepingping'\n\t!Haloexchange benchmarks\n\tbenchmarkList(7) = 'masteronlyhaloexchange'\n\tbenchmarkList(8) = 'funnelledhaloexchange'\n\tbenchmarkList(9) = 'multiplehaloexchange'\n\t!Multi-Pingpong benchmarks\n\tbenchmarkList(10) = 'masteronlymultipingpong'\n\tbenchmarkList(11) = 'funnelledmultipingpong'\n\tbenchmarkList(12) = 'multiplemultipingpong'\n\t!Multi-Pingping benchmarks\n\tbenchmarkList(13) = 'masteronlymultipingping'\n\tbenchmarkList(14) = 'funnelledmultipingping'\n\tbenchmarkList(15) = 'multiplemultipingping'\n\t!Collective benchmarks\n\tbenchmarkList(16) = 'barrier'\n\tbenchmarkList(17) = 'reduce'\n\tbenchmarkList(18) = 'allreduce'\n\tbenchmarkList(19) = 'broadcast'\n\tbenchmarkList(20) = 'scatter'\n\tbenchmarkList(21) = 'gather'\n\tbenchmarkList(22) = 'alltoall'\n\nEND SUBROUTINE setupBenchmarkList
|
3 | readBenchmarkParams:Initialises the benchmark parameters. Reads the minimum and maximum data sizes for the benchmarks from the input file (unit 10).
| int readBenchmarkParams(){\n\t/* Rank 0 reads parameters from input file */\n\tif (myMPIRank == 0){\n\t\tprintf ("Reading parameters from input file....\\n");\n\t\t/* read minimum data size from input file */\n\t\tfscanf(inputFile, "%d", &minDataSize);\n\t\t/* read maximum data size from input file */\n\t\tfscanf(inputFile, "%d", &maxDataSize);\n\t\t/* read target time from input file */\n\t\tfscanf(inputFile, "%lf", &targetTime);\n\t\t/* set other benchmark parameters */\n\t\twarmUpIters = 2;\n\t\tdefaultReps = 1000;\n\t\t/* Report benchmark parameters */\n\t\tprintf("------------------------------------------\\n");\n\t\tprintf(" Benchmark parameters \\n");\n\t\tprintf("------------------------------------------\\n");\n\t\tprintf("Minimum data size %d\\n", minDataSize);\n\t\tprintf("Maximum data size %d\\n", maxDataSize);\n\t\tprintf("Target time (sec) %lf\\n", targetTime);\n\t\tprintf("Default Repetitions %d\\n", defaultReps);\n\t\tprintf("No. Warmup iterations %d\\n", warmUpIters);\n\t}\n\t/*Initialise benchmarkNumber to 0 so that the WHILE loop in\n\tthe driver is entered the first time */\n\tbenchmarkNumber = 0;\n\t/* Broadcast benchmark parameters from master to all\n\tother MPI processes. */\n\tMPI_Bcast(&minDataSize, 1, MPI_INT, 0, comm);\n\tMPI_Bcast(&maxDataSize, 1, MPI_INT, 0, comm);\n\tMPI_Bcast(&targetTime, 1, MPI_DOUBLE, 0, comm);\n\tMPI_Bcast(&defaultReps, 1, MPI_INT, 0, comm);\n\tMPI_Bcast(&warmUpIters, 1, MPI_INT, 0, comm);\n\treturn 0;\n}
| SUBROUTINE readBenchmarkParams()\n\n\t!Rank 0 reads parameters from input file\n\tIF (myMPIRank == 0) THEN\n\t write (*,*) 'Reading parameters from input file....'\n\t !read minimum data size from input file\n\t read(10,*) minDataSize \n\t !read maximum data size from input file\n\t read(10,*) maxDataSize\n\t !read target time from input file\n\t read(10,*) targetTime\n\n\t !set other benchmark parameters\n\t warmUpIters = 2\n\t defaultReps = 1000\n \n\t !Report benchmark parameters\n\t write(*,fmt='(A)') '------------------------------------------'\n\t write(*,fmt='(A)') ' Benchmark parameters '\n\t write(*,fmt='(A)') '------------------------------------------'\n\t write(*,fmt='(A,t25,i10)') 'Minimum data size', minDataSize\n\t write(*,fmt='(A,t25,i10)') 'Maximum data size', maxDataSize\n\t write(*,fmt='(A,t25,f10.2)') 'Target time (sec)', targetTime\n\t write(*,fmt='(A,t25,i10)') 'Default Repetitions', defaultReps\n\t write(*,fmt='(A,t25,i10)') 'No. Warmup iterations', warmUpIters\n\n\tEND IF\n\t!Initialise benchmarkNumber to 0 so that the\n\t!DO WHILE loop in the driver is entered the first time\n\tbenchmarkNumber = 0\n\n\t!Broadcast benchmark parameters from master to all \n\t!other MPI processes.\n\tCALL MPI_Bcast(minDataSize, 1, MPI_INTEGER, 0, comm, ierr)\n\tCALL MPI_Bcast(maxDataSize, 1, MPI_INTEGER, 0, comm, ierr)\n\tCALL MPI_Bcast(targetTime, 1, MPI_DOUBLE_PRECISION, 0, comm, ierr)\n\tCALL MPI_Bcast(defaultReps, 1, MPI_INTEGER, 0, comm, ierr)\n\tCALL MPI_Bcast(warmUpIters, 1, MPI_INTEGER, 0, comm, ierr)\n\nEND SUBROUTINE readBenchmarkParams
|
4 | findBenchmarkNumber:Finds the ID of the next benchmark which will be executed. Master MPI process reads next line from input file. It then compares it to the benchmark list to find the ID and broadcasts this to the other MPI processes. The function sets the benchmarkNumber variable and also returns the benchmarkNumber.
| int findBenchmarkNumber(){\n\tchar benchmarkName[MAXSTRING];\n\tint rankInA, rankInB;\n\tint i;\n\n\t/* Master MPI process reads next line from file */\n\tif (myMPIRank == 0){\n\t\t/* set benchmarkNumber to ERROR before read to allow error\n\t\tcheck */\n\t\tbenchmarkNumber = ERROR;\n\n\t\t/* read next benchmark from file */\n\t\tif (fscanf(inputFile, "%s", benchmarkName) == EOF){\n\t\t\tbenchmarkNumber = FINISHED;\n\t\t}\n\t\telse {\n\t\t\t/* convert benchmarkName to lowercase characters */\n\t\t\tconvertToLowercase(benchmarkName);\n\t\t\t/* ..and check if benchmark name matches. */\n\t\t\tfor (i = 0; i< NUM_BENCHMARKS; i++){\n\t\t\t\tif (strcmp(benchmarkName,benchmarkList[i]) == 0){\n\t\t\t\t\tbenchmarkNumber = i;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t/* Check if benchmark Name does not match */\n\t\tif (benchmarkNumber == ERROR){\n\t\t printf("ERROR: %s does not match any possible benchmarks\\n",benchmarkName);\n\t\t}\n\n\t\t/* Check if pingpong or pingping benchmark */\n\t\tif (benchmarkNumber <= LASTPPID){\n\t\t\t/* Read ranks from input file */\n\t\t\tif (fscanf(inputFile, "%d %d",&rankInA, &rankInB) != 2){\n\t\t\t\tprintf("ERROR: expecting ranks after %s\\n",benchmarkName);\n\t\t\t}\n\t\t\telse {\n\t\t\t\tPPRanks[0] = findRank(rankInA);\n\t\t\t\tPPRanks[1] = findRank(rankInB);\n\t\t\t}\n\t\t\t/* Check if PPRanks are the same */\n\t\t\tif (PPRanks[0] == PPRanks[1]){\n\t\t\t\tprintf("Warning: Ranks are the same; benchmark will not work.\\n");\n\t\t\t}\n\n\t\t}\n\t}\n\n\t/* Broadcast benchmarkNumber to other MPI processes */\n\tMPI_Bcast(&benchmarkNumber, 1, MPI_INT, 0, comm);\n\n\t/* If pingpong or pingping benchmark then broadcast ranks of participating processes */\n\tif (benchmarkNumber <= LASTPPID) {\n\t\tMPI_Bcast(PPRanks, 2, MPI_INT, 0, comm);\n\t}\n\n\treturn benchmarkNumber;\n}
| SUBROUTINE findBenchmarkNumber()\n\tcharacter (len = MAXSTRING) :: benchmarkName\n\tinteger :: rankInA, rankInB\n\tinteger :: i\n\n\t!Master MPI process reads next line from file\n\tIF (myMPIRank == 0) THEN\n\t\t!set benchmarkNumber to ERROR before read\n\t\t!to allow error check\n\t\tbenchmarkNumber = ERROR\n\t\t\n\t\t!Read next benchmark from file\n\t\tREAD(10,*,IOSTAT=ioStatus) benchmarkName\n\t\t\n\t\t!Check if EOF is reached\n\t\tIF (ioStatus < 0) THEN\n\t\t\tbenchmarkNumber = FINISHED\n\t\tELSE\n\t\t\t!Convert benchmarkName to lowercase characters\n\t\t\tCALL ConvertTolowercase(benchmarkName)\n\t\t\t!..and check if benchmark name matches.\n\t\t\tDO i = 1,NUM_BENCHMARKS\n\t\t\t\tIF (benchmarkName == benchmarkList(i)) THEN\n\t\t\t\t\tbenchmarkNumber = i\n\t\t\t\tEND IF\n\t\t\tEND DO\n\t\tEND IF\n\n\t\t!Check if benchmark Name does not match\n\t\tIF (benchmarkNumber == ERROR) THEN\n\t\t\twrite(*,*) 'ERROR: ', trim(benchmarkName) , &\n\t\t\t\t'does not match any possible benchmarks'\n\t\tEND IF\n\n\t\t!Check if pingpong or pingping benchmark\n\t\tIF (benchmarkNumber <= LASTPPID) THEN\n\t\t\t!Read ranks from input file\n\t\t\tREAD(10,*,IOSTAT=ioStatus) rankInA, rankInB\n\t\t\t\n\t\t\t!Check if error in read\n\t\t\tIF (ioStatus < 0) THEN\n\t\t\t\twrite(*,*) 'ERROR: expecting ranks after ',&\n\t\t\t\t\ttrim(benchmarkName)\n\t\t\tELSE !if no error find actual MPI ranks\n\t\t\t\tPPRanks(1) = findRank(rankInA)\n\t\t\t\tPPRanks(2) = findRank(rankInB)\n\t\t\tEND IF\n\t\t\t!Check if PPRanks are the same\n\t\t\tIF (PPRanks(1) == PPRanks(2)) THEN\n\t\t\t\twrite(*,*) 'Warning: Ranks are the same; benchmark will',&\n\t\t\t\t\t' not work.'\n\t\t\tEND IF\n\t\tEND IF\n\t\t\n\tEND IF\n\n\t!Broadcast benchmarkNumber to other MPI processes\n\tCALL MPI_Bcast(benchmarkNumber, 1, MPI_INTEGER, 0, &\n\t\tcomm, ierr)\n\n\t!If pingpong or pingping benchmark broadcast ranks of\n\t!participating processes.\n\tIF (benchmarkNumber <= LASTPPID) THEN\n\t\tCALL MPI_Bcast(PPRanks, 2, MPI_INTEGER, 0, comm, ierr)\n\tEND IF\n\nEND SUBROUTINE findBenchmarkNumber
|
5 | convertToLowerCase:Takes a string as an argument and converts all uppercase characters to lowercase using their ASCII values.
| int convertToLowercase(char *convertString){\n\tint i;\n\tint len;\n\n\tlen = strlen(convertString);\n\n\tfor (i=0; i<len; i++){\n\t\tconvertString[i] = tolower(convertString[i]);\n\t}\n\n\treturn 0;\n}
| SUBROUTINE convertToLowerCase(string)\n\tcharacter (len = *), intent(inout) :: string\n\tinteger :: i, length\n\n\t!Find length of string.\n\tlength = LEN(string)\n\t\n\t!Loop through each character of string.\n\tDO i = 1, length\n\t\t!If character between A and Z...\n\t\tIF((string(i:i) >= 'A') .AND. (string(i:i)) <= 'Z') THEN\n\t\t\t!Make character lowercase\n\t\t\tstring(i:i) = ACHAR(IACHAR(string(i:i)) + 32)\n\t\tEND IF\n\tEND DO\n\nEND SUBROUTINE convertToLowerCase
|
6 | repTimeCheck:Checks if the time for the benchmark reached the target time. Changes the number of repetitions for the next data size based on the difference between the time taken and the target time.
| int repTimeCheck(double time, int numReps){\n\tint repCheck;\n\n\tif (time < targetTime){\n\t\trepsToDo = 2 * numReps;\n\t\trepCheck = FALSE;\n\t}\n\telse if (time > (2 * targetTime)){\n\t\trepsToDo = max(numReps/2,1);\n\t\trepCheck = TRUE;\n\t}\n\telse {\n\t\trepCheck = TRUE;\n\t}\n\n\treturn repCheck;\n}
| FUNCTION repTimeCheck(time, numReps)\n\tDOUBLE PRECISION, intent(in) :: time\n\tinteger, intent(in) :: numReps\n\tlogical :: repTimeCheck\n\n\tIF (time < targetTime) THEN\n\t\t!double repsToDo and repeat benchmark\n\t\trepsToDo = 2*numReps\n\t\trepTimeCheck = .false.\n\tELSE IF (time > (2*targetTime)) THEN\n\t\t!finish benchmark and halve number of reps for next dataSize\n\t\trepsToDo = MAX(numReps/2,1) !repsToDo is at least 1\n\t\trepTimeCheck = .true.\n\tELSE !time is >= targetTime\n\t\t!finish benchmark and keep reps for next data size\n\t\trepTimeCheck = .true.\n\tEND IF\n\nEND FUNCTION repTimeCheck
|
7 | alltoall:Driver routine for the alltoall benchmark.
| int alltoall(){\n\tint dataSizeIter;\n\tint bufferSize;\n\n\trepsToDo = defaultReps;\n\tdataSizeIter = minDataSize;\n\twhile (dataSizeIter <= maxDataSize){\n\t\tbufferSize = dataSizeIter * numThreads * numMPIprocs * numThreads;\n\t\tallocateAlltoallData(bufferSize);\n\t\talltoallKernel(warmUpIters, dataSizeIter);\n\t\ttestAlltoall(dataSizeIter);\n\t\tbenchComplete = FALSE;\n\t\twhile (benchComplete != TRUE){\n\t\t\tMPI_Barrier(comm);\n\t\t\tstartTime = MPI_Wtime();\n\t\t\talltoallKernel(repsToDo, dataSizeIter);\n\t\t\tMPI_Barrier(comm);\n\t\t\tfinishTime = MPI_Wtime();\n\t\t\ttotalTime = finishTime - startTime;\n\t\t\tif (myMPIRank==0){\n\t\t\t\tbenchComplete = repTimeCheck(totalTime, repsToDo);\n\t\t\t}\n\t\t\tMPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);\n\t\t\tMPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);\n\t\t}\n\t\tif (myMPIRank == 0){\n\t\t\tsetReportParams(dataSizeIter, repsToDo, totalTime);\n\t\t\tprintReport();\n\t\t}\n\t\tfreeAlltoallData();\n\t\tdataSizeIter = dataSizeIter * 2;\n\t}\n\treturn 0;\n}
| SUBROUTINE alltoall()\n\tinteger :: dataSizeIter\n\tinteger :: bufferSize\n\n\t!Initialise repsToDo to defaultReps\n\trepsToDo = defaultReps\n\n\t!Start loop over data sizes\n\tdataSizeIter = minDataSize !initialise dataSizeIter\n\tDO WHILE (dataSizeIter <= maxDataSize)\n\t\t!Calculate buffer size and allocate space for\n\t\t!the data arrays.\n\t\tbufferSize = dataSizeIter * (numThreads * numMPIprocs) &\n\t\t\t* numThreads\n\t\t\n\t\tCALL allocateData(bufferSize)\n\t\t\n\t\t!Perform warm-up of benchmark\n\t\tCALL alltoallKernel(warmUpIters,dataSizeIter)\n\t\t\n\t\t!Test if alltoall was successful\n\t\tCALL testAlltoall(dataSizeIter)\n\n\t\t!Initialise the benchmark\n\t\tbenchComplete = .false.\n\n\t\t!Execute benchmark until target time is reached\n\t\tDO WHILE (benchComplete .NEQV. .true.)\n\t\t\t!Start timer\n\t\t\tCALL MPI_Barrier(comm, ierr)\n\t\t\tstartTime = MPI_Wtime()\n\n\t\t\t!Execute alltoall for repsToDo repetitions\n\t\t\tCALL alltoallKernel(repsToDo, dataSizeIter)\n\n\t\t\t!Stop timer\n\t\t\tCALL MPI_Barrier(comm, ierr)\n\t\t\tfinishTime = MPI_Wtime()\n\t\t\ttotalTime = finishTime - startTime\n\n\t\t\t!Test if target time was reached\n\t\t\tIF (myMPIRank==0) THEN\n\t\t\t\tbenchComplete = repTimeCheck(totalTime, repsToDo)\n\t\t\tEND IF\n\t\t\t!Ensure all procs have the same value of benchComplete\n\t\t\t!and repsToDo\n\t\t\tCALL MPI_Bcast(benchComplete, 1, MPI_INTEGER, 0, comm, ierr)\n\t\t\tCALL MPI_Bcast(repsToDo, 1, MPI_INTEGER, 0, comm, ierr)\n\n\t\tEND DO\n\n\t\t!Master process sets benchmark result for reporting\n\t\tIF (myMPIRank == 0) THEN\n\t\t\tCALL setReportParams(dataSizeIter,repsToDo,totalTime)\n\t\t\tCALL printReport()\n\t\tEND IF\n\n\t\t!Free allocated data\n\t\tCALL freeData()\n\t\t\n\t\t!Double data size and loop again\n\t\tdataSizeIter = dataSizeIter * 2\n\n\tEND DO !End loop over data sizes\n\nEND SUBROUTINE alltoall
|
8 | alltoallKernel:Implements the all to all benchmark. Each thread sends/receives dataSize items to/from every other process.
| int alltoallKernel(int totalReps, int dataSize){\n\tint repIter, i, j;\n\tint dataForEachProc, numsToWrite;\n\tint blockNum, startOffset;\n\n\tnumsToWrite = numThreads * dataSize;\n\tdataForEachProc = numThreads * numThreads * dataSize;\n\n\tfor (repIter=0; repIter<totalReps; repIter++){\n#pragma omp parallel default(none) \\\n\tprivate(blockNum,i,j) \\\n\tshared(numsToWrite,dataForEachProc,globalIDarray) \\\n\tshared(alltoallSendBuf,numMPIprocs)\n\t\t{\n\t\t\tblockNum = (myThreadID) * numsToWrite;\n\t\t\tfor (i=0; i<numMPIprocs; i++){\n\t\t\t\tfor (j=0; j<numsToWrite; j++){\n\t\t\t\t\talltoallSendBuf[blockNum + (i * dataForEachProc) + j] = globalIDarray[myThreadID];\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tMPI_Alltoall(alltoallSendBuf, dataForEachProc, MPI_INT, alltoallRecvBuf, dataForEachProc, MPI_INT, comm);\n\n#pragma omp parallel default(none) \\\n\tprivate(blockNum,startOffset,i,j) \\\n\tshared(alltoallRecvBuf,alltoallFinalBuf,numMPIprocs) \\\n\tshared(dataForEachProc,numsToWrite,dataSize,globalIDarray) \\\n\tshared(numThreads)\n\t\t{\n\t\t\tblockNum = myThreadID * dataSize;\n\t\t\tstartOffset = (numsToWrite * numMPIprocs) * myThreadID;\n\t\t\tfor (i=0; i<(numThreads * numMPIprocs); i++){\n\t\t\t\tfor (j=0; j<dataSize; j++){\n\t\t\t\t\talltoallFinalBuf[startOffset + (i * dataSize) + j] = alltoallRecvBuf[blockNum + (i * numsToWrite) + j];\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn 0;\n}
| SUBROUTINE alltoallKernel(totalReps,dataSize)\n\tinteger, intent(in) :: totalReps, dataSize\n\tinteger :: repIter, i, j\n\tinteger :: dataForEachProc, numsToWrite\n\tinteger :: blockNum, startOffset\n\n\t!Calculate how much data each thread sends to each process\n\tnumsToWrite = numThreads * dataSize\n\t!Calculate total amount of data each process gets\n\t!from any other process...\n\t!..each thread gets dataSize items from every other thread.\n\tdataForEachProc = numThreads * numThreads * dataSize\n\t\n\tDO repIter = 1, totalReps\n\t\t\n\t\t!Each thread writes to numsToWrite items for each\n\t\t!MPI process to alltoallSendBuf.\n\t\t!$OMP PARALLEL DEFAULT(NONE), &\n\t\t!$OMP PRIVATE(blockNum,i,j), &\n\t\t!$OMP SHARED(numsToWrite,dataForEachProc,globalIDarray), &\n\t\t!$OMP SHARED(alltoallSendBuf,numMPIprocs)\n\t\t!Calculate the blockNum of a thread.\n\t\t!This is used to find which portion of the\n\t\t!dataForEachProc elements a thread will be responsible for.\n\t\tblockNum = (myThreadID - 1)*numsToWrite\n\t\t\n\t\t!Write threadID to correct location in alltoallSendBuf\n\t\tDO i = 1, numMPIprocs !loop over MPI processes\n\t\t\tDO j = 1, numsToWrite !loop over data to write\n\t\t\t\talltoallSendBuf(blockNum + ((i-1)*dataForEachProc + j)) = &\n\t\t\t\t\tglobalIDarray(myThreadID)\n\t\t\tEND DO\n\t\tEND DO\n\t\t!$OMP END PARALLEL\n\t\t\n\t\t!Call MPI_AlltoAll\n\t\tCALL MPI_Alltoall(alltoallSendBuf, dataForEachProc, MPI_INTEGER, &\n\t\t\talltoallRecvBuf, dataForEachProc, MPI_INTEGER, &\n\t\t\tcomm, ierr)\n\t\t\n\t\t!Each thread now reads the receive buffer so that\n\t\t!it gets dataSize values from every other thread\n\t\t!in its portion of alltoallFinalBuf\n\t\t!$OMP PARALLEL DEFAULT(NONE), &\n\t\t!$OMP PRIVATE(blockNum,startOffset,i,j),&\n\t\t!$OMP SHARED(alltoallRecvBuf,alltoallFinalBuf,numMPIprocs),&\n\t\t!$OMP SHARED(dataForEachProc,numsToWrite,dataSize,globalIDarray),&\n\t\t!$OMP SHARED(numThreads)\n\t\t\n\t\t!Calculate the blockNum.\n\t\t!This determines which portion of the data from each process\n\t\t!a thread is responsible for.\n\t\tblockNum = (myThreadID-1)*dataSize\n\t\t\n\t\t!Calculate offset into each MPI process's finalBuf where\n\t\t!each thread will start to write its data...\n\t\t!1) Calculate amount of data for each thread..\n\t\tstartOffset = (numsToWrite * numMPIprocs)\n\t\t!2) Find which block in finalBuf for each thread\n\t\tstartOffset = startOffset * (myThreadID -1)\n\t\t\n\t\t!Loop over all processors (threads & processes)\n\t\tDO i = 1, (numThreads*numMPIprocs)\n\t\t\tDO j = 1, dataSize\n\t\t\t\talltoallFinalBuf(startOffset + ((i-1)*dataSize) + j ) = &\n\t\t\t\t\talltoallRecvBuf(blockNum + ((i-1)*numsToWrite) + j)\n\t\t\tEND DO\n\t\tEND DO\n\n\t\t!$OMP END PARALLEL\n\n\n\tEND DO !End loop over repetitions\n\nEND SUBROUTINE alltoallKernel
|
9 | allocateAlltoallData: Allocates memory for the main data arrays used in the alltoall benchmark.
| int allocateAlltoallData(int bufferSize){\n\talltoallSendBuf = (int *) malloc(bufferSize * sizeof(int));\n\talltoallRecvBuf = (int *) malloc(bufferSize * sizeof(int));\n\talltoallFinalBuf = (int *) malloc(bufferSize * sizeof(int));\n\n\treturn 0;\n}
| SUBROUTINE allocateData(bufferSize)\n\tinteger, intent(in) :: bufferSize\n\n\tallocate(alltoallSendBuf(bufferSize))\n\tallocate(alltoallRecvBuf(bufferSize))\n\tallocate(alltoallFinalBuf(bufferSize))\n\nEND SUBROUTINE allocateData
|
10 | freeAlltoallData:Free memory of the main data arrays.
| int freeAlltoallData() {\n free(alltoallSendBuf);\n free(alltoallRecvBuf);\n free(alltoallFinalBuf);\n return 0;\n}
| SUBROUTINE freeData()\n\n\tdeallocate(alltoallSendBuf)\n\tdeallocate(alltoallRecvBuf)\n\tdeallocate(alltoallFinalBuf)\n\nEND SUBROUTINE freeData
|
11 | testAlltoall:Verifies that the all to all completed successfully.
| int testAlltoall(int dataSize){\n int sizeofBuffer, i, j;\n int dataForEachThread, startElem;\n int testFlag, reduceFlag;\n int *testBuf;\n\n testFlag = TRUE;\n sizeofBuffer = dataSize * numThreads * numMPIprocs * numThreads;\n testBuf = (int *) malloc(sizeofBuffer * sizeof(int));\n\n dataForEachThread = dataSize * numThreads * numMPIprocs;\n\n #pragma omp parallel default(none) \\\n private(i,j,startElem) \\\n shared(testBuf,globalIDarray,sizeofBuffer,dataSize) \\\n shared(numThreads,numMPIprocs,dataForEachThread)\n {\n startElem = (myThreadID) * dataForEachThread;\n\n for (i=0; i<(numThreads * numMPIprocs); i++){\n for (j=0; j<dataSize; j++){\n testBuf[startElem + (i * dataSize) + j] = i;\n }\n }\n }\n\n for (i=0; i<sizeofBuffer; i++){\n if (alltoallFinalBuf[i] != testBuf[i]){\n testFlag = FALSE;\n }\n }\n\n MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);\n\n if (myMPIRank == 0){\n setTestOutcome(reduceFlag);\n }\n\n free(testBuf);\n\n return 0;\n}",
| SUBROUTINE testAlltoall(dataSize)\n\n\tinteger, intent(in) :: dataSize\n\tinteger :: sizeofBuffer, i, j\n\tinteger :: dataForEachThread, startElem\n\tlogical :: testFlag, reduceFlag\n\n\t!Set testFlag to true\n\ttestFlag = .true.\n\n\t!calculate the size of buffer on each process and allocate\n\tsizeofBuffer = dataSize * numThreads * numMPIprocs * numThreads\n\tallocate(testBuf(sizeofBuffer))\n\n\t!Calculate how many elements each thread will work with\n\tdataForEachThread = dataSize * numThreads * numMPIProcs\n\n\t!Fill buffer with expected values.\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,j,startElem), &\n!$OMP SHARED(testBuf,globalIDarray,sizeofBuffer,dataSize),&\n!$OMP SHARED(numThreads,numMPIprocs,dataForEachThread)\n\n\t!Calculate start element for each thread\n\tstartElem = (myThreadID - 1)* dataForEachThread\n\n\tDO i = 1, (numThreads * numMPIprocs)\n\t\tDO j = 1, dataSize\n\t\t\ttestBuf(startElem + (i-1)*dataSize + j) = i\n\t\tEND DO\n\tEND DO\n\n!$OMP END PARALLEL\n\n\t!Compare\n\tDO i = 1, sizeofBuffer\n\t\tIF(alltoallFinalBuf(i) /= testBuf(i)) THEN\n\t\t\ttestFlag = .false.\n\t\tEND IF\n\tEND DO\n\n\t!Reduce testFlag with logical AND operator to \n\t!get overall test result.\n\tCALL MPI_Reduce(testFlag, reduceFlag, 1, MPI_LOGICAL, &\n\t\tMPI_LAND, 0, comm, ierr)\n\n\t!Master then sets testOutcome flag\n\tIF (myMPIRank == 0) THEN\n\t\tCALL setTestOutcome(reduceFlag)\n\tEND IF\n\n\t!free space for testBuf\n\tdeallocate(testBuf)\n\nEND SUBROUTINE testAlltoall
|
12 | barrierDriver:Driver subroutine for the barrier benchmark.
| int barrierDriver(){ \n repsToDo = defaultReps; \n barrierKernel(warmUpIters); \n benchComplete = FALSE; \n \n while (benchComplete != TRUE){ \n MPI_Barrier(comm); \n startTime = MPI_Wtime(); \n barrierKernel(repsToDo); \n MPI_Barrier(comm); \n finishTime = MPI_Wtime(); \n totalTime = finishTime - startTime; \n \n /* repetitions. */ \n if (myMPIRank==0){ \n benchComplete = repTimeCheck(totalTime, repsToDo); \n } \n \n MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm); \n MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm); \n } \n \n if (myMPIRank == 0){ \n setTestOutcome(TRUE); \n setReportParams(1,repsToDo,totalTime); \n printReport(); \n } \n \n return 0; \n}",
| SUBROUTINE barrierDriver()\n\n\t!initialise repsToDo to defaultReps\n\trepsToDo = defaultReps\n\n\t!Perform warm-up for benchmark\n\tCALL barrierKernel(warmUpIters)\n\n\t!Initialise the benchmark \n\tbenchComplete = .false.\n\t!Execute benchmark until target time is reached\n\tDO WHILE (benchComplete .NEQV. .true.)\n\t\t!Start timer\n\t\tCALL MPI_Barrier(comm,ierr)\n\t\tstartTime = MPI_Wtime()\n\t\t\n\t\t!Execute benchmark for repsToDo repetitions\n\t\tCALL barrierKernel(repsToDo)\n\n\t\t!Stop timer \n\t\tCALL MPI_Barrier(comm, ierr)\n\t\tfinishTime = MPI_Wtime()\n\t\ttotalTime = finishTime - startTime\n\n\t\t!Test if target time was reached with number of\n\t\t!repetitions.\n\t\tif (myMPIRank==0) then \n\t\t\tbenchComplete = repTimeCheck(totalTime, repsToDo)\n\t\tend if\n\t\t!Ensure all procs have the same value of benchComplete\n\t\t!and repsToDo\n\t\tcall MPI_Bcast(benchComplete, 1, MPI_INTEGER, 0, comm, ierr)\n\t\tcall MPI_Bcast(repsToDo, 1, MPI_INTEGER, 0, comm, ierr)\n\t\n\tEND DO !End of benchComplete loop\n\n\t!Master process sets benchmark results\n\tIF (myMPIRank == 0) THEN\n\t\t!No unit test, hardwire test result to pass\n\t\tCALL setTestOutcome(.true.)\n\t\tCALL setReportParams(1,repsToDo,totalTime)\n\t\tCALL printReport()\n\tEND IF\n\nEND SUBROUTINE barrierDriver
|
13 | barrierKernel:Main kernel for the barrier benchmark. First, threads under each process synchronise with an OpenMP BARRIER. Then an MPI barrier synchronises the MPI processes. The MPI barrier is called within an OpenMP master directive.
| int barrierKernel(int totalReps){ \n int repIter; \n\n #pragma omp parallel default(none) \\ \n private(repIter) \\ \n shared(totalReps,comm) \n { \n for (repIter=0; repIter<totalReps; repIter++){ \n #pragma omp barrier \n\n #pragma omp master \n { \n MPI_Barrier(comm); \n } \n } \n } \n\n return 0; \n}",
| SUBROUTINE barrierKernel(totalReps)\n\tinteger, intent(in) :: totalReps\n\tinteger :: repIter\n\n\t!Open the parallel region\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(repIter), &\n!$OMP SHARED(totalReps,comm,ierr) \n\n\tDO repIter = 1, totalReps\n\n\t\t!Threads synchronise with an OpenMP barrier\n!$OMP BARRIER\n\n\t\t!Master threads on each process now synhronise\n!$OMP MASTER\n\t\tCALL MPI_Barrier(comm, ierr)\n!$OMP END MASTER\n\t\n\tEND DO !End repetitions loop\n\n!$OMP END PARALLEL\n\nEND SUBROUTINE barrierKernel
|
14 | broadcast:Driver subroutine for the broadcast benchmark.
| int broadcast() {\n int dataSizeIter, sizeofFinalBuf;\n repsToDo = defaultReps;\n dataSizeIter = minDataSize;\n \n while (dataSizeIter <= maxDataSize) {\n allocateBroadcastData(dataSizeIter);\n broadcastKernel(warmUpIters,dataSizeIter);\n sizeofFinalBuf = dataSizeIter * numThreads;\n testBroadcast(sizeofFinalBuf);\n benchComplete = FALSE;\n \n while (benchComplete != TRUE) {\n MPI_Barrier(comm);\n startTime = MPI_Wtime();\n broadcastKernel(repsToDo, dataSizeIter);\n MPI_Barrier(comm);\n finishTime = MPI_Wtime();\n totalTime = finishTime - startTime;\n \n if (myMPIRank==0) {\n benchComplete = repTimeCheck(totalTime, repsToDo);\n }\n \n MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);\n MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);\n }\n \n if (myMPIRank == 0) {\n setReportParams(dataSizeIter, repsToDo, totalTime);\n printReport();\n }\n \n freeBroadcastData();\n dataSizeIter = dataSizeIter * 2;\n }\n \n return 0;\n}",
| SUBROUTINE broadcast()\n\tinteger :: dataSizeIter\n\tinteger :: sizeofFinalBuf !needed for test\n\n\t!initialise repsToDo to defaultReps\n\trepsToDo = defaultReps\n\n\t!Start loop over data sizes\n\tdataSizeIter = minDataSize \n\tDO WHILE (dataSizeIter <= maxDataSize) \n\t\t!allocate space for main data arrays\n\t\tCALL allocateData(dataSizeIter)\n\n\t\t!Perform benchmark warm-up\n\t\tCALL broadcastKernel(warmUpIters,dataSizeIter)\n\t\t\n\t\t!Set sizeofFinalBuf and test if broadcast was a success\n\t\tsizeofFinalBuf = dataSizeIter * numThreads\n\t\tCALL testBroadcast(sizeofFinalBuf)\n\n\t\t!Initialise the benchmark \n\t\tbenchComplete = .false.\n\t\t!Execute benchmark until target time is reached\n\t\tDO WHILE (benchComplete .NEQV. .true.)\n\t\t\t!Start timer\n\t\t\tCALL MPI_Barrier(comm, ierr)\n\t\t\tstartTime = MPI_WTime()\n\n\t\t\t!Execute broadcast for repsToDo repetitions\n\t\t\tCALL broadcastKernel(repsToDo, dataSizeIter)\n\n\t\t\t!Stop timer \n\t\t\tCALL MPI_Barrier(comm, ierr)\n\t\t\tfinishTime = MPI_Wtime()\n\t\t\ttotalTime = finishTime - startTime\n\t\t\t\n\t\t\t!Test if target time was reached \n\t\t\tif (myMPIRank==0) then \n\t\t\t\tbenchComplete = repTimeCheck(totalTime, repsToDo)\n\t\t\tend if\n\t\t\t!Ensure all procs have the same value of benchComplete\n\t\t\t!and repsToDo\n\t\t\tcall MPI_Bcast(benchComplete, 1, MPI_INTEGER, 0, comm, ierr)\n\t\t\tcall MPI_Bcast(repsToDo, 1, MPI_INTEGER, 0, comm, ierr)\n\n\t\tEND DO\n\n\t\t!Master process sets benchmark result for reporting\n\t\tIF (myMPIRank == 0) THEN\n\t\t\tCALL setReportParams(dataSizeIter,repsToDo,totalTime)\n\t\t\tCALL printReport()\n\t\tEND IF\n\n\t\t!Free allocated data\n\t\tCALL freeData()\n\n\t\t!Double dataSize and loop again\n\t\tdataSizeIter = dataSizeIter * 2\n\t \n\tEND DO !End loop over data sizes\n\nEND SUBROUTINE broadcast
|
15 | broadcastKernel:The broadcast benchmark. At the start one process owns the data. Afterwards, all processes and threads have a copy of the data.
| int broadcastKernel(int totalReps, int dataSize){\n int repIter, i;\n int startPos;\n\n for (repIter=0; repIter<totalReps; repIter++){\n if (myMPIRank == BROADCASTROOT){\n for (i=0; i<dataSize; i++){\n broadcastBuf[i] = BROADCASTNUM;\n }\n }\n\n MPI_Bcast(broadcastBuf, dataSize, MPI_INT, BROADCASTROOT, comm);\n\n #pragma omp parallel default(none) \\\n private(i,startPos) \\\n shared(dataSize,finalBroadcastBuf,broadcastBuf)\n {\n startPos = ((myThreadID) * dataSize);\n for (i=0; i<dataSize; i++){\n finalBroadcastBuf[startPos + i] = broadcastBuf[i];\n }\n }\n }\n\n return 0;\n}",
| SUBROUTINE broadcastKernel(totalReps, dataSize)\n\tinteger, intent(in) :: totalReps, dataSize\n\tinteger :: repIter, i\n\t!Set source of broadcast\n\tinteger, parameter :: BROADCASTROOT = 0\n\t!Start position in finalBroadcastBuf of each thread.\n\tinteger :: startPos \n\n\tDO repIter = 1, totalReps\n\n\t\t!Master MPI process writes to broadcastBuf\n\t\tIF (myMPIRank == BROADCASTROOT) THEN\n\t\t\tDO i = 1, dataSize\n\t\t\t\tbroadcastBuf(i) = BROADCASTNUM\n\t\t\tEND DO\n\t\tEND IF\n\t\n\t\t!Broadcast array to all other processes.\n\t\tCALL MPI_Bcast(broadcastBuf, dataSize, MPI_INTEGER, &\n\t\t\tBROADCASTROOT, comm, ierr)\n\n\t\t!Each thread copies broadcastBuf to its portion of\n\t\t!finalBroadcastBuf\n\t\t!$OMP PARALLEL DEFAULT(NONE), &\n\t\t!$OMP PRIVATE(i,startPos), &\n\t\t!$OMP SHARED(dataSize,finalBroadcastBuf,broadcastBuf)\n\t\t\n\t\t!Calculate start of each threads portions of finalBroadcastBuf\n\t\tstartPos = ((myThreadID-1) * dataSize)\n\t\tDO i = 1, dataSize\n\t\t\tfinalBroadcastBuf(startPos + i) = broadcastBuf(i)\n\t\tEND DO\n\n\t\t!$OMP END PARALLEL\n\t\n\tEND DO !End of repetitions loop\n\nEND SUBROUTINE broadcastKernel
|
16 | allocateBroadcastData:Allocate memory for the main data arrays in the broadcast operation.
| int allocateBroadcastData(int bufferSize){\n broadcastBuf = (int *)malloc(bufferSize * sizeof(int));\n finalBroadcastBuf = (int *)malloc((bufferSize*numThreads)*sizeof(int));\n return 0;\n}
| SUBROUTINE allocateData(bufferSize)\n\tinteger, intent(in) :: bufferSize\n\n\tallocate(broadcastBuf(bufferSize))\n\t!finalBroadcastBuf is of size dataSize*numThreads\n\tallocate(finalBroadcastBuf(bufferSize*numThreads))\n\nEND SUBROUTINE allocateData
|
17 | freeBroadcastData:Free memory of main data arrays.
| int freeBroadcastData(){\n free(broadcastBuf);\n free(finalBroadcastBuf);\n return 0;\n}
| SUBROUTINE freeData()\n\n\tdeallocate(broadcastBuf)\n\tdeallocate(finalBroadcastBuf)\n\nEND SUBROUTINE freeData
|
18 | testBroadcast:Verifies that the broadcast benchmark worked correctly.
| int testBroadcast(int bufferSize){\n int i, testFlag, reduceFlag;\n testFlag = TRUE;\n\n for (i=0; i<bufferSize; i++){\n if (finalBroadcastBuf[i] != BROADCASTNUM){\n testFlag = FALSE;\n }\n }\n\n MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);\n\n if (myMPIRank == 0){\n setTestOutcome(testFlag);\n }\n\n return 0;\n}",
| SUBROUTINE testBroadcast(bufferSize)\n\n\tinteger, intent(in) :: bufferSize\n\tinteger :: i\n\tlogical :: testFlag, reduceFlag\n\n\t!Initialise testFlag to true\n\ttestFlag = .true.\n\n\t!Compare each element of finalBroadcastBuf with BROADCASTNUM\n\tDO i = 1, bufferSize\n\t\tIF (finalBroadcastBuf(i) /= BROADCASTNUM) THEN\n\t\t\ttestFlag = .false.\n\t\tEND IF\n\tEND DO\n\n\t!Reduce testFlag to master with logical AND operation\n\tCALL MPI_Reduce(testFlag, reduceFlag, 1, MPI_LOGICAL, &\n\t\tMPI_LAND, 0, comm, ierr)\n\t!Master then sets testOutcome using reduceFlag\n\tIF (myMPIRank == 0) THEN\n\t\tCALL setTestOutcome(reduceFlag)\n\tEND IF\n\nEND SUBROUTINE testBroadcast
|
19 | reduction:Driver subroutine for the reduce and allReduce benchmarks.
| int reduction(int benchmarkType){\n int dataSizeIter, sizeofBuf;\n repsToDo = defaultReps;\n dataSizeIter = minDataSize;\n\n while (dataSizeIter <= maxDataSize){\n allocateReduceData(dataSizeIter);\n\n if (benchmarkType == REDUCE){\n reduceKernel(warmUpIters, dataSizeIter);\n\n if (myMPIRank == 0){\n testReduce(dataSizeIter, benchmarkType);\n }\n } else if (benchmarkType == ALLREDUCE){\n sizeofBuf = dataSizeIter * numThreads;\n allReduceKernel(warmUpIters, dataSizeIter);\n testReduce(sizeofBuf, benchmarkType);\n }\n\n benchComplete = FALSE;\n\n while (benchComplete != TRUE){\n MPI_Barrier(comm);\n startTime = MPI_Wtime();\n\n if (benchmarkType == REDUCE){\n reduceKernel(repsToDo, dataSizeIter);\n } else if (benchmarkType == ALLREDUCE){\n allReduceKernel(repsToDo, dataSizeIter);\n }\n\n MPI_Barrier(comm);\n finishTime = MPI_Wtime();\n totalTime = finishTime - startTime;\n\n if (myMPIRank==0){\n benchComplete = repTimeCheck(totalTime, repsToDo);\n }\n\n MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);\n MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);\n }\n\n if (myMPIRank == 0){\n setReportParams(dataSizeIter, repsToDo, totalTime);\n printReport();\n }\n\n freeReduceData();\n dataSizeIter = dataSizeIter * 2;\n }\n\n return 0;\n}",
| SUBROUTINE reduction(benchmarkType)\ninteger, intent(in) :: benchmarkType\ninteger :: dataSizeIter\ninteger :: sizeofBuf !for allReduce operation\n!initialise repsToDo to defaultReps\nrepsToDo = defaultReps\n!Start loop over data sizes\ndataSizeIter = minDataSize !initialise dataSizeIter\nDO WHILE (dataSizeIter <= maxDataSize)\n!allocate space for the main data arrays..\nCALL allocateData(dataSizeIter)\n!Perform benchmark warm-up\nIF (benchmarkType == REDUCE) THEN\nCALL reduceKernel(warmUpIters,dataSizeIter)\n!Master process tests if reduce was success\nIF (myMPIRank == 0) THEN\nCALL testReduce(dataSizeIter,benchmarkType)\nEND IF\nELSE IF (benchmarkType == ALLREDUCE) THEN\n!calculate sizeofBuf for test\nsizeofBuf = dataSizeIter * numThreads\nCALL allReduceKernel(warmUpIters, dataSizeIter)\n!All processes need to perform unit test\nCALL testReduce(sizeofBuf,benchmarkType)\nEND IF\n!Initialise the benchmark\nbenchComplete = .false.\n!Execute benchmark until target time is reached\nDO WHILE (benchComplete .NEQV. .true.)\n!Start timer\nCALL MPI_Barrier(comm, ierr)\nstartTime = MPI_Wtime()\n!Execute reduce for repsToDo repetitions\nIF (benchmarkType == REDUCE) THEN\nCALL reduceKernel(repsToDo, dataSizeIter)\nELSE IF (benchmarkType == ALLREDUCE) THEN\nCALL allReduceKernel(repsToDo, dataSizeIter)\nEND IF\n!Stop timer\nCALL MPI_Barrier(comm, ierr)\nfinishTime = MPI_Wtime()\ntotalTime = finishTime - startTime\n!Test if target time was reached with the number of reps\nif (myMPIRank==0) then\nbenchComplete = repTimeCheck(totalTime, repsToDo)\nend if\n!Ensure all procs have the same value of benchComplete\n!and repsToDo\ncall MPI_Bcast(benchComplete, 1, MPI_INTEGER, 0, comm, ierr)\ncall MPI_Bcast(repsToDo, 1, MPI_INTEGER, 0, comm, ierr)\nEND DO\n!Master process sets benchmark result for reporting\nIF (myMPIRank == 0) THEN\nCALL setReportParams(dataSizeIter,repsToDo,totalTime)\nCALL printReport()\nEND IF\n!Free allocated data\nCALL freeData()\n!Double dataSize and loop again\ndataSizeIter = dataSizeIter * 2\nEND DO\n!End loop over data sizes\nEND SUBROUTINE reduction
|
20 | reduceKernel:Implements the reduce mixed mode benchmark. Each thread under every MPI process combines its local buffer. This is then sent to the master MPI process to get the overall reduce value.
| int reduceKernel(int totalReps, int dataSize){\n int repIter, i, j;\n\n for (repIter=1; repIter<totalReps; repIter++){\n #pragma omp parallel default(none) \\\n private(i,j) \\\n shared(tempBuf,globalIDarray,dataSize,numThreads) \\\n shared(localReduceBuf)\n {\n #pragma omp for schedule(static,dataSize)\n for(i=0; i<(numThreads * dataSize); i++){\n tempBuf[i] = globalIDarray[myThreadID];\n }\n\n #pragma omp for\n for(i=0; i<dataSize; i++){\n localReduceBuf[i] = 0;\n for (j=0; j<numThreads; j++){\n localReduceBuf[i] += tempBuf[(j*dataSize)+i];\n }\n }\n }\n\n MPI_Reduce(localReduceBuf, globalReduceBuf, dataSize,\\\n MPI_INT, MPI_SUM, 0, comm);\n\n if (myMPIRank==0) {\n for (i=0; i<dataSize; i++){\n finalReduceBuf[i] = globalReduceBuf[i];\n }\n }\n }\n\n return 0;\n}",
| SUBROUTINE reduceKernel(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter,i\n!Decalre array which each thread reduces into\ninteger, dimension(dataSize) :: localReduceBuf\n\nDO repIter = 1, totalReps !loop for totalReps\n\n !initialise all reduce arrays to ensure correct results\n localReduceBuf = 0\n globalReduceBuf = 0\n finalReduceBuf = 0\n\n !Open the parallel region and declare localReduceBuf\n !as a reduction variable.\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i),&\n!$OMP SHARED(dataSize,globalIDarray),&\n!$OMP REDUCTION(+:localReduceBuf)\n DO i = 1, dataSize\n localReduceBuf(i) = localReduceBuf(i) + globalIDarray(myThreadID)\n END DO\n!$OMP END PARALLEL\n\n !Perform a reduce of localReduceBuf across the\n !MPI processes.\n CALL MPI_Reduce(localReduceBuf, globalReduceBuf, &\n dataSize, MPI_INTEGER, MPI_SUM, 0, comm, ierr)\n \n !Copy globalReduceBuf into master Threads portion\n !of finalReduceBuf.\n ! FR this should only happen on rank==0 \n if (myMPIRank==0) then \n finalReduceBuf(1:dataSize) = globalReduceBuf\n end if\n\nEND DO !End repetitions loop\n\nEND SUBROUTINE reduceKernel
|
21 | allReduce:Implements the allreduce mixed mode benchmark. Each thread under every MPI process combines its local buffer. All MPI processes then combine these values into the overall reduction value, which is available at each process.
| int allReduceKernel(int totalReps, int dataSize){\n int repIter, i, j;\n int startPos;\n\n for (repIter=0; repIter<totalReps; repIter++){\n\n #pragma omp parallel default(none) \\\n private(i,j) \\\n shared(tempBuf,globalIDarray,dataSize,numThreads) \\\n shared(localReduceBuf)\n {\n #pragma omp for schedule(static,dataSize)\n for(i=0; i<(numThreads * dataSize); i++){\n tempBuf[i] = globalIDarray[myThreadID];\n }\n\n #pragma omp for\n for(i=0; i<dataSize; i++){\n localReduceBuf[i] = 0;\n\n for (j=0; j<numThreads; j++){\n localReduceBuf[i] += tempBuf[(j*dataSize)+i];\n }\n }\n }\n\n MPI_Allreduce(localReduceBuf, globalReduceBuf, \\\n dataSize, MPI_INTEGER, MPI_SUM, comm);\n\n #pragma omp parallel default(none) \\\n private(i,startPos) \\\n shared(dataSize,finalReduceBuf,globalReduceBuf)\n {\n startPos = (myThreadID * dataSize);\n\n for (i=0; i<dataSize; i++){\n finalReduceBuf[startPos + i] = globalReduceBuf[i];\n }\n }\n }\n\n return 0;\n}",
| SUBROUTINE allReduceKernel(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter,i\ninteger :: startPos\n!Decalre array which each thread reduces into\ninteger, dimension(dataSize) :: localReduceBuf\n\n DO repIter = 1, totalReps !loop for totalReps\n\n !initialise all reduce arrays to ensure correct results\n localReduceBuf = 0\n globalReduceBuf = 0\n finalReduceBuf = 0\n\n !Open the parallel region and declare localReduceBuf\n !as a reduction variable.\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i),&\n!$OMP SHARED(dataSize,globalIDarray),&\n!$OMP REDUCTION(+:localReduceBuf)\n DO i = 1, dataSize\n localReduceBuf(i) = localReduceBuf(i) + globalIDarray(myThreadID)\n END DO\n!$OMP END PARALLEL\n\n !Perform an all reduce of localReduceBuf across \n !the MPI processes.\n CALL MPI_Allreduce(localReduceBuf, globalReduceBuf, &\n dataSize, MPI_INTEGER, MPI_SUM, comm, ierr)\n \n !Each thread copies globalReduceBuf into its portion \n !of finalReduceBuf\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,startPos), &\n!$OMP SHARED(dataSize,finalReduceBuf,globalReduceBuf)\n\n !Calculate the start of each threads portion of finalReduceBuf\n startPos = ((myThreadID-1) * dataSize)\n DO i = 1, dataSize\n finalReduceBuf(startPos + i) = globalReduceBuf(i)\n END DO\n!$OMP END PARALLEL\n\nEND DO !End repetitions loop\n\nEND SUBROUTINE allReduceKernel
|
22 | allocateReduceData:Allocate memory for the main data arrays in the reduction operation.
| int allocateReduceData(int bufferSize){\n localReduceBuf = (int *) malloc(bufferSize * sizeof(int));\n globalReduceBuf = (int *) malloc(bufferSize * sizeof(int));\n tempBuf = (int *) malloc((bufferSize * numThreads) * sizeof(int));\n finalReduceBuf = (int *) malloc((bufferSize * numThreads) * sizeof(int));\n\n return 0;\n}
| SUBROUTINE allocateData(bufferSize)\ninteger, intent(in) :: bufferSize\n\nallocate(globalReduceBuf(bufferSize))\n!Final reduce is of size dataSize*numThreads\nallocate(finalReduceBuf(bufferSize*numThreads))\n\nEND SUBROUTINE allocateData
|
23 | freeReduceData:Free allocated memory for main data arrays.
| int freeReduceData(){\n free(localReduceBuf);\n free(globalReduceBuf);\n free(tempBuf);\n free(finalReduceBuf);\n return 0;\n}
| SUBROUTINE freeData()\ndeallocate(globalReduceBuf)\ndeallocate(finalReduceBuf)\nEND SUBROUTINE freeData
|
24 | testReduce:Verifies that the reduction benchmarks worked correctly.
| int testReduce(int bufferSize, int benchmarkType){ \n int i, testFlag, reduceFlag; \n int correctReduce, lastGlobalID; \n correctReduce = 0; \n testFlag = TRUE; \n lastGlobalID = (numMPIprocs * numThreads); \n\n for (i=0; i<lastGlobalID; i++){ \n correctReduce = correctReduce + i; \n } \n\n for (i=0; i<bufferSize; i++){ \n if (finalReduceBuf[i] != correctReduce){ \n testFlag = FALSE; \n } \n } \n\n if (benchmarkType == ALLREDUCE){ \n MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm); \n\n if (myMPIRank == 0){ \n setTestOutcome(reduceFlag); \n } \n } else{ \n setTestOutcome(testFlag); \n } \n\n return 0; \n}",
| SUBROUTINE testReduce(bufferSize,benchmarkType)\ninteger, intent(in) :: bufferSize, benchmarkType\ninteger :: i\ninteger :: correctReduce, lastGlobalID\nlogical :: testFlag, reduceFlag\n\n!Initialise correctReduce to 0..\ncorrectReduce = 0\n!..and testFlag to true\ntestFlag = .true.\n\n!set lastGlobalID\nlastGlobalID = (numMPIprocs * numThreads)\n\n!Now find correctReduce value by summing to lastGlobalID\nDO i = 1, lastGlobalID\ncorrectReduce = correctReduce + i\nEND DO\n\n!Compare each element of finalRecvBuf to correctReduce \nDO i = 1, bufferSize\nIF (finalReduceBuf(i) /= correctReduce) THEN\ntestFlag = .false.\nEND IF\nEND DO\n\n!For allReduce, combine testFlag into master with logical AND\nIF (benchmarkType == ALLREDUCE) THEN\nCALL MPI_Reduce(testFlag, reduceFlag, 1, MPI_LOGICAL, &\nMPI_LAND, 0, comm, ierr)\n!then master sets testOutcome using reduceFlag\nIF (myMPIRank == 0) THEN\nCALL setTestOutcome(reduceFlag)\nEND IF\nELSE\n!For reduce master process just sets testOutcome using testFlag\nCALL setTestOutcome(testFlag)\nEND IF\n\nEND SUBROUTINE testReduce
|
25 | scatterGather:Driver routine for the scatter and gather benchmarks.
| int scatterGather(int benchmarkType){\n int dataSizeIter, bufferSize;\n repsToDo = defaultReps;\n dataSizeIter = minDataSize; /* initialise dataSizeIter */\n \n while (dataSizeIter <= maxDataSize){\n bufferSize = dataSizeIter * numThreads;\n\n if (benchmarkType == SCATTER){\n allocateScatterGatherData(bufferSize, benchmarkType);\n scatterKernel(warmUpIters, dataSizeIter);\n testScatterGather(bufferSize, benchmarkType);\n } else if (benchmarkType == GATHER){\n allocateScatterGatherData(bufferSize, benchmarkType);\n gatherKernel(warmUpIters, dataSizeIter);\n\n if (myMPIRank == GATHERROOT){\n testScatterGather(bufferSize*numMPIprocs, benchmarkType);\n }\n }\n\n benchComplete = FALSE;\n\n while (benchComplete != TRUE){\n MPI_Barrier(comm);\n startTime = MPI_Wtime();\n\n if (benchmarkType == SCATTER){\n scatterKernel(repsToDo, dataSizeIter);\n } else if (benchmarkType == GATHER){\n gatherKernel(repsToDo, dataSizeIter);\n }\n\n MPI_Barrier(comm);\n finishTime = MPI_Wtime();\n totalTime = finishTime - startTime;\n\n if (myMPIRank==0) {\n benchComplete = repTimeCheck(totalTime, repsToDo);\n }\n\n MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);\n MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);\n }\n\n if (myMPIRank == 0){\n setReportParams(dataSizeIter, repsToDo, totalTime);\n printReport();\n }\n\n freeScatterGatherData(benchmarkType);\n dataSizeIter = dataSizeIter * 2;\n }\n\n return 0;\n}",
| SUBROUTINE scatterGather(benchmarkType)\ninteger, intent(in) :: benchmarkType\ninteger :: dataSizeIter\ninteger :: bufferSize\n\n!Initialise repsToDo to defaultReps\nrepsToDo = defaultReps\n\n!Start loop over data sizes\ndataSizeIter = minDataSize !initialise dataSizeIter\nDO WHILE (dataSizeIter <= maxDataSize)\n!Calculate buffer size and allocate space for \n!scatter data arrays.\nbufferSize = dataSizeIter * numThreads\n\n\nIF (benchmarkType == SCATTER) THEN !Scatter\nCALL allocateData(bufferSize,benchmarkType)\n!Perform benchmark warm-up\nCALL scatterKernel(warmUpIters, dataSizeIter)\n!Test if scatter was successful\nCALL testScatterGather(bufferSize, benchmarkType)\n\nELSE IF (benchmarkType == GATHER) THEN !Gather\nCALL allocateData(bufferSize,benchmarkType)\n!Perform benchmark warm-up\nCALL gatherKernel(warmUpIters, dataSizeIter)\n!Test if gather was successful\nIF (myMPIRank == GATHERROOT) THEN\nCALL testScatterGather(bufferSize*numMPIprocs, benchmarkType)\nEND IF\nEND IF\n\n!Initialise the benchmark\nbenchComplete = .false.\n!Execute benchmark until target time is reached\nDO WHILE (benchComplete .NEQV. .true.)\n!Start timer\nCALL MPI_Barrier(comm, ierr)\nstartTime = MPI_Wtime()\n\nIF (benchmarkType == SCATTER) THEN !Scatter\n!Execute scatter for repsToDo repetitions\nCALL scatterKernel(repsToDo, dataSizeIter)\n\nELSE IF (benchmarkType == GATHER) THEN !Gather\n!Execute gather for repsToDo repetitions\nCALL gatherKernel(repsToDo, dataSizeIter)\nEND IF\n\n!Stop timer\nCALL MPI_Barrier(comm, ierr)\nfinishTime = MPI_Wtime()\ntotalTime = finishTime - startTime\n\n!Test if target time was reached\nif (myMPIRank==0) then \nbenchComplete = repTimeCheck(totalTime, repsToDo)\nend if\n!Ensure all procs have the same value of benchComplete\n!and repsToDo\ncall MPI_Bcast(benchComplete, 1, MPI_INTEGER, 0, comm, ierr)\ncall MPI_Bcast(repsToDo, 1, MPI_INTEGER, 0, comm, ierr)\n\nEND DO\n\n!Master process sets benchmark result for reporting\nIF (myMPIRank == 0) THEN\nCALL setReportParams(dataSizeIter,repsToDo,totalTime)\nCALL printReport()\nEND IF\n\n!Free allocated data\nCALL freeData(benchmarkType)\n\n!Double data size and loop again\ndataSizeIter = dataSizeIter * 2\n\nEND DO !End loop over data sizes\n\nEND SUBROUTINE scatterGather
|
26 | scatterKernel:Implements the scatter benchmark. The root process first scatters its send buffer to the other processes. Each thread under an MPI process then reads its portion of scatterRecvBuf.
| int scatterKernel(int totalReps, int dataSize){\n    int repIter, i;\n    int totalSendBufElems, sendCount, recvCount;\n    totalSendBufElems = numMPIprocs * numThreads * dataSize;\n    sendCount = dataSize * numThreads;\n    recvCount = sendCount;\n    for (repIter=0; repIter<totalReps; repIter++){\n    if (myMPIRank == SCATTERROOT){\n    for (i=0; i<totalSendBufElems; i++){\n    scatterSendBuf[i] = SCATTERSTARTVAL + i;\n    }\n    }\n    MPI_Scatter(scatterSendBuf, sendCount, MPI_INT,\n    scatterRecvBuf, recvCount, MPI_INT,\n    SCATTERROOT, comm);\n    #pragma omp parallel for default(none) \\\n    private(i) \\\n    shared(dataSize,recvCount,finalBuf,scatterRecvBuf) \\\n    schedule(static,dataSize)\n    for (i=0; i<recvCount; i++){\n    /* loop over all data in recv buffer */\n    finalBuf[i] = scatterRecvBuf[i];\n    }\n    } /* End of loop over reps */\n    return 0;\n}
| SUBROUTINE scatterKernel(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter, i\ninteger :: totalSendBufElems, sendCount, recvCount\n\n!Calculate totalSendBufElems\ntotalSendBufElems = numMPIprocs * numThreads * dataSize\n!Calculate sendCount\nsendCount = dataSize * numThreads\nrecvCount = sendCount\n\nDO repIter = 1, totalReps\n\n!Master process writes to scatterSendBuf\nIF (myMPIRank == SCATTERROOT) THEN\nDO i = 1, totalSendBufElems\nscatterSendBuf(i) = SCATTERSTARTVAL + i\nEND DO\nEND IF\n\n!Scatter the data to other processes\nCALL MPI_Scatter(scatterSendBuf, sendCount, MPI_INTEGER, &\nscatterRecvBuf, recvCount, MPI_INTEGER, &\nSCATTERROOT, comm, ierr)\n\n!Each thread now reads its portion of scatterRecvBuf\n!$OMP PARALLEL DO DEFAULT(NONE),&\n!$OMP PRIVATE(i),&\n!$OMP SHARED(dataSize,recvCount,finalBuf,scatterRecvBuf),&\n!$OMP SCHEDULE(STATIC,dataSize)\n\nDO i = 1, recvCount !looping over all data in recv buffer\nfinalBuf(i) = scatterRecvBuf(i)\nEND DO\n\n!$OMP END PARALLEL DO\n\nEND DO !End repetitions loop\n\nEND SUBROUTINE scatterKernel
|
27 | gatherKernel:Implements the gather benchmark. Each thread writes part of its buffer then all data is gathered to the master process.
| int gatherKernel(int totalReps, int dataSize){\n int repIter, i;\n int totalRecvBufElems, sendCount, recvCount;\n int startVal;\n\n totalRecvBufElems = dataSize * numThreads * numMPIprocs;\n sendCount = dataSize * numThreads;\n recvCount = sendCount;\n startVal = (myMPIRank * sendCount) + GATHERSTARTVAL;\n\n for (repIter=0; repIter<totalReps; repIter++){\n #pragma omp parallel for default(none) \\ \n private(i) \\ \n shared(gatherSendBuf,startVal,dataSize,sendCount) \\\n schedule(static,dataSize)\n for (i=0; i<sendCount; i++){\n gatherSendBuf[i] = startVal + i;\n }\n\n MPI_Gather(gatherSendBuf, sendCount, MPI_INT,\\\n gatherRecvBuf, recvCount, MPI_INT,\\\n GATHERROOT, comm);\n\n if (myMPIRank == GATHERROOT){\n for (i=0; i<totalRecvBufElems; i++){\n finalBuf[i] = gatherRecvBuf[i];\n }\n }\n }\n\n return 0;\n}",
| SUBROUTINE gatherKernel(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter, i\ninteger :: totalRecvBufElems\ninteger :: sendCount, recvCount\ninteger :: startVal\n\n!Calculate totalRecvBufElems\ntotalRecvBufElems = dataSize * numThreads * numMPIprocs\n!Each process calculates its send and recv count\nsendCount = dataSize * numThreads\nrecvCount = sendCount\n\n!Calculate startVal for each process\n!This is used to find the values to put in gatherSendBuf\nstartVal = (myMPIRank * sendCount) + GATHERSTARTVAL\n\nDO repIter = 1, totalReps\n\n!Each thread writes to its portion of gatherSendBuf\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(gatherSendBuf,startVal,dataSize,sendCount),&\n!$OMP SCHEDULE(STATIC,dataSize)\nDO i = 1, sendCount\ngatherSendBuf(i) = startVal + i\nEND DO\n!$OMP END PARALLEL DO\n\n!Gather the data to GATHERROOT\nCALL MPI_Gather(gatherSendBuf, sendCount, MPI_INTEGER, &\ngatherRecvBuf, recvCount, MPI_INTEGER, &\nGATHERROOT, comm, ierr)\n\n!GATHERROOT process then copies its received data to\n!finalBuf\nIF (myMPIRank == GATHERROOT) THEN\nDO i = 1, totalRecvBufElems\nfinalBuf(i) = gatherRecvBuf(i)\nEND DO\nEND IF\n\nEND DO !End of repetitions loop\n\nEND SUBROUTINE gatherKernel
|
28 | allocateScatterGatherData:Allocates memory for the main data arrays.
| int allocateScatterGatherData(int bufferSize, int benchmarkType){ \n if (benchmarkType == SCATTER){ \n if (myMPIRank == SCATTERROOT){ \n scatterSendBuf = (int *) malloc((bufferSize * numMPIprocs) * sizeof(int)); \n } \n scatterRecvBuf = (int *) malloc(bufferSize * sizeof(int)); \n finalBuf = (int *)malloc(bufferSize * sizeof(int)); \n } else if (benchmarkType == GATHER){ \n gatherSendBuf = (int *) malloc(bufferSize * sizeof(int)); \n if (myMPIRank == GATHERROOT){ \n gatherRecvBuf = (int *) malloc((bufferSize * numMPIprocs) * sizeof(int)); \n finalBuf = (int *) malloc((bufferSize * numMPIprocs) * sizeof(int)); \n } \n } \n return 0; \n}",
| SUBROUTINE allocateData(bufferSize, benchmarkType)\ninteger, intent(in) :: bufferSize, benchmarkType\n\nIF (benchmarkType == SCATTER) THEN !Allocate for scatter\n\n!scatterSendBuf is size (bufferSize * numMPIprocs)\nIF (myMPIRank == SCATTERROOT) THEN\nallocate(scatterSendBuf(bufferSize*numMPIprocs))\nEND IF\nallocate(scatterRecvBuf(bufferSize))\nallocate(finalBuf(bufferSize))\n\nELSE IF (benchmarkType == GATHER) THEN !Allocate for gather\n\nallocate(gatherSendBuf(bufferSize))\nIF (myMPIRank == GATHERROOT) THEN\nallocate(gatherRecvBuf(bufferSize*numMPIprocs))\nallocate(finalBuf(bufferSize*numMPIprocs))\nEND IF\n\nEND IF\n\nEND SUBROUTINE allocateData
|
29 | freeScatterGatherData:Free memory of main data arrays.
| int freeScatterGatherData(int benchmarkType){\n    if (benchmarkType == SCATTER){\n    if (myMPIRank == SCATTERROOT){\n    free(scatterSendBuf);\n    }\n    free(scatterRecvBuf);\n    free(finalBuf);\n    } else if (benchmarkType == GATHER){\n    free(gatherSendBuf);\n    if (myMPIRank == GATHERROOT){\n    free(gatherRecvBuf);\n    free(finalBuf);\n    }\n    }\n    return 0;\n}
| SUBROUTINE freeData(benchmarkType)\ninteger, intent(in) :: benchmarkType\n\nIF (benchmarkType == SCATTER) THEN\n\nIF (myMPIRank == SCATTERROOT) THEN\ndeallocate(scatterSendBuf)\nEND IF\ndeallocate(scatterRecvBuf)\ndeallocate(finalBuf)\n\nELSE IF (benchmarkType == GATHER) THEN\n\ndeallocate(gatherSendBuf)\nIF (myMPIRank == GATHERROOT) THEN\ndeallocate(gatherRecvBuf)\ndeallocate(finalBuf)\nEND IF\n\nEND IF\n\nEND SUBROUTINE freeData
|
30 | testScatterGather:Verifies that the scatter and gather benchmarks worked correctly.
| int testScatterGather(int sizeofBuffer, int benchmarkType){\n    int i, startVal;\n    int testFlag, reduceFlag;\n    int *testBuf;\n    testFlag = TRUE;\n    testBuf = (int *) malloc (sizeofBuffer * sizeof(int));\n    if (benchmarkType == SCATTER){\n    startVal = (myMPIRank * sizeofBuffer) + SCATTERSTARTVAL;\n    } else if (benchmarkType == GATHER){\n    startVal = GATHERSTARTVAL;\n    }\n    for (i=0; i<sizeofBuffer; i++){\n    testBuf[i] = startVal + i;\n    }\n    for (i=0; i<sizeofBuffer; i++){\n    if (finalBuf[i] != testBuf[i]){\n    testFlag = FALSE;\n    }\n    }\n    if (benchmarkType == SCATTER){\n    MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);\n    if (myMPIRank == 0){\n    setTestOutcome(reduceFlag);\n    }\n    } else if (benchmarkType == GATHER){\n    setTestOutcome(testFlag);\n    }\n    free(testBuf);\n    return 0;\n}
| SUBROUTINE testScatterGather(sizeofBuffer, benchmarkType)\ninteger, intent(in) :: sizeofBuffer, benchmarkType\ninteger :: i\ninteger :: startVal\nlogical :: testFlag, reduceFlag\n\n!initialise testFlag to true\ntestFlag = .true.\n\n!Allocate space for testBuf\nallocate(testBuf(sizeofBuffer))\n\nIF (benchmarkType == SCATTER) THEN\n!Find the start scatter value for each MPI process\nstartVal = (myMPIRank*sizeofBuffer) + SCATTERSTARTVAL\nELSE IF (benchmarkType == GATHER) THEN\n!startVal is GATHERSTARTVAL\nstartVal = GATHERSTARTVAL\nEND IF\n\n!Fill testBuf with correct values\nDO i = 1, sizeofBuffer\ntestBuf(i) = startVal + i\nEND DO\n\n!Compare each element of finalBuf with testBuf\nDO i = 1, sizeofBuffer\nIF (finalBuf(i) /= testBuf(i)) THEN\ntestFlag = .false.\nEND IF\nEND DO\n\n!For scatter reduce testFlag into master with\n!logical AND operator\nIF (benchmarkType == SCATTER) THEN\nCALL MPI_Reduce(testFlag, reduceFlag, 1, MPI_LOGICAL, &\nMPI_LAND, 0, comm, ierr)\n!Master then sets testOutcome using reduceFlag\nIF (myMPIRank == 0) THEN\nCALL setTestOutcome(reduceFlag)\nEND IF\nELSE IF (benchmarkType == GATHER) THEN\nCALL setTestOutcome(testFlag)\nEND IF\n\ndeallocate(testBuf)\n\nEND SUBROUTINE testScatterGather
|
31 | Main driver for mixed mode benchmark program:Reads benchmark input file. Initialises the parallel environment. Calls each benchmark.
| int main(int argc, char *argv[]){\n int supportFlag;\n char name[MAXSTRING];\n\n initParallelEnv();\n\n if (myMPIRank == 0){\n if (argc != 2){\n printf(\"ERROR Reading input file from command line.\\n\");\n printf(\"Usage: %s <filename>\", argv[0] );\n finaliseParallelEnv();\n exit(-1);\n }\n else{\n printHeader(numMPIprocs,numThreads,threadSupport);\n openFile(argv[1]);\n setupBenchmarkList();\n }\n }\n\n readBenchmarkParams();\n\n while (findBenchmarkNumber() != FINISHED){\n switch (benchmarkNumber){\n case 0:\n if (myMPIRank == 0){\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED);\n strcpy(name,\"Masteronly Pingpong\");\n setBenchName(name, benchmarkNumber, supportFlag);\n }\n pingPong(MASTERONLY);\n break;\n\n case 1:\n if (myMPIRank == 0){\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED);\n strcpy(name,\"Funnelled Pingpong\");\n setBenchName(name, benchmarkNumber, supportFlag);\n }\n pingPong(FUNNELLED);\n break;\n\n case 2:\n if (myMPIRank == 0){\n supportFlag = benchmarkSupport(MPI_THREAD_MULTIPLE);\n strcpy(name,\"Multiple Pingpong\");\n setBenchName(name, benchmarkNumber, supportFlag);\n }\n pingPong(MULTIPLE);\n break;\n\n case 3:\n if (myMPIRank == 0){\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED);\n strcpy(name,\"Masteronly Pingping\");\n setBenchName(name, benchmarkNumber, supportFlag);\n }\n pingPing(MASTERONLY);\n break;\n\n case 4:\n if (myMPIRank == 0){\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED);\n strcpy(name,\"Funnelled Pingping\");\n setBenchName(name, benchmarkNumber, supportFlag);\n }\n pingPing(FUNNELLED);\n break;\n\n case 5:\n if (myMPIRank == 0){\n supportFlag = benchmarkSupport(MPI_THREAD_MULTIPLE);\n strcpy(name,\"Multiple Pingping\");\n setBenchName(name, benchmarkNumber, supportFlag);\n }\n pingPing(M",
| PROGRAM mixedModeBenchmark\n use pt_to_pt_pingpong\n use pt_to_pt_pingping\n use pt_to_pt_multiPingPong\n use pt_to_pt_multiPingPing\n use pt_to_pt_haloExchange\n use collective_barrier\n use collective_reduction\n use collective_broadcast\n use collective_scatterGather\n use collective_alltoall\n use parallelEnvironment\n use benchmarkSetup\n use output\n\n implicit none\n\n !String for setting benchmark name for output\n character (len = MAXSTRING) :: name\n\n !Flag to check if benchmark is supported \n logical :: supportFlag\n\n !Initialise the parallel execution environment\n CALL initParallelEnv()\n\n !Master MPI process.....\n IF (myMPIRank == 0) THEN\n !1) Ptints header and parallel environment info.\n CALL printHeader(numMPIprocs,numThreads,threadSupport)\n !2) Opens the input file\n CALL openFile()\n !3) Setup the list of all possible benchmarks\n CALL setupBenchmarkList()\n END IF\n \n !Master reads parameters from input file and\n !broadcasts them to the other processes.\n CALL readBenchmarkParams()\n \n !Execute bencmarks by reading list from\n !input file.\n CALL findBenchmarkNumber()\n DO WHILE(benchmarkNumber /= FINISHED)\n \n benchmarks : SELECT CASE (benchmarkNumber)\n !Masteronly Pingpong\n CASE(1) \n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Masteronly Pingpong"\n CALL setBenchName(name, benchmarkNumber, supportFlag) \n END IF\n !Execute benchmark\n CALL pingPong(MASTERONLY)\n \n !Funnelled Pingpong \n CASE(2)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Funnelled Pingpong"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n END IF\n !Execute benchmark\n CALL pingPong(FUNNELLED)\n\n !Multiple Pingpong\n CASE(3) \n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_MULTIPLE)\n name = "Multiple Pingpong"\n CALL setBenchName(name, benchmarkNumber, supportFlag) \n END IF\n !Execute benchmark \n CALL pingPong(MULTIPLE)\n\n !Masteronly Pingping\n CASE(4)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Masteronly Pingping"\n CALL setBenchName(name, benchmarkNumber, supportFlag) \n END IF\n !Execute benchmark\n CALL pingPing(MASTERONLY)\n\n !Funnelled Pingping\n CASE(5)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Funnelled Pingping"\n CALL setBenchName(name, benchmarkNumber, supportFlag) \n END IF\n !Execute benchmark\n CALL pingPing(FUNNELLED)\n\n !Multiple Pingping\n CASE(6)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_MULTIPLE)\n name = "Multiple Pingping"\n CALL setBenchName(name, benchmarkNumber, supportFlag) \n END IF\n !Execute benchmark\n CALL pingPing(MULTIPLE)\n\n !Masteronly Haloexchange\n CASE(7)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Masteronly Haloexchange"\n CALL setBenchName(name, benchmarkNumber, supportFlag) \n CALL printBenchHeader()\n END IF\n !Execute benchmark\n CALL haloExchange(MASTERONLY)\n\n !Funnelled Haloexchange \n CASE(8) \n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Funnelled Haloexchange"\n CALL setBenchName(name, benchmarkNumber, supportFlag) \n CALL printBenchHeader()\n END IF\n !Execute benchmark\n CALL haloExchange(FUNNELLED)\n\n !Multiple Haloexchange\n CASE(9) \n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = 
benchmarkSupport(MPI_THREAD_MULTIPLE)\n name = "Multiple Haloexchange"\n CALL setBenchName(name, benchmarkNumber, supportFlag) \n CALL printBenchHeader()\n END IF\n !Execute benchmark\n CALL haloExchange(MULTIPLE)\n\n !Masteronly Multipingpong\n CASE(10)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Masteronly MultiPingpong"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n END IF\n !Execute benchmark\n CALL multiPingPong(MASTERONLY)\n\n !Funnelled Multipingpong\n CASE(11)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Funnelled MultiPingpong"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n END IF\n !Execute benchmark\n CALL multiPingPong(FUNNELLED) \n\n !Multiple Multipingpong\n CASE(12)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_MULTIPLE)\n name = "Multiple MultiPingpong"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n END IF\n !Execute benchmark\n CALL multiPingPong(MULTIPLE)\n\n !Masteronly Multipingping\n CASE(13)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Masteronly MultiPingping"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n END IF\n !Execute benchmark\n CALL multiPingPing(MASTERONLY)\n\n !Funnelled Multipingping\n CASE(14)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Funnelled MultiPingping"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n END IF\n !Execute benchmark\n CALL multiPingPing(FUNNELLED)\n\n !Multiple MultiPingping\n CASE(15)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_MULTIPLE)\n name = "Multiple MultiPingping"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n END IF\n !Execute benchmark\n CALL multiPingPing(MULTIPLE)\n\n !Barrier \n CASE(16) \n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Barrier"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n CALL printBenchHeader()\n END IF\n !Execute benchmark\n CALL barrierDriver()\n\n !Reduce \n CASE(17)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Reduce"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n CALL printBenchHeader()\n END IF\n !Execute benchmark\n CALL reduction(REDUCE)\n\n !All-reduce \n CASE(18)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "All Reduce"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n CALL printBenchHeader()\n END IF\n !Execute benchmark\n CALL reduction(ALLREDUCE)\n\n !Broadcast \n CASE(19) \n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Broadcast"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n CALL printBenchHeader()\n END IF\n !Execute benchmark\n CALL broadcast()\n \n !Scatter \n CASE(20)\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Scatter"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n CALL printBenchHeader()\n END IF\n !Execute banehmark\n CALL scatterGather(SCATTER)\n\n !Gather \n CASE(21)\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "Gather"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n CALL printBenchHeader()\n END IF\n !Execute benchmark\n CALL 
scatterGather(GATHER)\n\n !All to all\n CASE(22)\n !Set name\n IF (myMPIRank == 0) THEN\n supportFlag = benchmarkSupport(MPI_THREAD_FUNNELED)\n name = "All to all"\n CALL setBenchName(name, benchmarkNumber, supportFlag)\n CALL printBenchHeader()\n END IF\n !Execute benchmark\n CALL alltoall()\n \n !Default..file read error\n CASE default\n !..error message will already be printed out.\n\n END SELECT benchmarks\n \n !Read next benchmark from file\n CALL findBenchmarkNumber()\n END DO\n\n !Finalise programming environment\n CALL finaliseParallelEnv()\n !Master process closes file\n IF (myMPIRank == 0) THEN\n CALL closeFile()\n END IF\nEND PROGRAM mixedModeBenchmark
|
32 | printHeader:Prints a header in the output.
| int printHeader(){\n char string[MAXSTRING];\n threadSupportToString(benchReport.supportLevel, string);\n printf(\"----------------------------------------------\\n\");\n printf(\"Mixed mode MPI/OpenMP benchmark suite v1.0\\n\");\n printf(\"----------------------------------------------\\n\");\n printf(\"Number of MPI processes = %d\\n\", benchReport.numMPIprocs);\n printf(\"Number of OpenMP threads = %d\\n\", benchReport.numThreads);\n printf(\"Thread support = %s\\n\", string);\n printf(\"\\n\");\n return 0;\n}",
| SUBROUTINE printHeader(numProcs, numThreads, threadSupport)\n integer, intent(in) :: numProcs, numThreads, threadSupport\n character (len = MAXSTRING) :: string\n\n !Convert threadSupport to a string for output\n CALL threadSupportToString(threadSupport, string)\n\n write(*,*) "----------------------------------------------"\n write(*,*) " Mixed mode MPI/OpenMP benchmark suite v1.0 "\n write(*,*) "----------------------------------------------"\n write(*,*) " Number of MPI processes =", numProcs\n write(*,*) " Number of OpenMP threads =", numThreads\n write(*,*) " Thread support = ", trim(string)\n \n END SUBROUTINE printHeader
|
33 | setBenchName:Sets the benchName, benchNumber and whether the benchmark is supported.
| int setBenchName(char *name, int number, int support){ \n strcpy(benchReport.benchName,name); \n benchReport.benchNumber = number; \n benchReport.supported = support; \n printBenchName(); \n return 0; \n}",
| SUBROUTINE setBenchName(name,number,support)\n character (len = MAXSTRING), intent (in) :: name\n integer, intent(in) :: number\n logical, intent(in) :: support\n\n benchReport%benchName = name\n benchReport%benchNumber = number\n benchReport%supported = support\n\n CALL printBenchName()\n\n END SUBROUTINE setBenchName
|
34 | printBenchName:Prints the benchmark name and a warning if the implementation does not support the benchmark.
| int printBenchName(){\n\tprintf("--------------------------------------------\n");\n\tprintf("# %s\n", benchReport.benchName);\n\tprintf("--------------------------------------------\n");\n\n\tif (benchReport.supported == FALSE){\n\t\tprintf("WARNING: Implementation does not support benchmark.\n");\n\t}\n\n\treturn 0;\n}
| SUBROUTINE printBenchName()\n write(*,*) "--------------------------------------------"\n write(*,*) "# ", benchReport%benchName\n write(*,*) "--------------------------------------------"\n\n !print warning if benchmark not supported\n IF (benchReport%supported .EQV. .false.) THEN\n write(*,*) "WARNING: Implementation does not ",&\n "support benchmark"\n END IF\n \n !Flush output buffer\n !CALL flush(6)\n\n END SUBROUTINE printBenchName
|
35 | printNodeReport:For the pingpong and pingping benchmarks, prints out whether the two MPI processes are on the same node.
| int printNodeReport(int sameNode, int rankA, int rankB){\n\tif (sameNode == TRUE){\n\t\tprintf("Intra node benchmark between process %d and process %d\n",rankA,rankB);\n\t}\n\telse if (sameNode == FALSE){\n\t\tprintf("Inter node benchmark between process %d and process %d\n",rankA,rankB);\n\t}\n\n\treturn 0;\n}
| SUBROUTINE printNodeReport(sameNode,rankA,rankB)\n integer, intent(in) :: rankA, rankB\n logical, intent(in) :: sameNode\n IF (sameNode .EQV. .true.) THEN\n write(*,*) "Intra node benchmark between process",rankA, "and process", rankB \n ELSE IF (sameNode .EQV. .false.) THEN\n write(*,*) "Inter node benchmark between process",rankA, "and process", rankB\n END IF\n END SUBROUTINE printNodeReport
|
36 | printBenchHeader:Prints the column headings for the benchmark report.
| int printBenchHeader(){\n\tprintf(" Data Size Msg Size (bytes) No. Reps ");\n\tprintf("Time (sec) Time/Rep (s) Test\n");\n\n\tprintf("----------- ------------------ ---------- ");\n\tprintf("------------ -------------- ------\n");\n\n\treturn 0;\n}
| SUBROUTINE printBenchHeader()\n\n write(*,fmt="(2x,a9,5x,a16,5x,a8,5x,a10,5x,a12,5x,a4)")&\n "Data Size","Msg Size (bytes)","No. Reps",&\n "Time (sec)","Time/Rep (s)","Test"\n write(*,fmt="(1x,a11,3x,a18,3x,a10,3x,a12,3x,a14,3x,a6)")&\n "-----------","------------------","----------",&\n "------------","--------------","------"\n\n END SUBROUTINE printBenchHeader
|
37 | setTestOutcome:Sets benchReport's testOutcome element. Called in test routine of each benchmark.
| int setTestOutcome(int outcome){\n if (outcome == TRUE){\n strcpy(benchReport.testOutcome,\"Pass\");\n }\\n\n else if (outcome == FALSE){\n strcpy(benchReport.testOutcome,\"Fail\");\n }\\n\n return 0;\n}",
| SUBROUTINE setTestOutcome(outcome)\n logical, intent(in) :: outcome\n\n benchReport%testOutcome = outcome\n\n END SUBROUTINE setTestOutcome
|
38 | setReportParams:Sets the numReps, benchTime and message size in bytes for a given data size.
| int setReportParams(int size, int reps, double time){\n benchReport.dataSize = size; \n benchReport.numReps = reps; \n benchReport.benchTime = time; \n benchReport.timePerRep = time / reps; \n \n if (benchReport.benchNumber <= LAST_PT_PT_ID){\n /* dataSize x numThreads x sizeof(int) */\n benchReport.bytes = size * benchReport.numThreads * sizeInteger; \n } else if (benchReport.benchNumber <= LASTMULTIPPID){\n benchReport.bytes = size * benchReport.numThreads * sizeInteger * localCommSize; \n } else {\n benchReport.bytes = size * sizeInteger; \n } \n \n return 0; \n}",
| SUBROUTINE setReportParams(size,reps,time)\n integer, intent(in) :: size, reps\n DOUBLE PRECISION, intent(in) :: time\n\n benchReport%dataSize = size\n benchReport%numReps = reps\n benchReport%benchTime = time\n !Calculate and set time for 1 rep\n benchReport%timePerRep = time/reps\n !Calculate the size of message in bytes\n IF (benchReport%benchNumber <= LAST_PT_PT_ID) THEN\n !If point to point benchmark size of msg is \n !dataSize * numThreads * sizeof(integer)\n benchReport%bytes = size * numThreads * sizeInteger\n ELSE IF (benchReport%benchNumber <= LASTMULTIPPID) THEN\n !If multi point to point benchmark size of msg is \n !the size of the message leaving each node.\n benchReport%bytes = size * numThreads * sizeInteger * localCommSize\n ELSE\n benchReport%bytes = size * sizeInteger\n END IF\n\n END SUBROUTINE setReportParams
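As a worked example of the bytes calculation: a point-to-point run with dataSize = 1024, 4 OpenMP threads and 4-byte integers reports 1024 × 4 × 4 = 16384 bytes. The sketch below reproduces the three cases; the two threshold constants are assumed placeholders for the benchmark ID ranges, not the suite's real values:

```c
/* Worked example of the message-size bookkeeping in setReportParams. */
#include <stdio.h>

#define LAST_PT_PT_ID 9     /* assumed: last point-to-point benchmark ID */
#define LASTMULTIPPID 15    /* assumed: last multi-pingpong/pingping ID  */

long reportBytes(int benchNumber, int dataSize, int numThreads,
                 int sizeInteger, int localCommSize){
    if (benchNumber <= LAST_PT_PT_ID)        /* pingpong, pingping, haloexchange */
        return (long)dataSize * numThreads * sizeInteger;
    else if (benchNumber <= LASTMULTIPPID)   /* multi versions: message per node */
        return (long)dataSize * numThreads * sizeInteger * localCommSize;
    else                                     /* collectives */
        return (long)dataSize * sizeInteger;
}

int main(void){
    /* dataSize=1024 ints, 4 threads, 4-byte ints -> 16384 bytes point-to-point */
    printf("%ld bytes\n", reportBytes(1, 1024, 4, 4, 2));
    return 0;
}
```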
|
39 | printMultiProcInfo:This prints the comm world ranks and processor names for each pair of processes in the multi-pingpong or multi-pingping benchmarks.
| int printMultiProcInfo(int printNode, int pairWorldRank, char *pairProcName){ \n if (crossCommRank == printNode){ \n printf(\"MPI process %d on %s \", myMPIRank,myProcName); \n printf(\"communicating with MPI process %d on %s\\n\", pairWorldRank,pairProcName); \n } \n return 0; \n}",
| SUBROUTINE printMultiProcInfo(printNode, pairWorldRank, pairProcName)\n integer, intent(in) :: printNode, pairWorldRank\n character (len = MPI_MAX_PROCESSOR_NAME) :: pairProcName\n IF (crossCommRank == printNode) THEN\n print *, "MPI process ", myMPIRank, "on ", trim(myProcName), &\n " communicating with MPI process ", pairWorldRank, &\n "on ", trim(pairProcName)\n END IF\n END SUBROUTINE printMultiProcInfo
|
40 | printReport:Prints out a row of results after each data size iteration.
| int printReport(){\n printf(\"d %d\\t\\t%d\\t\\t %d\\t\\t%lf\\t%lf\\t%s\\n\", \\\n benchReport.dataSize, benchReport.bytes, benchReport.numReps, \\\n benchReport.benchTime, benchReport.timePerRep, benchReport.testOutcome);\n return 0;\n}",
| SUBROUTINE printReport()\n character (len =4) testString\n IF(benchReport%testOutcome .EQV. .true.) THEN\n testString = "Pass"\n ELSE\n testString = "Fail"\n END IF\n write(*,fmt="('d',i10,5x,i16,5x,i8,5x,f10.6,4x,f14.9,5x,a4)")&\n benchReport%dataSize, benchReport%bytes,&\n benchReport%numReps,benchReport%benchTime,&\n benchReport%timePerRep,testString\n END SUBROUTINE printReport
|
41 | printBalanceError:Prints an error if there isn't the same number of MPI processes in the nodes selected for the multi-pingpong or multi-pingping benchmarks.
| int printBalanceError(){ \n printf(\"\\nERROR: Nodes selected for this benchmark do not\\n\"); \n printf(\"have same number of MPI processes per node.\\n\"); \n printf(\"Skipping benchmark...\\n\"); \n return 0; \n}",
| SUBROUTINE printBalanceError()\n print *, ""\n print *, "ERROR: Nodes selected for this benchmark do not ", &\n "have same number of MPI processes per node. ", &\n "Skipping benchmark..."\n print *, ""\n END SUBROUTINE printBalanceError
|
42 | threadSupportToString:Converts the threadSupport integer variable to a string for output.
| int threadSupportToString(int threadSupport, char *string){\\n\n if (threadSupport == MPI_THREAD_SINGLE){\\n\n strcpy(string,\"MPI_THREAD_SINGLE\");\\n\n } else if (threadSupport == MPI_THREAD_FUNNELED){\\n\n strcpy(string,\"MPI_THREAD_FUNNELED\");\\n\n } else if (threadSupport == MPI_THREAD_SERIALIZED){\\n\n strcpy(string,\"MPI_THREAD_SERIALIZED\");\\n\n } else if (threadSupport == MPI_THREAD_MULTIPLE){\\n\n strcpy(string,\"MPI_THREAD_MULTIPLE\");\\n\n }\\n\n return 0;\\n\n}",
| SUBROUTINE threadSupportToString(threadSupport, string)\n integer, intent(in) :: threadSupport\n character (len = MAXSTRING), intent(out) :: string\n\n IF (threadSupport == MPI_THREAD_SINGLE) THEN\n string = "MPI_THREAD_SINGLE"\n ELSE IF (threadSupport == MPI_THREAD_FUNNELED) THEN\n string = "MPI_THREAD_FUNNELED"\n ELSE IF (threadSupport == MPI_THREAD_SERIALIZED) THEN\n string = "MPI_THREAD_SERIALIZED"\n ELSE IF (threadSupport == MPI_THREAD_MULTIPLE) THEN\n string = "MPI_THREAD_MULTIPLE"\n END IF\n\n END SUBROUTINE threadSupportToString
|
43 | initParallelEnv:Initialises the MPI and OpenMP environments. Finds the total number of MPI processes and OpenMP threads. Also finds the ID of each MPI process and OpenMP thread.
| int initParallelEnv() { \n MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &threadSupport); \n comm = MPI_COMM_WORLD; \n MPI_Comm_size(comm, &numMPIprocs); \n MPI_Comm_rank(comm, &myMPIRank); \n sizeInteger = sizeof(int); \n MPI_Get_processor_name(myProcName, &procNameLen); \n\n /* Use the processor name to create a communicator across node boundaries. */ \n setupCommunicators(); \n\n #pragma omp parallel default(none) \\ \n shared(numThreads,globalIDarray,myMPIRank) \n { \n numThreads = omp_get_num_threads(); \n myThreadID = omp_get_thread_num(); \n\n #pragma omp single \n { \n globalIDarray = (int *)malloc(numThreads * sizeof(int)); \n } \n\n globalIDarray[myThreadID] = (myMPIRank * numThreads) + myThreadID; \n } \n\n setParallelInfo(numMPIprocs,threadSupport,numThreads); \n\n return 0; \n}",
| SUBROUTINE initParallelEnv()\n\n !setup MPI programming environment \n CALL MPI_Init_thread(MPI_THREAD_MULTIPLE,threadSupport,ierr)\n\n comm = MPI_COMM_WORLD\n CALL MPI_Comm_size(comm, numMPIprocs, ierr)\n CALL MPI_Comm_rank(comm, myMPIRank, ierr)\n\n !Find the number of bytes for an int (numMPIprocs)\n CALL MPI_Type_size(MPI_INTEGER, sizeInteger, ierr)\n\n !Find the processor name of each MPI process.\n CALL MPI_Get_processor_name(myProcName, procNameLen, ierr)\n\n !Use processor name to create a communicator across \n !node boundaries.\n CALL setupCommunicators()\n\n !setup OpenMP programming environment\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP SHARED(numThreads,globalIDarray,myMPIRank)\n\n numThreads = omp_get_num_threads()\n myThreadID = omp_get_thread_num() + 1 !threadID from 1 to totalThreads\n\n !Allocate space for globalIDarray\n!$OMP SINGLE\n allocate(globalIDarray(numThreads))\n!$OMP END SINGLE\n\n !Calculate the globalID for each thread\n globalIDarray(myThreadID) = (myMPIRank * numThreads) + myThreadID\n\n!$OMP END PARALLEL\n \n END SUBROUTINE initParallelEnv
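A minimal, self-contained sketch of the same initialisation pattern (request MPI_THREAD_MULTIPLE, record the provided level, and give every OpenMP thread a global ID of rank × numThreads + threadID). It is an illustration, not the suite's routine:

```c
#include <mpi.h>
#include <omp.h>
#include <stdio.h>

int main(int argc, char *argv[]){
    int provided, rank, nprocs;

    /* Ask for the highest thread level; 'provided' records what we actually got. */
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int nthreads = omp_get_num_threads();
        int globalID = (rank * nthreads) + tid;   /* unique across all ranks */
        #pragma omp critical
        printf("rank %d/%d thread %d -> globalID %d (provided level %d)\n",
               rank, nprocs, tid, globalID, provided);
    }

    MPI_Finalize();
    return 0;
}
```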
|
44 | finaliseParallelEnv:Closes the MPI programming environment.
| int finaliseParallelEnv(){ \n MPI_Finalize(); \n free(globalIDarray); \n return 0; \n}",
| SUBROUTINE finaliseParallelEnv()\n\n !finalise the MPI programming environment\n CALL MPI_Finalize(ierr)\n !free the space created for globalIDarray...\n deallocate(globalIDarray)\n \n END SUBROUTINE finaliseParallelEnv
|
45 | findRank:Finds the MPI ranks which will take part in the pingping or pingpong benchmarks based on the numbers read from the input file.
| int findRank(int rankIn){\n int CalcRank;\n\n if (rankIn < 0){\n CalcRank = numMPIprocs + rankIn;\n } else{\n CalcRank = rankIn;\n }\n\n if (CalcRank > (numMPIprocs-1)){\n printf(\"Warning: Rank input greater than total process count.\\n\");\n printf(\"Using Rank = %d\\n\", numMPIprocs-1);\n CalcRank = numMPIprocs - 1;\n } else if(CalcRank < 0){\n printf(\"Warning: MPI process offset greater than total process count.\\n\");\n printf(\"Using Rank = 0\\n\");\n CalcRank = 0;\n }\n\n return CalcRank;\n}",
| FUNCTION findRank(rankIn)\n integer, intent(in) :: rankIn\n integer :: findRank\n\n !Figure out actual MPI rank\n IF (rankIn < 0) THEN\n findRank = numMPIprocs + rankIn\n ELSE\n findRank = rankIn\n END IF\n\n !Check if findRank is too big or still -ve\n IF (findRank > (numMPIprocs-1)) THEN\n !write(*,*) "Warning: Rank input greater than total",&\n ! "process count. Using Rank = ", numMPIprocs-1\n findRank = numMPIprocs - 1\n ELSE IF (findRank < 0) THEN\n !write(*,*) "Warning: MPI process offset greater than",&\n ! "total process count. Using Rank = 0"\n findRank = 0\n END IF\n\n END FUNCTION findRank
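The rank-resolution rule is easy to check in isolation: a negative input counts back from the last rank, and out-of-range inputs are clamped. A small standalone C sketch, assuming 8 processes in the example output:

```c
/* Standalone sketch of the rank-resolution rule used for the input-file ranks. */
#include <stdio.h>

int resolveRank(int rankIn, int numMPIprocs){
    int rank = (rankIn < 0) ? numMPIprocs + rankIn : rankIn;  /* negative = offset from end */
    if (rank > numMPIprocs - 1) rank = numMPIprocs - 1;       /* too big: use last rank */
    if (rank < 0) rank = 0;                                   /* still negative: use rank 0 */
    return rank;
}

int main(void){
    /* with 8 processes: -1 -> 7, 3 -> 3, 20 -> 7, -20 -> 0 */
    printf("%d %d %d %d\n",
           resolveRank(-1, 8), resolveRank(3, 8),
           resolveRank(20, 8), resolveRank(-20, 8));
    return 0;
}
```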
|
46 | findNeighbourRanks:This creates a cartesian topology and finds the left and right neighbours of each process.
| int findNeighbours(){\n\tint dims[1];\n\tint periods[1];\n\tint reorder;\n\n\tdims[0] = 0;\n\tMPI_Dims_create(numMPIprocs, 1, dims);\n\n\tperiods[0] = TRUE;\n\treorder = FALSE;\n\n\tMPI_Cart_create(comm, 1, dims, periods, reorder, &commCart);\n\n\tMPI_Cart_shift(commCart, 0, 1, &leftNeighbour, &rightNeighbour);\n\n\treturn 0;\n}
| SUBROUTINE findNeighbourRanks()\n integer :: dims(1) !dims array for MPI_Dims_Create\n logical, parameter :: PERIODS(1) = (/.true./), REORDER = .false.\n\n !find a good process distribution\n dims = 0 !zero so that dims_create tries to rearrange\n CALL MPI_Dims_Create(numMPIProcs,1,dims,ierr)\n\n !Create the cartesian topology\n CALL MPI_Cart_Create(comm,1,dims,PERIODS,REORDER, &\n commCart, ierr)\n\n !Find the ranks of left and right neighbour\n CALL MPI_Cart_Shift(commCart, 0, 1, leftNeighbour, &\n rightNeighbour, ierr)\n \n END SUBROUTINE findNeighbourRanks
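A runnable C counterpart of the same idea: build a 1-D periodic Cartesian topology and query the left/right neighbours with MPI_Cart_shift. This is a sketch for illustration, not the suite's code:

```c
/* Minimal 1-D periodic Cartesian topology: each rank learns its left and
 * right neighbour, exactly the structure the haloexchange benchmark needs. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[]){
    int rank, nprocs, left, right;
    int dims[1] = {0};          /* 0 lets MPI_Dims_create choose the layout */
    int periods[1] = {1};       /* periodic: rank 0 and rank n-1 are neighbours */
    MPI_Comm cartComm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    MPI_Dims_create(nprocs, 1, dims);
    MPI_Cart_create(MPI_COMM_WORLD, 1, dims, periods, 0, &cartComm);
    MPI_Cart_shift(cartComm, 0, 1, &left, &right);

    printf("rank %d: left neighbour %d, right neighbour %d\n", rank, left, right);

    MPI_Comm_free(&cartComm);
    MPI_Finalize();
    return 0;
}
```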
|
47 | benchmarkSupport:This function compares the level of thread support needed by a particular benchmark with the level provided by the implementation.
| int benchmarkSupport(int required){\n int benchSupport;\n if (required <= threadSupport){\n benchSupport = TRUE;\n } else {\n benchSupport = FALSE;\n }\n return benchSupport;\n}",
| FUNCTION benchmarkSupport(required)\n integer, intent(in) :: required\n logical :: benchmarkSupport\n\n IF (required <= threadSupport) THEN\n benchmarkSupport = .true.\n ELSE\n benchmarkSupport = .false.\n END IF\n\n END FUNCTION benchmarkSupport
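Because the MPI standard defines the thread-level constants as monotonically increasing (MPI_THREAD_SINGLE < MPI_THREAD_FUNNELED < MPI_THREAD_SERIALIZED < MPI_THREAD_MULTIPLE), a single comparison against the provided level is enough. A minimal sketch:

```c
/* The ordered thread-level constants let a simple comparison decide whether
 * a benchmark that needs a given level can run under the provided support. */
#include <mpi.h>
#include <stdio.h>

static int benchSupported(int required, int provided){
    return required <= provided;
}

int main(int argc, char *argv[]){
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);

    printf("funnelled benchmarks supported: %s\n",
           benchSupported(MPI_THREAD_FUNNELED, provided) ? "yes" : "no");
    printf("multiple benchmarks supported:  %s\n",
           benchSupported(MPI_THREAD_MULTIPLE, provided) ? "yes" : "no");

    MPI_Finalize();
    return 0;
}
```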
|
48 | compareProcNames:Compares the processor names of two processes to check whether they are on the same node.
| int compareProcNames(int rankA, int rankB){ \\n\n int sameNode; \\n\n char recvProcName[MPI_MAX_PROCESSOR_NAME]; \\n\n if (myMPIRank == rankB){ \\n\n MPI_Send(myProcName, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, rankA, TAG, comm); \\n\n } \\n\n else if (myMPIRank == rankA){ \\n\n MPI_Recv(recvProcName, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, rankB, TAG, comm, &status); \\n\n if (strcmp(myProcName,recvProcName) == 0){ \\n\n sameNode = TRUE; \\n\n } \\n\n else{ \\n\n sameNode = FALSE; \\n\n } \\n\n } \\n\n MPI_Bcast(&sameNode, 1, MPI_INT, rankA, comm); \\n\n return sameNode; \\n\n}",
| FUNCTION compareProcNames(rankA, rankB)\n integer, intent(in) :: rankA, rankB\n logical :: compareProcNames\n character (len = MPI_MAX_PROCESSOR_NAME) :: recvProcName\n\n !Rank B sends procName to Rank A\n IF (myMPIRank == rankB) THEN\n CALL MPI_Send(myProcName, MPI_MAX_PROCESSOR_NAME, &\n MPI_CHARACTER, rankA, tag, comm, ierr)\n ELSE IF (myMPIRank == rankA) THEN\n CALL MPI_Recv(recvProcName, MPI_MAX_PROCESSOR_NAME, &\n MPI_CHARACTER, rankB, tag, comm, status, ierr)\n !Rank A compares the two processor names\n IF (myProcName == recvProcName) THEN\n compareProcNames = .true.\n ELSE\n compareProcNames = .false.\n END IF\n END IF\n\n !Rank A then broadcasts its compareProcNames value to \n !the other processes\n CALL MPI_Bcast(compareProcNames, 1, MPI_LOGICAL, rankA, &\n comm, ierr)\n\n END FUNCTION compareProcNames
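The same-node test boils down to exchanging MPI_Get_processor_name strings and comparing them. A hedged, standalone sketch between ranks 0 and 1 (run with at least two processes):

```c
/* Sketch of the same-node test between ranks 0 and 1: exchange processor
 * names, compare on rank 0, broadcast the answer. */
#include <mpi.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char *argv[]){
    int rank, len, sameNode = 0;
    char myName[MPI_MAX_PROCESSOR_NAME], otherName[MPI_MAX_PROCESSOR_NAME];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Get_processor_name(myName, &len);

    if (rank == 1){
        MPI_Send(myName, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
    } else if (rank == 0){
        MPI_Recv(otherName, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 1, 0,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        sameNode = (strcmp(myName, otherName) == 0);
    }
    /* rank 0 tells every process the answer */
    MPI_Bcast(&sameNode, 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0)
        printf("%s node benchmark\n", sameNode ? "Intra" : "Inter");

    MPI_Finalize();
    return 0;
}
```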
|
49 | setupCommunicators:This creates two new communicators. The first gives a local communicator for processes on the same node. The second uses the local rank to give a communicator across node boundaries. e.g. for 16 nodes each with 2 processors, this routine will give 16 local communicators of size 2 and 2 communicators of size 16 across nodes.
| int setupCommunicators(){\n int procHash;\n\n procHash = procNameToHash();\n\n /* local communicator. */\n MPI_Comm_split(comm, procHash, 0, &localComm);\n MPI_Comm_rank(localComm, &localCommRank);\n MPI_Comm_size(localComm, &localCommSize);\n\n MPI_Comm_split(comm, localCommRank, 0, &crossComm);\n MPI_Comm_rank(crossComm, &crossCommRank);\n\n return 0;\n}\\n",
| SUBROUTINE setupCommunicators()\n integer :: procHash\n \n !Get hash based on processor name\n procHash = procNameToHash()\n \n !Comm_split using procHash as colour to get \n !local communicator.\n CALL MPI_Comm_split(comm, procHash, 0, localComm, ierr)\n \n !Find ranks of processes in localComm\n CALL MPI_Comm_rank(localComm, localCommRank, ierr)\n \n !Find the size of localComm (for use in calculating multi datasize)\n CALL MPI_Comm_size(localComm, localCommSize, ierr)\n \n !Use localRank as colour to get communicator across nodes.\n CALL MPI_Comm_split(comm, localCommRank, 0, crossComm, ierr)\n \n !Find ranks of processes in crossComm\n CALL MPI_Comm_rank(crossComm, crossCommRank, ierr)\n \n END SUBROUTINE setupCommunicators
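A minimal sketch of the two-level split: the first MPI_Comm_split is keyed on a per-node colour (derived here, as in the suite, from a hash of the processor name, but kept non-negative as MPI_Comm_split requires), and the second is keyed on the local rank so that it cuts across nodes:

```c
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[]){
    int worldRank, localRank, localSize, crossRank;
    int nameLen, i, colour;
    unsigned int hash = 0;
    char procName[MPI_MAX_PROCESSOR_NAME];
    MPI_Comm localComm, crossComm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
    MPI_Get_processor_name(procName, &nameLen);

    /* Same node -> same name -> same colour -> same localComm */
    for (i = 0; i < nameLen; i++) hash = (7u * hash) + (unsigned char)procName[i];
    colour = (int)(hash & 0x7fffffffu);   /* MPI_Comm_split needs a non-negative colour */

    MPI_Comm_split(MPI_COMM_WORLD, colour, 0, &localComm);
    MPI_Comm_rank(localComm, &localRank);
    MPI_Comm_size(localComm, &localSize);

    /* Processes holding the same position within their node form a crossComm */
    MPI_Comm_split(MPI_COMM_WORLD, localRank, 0, &crossComm);
    MPI_Comm_rank(crossComm, &crossRank);

    printf("world rank %d: local rank %d of %d, cross rank %d\n",
           worldRank, localRank, localSize, crossRank);

    MPI_Comm_free(&localComm);
    MPI_Comm_free(&crossComm);
    MPI_Finalize();
    return 0;
}
```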
|
50 | procNameToHash:Creates an integer hash for each process. Each process on the same node will have the same hash value.
| int procNameToHash(){\n int procHash,i;\n procHash = 0;\n for (i=0; i<procNameLen; i++){\n procHash = (7 * procHash) + (int)(myProcName[i]);\n }\n return procHash;\n}\\n",
| FUNCTION procNameToHash()\n integer :: procNameToHash\n integer :: i\n\n !Initialise hash to 0\n procNameToHash = 0\n\n DO i = 1, procNameLen\n \n procNameToHash = 7 * procNameToHash + &\n ICHAR(myProcName(i:i))\n END DO\n \n END FUNCTION procNameToHash
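The hash is a simple base-7 polynomial over the characters of the processor name, so identical names always map to the same value. A standalone demo (unsigned arithmetic is used here to avoid signed overflow; the suite itself uses a plain integer):

```c
/* Standalone demo of the rolling hash that gives every process on the same
 * node the same value. */
#include <stdio.h>
#include <string.h>

unsigned int nameToHash(const char *name){
    unsigned int hash = 0;
    size_t i;
    for (i = 0; i < strlen(name); i++)
        hash = (7u * hash) + (unsigned char)name[i];   /* hash = 7*hash + char */
    return hash;
}

int main(void){
    /* identical strings (same node) give identical hashes */
    printf("node17: %u\n", nameToHash("node17"));
    printf("node17: %u\n", nameToHash("node17"));
    printf("node18: %u\n", nameToHash("node18"));
    return 0;
}
```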
|
51 | exchangeWorldRanks:Finds the MPI_COMM_WORLD ranks of the processes participating in the multi-pingpong and multi-pingping benchmarks.
| int exchangeWorldRanks(int nodeA, int nodeB, int *otherWorldRank) {\n\\n int destRank;\n\\n if (crossCommRank == nodeA) {\n\\n destRank = nodeB;\n\\n } else if (crossCommRank == nodeB) {\n\\n destRank = nodeA;\n\\n }\n\\n if (crossCommRank == nodeA || crossCommRank == nodeB) {\n\\n MPI_Isend(&myMPIRank, 1, MPI_INT, destRank, TAG, crossComm, &requestID);\n\\n MPI_Recv(otherWorldRank, 1, MPI_INT, destRank, TAG, crossComm, &status);\n\\n MPI_Wait(&requestID, &status);\n\\n }\n\\n return 0;\n\\n}",
| SUBROUTINE exchangeWorldRanks(nodeA, nodeB, otherWorldRank)\ninteger, intent(in) :: nodeA, nodeB\ninteger, intent(out) :: otherWorldRank\ninteger :: destRank\n\nIF (crossCommRank == nodeA) THEN\ndestRank = nodeB\nELSE IF (crossCommRank == nodeB) THEN\ndestRank = nodeA\nEND IF\n\nIF (crossCommRank == nodeA .or. crossCommRank == nodeB) THEN\nCALL MPI_Isend(myMPIRank, 1, MPI_INTEGER, destRank, &\ntag, crossComm, requestID, ierr)\nCALL MPI_Recv(otherWorldRank, 1, MPI_INTEGER, destRank, &\ntag, crossComm, status, ierr)\nCALL MPI_Wait(requestID, status, ierr)\nEND IF\n\nEND SUBROUTINE exchangeWorldRanks
|
52 | sendProcName:Sends the processor name from the processes in srcNode of crossComm to the processes in destNode.
| int sendProcName(int destNode, int srcNode, char *destProcName){ \n if (crossCommRank == srcNode){ \n MPI_Send(myProcName, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, destNode, TAG, crossComm); \n } \n else if (crossCommRank == destNode){ \n MPI_Recv(destProcName, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, srcNode, TAG, crossComm, &status); \n } \n return 0; \n}",
| SUBROUTINE sendProcName(destNode, srcNode, destProcName)\ninteger, intent(in) :: srcNode, destNode\ncharacter (len = MPI_MAX_PROCESSOR_NAME), intent(out) :: destProcName\n\nIF (crossCommRank == srcNode) THEN\nCALL MPI_Send(myProcName, MPI_MAX_PROCESSOR_NAME, &\nMPI_CHARACTER, destNode, tag, crossComm, ierr)\nELSE IF (crossCommRank == destNode) THEN\nCALL MPI_Recv(destProcName, MPI_MAX_PROCESSOR_NAME, &\nMPI_CHARACTER, srcNode, tag, crossComm, status, ierr)\nEND IF\n\nEND SUBROUTINE sendProcName
|
53 | checkCrossCommBalance:Checks if there's a balance in the number of processes in crossComm nodes.
| int crossCommBalance(int nodeA, int nodeB){\n int localCommSize, otherLocalCommSize;\n int crossCommBalance;\n\n MPI_Comm_size(localComm, &localCommSize);\n\n if ((crossCommRank == nodeB) && (localCommRank == 0)){\n MPI_Send(&localCommSize, 1, MPI_INT, nodeA, TAG, crossComm);\n }\n else if ((crossCommRank == nodeA) && (localCommRank == 0)){\n MPI_Recv(&otherLocalCommSize, 1, MPI_INT, nodeB, TAG, \\\n crossComm, &status);\n\n if (localCommSize == otherLocalCommSize){\n crossCommBalance = TRUE;\n }\n else{\n crossCommBalance = FALSE;\n }\n\n if (myMPIRank != 0){\n MPI_Send(&crossCommBalance, 1, MPI_INT, 0, TAG, comm);\n }\n }\n\n if (myMPIRank == 0){\n if ((crossCommRank != nodeA) && (localCommRank != 0)){\n MPI_Recv(&crossCommBalance, 1, MPI_INT, MPI_ANY_SOURCE, \\\n TAG, comm, &status);\n }\n }\n\n MPI_Bcast(&crossCommBalance, 1, MPI_INT, 0, comm);\n\n return crossCommBalance;\n}",
| FUNCTION crossCommBalance(nodeA, nodeB)\ninteger, intent(in) :: nodeA, nodeB\ninteger :: localCommSize, otherLocalCommSize\nlogical :: crossCommBalance\n\nCALL MPI_Comm_size(localComm, localCommSize, ierr)\n\nIF (crossCommRank == nodeB .and. localCommRank == 0) THEN\nCALL MPI_Send(localCommSize, 1, MPI_INTEGER, nodeA, &\ntag, crossComm, ierr)\nELSEIF (crossCommRank == nodeA .and. localCommRank == 0) THEN\nCALL MPI_Recv(otherLocalCommSize, 1, MPI_INTEGER, nodeB, &\ntag, crossComm, status, ierr)\nIF (localCommSize == otherLocalCommSize) THEN\ncrossCommBalance = .true.\nELSE \ncrossCommBalance = .false.\nEND IF\nIF (myMPIRank /= 0) THEN\nCALL MPI_Send(crossCommBalance, 1, MPI_LOGICAL, &\n0, tag, comm, ierr)\nEND IF\nEND IF\nIF (myMPIRank == 0) THEN\nIF (crossCommRank /= nodeA .and. localCommRank /= 0) THEN\nCALL MPI_Recv(crossCommBalance, 1, MPI_LOGICAL, &\nMPI_ANY_SOURCE, tag, comm, status, ierr)\nEND IF\nEND IF\nCALL MPI_Bcast(crossCommBalance, 1, MPI_LOGICAL, 0, comm, ierr)\n\nEND FUNCTION crossCommBalance
|
54 | haloExchange:Driver subroutine for the haloExchange benchmark.
| int haloExchange(int benchmarkType){ \n int dataSizeIter; \n findNeighbours(); \n repsToDo = defaultReps; \n dataSizeIter = minDataSize; /* Initialise dataSizeIter */ \n \n while (dataSizeIter <= maxDataSize){ \n sizeofBuffer = dataSizeIter * numThreads; \n allocateHaloexchangeData(sizeofBuffer); \n \n if (benchmarkType == MASTERONLY){ \n masteronlyHaloexchange(warmUpIters, dataSizeIter); \n } else if (benchmarkType == FUNNELLED){ \n funnelledHaloexchange(warmUpIters, dataSizeIter); \n } else if (benchmarkType == MULTIPLE){ \n multipleHaloexchange(warmUpIters, dataSizeIter); \n } \n \n testHaloexchange(sizeofBuffer, dataSizeIter); \n benchComplete = FALSE; \n \n while (benchComplete != TRUE){ \n MPI_Barrier(comm); \n startTime = MPI_Wtime(); \n \n if (benchmarkType == MASTERONLY){ \n masteronlyHaloexchange(repsToDo, dataSizeIter); \n } else if (benchmarkType == FUNNELLED){ \n funnelledHaloexchange(repsToDo, dataSizeIter); \n } else if (benchmarkType == MULTIPLE){ \n multipleHaloexchange(repsToDo, dataSizeIter); \n } \n \n MPI_Barrier(comm); \n finishTime = MPI_Wtime(); \n totalTime = finishTime - startTime; \n \n if (myMPIRank==0){ \n benchComplete = repTimeCheck(totalTime, repsToDo); \n } \n \n MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm); \n MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm); \n } \n \n if (myMPIRank == 0 ){ \n setReportParams(dataSizeIter, repsToDo, totalTime); \n printReport(); \n } \n \n freeHaloexchangeData(); \n dataSizeIter = dataSizeIter * 2; \n } \n \n return 0; \n}",
| SUBROUTINE haloExchange(benchmarkType)\ninteger, intent(in) :: benchmarkType\ninteger :: dataSizeIter\n\nCALL findNeighbourRanks()\n\nrepsToDo = defaultReps\n\ndataSizeIter = minDataSize\nDO WHILE (dataSizeIter <= maxDataSize)\nsizeofBuffer = dataSizeIter * numThreads\n\nCALL allocateData(sizeofBuffer)\n\nIF (benchmarkType == MASTERONLY) THEN\nCALL masteronlyHaloexchange(warmUpIters, dataSizeIter)\nELSE IF (benchmarkType == FUNNELLED) THEN\nCALL funnelledHaloexchange(warmUpIters, dataSizeIter)\nELSE IF (benchmarkType == MULTIPLE) THEN\nCALL multipleHaloexchange(warmUpIters, dataSizeIter)\nEND IF\n\nCALL testHaloexchange(sizeofBuffer, dataSizeIter)\n\nbenchComplete = .false.\nDO WHILE (benchComplete .NEQV. .true.)\nCALL MPI_Barrier(comm, ierr)\nstartTime = MPI_Wtime()\n\nIF (benchmarkType == MASTERONLY) THEN\nCALL masteronlyHaloexchange(repsToDo, dataSizeIter)\nELSE IF (benchmarkType == FUNNELLED) THEN\nCALL funnelledHaloexchange(repsToDo, dataSizeIter)\nELSE IF (benchmarkType == MULTIPLE) THEN\nCALL multipleHaloexchange(repsToDo, dataSizeIter)\nEND IF\n\nCALL MPI_Barrier(comm, ierr)\nfinishTime = MPI_Wtime()\ntotalTime = finishTime - startTime\n\nIF (myMPIRank==0) THEN\nbenchComplete = repTimeCheck(totalTime, repsToDo)\nEND IF\nCALL MPI_Bcast(benchComplete, 1, MPI_INTEGER, 0, comm, ierr)\nCALL MPI_Bcast(repsToDo, 1, MPI_INTEGER, 0, comm, ierr)\n\nEND DO\n\nIF (myMPIRank == 0) THEN\nCALL setReportParams(dataSizeIter,repsToDo,totalTime)\nCALL printReport()\nEND IF\n\nCALL freeData()\n\ndataSizeIter = dataSizeIter * 2\n\nEND DO\n\nEND SUBROUTINE haloExchange
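All the benchmark drivers share this timing skeleton: warm up, then repeatedly barrier–time–barrier a block of repetitions and let rank 0 decide, via repTimeCheck, whether the target time has been reached, broadcasting the decision and the updated repetition count. The sketch below illustrates that loop with a dummy kernel; the doubling rule standing in for repTimeCheck is an assumption for illustration, not the suite's exact rule:

```c
#include <mpi.h>
#include <stdio.h>

static void dummyKernel(int reps){          /* stands in for one benchmark sweep */
    volatile double x = 0.0;
    for (int r = 0; r < reps; r++)
        for (int i = 0; i < 100000; i++) x += 1.0;
}

int main(int argc, char *argv[]){
    int rank, repsToDo = 1000, benchComplete = 0;
    double targetTime = 0.5, startTime, totalTime = 0.0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    while (!benchComplete){
        MPI_Barrier(MPI_COMM_WORLD);        /* synchronise before timing */
        startTime = MPI_Wtime();
        dummyKernel(repsToDo);
        MPI_Barrier(MPI_COMM_WORLD);
        totalTime = MPI_Wtime() - startTime;

        if (rank == 0){                     /* only rank 0 decides */
            if (totalTime >= targetTime) benchComplete = 1;
            else repsToDo *= 2;             /* assumed stand-in for repTimeCheck */
        }
        /* keep every process in step with rank 0's decision */
        MPI_Bcast(&benchComplete, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(&repsToDo, 1, MPI_INT, 0, MPI_COMM_WORLD);
    }

    if (rank == 0)
        printf("settled on %d reps, %.3f s total (%.2e s/rep)\n",
               repsToDo, totalTime, totalTime / repsToDo);

    MPI_Finalize();
    return 0;
}
```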
|
55 | masteronlyHaloexchange:Each process exchanges a message with its left and right neighbour. Communication takes place outside of the parallel region.
| int masteronlyHaloexchange(int totalReps, int dataSize){\n int repIter, i;\n \n for (repIter=0; repIter<totalReps; repIter++){\n \n /* Fill leftSendBuf and rightSendBuf using a parallel for directive. */\n #pragma omp parallel for default(none) \\\n private(i) \\\n shared(leftSendBuf,rightSendBuf,dataSize) \\\n shared(sizeofBuffer,globalIDarray) \\\n schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n leftSendBuf[i] = globalIDarray[myThreadID];\n rightSendBuf[i] = globalIDarray[myThreadID];\n }\n \n /* Send data to leftNeighbour and rightNeighbour using non-blocking send... */\n MPI_Isend(leftSendBuf, sizeofBuffer, MPI_INT, leftNeighbour, \\\n TAG, commCart, &requestArray[0]);\n MPI_Isend(rightSendBuf, sizeofBuffer, MPI_INT, rightNeighbour, \\\n TAG, commCart, &requestArray[1]);\n MPI_Irecv(leftRecvBuf, sizeofBuffer, MPI_INT, leftNeighbour, \\\n TAG, commCart, &requestArray[2]);\n MPI_Irecv(rightRecvBuf, sizeofBuffer, MPI_INT, rightNeighbour, \\\n TAG, commCart, &requestArray[3]);\n \n MPI_Waitall(4, requestArray, statusArray);\n \n #pragma omp parallel for default(none) \\\n private(i) \\\n shared(leftRecvBuf,rightRecvBuf,dataSize,sizeofBuffer) \\\n shared(finalLeftBuf,finalRightBuf) \\\n schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n finalLeftBuf[i] = leftRecvBuf[i];\n finalRightBuf[i] = rightRecvBuf[i];\n }\n }\n return 0;\n}",
| SUBROUTINE masteronlyHaloexchange(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter, i\nDO repIter = 1, totalReps\n\n!Each thread writes its globalID to rightSendBuf\n!and leftSendBuf with a parallel do directive\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(leftSendBuf,rightSendBuf,dataSize,sizeofBuffer), &\n!$OMP SHARED(globalIDarray),&\n!$OMP SCHEDULE(STATIC,dataSize)\nDO i = 1, sizeofBuffer\nleftSendBuf(i) = globalIDarray(myThreadID)\nrightSendBuf(i) = globalIDarray(myThreadID)\nEND DO\n!$OMP END PARALLEL DO\n\n!Process starts send of data to leftNeighbour and\n!rightNeighbour using non-blocking send...\n!..to leftNeighbour\nCALL MPI_ISend(leftSendBuf, sizeofBuffer, MPI_INTEGER, &\nleftNeighbour, tag, comm, requestArray(1), ierr)\n!..to rightNeighbour\nCALL MPI_ISend(rightSendBuf, sizeofBuffer, MPI_INTEGER, &\nrightNeighbour, tag, comm, requestArray(2), ierr)\n\n!Process then waits for messages from leftNeighbour and\n!rightNeighbour.\n!Receive leftRecvBuf from leftNeighbour\nCALL MPI_IRecv(leftRecvBuf, sizeofBuffer, MPI_INTEGER, &\nleftNeighbour, tag, comm, requestArray(3), ierr)\n!Receive rightRecvBuf from rightNeighbour\nCALL MPI_IRecv(rightRecvBuf, sizeofBuffer, MPI_INTEGER, &\nrightNeighbour, tag, comm, requestArray(4), ierr)\n\n!Finish the sends with an MPI_Waitall on the requests\nCALL MPI_Waitall(4, requestArray, statusArray, ierr) \n\n!Each thread now reads its part of the left and right\n!received buffers.\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(leftRecvBuf,rightRecvBuf,dataSize,sizeofBuffer),&\n!$OMP SHARED(finalLeftBuf,finalRightBuf), &\n!$OMP SCHEDULE(STATIC,dataSize)\nDO i = 1, sizeofBuffer\nfinalLeftBuf(i) = leftRecvBuf(i)\nfinalRightBuf(i) = rightRecvBuf(i)\nEND DO\n!$OMP END PARALLEL DO\n\nEND DO\n\nEND SUBROUTINE masteronlyHaloexchange
|
56 | funnelledHaloexchange:Each process exchanges a message with its left and right neighbour. Communication is performed by the master thread inside the parallel region.
| int funnelledHaloexchange(int totalReps, int dataSize){\n int repIter, i;\n\n #pragma omp parallel default(none) \\\n private(i,repIter) \\\n shared(dataSize,sizeofBuffer,leftSendBuf,rightSendBuf) \\\n shared(rightRecvBuf,leftRecvBuf,finalLeftBuf,finalRightBuf) \\\n shared(globalIDarray,commCart,totalReps,requestArray,statusArray) \\\n shared(leftNeighbour,rightNeighbour)\n { \n for (repIter=0; repIter<totalReps; repIter++){\n\n #pragma omp for schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n leftSendBuf[i] = globalIDarray[myThreadID];\n rightSendBuf[i] = globalIDarray[myThreadID];\n }\n\n #pragma omp master\n {\n MPI_Isend(leftSendBuf, sizeofBuffer, MPI_INT, leftNeighbour, \\\n TAG, commCart, &requestArray[0]);\n\n MPI_Isend(rightSendBuf, sizeofBuffer, MPI_INT, rightNeighbour, \\\n TAG, commCart, &requestArray[1]);\n\n MPI_Irecv(leftRecvBuf, sizeofBuffer, MPI_INT, leftNeighbour, \\\n TAG, commCart, &requestArray[2]);\n\n MPI_Irecv(rightRecvBuf, sizeofBuffer, MPI_INT, rightNeighbour, \\\n TAG, commCart, &requestArray[3]);\n\n MPI_Waitall(4, requestArray, statusArray);\n }\n\n #pragma omp barrier\n\n #pragma omp for schedule(static,dataSize)\n for(i=0; i<sizeofBuffer; i++){\n finalLeftBuf[i] = leftRecvBuf[i];\n finalRightBuf[i] = rightRecvBuf[i];\n }\n }\n }\n\n return 0;\n}",
| SUBROUTINE funnelledHaloexchange(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter, i\n\n!Open the parallel region\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,repIter), &\n!$OMP SHARED(dataSize,sizeofBuffer,leftSendBuf,rightSendBuf), &\n!$OMP SHARED(rightRecvBuf,leftRecvBuf,finalLeftBuf), &\n!$OMP SHARED(finalRightBuf,leftNeighbour,rightNeighbour), &\n!$OMP SHARED(globalIDarray,ierr,comm,status), &\n!$OMP SHARED(totalReps,requestArray,statusArray)\n\nDO repIter = 1, totalReps !loop for totalReps \n\n!Each thread writes its globalID to rightSendBuf and\n!leftSendBuf.\n!$OMP DO SCHEDULE(STATIC,dataSize)\nDO i = 1, sizeofBuffer\nleftSendBuf(i) = globalIDarray(myThreadID)\nrightSendBuf(i) = globalIDarray(myThreadID)\nEND DO\n!$OMP END DO\n!Implicit barrier here takes care of necessary synchronisation\n\n!$OMP MASTER\n!Master thread starts send of data to left and right\n!neighbours with a non-blocking send\n!..to leftNeighbour\nCALL MPI_ISend(leftSendBuf, sizeofBuffer, MPI_INTEGER, &\nleftNeighbour, tag, comm, requestArray(1), ierr)\n!..to rightNeighbour\nCALL MPI_ISend(rightSendBuf, sizeofBuffer, MPI_INTEGER, &\nrightNeighbour, tag, comm, requestArray(2), ierr)\n\n!Thread then starts receive of messages from leftNeighbour \n!and rightNeighbour.\n!Receive leftRecvBuf from leftNeighbour\nCALL MPI_IRecv(leftRecvBuf, sizeofBuffer, MPI_INTEGER, &\nleftNeighbour, tag, comm, requestArray(3), ierr)\n!Receive rightRecvBuf from rightNeighbour\nCALL MPI_IRecv(rightRecvBuf, sizeofBuffer, MPI_INTEGER, &\nrightNeighbour, tag, comm, requestArray(4), ierr)\n\n!Finish the sends and receives with an MPI_Waitall \n!on the requests.\nCALL MPI_Waitall(4, requestArray, statusArray, ierr) \n!$OMP END MASTER\n\n!Barrier to ensure master thread has completed transfer.\n!$OMP BARRIER\n\n!Each thread now reads its part of the left and right\n!received buffers.\n!$OMP DO SCHEDULE(STATIC,dataSize)\nDO i = 1, sizeofBuffer\nfinalLeftBuf(i) = leftRecvBuf(i)\nfinalRightBuf(i) = rightRecvBuf(i)\nEND DO\n!$OMP END DO\n\nEND DO !end of repetitions loop\n!$OMP END PARALLEL\nEND SUBROUTINE funnelledHaloexchange
|
57 | multipleHaloexchange:Each process exchanges a message with its left and right neighbour. All threads take part in the inter-process communication.
| int multipleHaloexchange(int totalReps, int dataSize){ \n int repIter, i; \n int lBound; \n \n #pragma omp parallel default(none) \\ \n private(i,requestArray,statusArray,lBound,repIter) \\ \n shared(dataSize,sizeofBuffer,leftSendBuf,rightSendBuf) \\ \n shared(rightRecvBuf,leftRecvBuf,finalLeftBuf,finalRightBuf) \\ \n shared(leftNeighbour,rightNeighbour,globalIDarray,commCart,totalReps) \n { \n for (repIter=0; repIter<totalReps; repIter++){ \n lBound = (myThreadID * dataSize); \n \n #pragma omp for nowait schedule(static,dataSize) \n for (i=0; i<sizeofBuffer; i++){ \n leftSendBuf[i] = globalIDarray[myThreadID]; \n rightSendBuf[i] = globalIDarray[myThreadID]; \n } \n \n MPI_Isend(&leftSendBuf[lBound], dataSize, MPI_INT, leftNeighbour, \\\n myThreadID, commCart, &requestArray[0]); \n \n MPI_Isend(&rightSendBuf[lBound], dataSize, MPI_INT, rightNeighbour, \\\n myThreadID, commCart, &requestArray[1]); \n \n MPI_Irecv(&leftRecvBuf[lBound], dataSize, MPI_INT, leftNeighbour, \\\n myThreadID, commCart, &requestArray[2]); \n \n MPI_Irecv(&rightRecvBuf[lBound], dataSize, MPI_INT, rightNeighbour, \\\n myThreadID, commCart, &requestArray[3]); \n \n MPI_Waitall(4, requestArray, statusArray); \n \n #pragma omp for nowait schedule(static,dataSize) \n for (i=0; i<sizeofBuffer; i++){ \n finalLeftBuf[i] = leftRecvBuf[i]; \n finalRightBuf[i] = rightRecvBuf[i]; \n } \n } \n } \n \n return 0; \n}",
| SUBROUTINE multipleHaloexchange(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter, i\ninteger :: lBound, uBound\n\n !Open the parallel region\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,requestArray,statusArray,status,ierr), &\n!$OMP PRIVATE(lBound,uBound,repIter),&\n!$OMP SHARED(dataSize,sizeofBuffer,leftSendBuf,rightSendBuf), &\n!$OMP SHARED(rightRecvBuf,leftRecvBuf,finalLeftBuf), &\n!$OMP SHARED(finalRightBuf,leftNeighbour,rightNeighbour), &\n!$OMP SHARED(globalIDarray,comm,totalReps)\n\nDO repIter = 1, totalReps !repetition loop \n\n !Calculate lower and upper bound of each threads\n !portion of the data arrays.\n lBound = ((myThreadID-1) * dataSize) + 1\n uBound = (myThreadID * dataSize)\n \n !Each thread writes its globalID to rightSendBuf and\n !leftSendBuf.\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n leftSendBuf(i) = globalIDarray(myThreadID)\n rightSendBuf(i) = globalIDarray(myThreadID)\n END DO\n!$OMP END DO NOWAIT\n!Implicit barrier not needed for multiple version\n\n !Each thread starts send of dataSize items to leftNeighbour\n !and to rightNeighbour\n CALL MPI_Isend(leftSendBuf(lBound:uBound), dataSize, &\n MPI_INTEGER, leftNeighbour, myThreadID, comm, &\n requestArray(1), ierr)\n CALL MPI_Isend(rightSendBuf(lBound:uBound), dataSize, &\n MPI_INTEGER, rightNeighbour, myThreadID, comm, &\n requestArray(2), ierr)\n \n !Each Thread then starts receive of messages from \n !leftNeighbour and rightNeighbour.\n !Receive leftRecvBuf from leftNeighbour...\n CALL MPI_IRecv(leftRecvBuf(lBound:uBound), dataSize, &\n MPI_INTEGER, leftNeighbour, myThreadID, comm, &\n requestArray(3), ierr)\n !Receive rightRecvBuf from rightNeighbour\n CALL MPI_IRecv(rightRecvBuf(lBound:uBound), dataSize, &\n MPI_INTEGER, rightNeighbour, myThreadID, comm, &\n requestArray(4), ierr)\n \n !Finish the sends with an MPI_Waitall on the requests\n CALL MPI_Waitall(4, requestArray, statusArray, ierr)\n\n !Each thread now reads its part of the left and right\n !received buffers.\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n finalLeftBuf(i) = leftRecvBuf(i)\n finalRightBuf(i) = rightRecvBuf(i)\n END DO\n!$OMP END DO NOWAIT\n\nEND DO !End repetitions loop\n!$OMP END PARALLEL\nEND SUBROUTINE multipleHaloexchange
|
58 | allocateHaloexchangeData:Allocates memory for the main data arrays in the haloexchange.
| int allocateHaloexchangeData(int sizeofBuffer){\n leftSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n leftRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n rightSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n rightRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n finalLeftBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n finalRightBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\n return 0;\n}",
| SUBROUTINE allocateData(bufferSize)\ninteger, intent(in) :: bufferSize\n\nallocate(leftSendBuf(bufferSize), leftRecvBuf(bufferSize))\nallocate(rightSendBuf(bufferSize), rightRecvBuf(bufferSize))\nallocate(finalLeftBuf(bufferSize), finalRightBuf(bufferSize))\n\nEND SUBROUTINE allocateData
|
59 | freeHaloexchangeData:Deallocates the storage space for the main data arrays.
| int freeHaloexchangeData(){ \n free(leftSendBuf); \n free(leftRecvBuf); \n free(rightSendBuf); \n free(rightRecvBuf); \n free(finalLeftBuf); \n free(finalRightBuf); \n return 0; \n}",
| SUBROUTINE freeData()\ndeallocate(leftSendBuf, leftRecvBuf)\ndeallocate(rightSendBuf, rightRecvBuf)\ndeallocate(finalLeftBuf, finalRightBuf)\n\nEND SUBROUTINE freeData
|
60 | testHaloexchange:Verifies that the halo exchange benchmark worked correctly.
| int testHaloexchange(int sizeofBuffer, int dataSize){\n int i;\n int testFlag, reduceFlag;\n int *testLeftBuf, *testRightBuf;\n\n testFlag = TRUE;\n\n testLeftBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n testRightBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\n #pragma omp parallel for default(none) \\\n private(i) \\\n shared(leftNeighbour,rightNeighbour,numThreads) \\\n shared(dataSize,sizeofBuffer,testLeftBuf,testRightBuf) \\\n schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n testLeftBuf[i] = (leftNeighbour * numThreads) + myThreadID;\n testRightBuf[i] = (rightNeighbour * numThreads) + myThreadID;\n }\n\n for (i=0; i<sizeofBuffer; i++){\n if (testLeftBuf[i] != finalLeftBuf[i]){\n testFlag = FALSE;\n }\n if (testRightBuf[i] != finalRightBuf[i]){\n testFlag = FALSE;\n }\n }\n\n MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);\n\n if (myMPIRank == 0){\n setTestOutcome(reduceFlag);\n }\n\n free(testLeftBuf);\n free(testRightBuf);\n\n return 0;\n}",
| SUBROUTINE testHaloexchange(sizeofBuffer, dataSize)\n integer, intent(in) :: sizeofBuffer, dataSize\n integer :: i\n logical :: testFlag, reduceFlag\n\n !set testFlag to true\n testFlag = .true.\n\n !allocate space for testLeftBuf and testRightBuf\n allocate(testLeftBuf(sizeofBuffer),testRightBuf(sizeofBuffer))\n\n !Construct testLeftBuf and testRightBuf with correct values\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(leftNeighbour,rightNeighbour,numThreads), &\n!$OMP SHARED(dataSize,sizeofBuffer,testLeftBuf,testRightBuf),&\n!$OMP SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n !Calculate globalID of thread expected in finalLeftBuf..\n testLeftBuf(i) = (leftNeighbour * numThreads) + myThreadID\n !...and in finalRightBuf.\n testRightBuf(i) = (rightNeighbour * numThreads) + myThreadID\n END DO\n!OMP END PARALLEL DO\n\n !Compare..\n DO i = 1, sizeofBuffer\n !1) values from left neighbour\n IF (testLeftBuf(i) /= finalLeftBuf(i)) THEN\n testFlag = .false.\n END IF\n !2) values from right neighbour\n IF (testRightBuf(i) /= finalRightBuf(i)) THEN\n testFlag = .false.\n END IF\n END DO\n\n !Reduce testFlag into master with logical AND operator\n CALL MPI_Reduce(testFlag, reduceFlag, 1, MPI_LOGICAL, &\n MPI_LAND, 0, comm, ierr)\n\n !Master then sets testOutcome flag\n IF (myMPIRank == 0) THEN\n CALL setTestOutcome(reduceFlag)\n END IF\n\n !free space for testLeftBuf and testRightBuf\n deallocate(testLeftBuf, testRightBuf)\n\n END SUBROUTINE testHaloexchange
|
61 | multiPingPing:Driver subroutine for the multi-pingping benchmark.
| int multiPingping(int benchmarkType){\n\\n int dataSizeIter;\n\\n char otherProcName[MPI_MAX_PROCESSOR_NAME];\n\\n int balance;\n\\n pingNodeA = 0;\n\\n pingNodeB = 1;\n\\n balance = crossCommBalance(pingNodeA, pingNodeB);\n\n\\n if (balance == FALSE){\n\\n if (myMPIRank == 0){\n\\n printBalanceError();\n\\n }\n\\n return 1;\n\\n }\n\n\\n exchangeWorldRanks(pingNodeA, pingNodeB, &otherPingRank);\n\\n sendProcName(pingNodeA, pingNodeB, otherProcName);\n\\n printMultiProcInfo(pingNodeA, otherPingRank, otherProcName);\n\\n MPI_Barrier(comm);\n\n\\n if (myMPIRank == 0){\n\\n printBenchHeader();\n\\n }\n\n\\n repsToDo = defaultReps;\n\\n dataSizeIter = minDataSize;\n\n\\n while (dataSizeIter <= maxDataSize){\n\\n sizeofBuffer = dataSizeIter * numThreads;\n\\n allocateMultiPingpingData(sizeofBuffer);\n\n\\n if (benchmarkType == MASTERONLY){\n\\n masteronlyMultiPingping(warmUpIters, dataSizeIter);\n\\n } else if (benchmarkType == FUNNELLED){\n\\n funnelledMultiPingping(warmUpIters, dataSizeIter);\n\\n } else if (benchmarkType == MULTIPLE){\n\\n multipleMultiPingping(warmUpIters, dataSizeIter);\n\\n }\n\n\\n testMultiPingping(sizeofBuffer, dataSizeIter);\n\\n benchComplete = FALSE;\n\n\\n while (benchComplete != TRUE){\n\\n MPI_Barrier(comm);\n\\n startTime = MPI_Wtime();\n\n\\n if (benchmarkType == MASTERONLY){\n\\n masteronlyMultiPingping(repsToDo, dataSizeIter);\n\\n } else if (benchmarkType == FUNNELLED){\n\\n funnelledMultiPingping(repsToDo, dataSizeIter);\n\\n } else if (benchmarkType == MULTIPLE){\n\\n multipleMultiPingping(repsToDo, dataSizeIter);\n\\n }\n\n\\n MPI_Barrier(comm);\n\\n finishTime = MPI_Wtime();\n\\n totalTime = finishTime - startTime;\n\n\\n if (myMPIRank==0){\n\\n benchComplete = repTimeCheck(totalTime, repsToDo);\n\\n }\n\n\\n ",
| SUBROUTINE multiPingPing(benchmarkType)\n integer, intent(in) :: benchmarkType\n integer :: dataSizeIter\n character (len = MPI_MAX_PROCESSOR_NAME) :: otherProcName\n logical :: balance\n\n pingNodeA = 0\n pingNodeB = 1\n\n !Check if there's a balance in num of MPI processes \n !in pingNodeA and pingNodeB.\n balance = crossCommBalance(pingNodeA, pingNodeB)\n !If not balanced..\n IF (balance .EQV. .false.) THEN\n !..Master prints error\n IF (myMPIRank == 0) THEN\n CALL printBalanceError()\n END IF\n !..and all processes return from subroutine.\n RETURN\n END IF\n\n !Exchange MPI_COMM_WORLD ranks for processes in same crossComm\n CALL exchangeWorldRanks(pingNodeA, pingNodeB, otherPingRank)\n\n !Processes on pingNodeB send processor name to pingNodeA procs\n CALL sendProcName(pingNodeA, pingNodeB, otherProcName)\n\n !Print comm world ranks & processor names of \n !processes taking part in multi-pingping benchmark.\n CALL printMultiProcInfo(pingNodeA, otherPingRank, otherProcName)\n\n !Barrier to ensure that all procs have completed\n !printMultiProcInfo before printing column headings\n CALL MPI_Barrier(comm, ierr)\n !Master process then prints report column headings\n IF(myMPIRank == 0) THEN\n CALL printBenchHeader()\n END IF\n\n !initialise repsToDo to defaultReps at start of benchmark\n repsToDo = defaultReps\n\n !Start loop over data sizes\n dataSizeIter = minDataSize !initialise dataSizeIter\n DO WHILE (dataSizeIter <= maxDataSize)\n\n !set size of buffer\n sizeofBuffer = dataSizeIter * numThreads\n\n !Allocate space for the main data arrays\n CALL allocateData(sizeofBuffer)\n\n !Warm-up for benchmarkType\n IF (benchmarkType == MASTERONLY) THEN\n !Masteronly warm-up sweep\n CALL masteronlyMultiPingping(warmUpIters, dataSizeIter)\n ELSE IF (benchmarkType == FUNNELLED) THEN\n !Funnelled warm-up sweep\n CALL funnelledMultiPingping(warmUpIters, dataSizeIter)\n ELSE IF (benchmarkType == MULTIPLE) THEN\n !Multiple warm-up\n CALL multipleMultiPingping(warmUpIters, dataSizeIter)\n END IF\n \n !Verification test for multi-pingping\n CALL testMultiPingping(sizeofBuffer, dataSizeIter)\n\n !Initialise benchmark\n benchComplete = .false.\n !Keep executing benchmark until target time is reached\n DO WHILE (benchComplete .NEQV. 
.true.)\n\n !Start the timer..MPI_Barrier to synchronise\n !processes for more accurate timing.\n CALL MPI_Barrier(comm, ierr)\n startTime = MPI_Wtime()\n\n IF (benchmarkType == MASTERONLY) THEN\n !Execute masteronly multipingping repsToDo times\n CALL masteronlyMultiPingping(repsToDo, dataSizeIter)\n ELSE IF (benchmarkType == FUNNELLED) THEN\n !Execute funnelled multipingping\n CALL funnelledMultiPingping(repsToDo, dataSizeIter)\n ELSE IF (benchmarkType == MULTIPLE) THEN\n !Execute multiple multipingping\n CALL multipleMultiPingping(repsToDo, dataSizeIter)\n END IF\n\n !Stop the timer..MPI_Barrier to synchronise processes\n !for more accurate timing.\n CALL MPI_Barrier(comm, ierr)\n finishTime = MPI_Wtime()\n totalTime = finishTime - startTime\n\n !Call repTimeCheck function to test if target time\n !is reached.\n if (myMPIRank==0) then \n benchComplete = repTimeCheck(totalTime, repsToDo)\n end if\n !Ensure all procs have the same value of benchComplete\n !and repsToDo\n call MPI_Bcast(benchComplete, 1, MPI_INTEGER, 0, comm, ierr)\n call MPI_Bcast(repsToDo, 1, MPI_INTEGER, 0, comm, ierr)\n\n END DO !end of loop to check if benchComplete is true\n\n !Master process sets benchmark results\n IF (myMPIRank == 0) THEN\n CALL setReportParams(dataSizeIter, repsToDo, totalTime)\n CALL printReport()\n END IF\n\n !Free the allocated space for the main data arrays\n CALL freeData()\n\n !Update dataSize before next iteration\n dataSizeIter = dataSizeIter * 2 !double data size\n\n END DO !end of loop over data sizes.\n\n END SUBROUTINE multiPingPing
|
62 | masteronlyMultiPingping:All processes with rank pingNodeA or pingNodeB in crossComm send a message to each other. MPI communication takes place outside of the parallel region.
| int masteronlyMultiPingping(int totalReps, int dataSize){\n int repIter, i;\n int destRank;\n\n /* set destRank to ID of other process */\n if (crossCommRank == pingNodeA){\n destRank = pingNodeB;\n }\n else if (crossCommRank == pingNodeB){\n destRank = pingNodeA;\n }\n\n /* loop totalRep times */\n for (repIter=1; repIter<=totalReps; repIter++){\n\n if ((crossCommRank == pingNodeA) || (crossCommRank == pingNodeB) ){\n\n /* Each thread writes its globalID to pingSendBuf\n * with a parallel for directive.\n */\n#pragma omp parallel for default(none)\n private(i)\n shared(pingSendBuf,dataSize,sizeofBuffer,globalIDarray)\n schedule(static,dataSize)\n\n for (i=0; i<sizeofBuffer; i++){\n\tpingSendBuf[i] = globalIDarray[myThreadID];\n }\n\n /* Process calls non-blocking send to start transfer of\n * pingSendBuf to other process.\n */\n MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, TAG,\n\t\tcrossComm, &requestID);\n\n /* Processes then wait for message from other process. */\n MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, TAG, \n\t crossComm, &status);\n\n /* Finish the send operation with an MPI_Wait */\n MPI_Wait(&requestID, &status);\n\n /* Threads under the MPI processes read their part of the\n * received buffer.\n */\n#pragma omp parallel for default(none)\n private(i)\n shared(finalRecvBuf,dataSize,sizeofBuffer,pingRecvBuf)\n schedule(static,dataSize)\n\n for (i=0; i<sizeofBuffer; i++){\n\tfinalRecvBuf[i] = pingRecvBuf[i];\n }\n\n }\n } /* End repetitions loop */\n\n return 0;\n}
| SUBROUTINE masteronlyMultiPingping(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: destRank\n\n !set destRank to ID of other process\n IF (crossCommRank == pingNodeA) THEN\n destRank = pingNodeB\n ELSE IF (crossCommRank == pingNodeB) THEN\n destRank = pingNodeA\n END IF\n\n DO repIter = 1, totalReps !loop totalRep times\n\n IF (crossCommRank == pingNodeA .or. &\n crossCommRank == pingNodeB) THEN\n \n !Each thread writes its globalID to pingSendBuf\n !with a PARALLEL DO directive.\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(pingSendBuf,dataSize,sizeofBuffer,globalIDarray), &\n!$OMP SCHEDULE(STATIC,dataSize)\n\n DO i = 1, sizeofBuffer\n pingSendBuf(i) = globalIDarray(myThreadID)\n END DO\n\n!$OMP END PARALLEL DO\n\n !Process calls non-blocking send to start transfer of\n !pingSendBuf to other process.\n CALL MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INTEGER, &\n destRank, tag, crossComm, requestID, ierr)\n\n !Processes then waits for message from other process.\n CALL MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INTEGER, &\n destRank, tag, crossComm, status, ierr)\n\n !Finish the send operation with an MPI_Wait\n CALL MPI_Wait(requestID, status, ierr)\n\n !The threads under the MPI processes read their \n !part of the received buffer.\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(finalRecvBuf,dataSize,sizeofBuffer,pingRecvBuf), &\n!$OMP SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n finalRecvBuf(i) = pingRecvBuf(i)\n END DO\n!$OMP END PARALLEL DO\n \n END IF\n \n END DO !End repetitions loop\n\n END SUBROUTINE masteronlyMultiPingping
|
63 | funnelledMultiPingping:All processes with a rank of pingNodeA or pingNodeB in crossComm send a message to each other. Inter-process communication is performed by the master thread inside the OpenMP parallel region.
| int funnelledMultiPingping(int totalReps, int dataSize){\n int repIter, i;\n int destRank;\n\n if (crossCommRank == pingNodeA){\n destRank = pingNodeB;\n } \n else if (crossCommRank == pingNodeB){\n destRank = pingNodeA;\n }\n\n #pragma omp parallel default(none) \\\n private(i,repIter) \\\n shared(dataSize,sizeofBuffer,pingSendBuf,globalIDarray) \\\n shared(pingRecvBuf,finalRecvBuf,status,requestID,destRank) \\\n shared(crossComm,crossCommRank,pingNodeA,pingNodeB,totalReps)\n {\n for (repIter = 1; repIter <= totalReps; repIter++){\n if (crossCommRank == pingNodeA || crossCommRank == pingNodeB){\n\n #pragma omp for schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n pingSendBuf[i] = globalIDarray[myThreadID];\n }\n\n #pragma omp master \n {\n MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, \\\n destRank, TAG, crossComm, &requestID);\n MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, \\\n destRank, TAG, crossComm, &status);\n MPI_Wait(&requestID, &status);\n }\n\n #pragma omp barrier\n\n #pragma omp for schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n finalRecvBuf[i] = pingRecvBuf[i];\n }\n }\n } /* End repetitions loop */\n } /* End parallel region */\n\n return 0;\n}",
| SUBROUTINE funnelledMultiPingping(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: destRank\n\n !set destRank to ID of other process\n IF (crossCommRank == pingNodeA) THEN\n destRank = pingNodeB\n ELSE IF (crossCommRank == pingNodeB) THEN\n destRank = pingNodeA\n END IF\n\n !Open the parallel region\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,repIter), &\n!$OMP SHARED(dataSize,sizeofBuffer,pingSendBuf,globalIDarray), &\n!$OMP SHARED(pingRecvBuf,finalRecvBuf,status,requestID,ierr), &\n!$OMP SHARED(destRank,crossComm,crossCommRank,pingNodeA), &\n!$OMP SHARED(pingNodeB,totalReps)\n\n DO repIter = 1, totalReps !loop totalRep times\n\n IF (crossCommRank == pingNodeA .or. &\n crossCommRank == pingNodeB) THEN\n \n !Each thread writes its globalID to its part \n !of pingSendBuf with a OMP DO.\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n pingSendBuf(i) = globalIDarray(myThreadID)\n END DO\n!$OMP END DO\n!Implicit barrier here takes care of necessary synchronisation.\n\n!$OMP MASTER\n !Master thread of each process starts send.\n CALL MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INTEGER, &\n destRank, tag, crossComm, requestID, ierr)\n\n !Processes then wait for message.\n CALL MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INTEGER, &\n destRank, tag, crossComm, status, ierr)\n\n !Finish the send operation with an MPI_Wait\n CALL MPI_Wait(requestID, status, ierr)\n!$OMP END MASTER\n\n!Barrier to ensure master thread has completed transfer.\n!$OMP BARRIER\n\n !Each thread reads its part of the received buffer\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n finalRecvBuf(i) = pingRecvBuf(i)\n END DO\n!$OMP END DO\n\n END IF\n\n END DO !End repetitions loop\n!$OMP END PARALLEL\n\n END SUBROUTINE funnelledMultiPingping
|
64 | multipleMultiPingping:All processes with a crossCommRank of pingNodeA or pingNodeB send a message to each other. Multiple threads take part in the communication.
| int multipleMultiPingping(int totalReps, int dataSize){ \n int repIter, i; \n int destRank; \n int lBound; \n if (crossCommRank == pingNodeA){ \n destRank = pingNodeB; \n } else if (crossCommRank == pingNodeB){ \n destRank = pingNodeA; \n } \n #pragma omp parallel default(none) \\\n private(i,repIter,lBound,requestID,status) \\\n shared(dataSize,sizeofBuffer,pingSendBuf,globalIDarray) \\\n shared(pingRecvBuf,finalRecvBuf,destRank,crossComm) \\\n shared(crossCommRank,pingNodeA,pingNodeB,totalReps) \n { \n for (repIter = 1; repIter <= totalReps; repIter++){ \n if (crossCommRank == pingNodeA || crossCommRank == pingNodeB){ \n lBound = (myThreadID * dataSize); \n #pragma omp for nowait schedule(static,dataSize) \n for (i=0; i<sizeofBuffer; i++){ \n pingSendBuf[i] = globalIDarray[myThreadID]; \n } \n MPI_Isend(&pingSendBuf[lBound], dataSize, MPI_INT, \\\n destRank, myThreadID, crossComm, &requestID); \n MPI_Recv(&pingRecvBuf[lBound], dataSize, MPI_INT, destRank, \\\n myThreadID, crossComm, &status); \n MPI_Wait(&requestID, &status); \n #pragma omp for nowait schedule(static,dataSize) \n for (i=0; i<sizeofBuffer; i++){ \n finalRecvBuf[i] = pingRecvBuf[i]; \n } \n } \n } /* End repetitions loop */ \n } \n return 0; \n}",
| SUBROUTINE multipleMultiPingping(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: destRank\n integer :: lBound, uBound\n\n !set destRank to be ID of other process\n IF (crossCommRank == pingNodeA) THEN\n destRank = pingNodeB\n ELSE IF (crossCommRank == pingNodeB) THEN\n destRank = pingNodeA\n END IF\n\n !Open parallel region\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,repIter,lBound,uBound,requestID,status,ierr), &\n!$OMP SHARED(dataSize,sizeofBuffer,pingSendBuf,globalIDarray), &\n!$OMP SHARED(pingRecvBuf,finalRecvBuf,destRank,crossComm), &\n!$OMP SHARED(crossCommRank,pingNodeA,pingNodeB,totalReps)\n\n DO repIter = 1, totalReps !loop totalRep times\n\n IF (crossCommRank == pingNodeA .or. &\n crossCommRank == pingNodeB) THEN\n \n !Calculate lower and upper bound of each threads\n !portion of the data arrays\n lBound = ((myThreadID-1) * dataSize) + 1\n uBound = (myThreadID * dataSize)\n\n !Each thread writes to its part of pingSendBuf\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n pingSendBuf(i) = globalIDarray(myThreadID)\n END DO\n!$OMP END DO NOWAIT\n\n !Each thread starts send of dataSize items from\n !pingSendBuf.\n CALL MPI_Isend(pingSendBuf(lBound:uBound), dataSize, &\n MPI_INTEGER, destRank, myThreadID, crossComm, &\n requestID, ierr)\n\n !Thread then waits for message from destRank\n !with tag equal to its threadID.\n CALL MPI_Recv(pingRecvBuf(lBound:uBound), dataSize, &\n MPI_INTEGER, destRank, myThreadID, crossComm, &\n status, ierr)\n\n !Thread completes send using MPI_Wait\n CALL MPI_Wait(requestID, status, ierr)\n\n !Each thread reads its part of received buffer.\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n finalRecvBuf(i) = pingRecvBuf(i)\n END DO\n!$OMP END DO NOWAIT\n\n END IF\n\n END DO !End reps\n!$OMP END PARALLEL\n\n END SUBROUTINE multipleMultiPingping
|
65 | allocateMultiPingpingData:Allocates space for the main data arrays. Size of each array is specified by subroutine argument.
| int allocateMultiPingpingData(int sizeofBuffer){ \n if (crossCommRank == pingNodeA || crossCommRank == pingNodeB){ \n pingSendBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n pingRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n finalRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n } \n return 0; \n}",
| SUBROUTINE allocateData(sizeofBuffer)\n integer, intent(in) :: sizeofBuffer\n\n IF (crossCommRank == pingNodeA .or. &\n crossCommRank == pingNodeB) THEN\n\n allocate(pingSendBuf(sizeofBuffer))\n allocate(pingRecvBuf(sizeofBuffer))\n allocate(finalRecvBuf(sizeofBuffer))\n\n END IF\n\n END SUBROUTINE allocateData
|
66 | freeMultiPingpingData:Frees the allocated memory for the main data arrays.
| int freeMultiPingpingData() {\n if (crossCommRank == pingNodeA || crossCommRank == pingNodeB) {\n free(pingSendBuf);\n free(pingRecvBuf);\n free(finalRecvBuf);\n }\n return 0;\n}",
| SUBROUTINE freeData()\n \n IF (crossCommRank == pingNodeA .or. &\n crossCommRank == pingNodeB) THEN\n \n deallocate(pingSendBuf, pingRecvBuf)\n deallocate(finalRecvBuf)\n\n END IF\n \n END SUBROUTINE freeData
|
67 | testMultiPingping:Verifies that the multi-pingping benchmark worked correctly.
| int testMultiPingping(int sizeofBuffer, int dataSize){\n int i;\n int testFlag, localTestFlag;\n localTestFlag = TRUE;\n\n if (crossCommRank == pingNodeA || crossCommRank == pingNodeB) {\n testBuf = (int *)malloc(sizeof(int) * sizeofBuffer);\n\n #pragma omp parallel for default(none) \\\n private(i) \\\n shared(otherPingRank,numThreads,dataSize,sizeofBuffer,testBuf) \\\n schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n testBuf[i] = (otherPingRank * numThreads) + myThreadID;\n }\n\n for (i=0; i<sizeofBuffer; i++){\n if (testBuf[i] != finalRecvBuf[i]){\n localTestFlag = FALSE;\n }\n }\n\n free(testBuf);\n }\n\n MPI_Reduce(&localTestFlag, &testFlag, 1, MPI_INT, MPI_LAND, 0, comm);\n\n if (myMPIRank == 0){\n setTestOutcome(testFlag);\n }\n\n return 0;\n}",
| SUBROUTINE testMultiPingping(sizeofBuffer, dataSize)\n integer, intent(in) :: sizeofBuffer, dataSize\n integer :: i\n logical :: testFlag, localTestFlag\n\n !set localTestFlag to true\n localTestFlag = .true.\n\n !Testing done for processes on pingNodeA & pingNodeB\n IF (crossCommRank == pingNodeA .or. &\n crossCommRank == pingNodeB) THEN\n\n !allocate space for testBuf\n allocate(testBuf(sizeofBuffer))\n\n !Construct testBuf with correct values\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(otherPingRank,numThreads,dataSize), &\n!$OMP SHARED(sizeofBuffer,testBuf), &\n!$OMP SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n !calculate globalID of thread expected in finalRecvBuf.\n !This is done using otherPingRank\n testBuf(i) = (otherPingRank * numThreads) + myThreadID\n END DO\n!$OMP END PARALLEL DO\n\n !Compare each element of testBuf and finalRecvBuf\n DO i = 1, sizeofBuffer\n IF (testBuf(i) /= finalRecvBuf(i)) THEN\n localTestFlag = .false.\n END IF\n END DO\n\n !free space for testBuf\n deallocate(testBuf)\n\n END IF\n\n !Reduce testFlag into master with logical AND \n CALL MPI_Reduce(localTestFlag, testFlag, 1, MPI_LOGICAL, &\n MPI_LAND, 0, comm, ierr)\n\n !master sets testOutcome flag\n IF (myMPIRank == 0) THEN\n CALL setTestOutcome(testFlag)\n END IF\n\n END SUBROUTINE testMultiPingping
|
68 | multiPingPong:Driver subroutine for the multi-pingpong benchmark.
| int multiPingPong(int benchmarkType){\n\tint dataSizeIter;\n\tint pongWorldRank;\n\tchar pongProcName[MPI_MAX_PROCESSOR_NAME];\n\tint balance;\n\tpingNode = 0;\n\tpongNode = 1;\n\n\t/* Check if there's a balance in num of MPI processes on pingNode and pongNode */\n\tbalance = crossCommBalance(pingNode, pongNode);\n\tif (balance == FALSE){\n\t\t/* Master prints error and all processes return from routine */\n\t\tif (myMPIRank == 0){\n\t\t\tprintBalanceError();\n\t\t}\n\t\treturn 1;\n\t}\n\n\t/* Exchange MPI_COMM_WORLD ranks for processes in same crossComm */\n\texchangeWorldRanks(pingNode, pongNode, &pongWorldRank);\n\t/* Processes on pongNode send processor name to pingNode procs */\n\tsendProcName(pingNode, pongNode, pongProcName);\n\t/* Print comm world ranks & processor names of participating processes */\n\tprintMultiProcInfo(pingNode, pongWorldRank, pongProcName);\n\t/* Barrier to ensure printMultiProcInfo has completed before column headings */\n\tMPI_Barrier(comm);\n\n\tif (myMPIRank == 0){\n\t\tprintBenchHeader();\n\t}\n\n\t/* initialise repsToDo to defaultReps at start of benchmark */\n\trepsToDo = defaultReps;\n\tdataSizeIter = minDataSize; /* initialise dataSizeIter to minDataSize */\n\n\t/* Loop over data sizes */\n\twhile (dataSizeIter <= maxDataSize){\n\t\tsizeofBuffer = dataSizeIter * numThreads;\n\t\tallocateMultiPingpongData(sizeofBuffer);\n\n\t\t/* Warm-up for either masteronly, funnelled or multiple */\n\t\tif (benchmarkType == MASTERONLY){\n\t\t\tmasteronlyMultiPingpong(warmUpIters, dataSizeIter);\n\t\t} else if (benchmarkType == FUNNELLED){\n\t\t\tfunnelledMultiPingpong(warmUpIters, dataSizeIter);\n\t\t} else if (benchmarkType == MULTIPLE){\n\t\t\tmultipleMultiPingpong(warmUpIters, dataSizeIter);\n\t\t}\n\n\t\t/* Verification test for multi-pingpong */\n\t\ttestMultiPingpong(sizeofBuffer, dataSizeIter);\n\t\tbenchComplete = FALSE;\n\n\t\t/* Keep executing benchmark until target time is reached */\n\t\twhile (benchComplete != TRUE){\n\t\t\t/* Start the timer...MPI_Barrier to synchronise processes for more accurate timing */\n\t\t\tMPI_Barrier(comm);\n\t\t\tstartTime = MPI_Wtime();\n\n\t\t\tif (benchmarkType == MASTERONLY){\n\t\t\t\tmasteronlyMultiPingpong(repsToDo, dataSizeIter);\n\t\t\t} else if (benchmarkType == FUNNELLED){\n\t\t\t\tfunnelledMultiPingpong(repsToDo, dataSizeIter);\n\t\t\t} else if (benchmarkType == MULTIPLE){\n\t\t\t\tmultipleMultiPingpong(repsToDo, dataSizeIter);\n\t\t\t}\n\n\t\t\t/* Stop the timer...MPI_Barrier to synchronise processes for more accurate timing */\n\t\t\tMPI_Barrier(comm);\n\t\t\tfinishTime = MPI_Wtime();\n\t\t\ttotalTime = finishTime - startTime;\n\n\t\t\t/* Call repTimeCheck function to test if target time is reached */\n\t\t\tif (myMPIRank==0){\n\t\t\t\tbenchComplete = repTimeCheck(totalTime, repsToDo);\n\t\t\t}\n\t\t\t/* Ensure all procs have the same value of benchComplete and repsToDo */\n\t\t\tMPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);\n\t\t\tMPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);\n\t\t}\n\n\t\t/* Master process sets benchmark results */\n\t\tif (myMPIRank == 0){\n\t\t\tsetReportParams(dataSizeIter, repsToDo, totalTime);\n\t\t\tprintReport();\n\t\t}\n\n\t\t/* Free the allocated space for the main data arrays */\n\t\tfreeMultiPingpongData();\n\t\t/* Update dataSize before the next iteration */\n\t\tdataSizeIter = dataSizeIter * 2; /* double data size */\n\t}\n\n\treturn 0;\n}
| SUBROUTINE multiPingPong(benchmarkType)\n    integer, intent(in) :: benchmarkType\n    integer :: dataSizeIter\n    integer :: pongWorldRank\n    character (len = MPI_MAX_PROCESSOR_NAME) :: pongProcName\n    logical :: balance\n\n    pingNode = 0\n    pongNode = 1\n\n    !Check if there's a balance in num of MPI processes on\n    !pingNode and pongNode.\n    balance = crossCommBalance(pingNode, pongNode)\n    !If not balanced..\n    IF (balance .EQV. .false.) THEN\n       !..Master prints error\n       IF (myMPIRank == 0) THEN\n          CALL printBalanceError()\n       END IF\n       !..and all processes return from subroutine.\n       RETURN\n    END IF\n\n    !Exchange MPI_COMM_WORLD ranks for processes in same crossComm\n    CALL exchangeWorldRanks(pingNode, pongNode, pongWorldRank)\n\n    !Processes on pongNode send processor name to pingNode procs\n    CALL sendProcName(pingNode, pongNode, pongProcName)\n\n    !Print comm world ranks & processor names of\n    !processes taking part in multi-pingpong benchmark.\n    CALL printMultiProcInfo(pingNode, pongWorldRank, pongProcName)\n\n    !Barrier to ensure that all procs have completed\n    !printMultiProcInfo before printing column headings\n    CALL MPI_Barrier(comm, ierr)\n    !Master process then prints report column headings\n    IF (myMPIRank == 0) THEN\n       CALL printBenchHeader()\n    END IF\n\n    !initialise repsToDo to defaultReps at start of benchmark\n    repsToDo = defaultReps\n\n    !Loop over data sizes\n    dataSizeIter = minDataSize !initialise dataSizeIter to minDataSize\n    DO WHILE (dataSizeIter <= maxDataSize)\n\n       !set sizeofBuffer\n       sizeofBuffer = dataSizeIter * numThreads\n\n       !Allocate space for the main data arrays\n       CALL allocateData(sizeofBuffer)\n\n       !Warm-up for either masteronly, funnelled or multiple\n       IF (benchmarkType == MASTERONLY) THEN\n          !Perform masteronly warm-up sweep\n          CALL masteronlyMultiPingpong(warmUpIters, dataSizeIter)\n       ELSE IF (benchmarkType == FUNNELLED) THEN\n          !Funnelled warm-up sweep\n          CALL funnelledMultiPingpong(warmUpIters, dataSizeIter)\n       ELSE IF (benchmarkType == MULTIPLE) THEN\n          !Multiple pingpong warm-up\n          CALL multipleMultiPingpong(warmUpIters, dataSizeIter)\n       END IF\n\n       !Verification test for multi-pingpong\n       CALL testMultiPingpong(sizeofBuffer, dataSizeIter)\n\n       !Initialise benchmark\n       benchComplete = .false.\n       !Keep executing benchmark until target time is reached\n       DO WHILE (benchComplete .NEQV. .true.)\n\n          !Start the timer...MPI_Barrier to synchronise\n          !processes for more accurate timing.\n          CALL MPI_Barrier(comm, ierr)\n          startTime = MPI_Wtime()\n\n          IF (benchmarkType == MASTERONLY) THEN\n             !Execute masteronly multipingpong repsToDo times\n             CALL masteronlyMultiPingpong(repsToDo, dataSizeIter)\n          ELSE IF (benchmarkType == FUNNELLED) THEN\n             !Execute funnelled multipingpong\n             CALL funnelledMultiPingpong(repsToDo, dataSizeIter)\n          ELSE IF (benchmarkType == MULTIPLE) THEN\n             !Execute multiple multipingpong\n             CALL multipleMultiPingpong(repsToDo, dataSizeIter)\n          END IF\n\n          !Stop the timer...MPI_Barrier to synchronise processes\n          !for more accurate timing.\n          CALL MPI_Barrier(comm, ierr)\n          finishTime = MPI_Wtime()\n          totalTime = finishTime - startTime\n\n          !Call repTimeCheck function to test if target time\n          !is reached.\n          if (myMPIRank==0) then\n             benchComplete = repTimeCheck(totalTime, repsToDo)\n          end if\n          !Ensure all procs have the same value of benchComplete\n          !and repsToDo\n          call MPI_Bcast(benchComplete, 1, MPI_INTEGER, 0, comm, ierr)\n          call MPI_Bcast(repsToDo, 1, MPI_INTEGER, 0, comm, ierr)\n\n       END DO !end of loop to check if benchComplete is true\n\n       !Master process sets benchmark results\n       IF (myMPIRank == 0) THEN\n          CALL setReportParams(dataSizeIter, repsToDo, totalTime)\n          CALL printReport()\n       END IF\n\n       !Free the allocated space for the main data arrays\n       CALL freeData()\n\n       !Update dataSize before next iteration\n       dataSizeIter = dataSizeIter * 2 !double data size\n\n    END DO !end of loop over data sizes.\n\n  END SUBROUTINE multiPingPong
|
69 | masteronlyMultiPingpong:All MPI processes in crossComm = pingNode send a single fixed-length message to the neighbouring process in crossComm = pongNode. The neighbouring processes then send the message back to the first process.
| int masteronlyMultiPingpong(int totalReps, int dataSize){\n int repIter, i;\n for (repIter = 1; repIter <= totalReps; repIter++){\n if (crossCommRank == pingNode){\n #pragma omp parallel for default(none) \\\n private(i) \\\n shared(pingSendBuf,dataSize,sizeofBuffer,globalIDarray) \\\n schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n pingSendBuf[i] = globalIDarray[myThreadID];\n }\n MPI_Send(pingSendBuf, sizeofBuffer, MPI_INT, pongNode, TAG, crossComm);\n MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INT, pongNode, \\\n TAG, crossComm, &status);\n #pragma omp parallel for default(none) \\\n private(i) \\\n shared(pongRecvBuf,finalRecvBuf,dataSize,sizeofBuffer) \\\n schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n finalRecvBuf[i] = pongRecvBuf[i];\n }\n } else if (crossCommRank == pongNode){\n MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, pingNode,\\\n TAG, crossComm, &status);\n #pragma omp parallel for default(none) \\\n private(i) \\\n shared(pongSendBuf,pingRecvBuf,dataSize,sizeofBuffer) \\\n schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n pongSendBuf[i] = pingRecvBuf[i];\n }\n MPI_Send(pongSendBuf, sizeofBuffer, MPI_INT, pingNode, \\\n TAG, crossComm);\n }\n }\n return 0;\n}",
| SUBROUTINE masteronlyMultiPingpong(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n\n DO repIter = 1, totalReps !loop totalRep times\n\n !All threads under each MPI process with \n !crossCommRank = pingNode write to pingSendBuf array\n !using a PARALLEL DO directive.\n IF (crossCommRank == pingNode) THEN\n \n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(pingSendBuf,dataSize,sizeofBuffer,globalIDarray), &\n!$OMP SCHEDULE(STATIC,dataSize)\n\n DO i = 1, sizeofBuffer\n pingSendBuf(i) = globalIDarray(myThreadID)\n END DO\n!$OMP END PARALLEL DO\n\n !Each process with crossCommRank = pingNode sends\n !buffer to MPI process with rank = pongNode in crossComm.\n CALL MPI_Send(pingSendBuf, sizeofBuffer, MPI_INTEGER,&\n pongNode, tag, crossComm, ierr)\n\n !The processes then wait for a message from pong process\n !and each thread reads its part of the received buffer.\n CALL MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INTEGER,&\n pongNode, tag, crossComm, status, ierr)\n\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(pongRecvBuf,finalRecvBuf,dataSize,sizeofBuffer), &\n!$OMP SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n finalRecvBuf(i) = pongRecvBuf(i)\n END DO\n!$OMP END PARALLEL DO\n\n ELSEIF (crossCommRank == pongNode) THEN\n\n !Each process with crossCommRank = pongNode receives\n !the message from the pingNode processes.\n CALL MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INTEGER,&\n pingNode, tag, crossComm, status, ierr)\n\n !Each thread copies its part of the received buffer\n !to pongSendBuf.\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(pongSendBuf,pingRecvBuf,dataSize,sizeofBuffer),&\n!$OMP SCHEDULE(STATIC,dataSize)\n DO i = 1,sizeofBuffer\n pongSendBuf(i) = pingRecvBuf(i)\n END DO\n!$OMP END PARALLEL DO\n\n !The processes now send pongSendBuf to processes\n !with crossCommRank = pingNode.\n CALL MPI_Send(pongSendBuf, sizeofBuffer, MPI_INTEGER, &\n pingNode, tag, crossComm, ierr)\n \n END IF\n\n END DO !End repetitions loop\n\n END SUBROUTINE masteronlyMultiPingpong
|
70 | funnelledMultiPingpong:All MPI processes in crossComm = pingNode send a single fixed-length message to the neighbouring process in crossComm = pongNode. The neighbouring processes then send the message back to the first process. All communication takes place within the OpenMP parallel region for this benchmark.
| int funnelledMultiPingpong(int totalReps, int dataSize){\n int repIter, i;\n #pragma omp parallel default(none) \\\n private(i,repIter) \\\n shared(pingNode,pongNode,pingSendBuf,pingRecvBuf) \\\n shared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \\\n shared(dataSize,globalIDarray,crossComm,status) \\\n shared(totalReps,myMPIRank,crossCommRank)\n {\n for (repIter = 1; repIter <= totalReps; repIter++){\n if (crossCommRank == pingNode){\n #pragma omp for schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n pingSendBuf[i] = globalIDarray[myThreadID];\n }\n #pragma omp master\n {\n MPI_Send(pingSendBuf, sizeofBuffer, MPI_INT, pongNode, TAG, crossComm);\n MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INT, pongNode, TAG, \\\n crossComm, &status);\n }\n #pragma omp barrier\n\n #pragma omp for schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n finalRecvBuf[i] = pongRecvBuf[i];\n }\n } else if (crossCommRank == pongNode){\n #pragma omp master\n {\n MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, pingNode,\\\n TAG, crossComm, &status);\n }\n #pragma omp barrier\n\n #pragma omp for schedule(static,dataSize)\n for (i=0; i<sizeofBuffer; i++){\n pongSendBuf[i] = pingRecvBuf[i];\n }\n\n #pragma omp master\n {\n MPI_Send(pongSendBuf, sizeofBuffer, MPI_INT, pingNode, TAG, crossComm);\n }\n }\n }\n }\n return 0;\n}",
| SUBROUTINE funnelledMultiPingpong(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n\n !Open parallel region for threads\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,repIter), &\n!$OMP SHARED(pingNode,pongNode,pingSendBuf,pingRecvBuf),&\n!$OMP SHARED(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer),&\n!$OMP SHARED(dataSize,globalIDarray,crossComm,ierr,status), &\n!$OMP SHARED(totalReps,myMPIRank,crossCommRank)\n\n DO repIter = 1, totalReps !loop totalRep times\n \n !All threads under each MPI process with\n !crossCommRank = pingNode write to pingSendBuf array\n !using a PARALLEL DO directive.\n IF (crossCommRank == pingNode) THEN\n\n!$OMP DO SCHEDULE(STATIC,dataSize)\n\n DO i = 1, sizeofBuffer\n pingSendBuf(i) = globalIDarray(myThreadID)\n END DO\n\n!$OMP END DO\n!Implicit barrier at end of DO takes care of synchronisation here\n \n !Master thread under each pingNode process sends \n !buffer to corresponding MPI process in pongNode\n !using the crossComm.\n!$OMP MASTER\n CALL MPI_Send(pingSendBuf, sizeofBuffer, MPI_INTEGER,&\n pongNode, tag, crossComm, ierr)\n\n !The Master thread then waits for a message from the\n !pong process.\n CALL MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INTEGER,&\n pongNode, tag, crossComm, status, ierr)\n!$OMP END MASTER\n\n!Barrier needed to wait for master thread to complete MPI_Recv\n!$OMP BARRIER\n\n !Each thread then reads its part of the received buffer.\n!$OMP DO SCHEDULE(STATIC,dataSize)\n\n DO i = 1, sizeofBuffer\n finalRecvBuf(i) = pongRecvBuf(i)\n END DO\n\n!$OMP END DO\n \n ELSE IF (crossCommRank == pongNode) THEN\n\n !Master thread under each pongNode process receives\n !the message from the pingNode processes.\n!$OMP MASTER \n CALL MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INTEGER,&\n pingNode, tag, crossComm, status, ierr)\n!$OMP END MASTER\n\n!Barrier needed to wait on master thread\n!$OMP BARRIER\n\n !Each thread reads its part of the received buffer.\n!$OMP DO SCHEDULE(STATIC,dataSize)\n\n DO i = 1, sizeofBuffer\n pongSendBuf(i) = pingRecvBuf(i)\n END DO\n\n!$OMP END DO\n!Implicit barrier at end of DO\n\n !Master threads sends pongSendBuf to processes\n !with crossCommRank = pingNode.\n!$OMP MASTER\n CALL MPI_Send(pongSendBuf, sizeofBuffer, MPI_INTEGER,&\n pingNode, tag, crossComm, ierr)\n!$OMP END MASTER\n\n END IF\n END DO !End repetitions loop\n\n!$OMP END PARALLEL\n\n END SUBROUTINE funnelledMultiPingpong
|
71 | multipleMultiPingpong:Multiple threads take part in the communication and computation. Each thread of the MPI processes in crossComm = pingNode sends a portion of the message to the neighbouring process in crossComm = pongNode. Each thread of the neighbouring processes then sends its portion back to the first process.
| int multipleMultiPingpong(int totalReps, int dataSize){ \n int repIter, i; \n int lBound; \n #pragma omp parallel default(none) \\\n private(i,repIter,status,lBound) \\\n shared(pingNode,pongNode,pingSendBuf,pingRecvBuf) \\\n shared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \\\n shared(dataSize,globalIDarray,crossComm) \\\n shared(totalReps,myMPIRank,crossCommRank) \n { \n for (repIter=1; repIter<=totalReps; repIter++){ /* loop totalRep times */ \n if (crossCommRank == pingNode){ \n lBound = (myThreadID * dataSize); \n #pragma omp for nowait schedule(static,dataSize) \n for (i=0; i<sizeofBuffer; i++){ \n pingSendBuf[i] = globalIDarray[myThreadID]; \n } \n MPI_Send(&pingSendBuf[lBound], dataSize, MPI_INT, pongNode, \\\n myThreadID, crossComm); \n MPI_Recv(&pongRecvBuf[lBound], dataSize, MPI_INT, pongNode, \\\n myThreadID, crossComm, &status); \n #pragma omp for nowait schedule(static,dataSize) \n for (i=0; i<sizeofBuffer; i++){ \n finalRecvBuf[i] = pongRecvBuf[i]; \n } \n } else if (crossCommRank == pongNode){ \n lBound = (myThreadID * dataSize); \n MPI_Recv(&pingRecvBuf[lBound], dataSize, MPI_INT, pingNode, \\\n myThreadID, crossComm, &status); \n #pragma omp for nowait schedule(static,dataSize) \n for (i=0; i<sizeofBuffer; i++){ \n pongSendBuf[i] = pingRecvBuf[i]; \n } \n MPI_Send(&pongSendBuf[lBound], dataSize, MPI_INT, pingNode, \\\n myThreadID, crossComm); \n } \n } \n } \n return 0; \n}",
| SUBROUTINE multipleMultiPingpong(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: lBound, uBound\n\n !Open parallel region for threads\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,repIter,ierr,status,lBound,uBound), &\n!$OMP SHARED(pingNode,pongNode,pingSendBuf,pingRecvBuf),&\n!$OMP SHARED(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer),&\n!$OMP SHARED(dataSize,globalIDarray,crossComm), &\n!$OMP SHARED(totalReps,myMPIRank,crossCommRank)\n\n DO repIter = 1, totalReps !loop totalReps time\n\n IF (crossCommRank == pingNode) THEN\n \n !Calculate lower and upper bound of data array\n lBound = ((myThreadID-1)* dataSize) + 1\n uBound = (myThreadID * dataSize)\n\n !All threads write to its part of the pingBuf\n !array using a PARALLEL DO directive\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n pingSendBuf(i) = globalIDarray(myThreadID)\n END DO\n!$OMP END DO NOWAIT\n!Implicit barrier at end of DO not needed for multiple\n\n !Each thread under Ping process sends dataSize items\n !to pongNode process in crossComm.\n !myThreadID is used as tag to ensure data goes to \n !correct place in buffer.\n CALL MPI_Send(pingSendBuf(lBound:uBound), dataSize,&\n MPI_INTEGER, pongNode, myThreadID, crossComm, ierr)\n\n !Thread then waits for a message from pongNode.\n CALL MPI_Recv(pongRecvBuf(lBound:uBound), dataSize,&\n MPI_INTEGER, pongNode, myThreadID, crossComm, &\n status, ierr)\n\n !Each thread reads its part of the received buffer,\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n finalRecvBuf(i) = pongRecvBuf(i)\n END DO\n!$OMP END DO NOWAIT\n\n ELSEIF (crossCommRank == pongNode) THEN\n\n !Calculate lower and upper bound of data array\n lBound = ((myThreadID - 1) * dataSize) + 1\n uBound = (myThreadID * dataSize)\n\n !Each thread under pongRank receives a message\n !from the ping process\n CALL MPI_Recv(pingRecvBuf(lBound:uBound), dataSize, &\n MPI_INTEGER, pingNode, myThreadID, crossComm, &\n status, ierr)\n\n !Each thread now copies its part of the received\n !buffer to pongSendBuf\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n pongSendBuf(i) = pingRecvBuf(i)\n END DO\n!$OMP END DO NOWAIT\n\n !Each thread now sends pongSendBuf to ping process.\n CALL MPI_Send(pongSendBuf(lBound:uBound), dataSize,&\n MPI_INTEGER, pingNode, myThreadID, &\n crossComm, ierr)\n\n END IF\n END DO !End repetitions loop\n!$OMP END PARALLEL\n\n END SUBROUTINE multipleMultiPingpong
|
72 | allocateMultiPingpongData:Allocates space for the main data arrays. Size of each array is specified by subroutine argument.
| int allocateMultiPingpongData(int sizeofBuffer){ \n\n if (crossCommRank == pingNode){ \n pingSendBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n pongRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n finalRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n } \n else if (crossCommRank == pongNode){ \n pingRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n pongSendBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n } \n\n return 0; \n}",
| SUBROUTINE allocateData(sizeofBuffer)\n integer, intent(in) :: sizeofBuffer\n \n IF (crossCommRank == pingNode) THEN\n !allocate space for arrays that MPI processes\n !with crossCommRank = pingNode will use\n allocate(pingSendBuf(sizeofBuffer))\n allocate(pongRecvBuf(sizeofBuffer))\n allocate(finalRecvBuf(sizeofBuffer))\n ELSE IF (crossCommRank == pongNode) THEN\n !allocate space for arrays that MPI processes\n !with crossCommRank = pongNode will use.\n allocate(pingRecvBuf(sizeofBuffer))\n allocate(pongSendBuf(sizeofBuffer))\n END IF\n \n END SUBROUTINE allocateData
|
73 | freeMultiPingpongData:Deallocates the storage space for the main data arrays.
| int freeMultiPingpongData(){ \n if (crossCommRank == pingNode){ \n free(pingSendBuf); \n free(pongRecvBuf); \n free(finalRecvBuf); \n } \n else if (crossCommRank == pongNode){ \n free(pingRecvBuf); \n free(pongSendBuf); \n } \n return 0; \n}",
| SUBROUTINE freeData()\n \n IF (crossCommRank == pingNode) THEN\n deallocate(pingSendBuf)\n deallocate(pongRecvBuf)\n deallocate(finalRecvBuf)\n ELSE IF (crossCommRank == pongNode) THEN\n deallocate(pingRecvBuf)\n deallocate(pongSendBuf)\n END IF\n\n END SUBROUTINE freeData
|
74 | testMultiPingpong:Verifies that the multi-pingpong benchmark worked correctly.
| int testMultiPingpong(int sizeofBuffer, int dataSize){ \n int i; \n int testFlag, localTestFlag; \n localTestFlag = TRUE; \n \n if (crossCommRank == pingNode){ \n testBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n \n #pragma omp parallel for default(none) \\ \n private(i) \\ \n shared(testBuf,dataSize,sizeofBuffer,globalIDarray)\\ \n schedule(static,dataSize) \n for (i=0; i<sizeofBuffer; i++){ \n testBuf[i] = globalIDarray[myThreadID]; \n } \n \n for (i=0; i<sizeofBuffer; i++){ \n if (testBuf[i] != finalRecvBuf[i]){ \n localTestFlag = FALSE; \n } \n } \n \n free(testBuf); \n } \n \n MPI_Reduce(&localTestFlag, &testFlag, 1, MPI_INT,MPI_LAND, 0, comm); \n \n if (myMPIRank == 0){ \n setTestOutcome(testFlag); \n } \n \n return 0; \n}",
| SUBROUTINE testMultiPingpong(sizeofBuffer, dataSize)\n integer, intent(in) :: sizeofBuffer, dataSize\n integer :: i\n logical :: testFlag, localTestFlag\n\n !Initialise localtestFlag to true\n localTestFlag = .true.\n\n !All processes with crossCommRank = pingNode check\n !if multi-pingpong worked ok.\n IF (crossCommRank == pingNode) THEN\n \n !allocate space for testBuf\n allocate(testBuf(sizeofBuffer))\n \n !Construct testBuf array with correct values.\n !These are the values that should be in finalRecvBuf.\n!$OMP PARALLEL DO DEFAULT(NONE),&\n!$OMP PRIVATE(i), &\n!$OMP SHARED(testBuf,dataSize,sizeofBuffer,globalIDarray),&\n!$OMP SCHEDULE(STATIC,dataSize)\n\n DO i = 1,sizeofBuffer\n testBuf(i) = globalIDarray(myThreadID)\n END DO\n\n!$OMP END PARALLEL DO\n\n !Compare each element of testBuf and finalRecvBuf\n DO i = 1, sizeofBuffer\n IF (testBuf(i) /= finalRecvBuf(i)) THEN\n localTestFlag = .false.\n END IF\n END DO\n\n !free space for testBuf\n deallocate(testBuf)\n\n END IF\n\n !Reduce localTestFlag to master with logical AND operator\n CALL MPI_Reduce(localTestFlag, testFlag, 1, MPI_LOGICAL, &\n MPI_LAND, 0, comm, ierr)\n !Master then sets testOutcome using reduceFlag\n IF (myMPIRank == 0) THEN\n CALL setTestOutcome(testFlag)\n END IF\n\n END SUBROUTINE testMultiPingpong
|
75 | pingPing:Driver subroutine for the pingping benchmark.
| int pingPing(int benchmarkType){\n int dataSizeIter;\n int sameNode;\n pingRankA = PPRanks[0];\n pingRankB = PPRanks[1];\n\n sameNode = compareProcNames(pingRankA, pingRankB);\n\n if (myMPIRank == 0){\n printNodeReport(sameNode,pingRankA,pingRankB);\n printBenchHeader();\n }\n\n repsToDo = defaultReps;\n dataSizeIter = minDataSize; /* initialise dataSizeIter to minDataSize */\n\n while (dataSizeIter <= maxDataSize){\n sizeofBuffer = dataSizeIter * numThreads;\n allocatePingpingData(sizeofBuffer);\n\n if (benchmarkType == MASTERONLY){\n masteronlyPingping(warmUpIters, dataSizeIter);\n } else if (benchmarkType == FUNNELLED){\n funnelledPingping(warmUpIters, dataSizeIter);\n } else if (benchmarkType == MULTIPLE){\n multiplePingping(warmUpIters, dataSizeIter);\n }\n\n testPingping(sizeofBuffer, dataSizeIter);\n benchComplete = FALSE;\n\n while (benchComplete != TRUE){\n MPI_Barrier(comm);\n startTime = MPI_Wtime();\n\n if (benchmarkType == MASTERONLY){\n masteronlyPingping(repsToDo, dataSizeIter);\n } else if (benchmarkType == FUNNELLED){\n funnelledPingping(repsToDo, dataSizeIter);\n } else if (benchmarkType == MULTIPLE){\n multiplePingping(repsToDo, dataSizeIter);\n }\n\n MPI_Barrier(comm);\n finishTime = MPI_Wtime();\n totalTime = finishTime - startTime;\n\n if (myMPIRank==0){\n benchComplete = repTimeCheck(totalTime, repsToDo);\n }\n\n MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);\n MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);\n }\n\n if (myMPIRank == 0){\n setReportParams(dataSizeIter, repsToDo, totalTime);\n printReport();\n }\n\n freePingpingData();\n dataSizeIter = dataSizeIter * 2; /* double data size */\n }\n\n return 0;\n}",
| SUBROUTINE pingPing(benchmarkType)\n integer, intent(in) :: benchmarkType\n integer :: dataSizeIter\n logical :: sameNode\n\n pingRankA = PPRanks(1)\n pingRankB = PPRanks(2)\n\n !Check if pingRankA and pingRankB are on the same node\n sameNode = compareProcNames(pingRankA, pingRankB)\n \n IF (myMPIRank == 0) THEN\n !print message saying if benchmark is inter or intra node\n CALL printNodeReport(sameNode,pingRankA,pingRankB)\n !..then print benchmark report column headings.\n CALL printBenchHeader()\n END IF\n\n !initialise repsToDo to defaultReps\n repsToDo = defaultReps\n\n !Start loop over data sizes\n dataSizeIter = minDataSize !initialise dataSizeIter\n DO WHILE (dataSizeIter <= maxDataSize)\n \n !set size of buffer\n sizeofBuffer = dataSizeIter * numThreads\n\n !Allocate space for main data arrays\n CALL allocateData(sizeofBuffer)\n\n !warm-up for benchmarkType\n IF (benchmarkType == MASTERONLY) THEN\n !Masteronly warmup sweep\n CALL masteronlyPingping(warmUpIters, dataSizeIter)\n ELSE IF (benchmarkType == FUNNELLED) THEN\n !funnelled warmup\n CALL funnelledPingping(warmUpIters, dataSizeIter)\n ELSE IF (benchmarkType == MULTIPLE) THEN\n !perform multiple pinping warmup\n CALL multiplePingping(warmUpIters, dataSizeIter)\n END IF\n\n !Perform verification test for pingping\n !this is only done by pingRankA and pingRankB\n CALL testPingping(sizeofBuffer, dataSizeIter)\n \n !Initialise the benchmark\n benchComplete = .false.\n !Execute benchmark until target time is reached\n DO WHILE (benchComplete .NEQV. .true.)\n !Start timer.\n CALL MPI_Barrier(comm, ierr)\n startTime = MPI_Wtime()\n \n !Execute benchmarkType for repsToDo repetitions\n IF (benchmarkType == MASTERONLY) THEN\n CALL masteronlyPingping(repsToDo, dataSizeIter)\n ELSE IF (benchmarkType == FUNNELLED) THEN\n CALL funnelledPingping(repsToDo, dataSizeIter)\n ELSE IF (benchmarkType == MULTIPLE) THEN\n CALL multiplePingping(repsToDo, dataSizeIter)\n END IF\n \n !Stop timer.\n CALL MPI_Barrier(comm, ierr)\n finishTime = MPI_Wtime()\n totalTime = finishTime - startTime\n\n !Test if target time was reached with number of\n !repetitions.\n if (myMPIRank==0) then \n benchComplete = repTimeCheck(totalTime, repsToDo)\n end if\n !Ensure all procs have the same value of benchComplete\n !and repsToDo\n call MPI_Bcast(benchComplete, 1, MPI_INTEGER, 0, comm, ierr)\n call MPI_Bcast(repsToDo, 1, MPI_INTEGER, 0, comm, ierr)\n \n END DO !End of benchComplete loop\n\n !Master process sets benchmark results\n IF (myMPIRank == 0) THEN\n CALL setReportParams(dataSizeIter,repsToDo,totalTime)\n CALL printReport()\n END IF\n\n !Free allocated data\n CALL freeData()\n\n !Double dataSize and loop again\n dataSizeIter = dataSizeIter * 2\n\n END DO !End loop over data sizes\n\n END SUBROUTINE pingPing
|
76 | masteronlyPingping:Two processes send a message to each other using the MPI_Isend, MPI_Recv and MPI_Wait routines. Inter-process communication takes place outside of the parallel region.
| int masteronlyPingping(int totalReps, int dataSize){\n\tint repIter, i;\n\tint destRank;\n\n\t/* set destRank to ID of other process */\n\tif (myMPIRank == pingRankA){\n\t\tdestRank = pingRankB;\n\t}\n\telse if (myMPIRank == pingRankB){\n\t\tdestRank = pingRankA;\n\t}\n\n\t/* loop totalReps times */\n\tfor (repIter = 0; repIter < totalReps; repIter++){\n\n\t\tif (myMPIRank == pingRankA || myMPIRank == pingRankB){\n\n\t\t\t/* Each thread writes its globalID to its part of pingSendBuf */\n#pragma omp parallel for default(none) \\\n\tprivate(i) \\\n\tshared(pingSendBuf,dataSize,sizeofBuffer,globalIDarray) \\\n\tschedule(static,dataSize)\n\t\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\t\tpingSendBuf[i] = globalIDarray[myThreadID];\n\t\t\t}\n\n\t\t\t/* Process calls non-blocking send to start transfer of\n\t\t\t * pingSendBuf to the other process.\n\t\t\t */\n\t\t\tMPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, \\\n\t\t\t\t\tTAG, comm, &requestID);\n\n\t\t\t/* Process then waits for message from other process */\n\t\t\tMPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, \\\n\t\t\t\t\tTAG, comm, &status);\n\n\t\t\t/* Finish the send operation with an MPI_Wait */\n\t\t\tMPI_Wait(&requestID, &status);\n\n\t\t\t/* Each thread under the MPI process reads its part of the received buffer */\n#pragma omp parallel for default(none) \\\n\tprivate(i) \\\n\tshared(finalRecvBuf,dataSize,sizeofBuffer,pingRecvBuf) \\\n\tschedule(static,dataSize)\n\t\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\t\tfinalRecvBuf[i] = pingRecvBuf[i];\n\t\t\t}\n\t\t}\n\t} /* End repetitions loop */\n\n\treturn 0;\n}
| SUBROUTINE masteronlyPingping(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: destRank\n\n !set destRank to ID of other process\n IF (myMPIRank == pingRankA) THEN\n destRank = pingRankB\n ELSE IF (myMPIRank == pingRankB) THEN\n destRank = pingRankA\n END IF\n\n DO repIter = 1, totalReps !loop totalReps times\n\n IF(myMPIRank == pingRankA .or. myMPIRank == pingRankB) THEN\n\n !Each thread writes its globalID to pingSendBuf\n !using a PARALLEL DO directive.\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(pingSendBuf,dataSize,sizeofBuffer,globalIDarray), &\n!$OMP SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n pingSendBuf(i) = globalIDarray(myThreadID)\n END DO\n!$OMP END PARALLEL DO\n\n !Process calls non-blocking send to start transfer of\n !pingSendBuf to other process.\n CALL MPI_ISend(pingSendBuf, sizeofBuffer, MPI_INTEGER, &\n destRank, tag, comm, requestID, ierr)\n\n !Process then waits for message from other process.\n CALL MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INTEGER, &\n destRank, tag, comm, status, ierr)\n\n !Finish the Send operation with an MPI_Wait\n CALL MPI_Wait(requestID, status, ierr)\n\n !Each thread under the MPI process now reads its part \n !of the received buffer.\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(finalRecvBuf,dataSize,sizeofBuffer,pingRecvBuf), &\n!$OMP SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n finalRecvBuf(i) = pingRecvBuf(i)\n END DO\n!$OMP END PARALLEL DO\n\n END IF\n\n END DO !End repetitions loop\n END SUBROUTINE masteronlyPingping
|
77 | funnelledPingping:Two processes send a message to each other using the MPI_Isend, MPI_Recv and MPI_Wait routines. Inter-process communication takes place inside the OpenMP parallel region.
| int funnelledPingping(int totalReps, int dataSize){\n\tint repIter, i;\n\tint destRank;\n\n /* set destRank to ID of other process */\n if (myMPIRank == pingRankA){\n \tdestRank = pingRankB;\n }\n else if (myMPIRank == pingRankB){\n \tdestRank = pingRankA;\n }\n\n\t/* Open the parallel region */\n#pragma omp parallel default(none) \\\n\tprivate(i, repIter) \\\n\tshared(dataSize,sizeofBuffer,pingSendBuf,globalIDarray) \\\n\tshared(pingRecvBuf,finalRecvBuf,status,requestID) \\\n\tshared(destRank,comm,myMPIRank,pingRankA,pingRankB,totalReps)\n\n\tfor (repIter = 0; repIter < totalReps; repIter++){\n\n\n\t\tif (myMPIRank == pingRankA || myMPIRank == pingRankB){\n\n\t\t\t/* Each thread writes its globalID to its part of\n\t\t\t * pingSendBuf.\n\t\t\t */\n#pragma omp for schedule(static,dataSize)\n\t\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\t\tpingSendBuf[i] = globalIDarray[myThreadID];\n\t\t\t}\n/* Implicit barrier here takes care of necessary synchronisation */\n\n#pragma omp master\n\t\t\t{\n\t\t\t/* Master thread starts send of buffer */\n\t\t\tMPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, \\\n\t\t\t\t\tTAG, comm, &requestID);\n\n\t\t\t/* then waits for message from other process */\n\t\t\tMPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, \\\n\t\t\t\t\tTAG, comm, &status);\n\n\t\t\t/* Master thread then completes send using an MPI_Wait */\n\t\t\tMPI_Wait(&requestID, &status);\n\t\t\t}\n\n/* Barrier needed to ensure master thread has completed transfer */\n#pragma omp barrier\n\n\t\t\t/* Each thread reads its part of the received buffer */\n#pragma omp for schedule(static,dataSize)\n\t\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\t\tfinalRecvBuf[i] = pingRecvBuf[i];\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 0;\n}
| SUBROUTINE funnelledPingping(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: destRank\n\n !set destRank to ID of other process\n IF (myMPIRank == pingRankA) THEN\n destRank = pingRankB\n ELSE IF (myMPIRank == pingRankB) THEN\n destRank = pingRankA\n END IF\n\n !Open the parallel region\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,repIter), &\n!$OMP SHARED(dataSize,sizeofBuffer,pingSendBuf,globalIDarray), &\n!$OMP SHARED(pingRecvBuf,finalRecvBuf,status,requestID,ierr), &\n!$OMP SHARED(destRank,comm,myMPIRank,pingRankA,pingRankB,totalReps)\n\n DO repIter = 1, totalReps !loop totalRep times\n \n IF(myMPIRank == pingRankA .or. myMPIRank == pingRankB) THEN\n \n !Each threads write its globalID to its part of\n !pingSendBuf.\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1,sizeofBuffer\n pingSendBuf(i) = globalIDarray(myThreadID)\n END DO\n!$OMP END DO\n!Implicit barrier here takes care of necessary synchronisation.\n \n!$OMP MASTER\n !Master thread starts send of buffer\n CALL MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INTEGER,&\n destRank, tag, comm, requestID, ierr)\n !then waits for message from other process.\n CALL MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INTEGER,&\n destRank, tag, comm, status, ierr)\n !Master thread then completes send using MPI_Wait.\n CALL MPI_Wait(requestID, status, ierr)\n!$OMP END MASTER\n\n!Barrier needed to ensure master thread has completed transfer.\n!$OMP BARRIER\n \n !Each thread reads its part of the received buffer\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1,sizeofBuffer\n finalRecvBuf(i) = pingRecvBuf(i)\n END DO\n!$OMP END DO\n\n END IF\n\n END DO !End repetitions loop\n!$OMP END PARALLEL\n\n END SUBROUTINE funnelledPingping
|
78 | multiplePingping:With this algorithm, multiple threads take part in the communication and computation. Each thread sends its portion of pingSendBuf to the other process using the MPI_Isend/MPI_Recv/MPI_Wait routines.
| int multiplePingping(int totalReps, int dataSize){\n\tint repIter, i;\n\tint destRank;\n\tint lBound;\n\n /* set destRank to ID of other process */\n if (myMPIRank == pingRankA){\n \tdestRank = pingRankB;\n }\n else if (myMPIRank == pingRankB){\n \tdestRank = pingRankA;\n }\n\n /* Open parallel region */\n#pragma omp parallel default(none) \\\n\tprivate(i,lBound,requestID,status,repIter) \\\n\tshared(pingSendBuf,pingRecvBuf,finalRecvBuf,sizeofBuffer) \\\n\tshared(destRank,myMPIRank,pingRankA,pingRankB,totalReps) \\\n\tshared(dataSize,globalIDarray,comm)\n {\n for (repIter = 0; repIter < totalReps; repIter++){\n\n \tif (myMPIRank == pingRankA || myMPIRank == pingRankB){\n\n \t\t/* Calculate the lower bound of each threads\n \t\t * portion of the data arrays.\n \t\t */\n \t\tlBound = (myThreadID * dataSize);\n\n \t\t/* Each thread writes to its part of pingSendBuf */\n#pragma omp for nowait schedule(static,dataSize)\n \t\tfor (i=0; i<sizeofBuffer; i++){\n \t\t\tpingSendBuf[i] = globalIDarray[myThreadID];\n \t\t}\n\n \t\t/* Each thread starts send of dataSize items of\n \t\t * pingSendBuf to process with rank = destRank.\n \t\t */\n \t\tMPI_Isend(&pingSendBuf[lBound], dataSize, MPI_INT, destRank, \\\n \t\t\t\tmyThreadID, comm, &requestID);\n\n \t\t/* Thread then waits for message from destRank with\n \t\t * tag equal to it thread id.\n \t\t */\n \t\tMPI_Recv(&pingRecvBuf[lBound], dataSize, MPI_INT, destRank, \\\n \t\t\t\tmyThreadID, comm, &status);\n\n \t\t/* Thread completes send using MPI_Wait */\n \t\tMPI_Wait(&requestID, &status);\n\n \t\t/* Each thread reads its part of received buffer. */\n#pragma omp for nowait schedule(static,dataSize)\n \t\tfor (i=0; i<sizeofBuffer; i++){\n \t\t\tfinalRecvBuf[i] = pingRecvBuf[i];\n \t\t}\n \t}\n }\n }\n\n return 0;\n}
| SUBROUTINE multiplePingping(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: destRank\n integer :: lBound, uBound\n\n !set destRank to be ID of other process\n IF (myMPIRank == pingRankA) THEN\n destRank = pingRankB\n ELSE IF (myMPIRank == pingRankB) THEN\n destRank = pingRankA\n END IF\n \n !Open parallel region\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,lBound,uBound,requestID,status,ierr,repIter), &\n!$OMP SHARED(pingSendBuf,pingRecvBuf,finalRecvBuf,sizeofBuffer),&\n!$OMP SHARED(destRank,myMPIRank,pingRankA,pingRankB,totalReps),&\n!$OMP SHARED(dataSize,globalIDarray,comm)\n\n DO repIter = 1,totalReps !Loop totalReps times\n IF(myMPIRank == pingRankA .or. myMPIRank == pingRankB) THEN\n \n !Calculate lower and upper bound of each threads \n !portion of the data arrays\n lBound = ((myThreadID-1)* dataSize) + 1\n uBound = (myThreadID * dataSize)\n \n !Each thread writes to its part of pingSendBuf\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1,sizeofBuffer\n pingSendBuf(i) = globalIDarray(myThreadID)\n END DO\n!$OMP END DO NOWAIT\n\n !Each thread starts send of dataSize items of \n !pingSendBuf to process with rank = destRank\n CALL MPI_Isend(pingSendBuf(lBound:uBound), dataSize,&\n MPI_INTEGER, destRank, myThreadID, comm,&\n requestID, ierr)\n !Thread then waits for message from destRank with \n !tag equal to its thread id.\n CALL MPI_Recv(pingRecvBuf(lBound:uBound), dataSize,&\n MPI_INTEGER, destRank, myThreadID, comm,&\n status, ierr)\n !Thread completes send using MPI_Wait\n CALL MPI_Wait(requestID, status, ierr)\n\n !Each thread reads its part of received buffer.\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n finalRecvBuf(i) = pingRecvBuf(i)\n END DO\n!$OMP END DO NOWAIT\n\n END IF\n\n END DO !End repetitions loop\n!$OMP END PARALLEL\n\n END SUBROUTINE multiplePingping
|
79 | allocatePingpingData:Allocates space for the main data arrays. Size of each array is specified by subroutine argument.
| int allocatePingpingData(int sizeofBuffer){\n\n\tpingSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\tpingRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\tfinalRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\n\treturn 0;\n}
| SUBROUTINE allocateData(bufferSize)\n integer, intent(in) :: bufferSize\n\n allocate(pingSendBuf(bufferSize), pingRecvBuf(bufferSize))\n allocate(finalRecvBuf(bufferSize))\n\n END SUBROUTINE allocateData
|
80 | freePingpingData:Deallocates the storage space for the main data arrays.
| int freePingpingData(){\n\n\tfree(pingSendBuf);\n\tfree(pingRecvBuf);\n\tfree(finalRecvBuf);\n\n\treturn 0;\n}
| SUBROUTINE freeData()\n\n deallocate(pingSendBuf, pingRecvBuf)\n deallocate(finalRecvBuf)\n\n END SUBROUTINE freeData
|
81 | testPingping:Verifies that the PingPing benchmark worked correctly.
| int testPingping(int sizeofBuffer,int dataSize){\n\tint otherPingRank, i, testFlag, reduceFlag;\n\tint *testBuf;\n\t/* initialise testFlag to true (test passed) */\n\ttestFlag = TRUE;\n\t/* Testing only needs to be done by pingRankA & pingRankB */\n\tif (myMPIRank == pingRankA || myMPIRank == pingRankB){\n\t\t/* allocate space for testBuf */\n\t\ttestBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\t\t/* set the ID of other pingRank */\n\t\tif (myMPIRank == pingRankA){\n\t\t\totherPingRank = pingRankB;\n\t\t}\n\t\telse if (myMPIRank == pingRankB){\n\t\t\totherPingRank = pingRankA;\n\t\t}\n\t\t/* construct testBuf array with correct values.\n\t\t * These are the values that should be in finalRecvBuf.\n\t\t */\n#pragma omp parallel for default(none) \\\n\t\tprivate(i) \\\n\t\tshared(otherPingRank,numThreads,testBuf,dataSize,sizeofBuffer) \\\n\t\tschedule(static,dataSize)\n\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\t/* calculate globalID of thread expected in finalRecvBuf\n\t\t\t * This is done by using otherPingRank\n\t\t\t */\n\t\t\ttestBuf[i] = (otherPingRank * numThreads) + myThreadID;\n\t\t}\n\t\t/* compare each element of testBuf and finalRecvBuf */\n\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\tif (testBuf[i] != finalRecvBuf[i]){\n\t\t\t\ttestFlag = FALSE;\n\t\t\t}\n\t\t}\n\t\t/* free space for testBuf */\n\t\tfree(testBuf);\n\t}\n\tMPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);\n\t/* Master process sets the testOutcome using testFlag. */\n\t if (myMPIRank == 0){\n\t\t setTestOutcome(reduceFlag);\n\t }\n\t return 0;\n}
| SUBROUTINE testPingPing(sizeofBuffer, dataSize)\n integer, intent(in) :: sizeofBuffer, dataSize\n integer :: otherPingRank, i\n logical :: testFlag, reduceFlag\n \n !set testFlag to true\n testFlag = .true.\n \n !Testing only needs to be done by pingRankA & pingRankB\n IF (myMPIRank == pingRankA .or. myMPIRank == pingRankB) THEN\n !allocate space for testBuf\n allocate(testBuf(sizeofBuffer))\n \n !set the ID of other pingRank\n IF (myMPIRank == pingRankA) THEN\n otherPingRank = pingRankB\n ELSE IF (myMPIRank == pingRankB) THEN\n otherPingRank = pingRankA\n END IF\n \n !Construct testBuf with correct values\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(otherPingRank,numThreads,dataSize), &\n!$OMP SHARED(sizeofBuffer,testBuf), &\n!$OMP SCHEDULE(STATIC,dataSize)\n DO i = 1, sizeofBuffer\n !calculate globalID of thread expected in finalRecvBuf\n !this is done by using otherPingRank\n testBuf(i) = (otherPingRank * numThreads) + myThreadID\n END DO\n!$OMP END PARALLEL DO\n\n !Compare each element of testBuf and finalRecvBuf\n DO i = 1, sizeofBuffer\n IF (testBuf(i) /= finalRecvBuf(i)) THEN\n testFlag = .false.\n END IF\n END DO\n\n !free space for testBuf\n deallocate(testBuf)\n \n END IF !End test loop\n \n !Reduce testFlag into master with logical AND\n CALL MPI_Reduce(testFlag, reduceFlag, 1, MPI_LOGICAL, &\n MPI_LAND, 0, comm, ierr)\n\n !master sets testOutcome flag\n IF (myMPIRank == 0) THEN\n CALL setTestOutcome(reduceFlag)\n END IF\n\n END SUBROUTINE testPingPing
|
82 | pingPong:Driver subroutine for the pingpong benchmark.
| int pingPong(int benchmarkType){\n\tint dataSizeIter;\n\tint sameNode;\n\tpingRank = PPRanks[0];\n\tpongRank = PPRanks[1];\n\t/* Check if pingRank and pongRank are on the same node */\n\tsameNode = compareProcNames(pingRank,pongRank);\n\t/* Master process then does some reporting */\n\tif (myMPIRank == 0){\n\t\t/* print message saying if benchmark is inter or intra node */\n\t\tprintNodeReport(sameNode,pingRank,pongRank);\n\t\t/* then print report column headings. */\n\t\tprintBenchHeader();\n\t}\n\t/* initialise repsToDo to defaultReps at start of benchmark */\n\trepsToDo = defaultReps;\n\t/* Loop over data sizes */\n\tdataSizeIter = minDataSize; /* initialise dataSizeIter to minDataSize */\n\twhile (dataSizeIter <= maxDataSize){\n\t\t/* set sizeofBuffer */\n\t\tsizeofBuffer = dataSizeIter * numThreads;\n\t\t/* allocate space for the main data arrays */\n\t\tallocatePingpongData(sizeofBuffer);\n\t\t/* warm-up for either masteronly, funnelled or multiple */\n\t\tif (benchmarkType == MASTERONLY){\n\t\t\t/* perform masteronly warm-up sweep */\n\t\t\tmasteronlyPingpong(warmUpIters, dataSizeIter);\n\t\t}\n\t\telse if (benchmarkType == FUNNELLED){\n\t\t\t/* perform funnelled warm-up sweep */\n\t\t\tfunnelledPingpong(warmUpIters, dataSizeIter);\n\t\t}\n\t\telse if (benchmarkType == MULTIPLE){\n\t\t\tmultiplePingpong(warmUpIters, dataSizeIter);\n\t\t}\n\t\t/* perform verification test for the pingpong */\n\t\ttestPingpong(sizeofBuffer, dataSizeIter);\n\t\t/* Initialise benchmark */\n\t\tbenchComplete = FALSE;\n\t\t/* keep executing benchmark until target time is reached */\n\t\twhile (benchComplete != TRUE){\n\t\t\t/* Start the timer...MPI_Barrier to synchronise */\n\t\t\tMPI_Barrier(comm);\n\t\t\tstartTime = MPI_Wtime();\n\t\t\tif (benchmarkType == MASTERONLY){\n\t\t\t\t/* execute for repsToDo repetitions */\n\t\t\t\tmasteronlyPingpong(repsToDo, dataSizeIter);\n\t\t\t}\n\t\t\telse if (benchmarkType == FUNNELLED){\n\t\t\t\tfunnelledPingpong(repsToDo, dataSizeIter);\n\t\t\t}\n\t\t\telse if (benchmarkType == MULTIPLE){\n\t\t\t\tmultiplePingpong(repsToDo, dataSizeIter);\n\t\t\t}\n\t\t\t/* Stop the timer...MPI_Barrier to synchronise processes */\n\t\t\tMPI_Barrier(comm);\n\t\t\tfinishTime = MPI_Wtime();\n\t\t\ttotalTime = finishTime - startTime;\n\t\t\t/* Call repTimeCheck function to test if target time is reached */\n\t\t\tif (myMPIRank==0){\n\t\t\t benchComplete = repTimeCheck(totalTime, repsToDo);\n\t\t\t}\n\t\t\t/* Ensure all procs have the same value of benchComplete */\n\t\t\t/* and repsToDo */\n\t\t\tMPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);\n\t\t\tMPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);\n\t\t}\n\t\t/* Master process sets benchmark results */\n\t\tif (myMPIRank == 0){\n\t\t\tsetReportParams(dataSizeIter, repsToDo, totalTime);\n\t\t\tprintReport();\n\t\t}\n\t\t/* Free the allocated space for the main data arrays */\n\t\tfreePingpongData();\n\t\t/* Update dataSize before the next iteration */\n\t\tdataSizeIter = dataSizeIter * 2; /* double data size */\n\t}\n\treturn 0;\n}
| SUBROUTINE pingPong(benchmarkType)\n integer, intent(in) :: benchmarkType\n integer :: dataSizeIter\n logical :: sameNode\n\n pingRank = PPRanks(1) \n pongRank = PPRanks(2)\n \n !Check if pingRank and pongRank are on the same node\n sameNode = compareProcNames(pingRank,pongRank)\n \n !Master process then does reporting...\n IF (myMPIRank == 0) THEN\n !print message saying if benchmark is inter or intra node\n CALL printNodeReport(sameNode,pingRank,pongRank)\n !..then print report column headings.\n CALL printBenchHeader()\n END IF\n\n !initialise repsToDo to defaultReps at start of benchmark\n repsToDo = defaultReps\n\n !Loop over data sizes\n dataSizeIter = minDataSize !initialise dataSizeIter to minDataSize\n DO WHILE (dataSizeIter <= maxDataSize)\n \n !set sizeofBuffer \n sizeofBuffer = dataSizeIter*numThreads\n\n !Allocate space for the main data arrays\n CALL allocateData(sizeofBuffer)\n\n !warm-up for either masteronly, funnelled or multiple\n IF (benchmarkType == MASTERONLY) THEN\n !Perform masteronly warm-up sweep\n CALL masteronlyPingpong(warmUpIters, dataSizeIter)\n ELSE IF (benchmarkType == FUNNELLED) THEN\n !Perform funnelled warm-up sweep\n CALL funnelledPingpong(warmUpIters, dataSizeIter)\n ELSE IF (benchmarkType == MULTIPLE) THEN\n !Perform multiple pingpong warmup\n CALL multiplePingpong(warmUpIters, dataSizeIter)\n END IF\n\n !Perform verification test for the pingpong\n CALL testPingpong(sizeofBuffer, dataSizeIter)\n \n !Initialise benchmark\n benchComplete = .false.\n !Keep executing benchmark until target time is reached\n DO WHILE (benchComplete .NEQV. .true.)\n\n !Start the timer...MPI_Barrier to synchronise \n !processes for more accurate timing.\n CALL MPI_Barrier(comm, ierr)\n startTime = MPI_Wtime()\n \n IF (benchmarkType == MASTERONLY) THEN\n !Execute for repsToDo repetitions\n CALL masteronlyPingpong(repsToDo, dataSizeIter)\n ELSE IF (benchmarkType == FUNNELLED) THEN\n CALL funnelledPingpong(repsToDo, dataSizeIter)\n ELSE IF (benchmarkType == MULTIPLE) THEN\n CALL multiplePingpong(repsToDo, dataSizeIter)\n END IF\n\n !Stop the timer...MPI_Barrier to synchronise processes \n !for more accurate timing.\n CALL MPI_Barrier(comm, ierr)\n finishTime = MPI_Wtime()\n totalTime = finishTime - startTime\n\n !Call repTimeCheck function to test if target time \n !is reached.\n if (myMPIRank==0) then \n benchComplete = repTimeCheck(totalTime, repsToDo)\n end if\n !Ensure all procs have the same value of benchComplete\n !and repsToDo\n call MPI_Bcast(benchComplete, 1, MPI_INTEGER, 0, comm, ierr)\n call MPI_Bcast(repsToDo, 1, MPI_INTEGER, 0, comm, ierr)\n\n END DO !end of loop to check if benchComplete is true\n\n !Master process sets benchmark results\n IF (myMPIRank == 0) THEN\n CALL setReportParams(dataSizeIter,repsToDo,totalTime)\n CALL printReport()\n END IF\n\n !Free the allocated space for the main data arrays\n CALL freeData()\n\n !Update dataSize before next iteration\n dataSizeIter = dataSizeIter * 2 !double data size\n \n END DO !end of loop over data sizes\n \nEND SUBROUTINE pingPong
|
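The pingPong driver above pairs a geometric sweep over message sizes with an adaptive repetition count: each buffer holds dataSize * numThreads integers (one chunk per thread), the data size doubles every sweep, and repTimeCheck keeps growing repsToDo until the timed block exceeds the target time. The standalone C sketch below only illustrates that control-flow shape; dummyTransfer, the constants, and the repetition-doubling policy are assumptions for the example, not the benchmark's actual globals or its repTimeCheck implementation.

```c
/* Standalone sketch of the timing-harness shape used by pingPong().
 * dummyTransfer(), all constants and the doubling policy are illustrative. */
#include <stdio.h>
#include <time.h>

static volatile long sink;                         /* prevents the dummy work being optimised away */
static double now(void) { return (double)clock() / CLOCKS_PER_SEC; }
static void dummyTransfer(int dataSize) { sink += dataSize; } /* stands in for one ping-pong */

int main(void) {
    const int numThreads = 4, minDataSize = 1, maxDataSize = 1024;
    const double targetTime = 0.01;                /* seconds, illustrative */

    for (int dataSize = minDataSize; dataSize <= maxDataSize; dataSize *= 2) {
        int sizeofBuffer = dataSize * numThreads;  /* one contiguous chunk per thread */
        int reps = 4;                              /* stand-in for defaultReps */
        double elapsed = 0.0;

        while (elapsed < targetTime) {             /* stand-in for the repTimeCheck() loop */
            double start = now();
            for (int r = 0; r < reps; r++)
                dummyTransfer(dataSize);
            elapsed = now() - start;
            if (elapsed < targetTime)
                reps *= 2;                         /* assumed repetition-growth policy */
        }
        printf("size %6d (buffer %7d ints): %8d reps in %.4f s\n",
               dataSize, sizeofBuffer, reps, elapsed);
    }
    return 0;
}
```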
83 | masteronlyPingpong:One MPI process sends a single fixed-length message to another MPI process, which then sends it back to the first process. All MPI communication takes place outside the OpenMP parallel regions.
| int masteronlyPingpong(int totalReps, int dataSize){\n\tint repIter, i;\n\tfor (repIter = 0; repIter < totalReps; repIter++){\n\t\t/* All threads under MPI process with rank = pingRank\n\t\t * write to their part of the pingBuf array using a\n\t\t * parallel for directive.\n\t\t */\n\t\tif (myMPIRank == pingRank){\n#pragma omp parallel for default(none) \\\n\tprivate(i) \\\n\tshared(pingSendBuf,dataSize,sizeofBuffer,globalIDarray) \\\n\tschedule(static,dataSize)\n\t\t\tfor(i=0; i<sizeofBuffer; i++){\n\t\t\t\tpingSendBuf[i] = globalIDarray[myThreadID];\n\t\t\t}\n\t\t\t/* Ping process sends buffer to MPI process with rank equal to\n\t\t\t * pongRank.\n\t\t\t */\n\t\t\tMPI_Send(pingSendBuf, sizeofBuffer, MPI_INT, pongRank, TAG, comm);\n\t\t\t/* Process then waits for a message from pong process and\n\t\t\t * each thread reads its part of received buffer.\n\t\t\t */\n\t\t\tMPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INT, pongRank, \\\n\t\t\t\t\tTAG, comm, &status);\n#pragma omp parallel for default(none) \\\n\tprivate(i) \\\n\tshared(pongRecvBuf,finalRecvBuf,dataSize,sizeofBuffer) \\\n\tschedule(static,dataSize)\n\t\t\tfor(i=0; i<sizeofBuffer; i++){\n\t\t\t\tfinalRecvBuf[i] = pongRecvBuf[i];\n\t\t\t}\n\t\t}\n\t\telse if (myMPIRank == pongRank){\n\t\t\t/* pongRank receives the message from the ping process */\n\t\t\tMPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, pingRank, \\\n\t\t\t\t\tTAG, comm, &status);\n\t\t\t/* each thread under the pongRank MPI process now copies\n\t\t\t * its part of the received buffer to pongSendBuf.\n\t\t\t */\n#pragma omp parallel for default(none) \\\n\tprivate(i) \\\n\tshared(pongSendBuf,pingRecvBuf,dataSize,sizeofBuffer) \\\n\tschedule(static,dataSize)\n\t\t\tfor(i=0; i< sizeofBuffer; i++){\n\t\t\t\tpongSendBuf[i] = pingRecvBuf[i];\n\t\t\t}\n\t\t\t/* pongRank process now sends pongSendBuf to ping process.\n\t\t\t * The buffer holds C ints, so MPI_INT is used here (not the\n\t\t\t * Fortran MPI_INTEGER handle). */\n\t\t\tMPI_Send(pongSendBuf, sizeofBuffer, MPI_INT, pingRank, \\\n\t\t\t\t\tTAG, comm);\n\t\t}\n\t}\n\treturn 0;\n}
| SUBROUTINE masteronlyPingpong(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n DO repIter = 1, totalReps !loop totalRep times\n\n !All threads under MPI process with rank = pingRank\n !write to its part of the pingBuf array using a \n !PARALLEL DO directive.\n IF (myMPIRank == pingRank) THEN\n \n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(pingSendBuf,dataSize,sizeofBuffer,globalIDarray), &\n!$OMP SCHEDULE(STATIC,dataSize)\n\n DO i = 1,sizeofBuffer\n pingSendBuf(i) = globalIDarray(myThreadID)\n ENDDO\n!$OMP END PARALLEL DO\n \n !Ping process sends buffer to MPI process with rank equal\n !to pongRank.\n CALL MPI_Send(pingSendBuf, sizeofBuffer, MPI_INTEGER,&\n pongRank, tag, comm, ierr)\n \n !Process then waits for a message from pong process and\n !each thread reads its part of received buffer.\n !This completes the pingpong operation.\n CALL MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INTEGER,&\n pongRank, tag, comm, status, ierr)\n\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(pongRecvBuf,finalRecvBuf,dataSize,sizeofBuffer), &\n!$OMP SCHEDULE(STATIC,dataSize)\n DO i = 1,sizeofBuffer\n finalRecvBuf(i) = pongRecvBuf(i)\n ENDDO\n!$OMP END PARALLEL DO\n \n ELSEIF (myMPIRank == pongRank) THEN\n \n !pongRank receives the message from the ping process\n CALL MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INTEGER, &\n pingRank, tag, comm, status, ierr)\n\n !Each thread under the pongRank MPI process now copies \n !its part of the received buffer to pongSendBuf\n\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(pongSendBuf,pingRecvBuf,dataSize,sizeofBuffer), &\n!$OMP SCHEDULE(STATIC,dataSize)\n DO i = 1,sizeofBuffer\n pongSendBuf(i) = pingRecvBuf(i)\n END DO\n!$OMP END PARALLEL DO\n \n !pongRank process now sends pongSendBuf to ping process.\n CALL MPI_Send(pongSendBuf, sizeofBuffer, MPI_INTEGER, &\n pingRank, tag, comm, ierr)\n END IF\n\n END DO !End repetitions loop\n \nEND SUBROUTINE masteronlyPingpong
|
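In every variant the buffer is sized as sizeofBuffer = dataSize * numThreads and the copy loops use schedule(static, dataSize), so each thread owns exactly one contiguous dataSize-element chunk: thread t (0-based, as in the C code) touches elements t*dataSize through (t+1)*dataSize - 1, and the 1-based Fortran loops cover the matching range. The short sketch below, with illustrative names, just demonstrates that mapping.

```c
/* Sketch: which slice of the shared buffer each OpenMP thread writes when the
 * loop is scheduled with schedule(static, dataSize). Names are illustrative. */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    const int dataSize = 8;
    const int numThreads = omp_get_max_threads();
    const int sizeofBuffer = dataSize * numThreads;
    int *buf = malloc(sizeofBuffer * sizeof(int));

    /* Same loop shape as the benchmark's fill loops */
    #pragma omp parallel for schedule(static, dataSize)
    for (int i = 0; i < sizeofBuffer; i++)
        buf[i] = omp_get_thread_num();   /* stand-in for globalIDarray[myThreadID] */

    /* With numThreads chunks of dataSize iterations, chunk t goes to thread t */
    for (int t = 0; t < numThreads; t++)
        printf("chunk %d: buf[%d..%d] written by thread %d\n",
               t, t * dataSize, (t + 1) * dataSize - 1, buf[t * dataSize]);

    free(buf);
    return 0;
}
```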
84 | funnelledPingpong:One MPI process sends a single fixed-length message to another MPI process, which then sends it back to the first process. All communication takes place within the OpenMP parallel region for this benchmark, funnelled through the master thread.
| int funnelledPingpong(int totalReps, int dataSize){\n\tint repIter, i;\n\t/* Open parallel region for threads */\n#pragma omp parallel default(none) \\\n\tprivate(i,repIter) \\\n\tshared(pingRank,pongRank,pingSendBuf,pingRecvBuf) \\\n\tshared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \\\n\tshared(dataSize,globalIDarray,comm,status,totalReps,myMPIRank)\n\t{\n\tfor (repIter=0; repIter< totalReps; repIter++){\n\t\t/* All threads under MPI process with rank = pingRank\n\t\t * write to its part of the pingBuf array using a\n\t\t * parallel do directive.\n\t\t */\n\t\tif (myMPIRank == pingRank){\n#pragma omp for schedule(static,dataSize)\n\t\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\t\tpingSendBuf[i] = globalIDarray[myThreadID];\n\t\t\t}\n/* Implicit barrier at end of for takes care of synchronisation */\n\t\t\t/*Master thread under ping process sends buffer to\n\t\t\t * MPI process with rank equal to pongRank.\n\t\t\t */\n#pragma omp master\n\t\t\t{\n\t\t\tMPI_Send(pingSendBuf, sizeofBuffer, MPI_INT, pongRank, TAG, comm);\n\t\t\t/* Master thread then waits for a message from pong process */\n\t\t\tMPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INT, pongRank, TAG, \\\n\t\t\t\t\tcomm, &status);\n\t\t\t}\n/* Barrier needed to wait for master thread to complete MPI_Recv */\n#pragma omp barrier\n\t\t/*Each thread reads its part of received buffer */\n#pragma omp for schedule(static,dataSize)\n\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\tfinalRecvBuf[i] = pongRecvBuf[i];\n\t\t}\n\t}\n\telse if (myMPIRank == pongRank){\n\t\t/* Master thread under pongRank receives the message\n\t\t * from the ping process.\n\t\t */\n#pragma omp master\n\t\t{\n\t\tMPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, pingRank, TAG, comm, &status);\n\t\t}\n/* Barrier needed to wait on master thread */\n#pragma omp barrier\n\t\t/* Each thread under the pongRank MPI process now copies its part\n\t\t * of the received buffer to pongSendBuf.\n\t\t */\n#pragma omp for schedule(static,dataSize)\n\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\tpongSendBuf[i] = pingRecvBuf[i];\n\t\t}\n/* Implicit barrier at end of DO */\n\t\t/* Master thread of pongRank process now sends pongSendBuf\n\t\t * to ping process.\n\t\t */\n#pragma omp master\n\t\t{\n\t\tMPI_Send(pongSendBuf, sizeofBuffer, MPI_INT, pingRank, TAG, comm);\n\t\t}\n\t}\n\t} /* end of repetitions */\n\t} /* end of parallel region */\n\treturn 0;\n}
| SUBROUTINE funnelledPingpong(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n\n !Open parallel region for threads\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,repIter), &\n!$OMP SHARED(pingRank,pongRank,pingSendBuf,pingRecvBuf), &\n!$OMP SHARED(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer), &\n!$OMP SHARED(dataSize,globalIDarray,comm,ierr,status), &\n!$OMP SHARED(totalReps,myMPIRank)\n\n DO repIter = 1, totalReps !loop totalRep times\n\n !All threads under MPI process with rank = pingRank\n !write to its part of the pingBuf array using a \n !PARALLEL DO directive.\n IF (myMPIRank == pingRank) THEN\n \n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1,sizeofBuffer\n pingSendBuf(i) = globalIDarray(myThreadID)\n ENDDO\n!$OMP END DO\n!Implicit barrier at end of DO takes care of synchronisation here\n\n !Master thread under Ping process sends buffer \n !to MPI process with rank equal to pongRank.\n!$OMP MASTER\n CALL MPI_Send(pingSendBuf, sizeofBuffer, MPI_INTEGER,&\n pongRank, tag, comm, ierr)\n \n !Thread then waits for a message from pong process.\n CALL MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INTEGER,&\n pongRank, tag, comm, status, ierr)\n!$OMP END MASTER\n\n!Barrier needed to wait for master thread to complete MPI_Recv\n!$OMP BARRIER \n\n !Each thread reads its part of received buffer.\n !This completes the pingpong operation.\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1,sizeofBuffer\n finalRecvBuf(i) = pongRecvBuf(i)\n ENDDO\n!$OMP END DO\n\n ELSEIF (myMPIRank == pongRank) THEN\n \n !Master thread under pongRank receives the message \n !from the ping process\n!$OMP MASTER\n CALL MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INTEGER, &\n pingRank, tag, comm, status, ierr)\n!$OMP END MASTER\n\n!Barrier needed to wait on master thread\n!$OMP BARRIER\n\n !Each thread under the pongRank MPI process now copies \n !its part of the received buffer to pongSendBuf\n\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1,sizeofBuffer\n pongSendBuf(i) = pingRecvBuf(i)\n END DO\n!$OMP END DO\n!Implicit barrier at end of DO\n \n !Master thread pongRank process now sends pongSendBuf \n !to ping process.\n!$OMP MASTER\n CALL MPI_Send(pongSendBuf, sizeofBuffer, MPI_INTEGER, &\n pingRank, tag, comm, ierr)\n!$OMP END MASTER\n\n END IF\n END DO !End repetitions loop\n!$OMP END PARALLEL \n\n END SUBROUTINE funnelledPingpong
|
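The three variants place different demands on the MPI library's thread support: masteronly calls MPI only outside parallel regions and funnelled only from the master thread inside one, so MPI_THREAD_FUNNELED suffices for both, while the multiple variant that follows needs MPI_THREAD_MULTIPLE. The sketch below shows the kind of initialisation a mixed-mode code might perform; the suite's own initialisation routine is defined elsewhere, so the check and messages here are illustrative only.

```c
/* Sketch: requesting an MPI thread-support level before running the
 * pingpong variants. Error handling and wording are illustrative. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[]) {
    /* MPI_THREAD_FUNNELED is enough for masteronly/funnelled;
     * the multiple variant needs MPI_THREAD_MULTIPLE. */
    int required = MPI_THREAD_MULTIPLE;
    int provided;

    MPI_Init_thread(&argc, &argv, required, &provided);
    if (provided < required) {
        fprintf(stderr, "MPI provides thread level %d but %d is required\n",
                provided, required);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* ... the pingpong benchmarks would run here ... */

    MPI_Finalize();
    return 0;
}
```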
85 | multiplePingpong:With this algorithm multiple threads take part in both the communication and the computation. Each thread under the ping MPI process sends its portion of the message to the other MPI process, and each thread of that process then sends its portion back to the first process.
| int multiplePingpong(int totalReps, int dataSize){\n\tint repIter, i, lBound;\n\t/* Open parallel region for threads under pingRank.\n\t * status is privatised so that each thread uses its own MPI_Status,\n\t * avoiding a race on the concurrent MPI_Recv calls. */\n#pragma omp parallel default(none) \\\n\tprivate(i,repIter,lBound,status) \\\n\tshared(pingRank,pongRank,pingSendBuf,pingRecvBuf) \\\n\tshared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \\\n\tshared(dataSize,globalIDarray,comm,totalReps,myMPIRank)\n\t{\n\tfor (repIter=0; repIter < totalReps; repIter++){\n\t\tif (myMPIRank == pingRank){\n\t\t\t/* Calculate lower bound of data array for the thread */\n\t\t\tlBound = (myThreadID * dataSize);\n\t\t\t/* All threads under MPI process with rank = pingRank\n\t\t\t * write to their part of the pingBuf array using\n\t\t\t * a parallel for directive.\n\t\t\t */\n#pragma omp for nowait schedule(static,dataSize)\n\t\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\t\tpingSendBuf[i] = globalIDarray[myThreadID];\n\t\t\t}\n/* Implicit barrier not needed for multiple */\n\t\t\t/* Each thread under ping process sends dataSize items\n\t\t\t * to MPI process with rank equal to pongRank.\n\t\t\t * myThreadID is used as tag to ensure data goes to correct\n\t\t\t * place in recv buffer.\n\t\t\t */\n\t\t\tMPI_Send(&pingSendBuf[lBound], dataSize, MPI_INT, pongRank, \\\n\t\t\t\t\tmyThreadID, comm);\n\t\t\t/* Thread then waits for a message from pong process. */\n\t\t\tMPI_Recv(&pongRecvBuf[lBound], dataSize, MPI_INT, pongRank, \\\n\t\t\t\t\tmyThreadID, comm, &status);\n\t\t\t/* Each thread reads its part of the received buffer */\n#pragma omp for nowait schedule(static,dataSize)\n\t\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\t\tfinalRecvBuf[i] = pongRecvBuf[i];\n\t\t\t}\n\t\t}\n\t\telse if (myMPIRank == pongRank){\n\t\t\t/* Calculate lower bound of the data array */\n\t\t\tlBound = (myThreadID * dataSize);\n\t\t\t/* Each thread under pongRank receives a message\n\t\t\t * from the ping process.\n\t\t\t */\n\t\t\tMPI_Recv(&pingRecvBuf[lBound], dataSize, MPI_INT, pingRank, \\\n\t\t\t\t\tmyThreadID, comm, &status);\n\t\t\t/* Each thread now copies its part of the received buffer\n\t\t\t * to pongSendBuf.\n\t\t\t */\n#pragma omp for nowait schedule(static,dataSize)\n\t\t\tfor (i=0; i<sizeofBuffer; i++)\n\t\t\t{\n\t\t\t\tpongSendBuf[i] = pingRecvBuf[i];\n\t\t\t}\n\t\t\t/* Each thread now sends pongSendBuf to ping process. */\n\t\t\tMPI_Send(&pongSendBuf[lBound], dataSize, MPI_INT, pingRank, \\\n\t\t\t\t\tmyThreadID, comm);\n\t\t}\n\t}/* end of repetitions */\n\t} /* end of parallel region */\n\treturn 0;\n}
| SUBROUTINE multiplePingpong(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: lBound, uBound\n\n !Open parallel region for threads under pingRank\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,lBound,uBound,repIter,ierr,status), &\n!$OMP SHARED(myMPIRank,pingRank,pongRank,pingSendBuf,pingRecvBuf), &\n!$OMP SHARED(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer),&\n!$OMP SHARED(totalReps,dataSize,globalIDarray,comm)\n\n DO repIter = 1, totalReps !loop totalRep times\n\n IF (myMPIRank == pingRank) THEN\n \n !Calculate lower and upper bound of data array\n lBound = ((myThreadID-1)* dataSize) + 1\n uBound = (myThreadID * dataSize)\n \n !All threads under MPI process with rank = pingRank\n !write to its part of the pingBuf array using a \n !PARALLEL DO directive\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1,sizeofBuffer\n pingSendBuf(i) = globalIDarray(myThreadID)\n ENDDO\n!$OMP END DO NOWAIT\n!Implicit barrier at end of DO not needed for multiple\n\n !Each thread under Ping process sends dataSize items\n !to MPI process with rank equal to pongRank.\n !myThreadID is used as tag to ensure data goes to correct \n !place in buffer.\n CALL MPI_Send(pingSendBuf(lBound:uBound), dataSize,&\n MPI_INTEGER, pongRank, myThreadID, comm, ierr)\n \n !Thread then waits for a message from pong process.\n CALL MPI_Recv(pongRecvBuf(lBound:uBound), dataSize, &\n MPI_INTEGER, pongRank, myThreadID, comm, &\n status, ierr)\n\n !Each thread reads its part of received buffer.\n !This completes the pingpong operation.\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1,sizeofBuffer\n finalRecvBuf(i) = pongRecvBuf(i)\n ENDDO\n!$OMP END DO NOWAIT\n\n\n ELSEIF (myMPIRank == pongRank) THEN\n\n !Calculate lower and upper bound of data array\n lBound = ((myThreadID-1)* dataSize)+1\n uBound = (myThreadID * dataSize)\n\n\n !Each thread under pongRank receives a message \n !from the ping process\n CALL MPI_Recv(pingRecvBuf(lBound:uBound), dataSize, &\n MPI_INTEGER, pingRank, myThreadID, comm, &\n status, ierr)\n\n !Each thread under the pongRank MPI process now copies \n !its part of the received buffer to pongSendBuf\n\n!$OMP DO SCHEDULE(STATIC,dataSize)\n DO i = 1,sizeofBuffer\n pongSendBuf(i) = pingRecvBuf(i)\n END DO\n!$OMP END DO NOWAIT\n \n !Each thread now sends pongSendBuf to ping process.\n CALL MPI_Send(pongSendBuf(lBound:uBound), dataSize, &\n MPI_INTEGER, pingRank, myThreadID, comm, ierr)\n\n END IF\n END DO !End repetitions loop\n!$OMP END PARALLEL \n\n END SUBROUTINE multiplePingpong
|
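In the multiple variant each thread moves only its own dataSize-element slice and reuses its thread ID as the message tag so that slices arrive in matching positions on the other side. The arithmetic differs only by index base: the C code numbers threads from 0, the Fortran code from 1. The small sketch below (illustrative, no MPI calls) prints the equivalent bounds.

```c
/* Sketch: per-thread slice bounds used by the multiple pingpong.
 * C (0-based):       lBound = tid * dataSize, count = dataSize
 * Fortran (1-based): lBound = (tid-1)*dataSize + 1, uBound = tid*dataSize */
#include <stdio.h>

int main(void) {
    const int dataSize = 8, numThreads = 4;
    for (int tid = 0; tid < numThreads; tid++) {
        int lBoundC = tid * dataSize;          /* as computed in the C code */
        int lBoundF = tid * dataSize + 1;      /* same slice, 1-based Fortran view */
        int uBoundF = (tid + 1) * dataSize;
        printf("thread %d: C elements [%d, %d), Fortran elements %d..%d, C tag %d\n",
               tid, lBoundC, lBoundC + dataSize, lBoundF, uBoundF, tid);
    }
    return 0;
}
```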
86 | allocateData:Allocates space for the main data arrays. The size of each array is specified by the subroutine argument.
| int allocatePingpongData(int sizeofBuffer){\n\tpingSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\tpingRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\tpongSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\tpongRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\tfinalRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\treturn 0;\n}
| SUBROUTINE allocateData(sizeofBuffer)\n integer, intent(in) :: sizeofBuffer\n allocate(pingSendBuf(sizeofBuffer), pingRecvBuf(sizeofBuffer))\n allocate(pongSendBuf(sizeofBuffer), pongRecvBuf(sizeofBuffer))\n allocate(finalRecvBuf(sizeofBuffer))\n\n END SUBROUTINE allocateData
|
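The C allocator above does not check the values returned by malloc. A defensive variant, not part of the original suite, might abort all ranks cleanly when an allocation fails; the helper name below is hypothetical.

```c
/* Sketch: allocation with failure checking. xmallocInt() is a hypothetical
 * helper, not a routine from the benchmark suite. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

static int *xmallocInt(size_t n) {
    int *p = malloc(n * sizeof *p);
    if (p == NULL) {
        fprintf(stderr, "failed to allocate %zu ints\n", n);
        MPI_Abort(MPI_COMM_WORLD, 1);   /* bring all ranks down together */
    }
    return p;
}

int main(int argc, char *argv[]) {
    MPI_Init(&argc, &argv);
    int sizeofBuffer = 1024;
    int *pingSendBuf = xmallocInt((size_t)sizeofBuffer);
    int *pingRecvBuf = xmallocInt((size_t)sizeofBuffer);
    /* ... a benchmark would use the buffers here ... */
    free(pingSendBuf);
    free(pingRecvBuf);
    MPI_Finalize();
    return 0;
}
```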
87 | freeData:Deallocates the storage space for the main data arrays.
| int freePingpongData(){\n\tfree(pingSendBuf);\n\tfree(pingRecvBuf);\n\tfree(pongSendBuf);\n\tfree(pongRecvBuf);\n\tfree(finalRecvBuf);\n\treturn 0;\n}
| SUBROUTINE freeData()\n deallocate(pingSendBuf, pingRecvBuf)\n deallocate(pongSendBuf, pongRecvBuf)\n deallocate(finalRecvBuf)\n\n END SUBROUTINE freeData
|
88 | testPingpong:Verifies that the Ping Pong benchmark worked correctly.
| int testPingpong(int sizeofBuffer,int dataSize){\n\tint i, testFlag;\n\tint *testBuf;\n\t/* PingRank process checks if pingpong worked ok. */\n\tif (myMPIRank == pingRank){\n\t\t/* initialise testFlag to true (test passed) */\n\t\ttestFlag = TRUE;\n\t\t/* allocate space for the testBuf */\n\t\ttestBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\t\t/* construct testBuf array with correct values.\n\t\t * These are the values that should be in finalRecvBuf.\n\t\t */\n#pragma omp parallel for default(none) \\\n\t\tprivate(i) \\\n\t\tshared(testBuf,dataSize,sizeofBuffer,globalIDarray) \\\n\t\tschedule(static,dataSize)\n\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\ttestBuf[i] = globalIDarray[myThreadID];\n\t\t}\n\t\t/* compare each element of testBuf and finalRecvBuf */\n\t\tfor (i=0; i<sizeofBuffer; i++){\n\t\t\tif (testBuf[i] != finalRecvBuf[i]){\n\t\t\t\ttestFlag = FALSE;\n\t\t\t}\n\t\t}\n\t\t/* free space for testBuf */\n\t\tfree(testBuf);\n\t}\n\t/* pingRank broadcasts testFlag to the other processes */\n\tMPI_Bcast(&testFlag, 1, MPI_INT, pingRank, comm);\n\t/* Master process sets the testOutcome using testFlag. */\n\tif (myMPIRank == 0){\n\t\tsetTestOutcome(testFlag);\n\t}\n\treturn 0;\n}
| SUBROUTINE testPingPong(sizeofBuffer, dataSize)\n  integer, intent(in) :: sizeofBuffer, dataSize\n  integer :: i\n  logical :: testFlag\n\n  !PingRank process checks if pingpong worked ok\n  IF (myMPIRank == pingRank) THEN\n     !initialise testFlag to true (test passed)\n     testFlag = .true.\n\n     !allocate space for the testBuf\n     allocate(testBuf(sizeofBuffer))\n\n     !Construct testBuf array with correct values.\n     !These are the values that should be in finalRecvBuf.\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(testBuf,dataSize,sizeofBuffer,globalIDarray), &\n!$OMP SCHEDULE(STATIC,dataSize)\n\n     DO i = 1,sizeofBuffer\n        testBuf(i) = globalIDarray(myThreadID)\n     END DO\n\n!$OMP END PARALLEL DO\n\n     !Compare each element of testBuf and finalRecvBuf\n     DO i = 1, sizeofBuffer\n        IF (testBuf(i) /= finalRecvBuf(i)) THEN\n           testFlag = .false.\n        END IF\n     END DO\n\n     !free space for testBuf\n     deallocate(testBuf)\n\n  END IF\n\n  !pingRank broadcasts testFlag to the other processes\n  CALL MPI_Bcast(testFlag, 1, MPI_LOGICAL, pingRank, comm, ierr)\n\n  !Master process then sets the testOutcome using testFlag.\n  IF (myMPIRank == 0) THEN\n     CALL setTestOutcome(testFlag)\n  END IF\n\nEND SUBROUTINE testPingPong
|