Initial commit. Everything but analysis has been completed. Performance table pending verification
This commit is contained in:
commit
5e444713ca
10
report.md
Normal file
10
report.md
Normal file
@ -0,0 +1,10 @@
|
||||
# Report

| Implementation | Elapsed Time (ms) | MFLOP/s | Memory Bandwidth (GB/s) |
| --------------- | ----------------- | ------- | ----------------------- |
| CPU Vector Addition | xx | xx | xx |
| CUDA 1 thread, 1 thread block | 1,203.31 | 425.59 | 5.35 |
| CUDA 256 threads, 1 thread block | 1,212.36 | 422.76 | 5.31 |
| CUDA 256 threads/block, many thread blocks | 1,232.73 | 415.16 | 5.24 |
| CUDA 256 threads/block, many blocks, prefetching | 4.77 | 112,591.01 | 1,349.96 |
|
||||
|
||||
47
vecadd_cpu.cpp
Normal file
47
vecadd_cpu.cpp
Normal file
@ -0,0 +1,47 @@
|
||||
#include <chrono>
|
||||
#include <iostream>
|
||||
#include <math.h>
|
||||
|
||||
// Element-wise vector addition on the host: y[i] <- x[i] + y[i] for
// i in [0, n). x is read-only; y is updated in place.
void add(int n, float *x, float *y) {
    for (int idx = 0; idx != n; ++idx) {
        y[idx] += x[idx];
    }
}
|
||||
|
||||
// Benchmarks serial CPU vector addition and prints elapsed time plus the
// derived metrics needed for the report table (MFLOP/s, memory bandwidth).
int main(void) {
    int N = 1 << 29; // Setting problem size to 1<<29 (536,870,912 elements)

    float *x = new float[N];
    float *y = new float[N];

    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Timer starts before the add function call
    auto start_time = std::chrono::high_resolution_clock::now();

    // Run kernel on the elements on the CPU
    add(N, x, y);

    // Timer ends after the add function call
    auto end_time = std::chrono::high_resolution_clock::now();

    // Calculate elapsed time
    std::chrono::duration<double> elapsed = end_time - start_time;
    std::cout << "Elapsed time: " << elapsed.count() << " seconds" << std::endl;

    // Derived metrics for the report table: one FLOP (the add) per element,
    // and three 4-byte accesses per element (read x, read y, write y).
    double seconds = elapsed.count();
    if (seconds > 0.0) {
        double mflops = (double)N / seconds / 1.0e6;
        double bandwidthGBs = 3.0 * (double)N * sizeof(float) / seconds / 1.0e9;
        std::cout << "MFLOP/s: " << mflops << std::endl;
        std::cout << "Memory bandwidth (GB/s): " << bandwidthGBs << std::endl;
    }

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    // Free memory
    delete[] x;
    delete[] y;

    return 0;
}
|
||||
53
vecadd_gpu_1t.cu
Normal file
53
vecadd_gpu_1t.cu
Normal file
@ -0,0 +1,53 @@
|
||||
#include <iostream>
|
||||
#include <math.h>
|
||||
#include <cuda_runtime.h>
|
||||
|
||||
// CUDA kernel to add the elements of two arrays: y[i] = x[i] + y[i].
// Uses a grid-stride loop, so any <<<blocks, threads>>> configuration
// (including a single thread) covers all n elements.
__global__
void add(int n, float *x, float *y) {
    int first = threadIdx.x + blockIdx.x * blockDim.x;
    int step = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step)
        y[i] += x[i];
}
|
||||
|
||||
// Benchmark variant: ONE thread in ONE thread block (vecadd_gpu_1t).
// The grid-stride loop in the kernel still covers all N elements.
int main(void) {
    int N = 1 << 29; // Setting problem size to 1<<29 (536,870,912 elements)

    float *x, *y;

    // Allocate Unified Memory – accessible from CPU or GPU
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));

    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Run kernel with a single thread in a single thread block, matching
    // the "CUDA 1 thread, 1 thread block" row of the report. (The earlier
    // launch used 256 threads and many blocks — a copy of the many-block
    // benchmark, not this one.)
    add<<<1, 1>>>(N, x, y);

    // Kernel launches do not return errors directly; check explicitly.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess) {
        std::cerr << "Kernel launch failed: "
                  << cudaGetErrorString(launchErr) << std::endl;
        return 1;
    }

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++) {
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    }
    std::cout << "Max error: " << maxError << std::endl;

    // Free memory
    cudaFree(x);
    cudaFree(y);

    return 0;
}
|
||||
53
vecadd_gpu_256t.cu
Normal file
53
vecadd_gpu_256t.cu
Normal file
@ -0,0 +1,53 @@
|
||||
#include <iostream>
|
||||
#include <math.h>
|
||||
#include <cuda_runtime.h>
|
||||
|
||||
// CUDA kernel to add the elements of two arrays: y[i] = x[i] + y[i].
// Grid-stride loop: each thread starts at its global thread id and steps
// by the total thread count, so any launch configuration covers n elements.
__global__
void add(int n, float *x, float *y) {
    int first = threadIdx.x + blockIdx.x * blockDim.x;
    int step = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step)
        y[i] += x[i];
}
|
||||
|
||||
// Benchmark variant: 256 threads in ONE thread block (vecadd_gpu_256t).
// The grid-stride loop in the kernel still covers all N elements.
int main(void) {
    int N = 1 << 29; // Setting problem size to 1<<29 (536,870,912 elements)

    float *x, *y;

    // Allocate Unified Memory – accessible from CPU or GPU
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));

    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Number of threads per block
    int blockSize = 256;
    // A single block, matching the "CUDA 256 threads, 1 thread block" row
    // of the report. (The earlier code computed many blocks — a copy of
    // the many-block benchmark, not this one.)
    int numBlocks = 1;

    // Run kernel on the elements on the GPU
    add<<<numBlocks, blockSize>>>(N, x, y);

    // Kernel launches do not return errors directly; check explicitly.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess) {
        std::cerr << "Kernel launch failed: "
                  << cudaGetErrorString(launchErr) << std::endl;
        return 1;
    }

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++) {
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    }
    std::cout << "Max error: " << maxError << std::endl;

    // Free memory
    cudaFree(x);
    cudaFree(y);

    return 0;
}
|
||||
56
vecadd_gpu_256t_mb.cu
Normal file
56
vecadd_gpu_256t_mb.cu
Normal file
@ -0,0 +1,56 @@
|
||||
#include <iostream>
|
||||
#include <math.h>
|
||||
#include <cuda_runtime.h>
|
||||
|
||||
// CUDA kernel to add the elements of two arrays: y[i] = x[i] + y[i].
// Grid-stride loop: correctness does not depend on the launch
// configuration, so any grid/block shape covers all n elements.
__global__
void add(int n, float *x, float *y) {
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step = gridDim.x * blockDim.x;
    for (int i = first; i < n; i += step)
        y[i] += x[i];
}
|
||||
|
||||
// Benchmark variant: 256 threads/block, many thread blocks.
int main(void) {
    int N = 1 << 29; // Setting problem size to 1<<29 (536,870,912 elements)

    float *x, *y;

    // Allocate Unified Memory – accessible from CPU or GPU
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));

    // Initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Number of threads per block
    int blockSize = 256;
    // Number of blocks in the grid (ceil-div so the grid covers all N)
    int numBlocks = (N + blockSize - 1) / blockSize;

    // Print the number of thread blocks
    std::cout << "Number of thread blocks: " << numBlocks << std::endl;

    // Run kernel on the elements on the GPU
    add<<<numBlocks, blockSize>>>(N, x, y);

    // Kernel launches do not return errors directly; without this check a
    // failed launch would be silent and "Max error" would read stale data.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess) {
        std::cerr << "Kernel launch failed: "
                  << cudaGetErrorString(launchErr) << std::endl;
        return 1;
    }

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++) {
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    }
    std::cout << "Max error: " << maxError << std::endl;

    // Free memory
    cudaFree(x);
    cudaFree(y);

    return 0;
}
|
||||
60
vecadd_gpu_256t_mb_prefetch.cu
Normal file
60
vecadd_gpu_256t_mb_prefetch.cu
Normal file
@ -0,0 +1,60 @@
|
||||
#include <iostream>
|
||||
#include <math.h>
|
||||
#include <cuda_runtime.h>
|
||||
|
||||
// CUDA kernel to add the elements of two arrays: y[i] = x[i] + y[i].
// Grid-stride loop: each thread begins at its global id and advances by
// the total number of launched threads until all n elements are covered.
__global__
void add(int n, float *x, float *y) {
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step = gridDim.x * blockDim.x;
    for (int i = first; i < n; i += step)
        y[i] += x[i];
}
|
||||
|
||||
// Benchmark variant: 256 threads/block, many blocks, with unified-memory
// prefetching to the device before the kernel launch.
int main(void) {
    int N = 1 << 29; // Setting problem size to 1<<29 (536,870,912 elements)

    float *x, *y;

    // Allocate Unified Memory – accessible from CPU or GPU
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));

    // Initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Prefetch both arrays to the device the kernel will run on. Query
    // the active device instead of hard-coding 0 so this stays correct
    // under CUDA_VISIBLE_DEVICES or an earlier cudaSetDevice call.
    int deviceID = 0;
    cudaGetDevice(&deviceID);
    cudaMemPrefetchAsync(x, N * sizeof(float), deviceID);
    cudaMemPrefetchAsync(y, N * sizeof(float), deviceID);

    // Number of threads per block
    int blockSize = 256;
    // Number of blocks in the grid (ceil-div so the grid covers all N)
    int numBlocks = (N + blockSize - 1) / blockSize;

    // Print the number of thread blocks
    std::cout << "Number of thread blocks: " << numBlocks << std::endl;

    // Run kernel on the elements on the GPU
    add<<<numBlocks, blockSize>>>(N, x, y);

    // Kernel launches do not return errors directly; check explicitly.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess) {
        std::cerr << "Kernel launch failed: "
                  << cudaGetErrorString(launchErr) << std::endl;
        return 1;
    }

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++) {
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    }
    std::cout << "Max error: " << maxError << std::endl;

    // Free memory
    cudaFree(x);
    cudaFree(y);

    return 0;
}
|
||||
Loading…
Reference in New Issue
Block a user