// NOTE(review): removed scraped web-page residue ("231" plus forum UI labels)
// that made this translation unit uncompilable, and restored the standard
// includes the code below plainly requires (printf/fprintf, malloc/free,
// and the CUDA runtime API).
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Forward declaration: host-side helper that adds a, b and c element-wise on
// the GPU into r (size elements each); defined after main below.
cudaError_t addWithCuda(float *r, float *a, float *b, float *c, size_t size);
// Element-wise three-vector add: r[i] = a[i] + b[i] + c[i].
// Expects a 1-D launch. The signature carries no element count, so the
// caller must guarantee every buffer holds at least
// gridDim.x * blockDim.x floats (addWithCuda sizes its device allocations
// to a whole number of blocks for exactly this reason).
__global__ void addKernel(float *r, float *a, float *b, float *c)
{
    // Bug fix: the original used threadIdx.x alone, so every block rewrote
    // elements 0..blockDim.x-1 and elements beyond blockDim.x were never
    // computed at all. Use the flat global index instead.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bug fix: the intermediate was declared int, truncating any fractional
    // part of the float sum.
    float tem = a[i] + b[i];
    tem = tem + c[i];
    r[i] = tem;
}
// Host driver: builds three input vectors, computes a CPU reference sum in
// r1, runs the GPU addition via addWithCuda into r, and prints both so the
// results can be compared by eye. Returns 0 on success, 1 on any failure.
int main()
{
    const int arraySize = 1000;
    float *a = (float *)malloc(sizeof(float) * arraySize);
    float *b = (float *)malloc(sizeof(float) * arraySize);
    float *c = (float *)malloc(sizeof(float) * arraySize);
    // Bug fix: r and r1 were allocated with sizeof(int); they hold floats.
    // (Same size on common ABIs, but wrong in principle.)
    float *r = (float *)malloc(sizeof(float) * arraySize);
    float *r1 = (float *)malloc(sizeof(float) * arraySize);
    // Robustness: the original never checked the allocations.
    if (!a || !b || !c || !r || !r1) {
        fprintf(stderr, "host malloc failed!");
        free(a); free(b); free(c); free(r); free(r1);
        return 1;
    }
    for (int i = 0; i < arraySize; i++)
    {
        a[i] = (float)i;
        b[i] = 1.0f;          // float literals: avoid double->float conversion
        c[i] = 1.0f;
        r[i] = 0.0f;
        r1[i] = a[i] + b[i] + c[i];   // CPU reference result
    }
    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(r, a, b, c, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        // Bug fix: the original leaked all five host buffers on this path.
        free(a); free(b); free(c); free(r); free(r1);
        return 1;
    }
    for (int i = 0; i < arraySize; i++)
    {
        printf("%f+%f+%f=%f r1[i]:%f\n", a[i], b[i], c[i], r[i], r1[i]);
    }
    // cudaDeviceReset must be called before exiting in order for profiling
    // and tracing tools such as Nsight and Visual Profiler to show complete
    // traces. (Replaces the long-deprecated cudaThreadExit.)
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        free(a); free(b); free(c); free(r); free(r1);
        return 1;
    }
    free(a);
    free(b);
    free(c);
    free(r);
    free(r1);
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Copies a, b and c (size floats each) to the device, launches addKernel,
// and copies the result back into r. All host pointers must reference at
// least size floats. Returns the first CUDA error encountered, or
// cudaSuccess; device buffers are always released before returning.
cudaError_t addWithCuda(float *r, float *a, float *b, float *c, size_t size)
{
    float *dev_a = 0;
    float *dev_b = 0;
    float *dev_c = 0;
    float *dev_r = 0;
    const int threadsNum = 256;
    cudaError_t cudaStatus;
    // Robustness: size == 0 would otherwise produce a zero-block launch,
    // which is an invalid configuration. Nothing to do in that case.
    if (size == 0) {
        return cudaSuccess;
    }
    int blockNum = (int)((size + threadsNum - 1) / threadsNum);  // ceil-div
    // Size device buffers to a whole number of blocks so that threads of the
    // final block with global index >= size still land inside allocated
    // memory; only the first `size` elements are copied in either direction.
    size_t paddedBytes = (size_t)blockNum * threadsNum * sizeof(float);
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for the three input vectors and one output.
    cudaStatus = cudaMalloc((void**)&dev_r, paddedBytes);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_c, paddedBytes);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, paddedBytes);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, paddedBytes);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_c, c, size * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<blockNum, threadsNum>>>(dev_r, dev_a, dev_b, dev_c);
    // Bug fix: a kernel launch returns no status directly; launch-config
    // errors are only visible via cudaGetLastError. The original dropped
    // them on the floor.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns any
    // errors encountered during execution. (Replaces the long-deprecated
    // cudaThreadSynchronize.)
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(r, dev_r, size * sizeof(float), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    // Single cleanup path: cudaFree(0) is a safe no-op for never-allocated
    // pointers, so this is correct from every goto above.
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_r);
    return cudaStatus;
}