OpenCL矩阵相乘求助

baidu_37112680 2017-01-13 05:00:37
程序可以正常编译运行，但读回的矩阵 C 输出全是 0，profiling 计时也没有任何输出，这是为什么呢？求助。

main.cpp

#include "stdafx.h"

#include<sstream>
#include <iostream>
#include<time.h>
#include <string>
#include<math.h>
#include <vector>
#include <CL/cl.h>
#include <fstream>


using namespace std;
#define MAX_SOURCE_SIZE (0x1000000)


int main(int argc, char **argv[])
{


FILE *fp;
char fileName[] = "./MatrixMul.cl";
char *source_str;
size_t source_size;

/* Load the source code containing the kernel*/
fp = fopen(fileName, "r");
if (!fp) {
fprintf(stderr, "Failed to load kernel.\n");
exit(1);
}
source_str = (char*)malloc(MAX_SOURCE_SIZE);
source_size = fread(source_str, 1, MAX_SOURCE_SIZE, fp);
fclose(fp);


//the size of matrixs
int A_row = 1024;
int A_col = 1024;
int B_row = A_col;
int B_col = 1024;

int A_size = A_row * A_col;
int B_size = B_row * B_col;
int C_size = A_row * B_col;


/** step 1: get platform */
int err=-1;
cl_uint num_platforms = NULL;
cl_platform_id platform_id = NULL;
int choose_num_platform = 1;
err = clGetPlatformIDs(choose_num_platform, &platform_id, &num_platforms);

if ((CL_SUCCESS != err) || (num_platforms < 1))
{
cout << "Error getting platform: " << err << endl;
return 0;
}
else
{
cout << "The number of platform is: " << num_platforms << ". And the platform id is: " << platform_id <<
". \n***If you want to change platform please modifiy the choose_num_platform variable.\n"<<endl;
}

/** step 2: get device */
cl_uint num_devices = NULL;
cl_device_id device_id = NULL;
int choose_num_device=1;
clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, choose_num_device, &device_id, &num_devices);
if ((CL_SUCCESS != err) || (num_devices < 1))
{
cout << "Error getting GPU device: " << err << endl;
return 0;
}
else {
cout << "The number of device is: " << num_devices << ". And the device id is: " << device_id <<
". \n***If you want to change device please modifiy the choose_num_device variable.\n" << endl;
}

/** step 3: create context */

cl_context_properties props[] = { CL_CONTEXT_PLATFORM, (cl_context_properties)platform_id, 0 };
cl_context context = NULL;
context = clCreateContext(props, 1, &device_id, NULL, NULL, &err);

if ((CL_SUCCESS != err) || (NULL == context))
{
cout << "Error creating context: " << err << endl;
return 0;
}

/** step 4: create command queue */

cl_command_queue command_queue = NULL;
command_queue = clCreateCommandQueue(context, device_id, CL_QUEUE_PROFILING_ENABLE, &err);

if ((CL_SUCCESS != err) || (NULL == command_queue))
{
cout << "Error creating command queue: " << err << endl;
return 0;
}

/** step 5: create memory object */
cl_mem matrixAMemObj = NULL;
cl_mem matrixBMemObj = NULL;
cl_mem matrixCMemObj = NULL;

// matrixs
int *MatrixA = NULL;;
int *MatrixB = NULL;;
int *MatrixC = NULL;;

const int BUF_SIZEa = A_size * sizeof(int);
const int BUF_SIZEb = B_size * sizeof(int);
const int BUF_SIZEc = C_size * sizeof(int);

// create and init host buffer
MatrixA = (int *)malloc(A_row * A_col * sizeof(int));
MatrixB = (int *)malloc(B_row * B_col * sizeof(int));
MatrixC = (int *)malloc(B_row * A_col * sizeof(int));

for (int i = 0; i < A_size; i++)
{
MatrixA[i] = (int)std::rand() / 10000;
}

for (int i = 0; i < B_size; i++)
{
MatrixB[i] = (int)std::rand() / 1000;
}

for (int i = 0; i < C_size; ++i)
{
MatrixC[i] = 333.0;

}


// create opencl memory object using host ptr
matrixAMemObj = clCreateBuffer(context, CL_MEM_READ_ONLY, BUF_SIZEa, NULL, NULL);
matrixBMemObj = clCreateBuffer(context, CL_MEM_READ_ONLY, BUF_SIZEb, NULL, NULL);
matrixCMemObj = clCreateBuffer(context, CL_MEM_WRITE_ONLY, BUF_SIZEc, NULL, NULL);

if ((err != CL_SUCCESS) || (NULL == matrixAMemObj) || (NULL == matrixBMemObj) || (NULL == matrixCMemObj))
{
cout << "Error creating command queue: " << err << endl;
return 0;
}


/** step 6: create program */

cl_program program;

program = clCreateProgramWithSource(context, 1, (const char**)&source_str, (const size_t *)&source_size, &err);

if ((CL_SUCCESS != err) || (NULL == program))
{
cout << "Error creating program: " << err << endl;
return 0;
}

// build program
err = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);

if (CL_SUCCESS != err)
{
cout << "Error building program: " << err << endl;
return 0;
}

/** step 7: create kernel */

cl_kernel kernel;
kernel = clCreateKernel(program, "Matrixmul", &err);

if ((CL_SUCCESS != err) || (NULL == kernel))
{
cout << "Error creating kernel: " << err << endl;
return 0;
}

/** step 8: set kernel arguments */
err = clSetKernelArg(kernel, 0, sizeof(int), &B_row);
err = clSetKernelArg(kernel, 1, sizeof(int), &A_col);
err = clSetKernelArg(kernel, 2, sizeof(cl_mem), &matrixAMemObj);
err = clSetKernelArg(kernel, 3, sizeof(cl_mem), &matrixBMemObj);
err = clSetKernelArg(kernel, 4, sizeof(cl_mem), &matrixCMemObj);
err = clSetKernelArg(kernel, 5, sizeof(int)*A_col, NULL);

if (CL_SUCCESS != err)
{
cout << "Error setting kernel argument: " << err << endl;
return 0;
}

/** step 9: set work group size */
size_t globalThreads[] = { A_col };
size_t localThreads[] = { 256 };
cl_event prof_event;

//write A B into device memory
err = clEnqueueWriteBuffer(command_queue, matrixAMemObj, CL_TRUE, 0, A_row * A_col * sizeof(int), (void*)MatrixA, 0, NULL, NULL);
err = clEnqueueWriteBuffer(command_queue, matrixBMemObj, CL_TRUE, 0, B_row * B_col * sizeof(int), (void*)MatrixB, 0, NULL, NULL);

/** step 10: run kernel */
err = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, globalThreads, localThreads, 0, NULL, &prof_event);
clWaitForEvents(1, &prof_event);

/** step 11: get result */

cl_ulong ev_start_time = 0;
cl_ulong ev_end_time = 0;
int err9 = 9;
err9 = clGetEventInfo(prof_event, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &ev_start_time, NULL);
cout << "ev_start_time: " << err9 << ev_start_time << endl;
int err10 = 10;
err10 = clGetEventInfo(prof_event, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &ev_end_time, NULL);
cout << "ev_end_time: " << err10 << ev_end_time << endl;
int err11 = 11;
err11 = clEnqueueReadBuffer(command_queue, matrixCMemObj, CL_TRUE, 0, B_row * A_col * sizeof(int), (void*)MatrixC, 0, NULL, NULL);
cout << "readbuffer: " << err11 << endl;
float run_time = ev_end_time - ev_start_time;
cout << "runtime:" << run_time << endl;


/** step 12: release all resources */
if (NULL != kernel) clReleaseKernel(kernel);
if (NULL != program) clReleaseProgram(program);
if (NULL != matrixAMemObj) clReleaseMemObject(matrixAMemObj);
if (NULL != matrixBMemObj) clReleaseMemObject(matrixBMemObj);
if (NULL != matrixCMemObj) clReleaseMemObject(matrixCMemObj);
if (NULL != command_queue) clReleaseCommandQueue(command_queue);
if (NULL != context) clReleaseContext(context);
if (NULL != MatrixA) free(MatrixA);
if (NULL != MatrixB) free(MatrixB);
if (NULL != MatrixC) free(MatrixC);


}

Matrixmul.cl
// Matrixmul: one work-item computes one full row i of C = A * B.
//
// NOTE(review): the host passes B_row as arg 0 and A_col as arg 1, and all
// dimensions are 1024 (square matrices), so B_width/A_height are numerically
// interchangeable here -- confirm the argument order if shapes ever differ.
//
// Fixes relative to the original post:
//  * `shared[k] = B[k*B_width + k]` loaded only the DIAGONAL of B; each output
//    column j needs column j of B, reloaded per j (the original filled `shared`
//    once and reused it for every j, so every C[i][j] in a row was identical).
//  * `C[i*A_height + j]` used the wrong row stride; the row stride of C is its
//    number of columns (B_width).
//  * The 4 KB per-work-item private array `tmpData[1024]` (which likely spilled
//    to global memory) and the float accumulator for int math are removed.
//
// Barriers must be reached by ALL work-items of the group, so the j-loop runs
// unconditionally and only the compute/store is guarded by `i < A_height`.
__kernel void Matrixmul(
	const int B_width,
	const int A_height,
	__global int* A,
	__global int* B,
	__global int* C,
	__local int* shared)   // scratch for one column of B (A_height ints)
{
	int i = get_global_id(0);
	int id = get_local_id(0);
	int size = get_local_size(0);

	for (int j = 0; j < B_width; ++j)
	{
		// Cooperatively stage column j of B in local memory.
		for (int k = id; k < A_height; k += size)
			shared[k] = B[k * B_width + j];
		barrier(CLK_LOCAL_MEM_FENCE);

		if (i < A_height)
		{
			int acc = 0;
			// Dot product: row i of A with the staged column j of B.
			for (int k = 0; k < A_height; ++k)
				acc += A[i * A_height + k] * shared[k];
			C[i * B_width + j] = acc;
		}
		// Don't overwrite the column until every work-item has consumed it.
		barrier(CLK_LOCAL_MEM_FENCE);
	}
}
...全文
2020 2 打赏 收藏 转发到动态 举报
写回复
用AI写文章
2 条回复
切换为时间正序
请发表友善的回复…
发表回复
xinyu391 2017-06-12
  • 打赏
  • 举报
回复
建议检查下每一步cl操作的errCode

604

社区成员

发帖
与我相关
我的任务
社区描述
异构开发技术
社区管理员
  • OpenCL和异构编程社区
加入社区
  • 近7日
  • 近30日
  • 至今
社区公告
暂无公告

试试用AI创作助手写篇文章吧