// BP neural network implemented in CUDA.
// (Original forum post, translated) I'm just starting to learn CUDA. I implemented a BP
// neural network with CUDA, but it runs far slower than the CPU version — more than 20x
// slower. How can this program be optimized?
#include <fstream>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <iostream>
#include <vector>
#include <string>
#include <stdio.h>
#include <cuda_runtime.h>
#include <sm_20_atomic_functions.h>
#include <cublas_v2.h>
using namespace std;
#define random(x) (rand()%x) // uniform pseudo-random int in [0, x)
const int total = 489; // number of training samples
const int totalTest = 210; // number of test samples
const int NumIn = 9; // input-layer size
const int NumMid = 50; // hidden-layer size
const int NumOut = 1; // output-layer size
const char * openFileNameTrain = "D:\\test\\CUDATEST\\DataSet\\BreastCancerWisconsin_train.txt" ;// training data path
const char * openFileNameTest = "D:\\test\\CUDATEST\\DataSet\\BreastCancerWisconsin_train.txt" ;// test data path -- NOTE(review): points at the *_train.txt file, identical to the training path; looks like a copy/paste slip, confirm
const double A = 30; // activation amplitude: f(x) = A / (1 + exp(-x/B)) (hard-coded as 30 in the kernels)
const double B = 20; // activation slope divisor (hard-coded as 20 in the kernels)
double COEF = 0.003; // network learning rate
const double TCOEF = 0.001; // threshold adjustment rate
const double alpha = 0.9; // NOTE(review): original comment said "required accuracy", but 0.9 matches the hard-coded momentum factor in the weight-update kernels -- presumably momentum, confirm
const double ERROR = 0.02; // allowed per-sample error during training
const double ACCURACY = 0.004; // required accuracy over the whole data set
const int MAX = 3000; // maximum training passes (not referenced in this excerpt)
float wITM[NumIn * NumMid]; // weight matrix, input layer -> hidden layer
float wMTO[NumMid * NumOut]; // weight matrix, hidden layer -> output layer
float wcITM[NumIn * NumMid]; // weight adjustment (momentum term), input -> hidden
float wcMTO[NumMid * NumOut]; // weight adjustment (momentum term), hidden -> output
float tMid[NumMid]; // hidden-layer thresholds
float tOut[NumOut]; // output-layer thresholds
float tcMid[NumMid]; // hidden-layer threshold adjustments
float tcOut[NumOut]; // output-layer threshold adjustments
float dMid[NumMid]; // hidden-layer backprop error (delta)
float dOut[NumOut]; // output-layer backprop error (delta)
float MidIn[NumMid] = {0},MidOut[NumMid]; // hidden-layer input / output
float OutIn[NumOut],OutOut[NumOut]; // output-layer input / output
float diff; // current sample's output error
float datasetIn[total][NumIn]; // training set inputs
float datasetOut[total][NumOut]; // training set expected outputs
int set1[total][NumIn + NumOut + 1]; // raw data set
double datasetTest[total][NumIn]; // test set inputs
int res[4]; // result counters -- presumably classification tallies; TODO confirm against elided code
// Load the training/test data from disk into the global arrays.
// Returns true on success. (Body elided in this excerpt.)
bool DataLoad()
{
...
}
// Host-side forward pass for sample `Index` — presumably fills MidIn/MidOut and
// OutIn/OutOut from datasetIn[Index]; used by culTotalDiff(). (Body elided in this excerpt.)
void culOut(int Index)
{
...
}
// Initialize the weight matrices wITM / wMTO. (Body elided in this excerpt.)
void InitW()
{
...
}
// Initialize the thresholds tMid / tOut. (Body elided in this excerpt.)
void InitT()
{
...
}
// Hidden-layer activation: MidOut[i] = A / (1 + exp(-(MidIn[i] - tMid[i]) / B))
// with A = 30, B = 20 (hard-coded). Also rewrites MidIn in place with the
// threshold subtracted, because the backprop kernels reuse it.
// Launch: total threads must equal NumMid; main uses <<<NumMid, 1>>>.
__global__ static void gpu_CalMidOut(float *dev_MidIn, float *dev_MidOut, float *dev_tMid)
{
    // Generalized index: works for <<<NumMid,1>>> (where it reduces to blockIdx.x)
    // and for a more efficient <<<blocks,threads>>> configuration alike.
    int Index = blockIdx.x * blockDim.x + threadIdx.x;
    dev_MidIn[Index] = dev_MidIn[Index] - dev_tMid[Index];
    // FIX: use expf and float literals — the original's double-precision `exp`
    // and bare literals force costly double arithmetic in a float kernel.
    dev_MidOut[Index] = 30.0f / (1.0f + expf(-dev_MidIn[Index] / 20.0f));
}
// Output-layer activation: OutOut[i] = A / (1 + exp(-(OutIn[i] - tOut[i]) / B))
// with A = 30, B = 20 (hard-coded). Rewrites OutIn in place with the threshold
// subtracted, because gpu_CalDOut reuses it.
// Launch: total threads must equal NumOut; main uses <<<NumOut, 1>>>.
__global__ static void gpu_CalOutOut(float *dev_OutIn, float *dev_OutOut, float *dev_tOut)
{
    // Generalized index: reduces to blockIdx.x under the current <<<N,1>>> launch.
    int Index = blockIdx.x * blockDim.x + threadIdx.x;
    dev_OutIn[Index] = dev_OutIn[Index] - dev_tOut[Index];
    // FIX: expf + float literals avoid double-precision promotion in a float kernel.
    dev_OutOut[Index] = 30.0f / (1.0f + expf(-dev_OutIn[Index] / 20.0f));
}
// Compute the squared-error of the current network output against training
// sample `Index`, storing it in the global `diff`:
//   diff = 0.5 * sum_i (OutOut[i] - datasetOut[Index][i])^2
// BUG FIX: the original assigned (not accumulated) inside the loop, so only the
// LAST output unit's error survived. Harmless while NumOut == 1, wrong otherwise.
void CalDiff(int Index)
{
    diff = 0;
    for (int i = 0; i < NumOut; i++)
    {
        float e = OutOut[i] - datasetOut[Index][i];
        diff += 0.5f * e * e;
    }
}
// Output-layer backprop error (delta):
//   dOut[i] = (OutOut[i] - Expect[i]) * f'(OutIn[i])
// where f(x) = A/(1+exp(-x/B)) gives f'(x) = (A/B) * e / (1+e)^2 with e = exp(-x/B).
// Launch: total threads must equal NumOut; main uses <<<NumOut, 1>>>.
__global__ static void gpu_CalDOut(float *dev_OutOut, float *dev_Expect, float *dev_dOut, float *dev_OutIn)
{
    int Index = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the original wrote "(30 / 20)" — INTEGER division, which is 1,
    // silently scaling every delta by 1 instead of A/B = 1.5.
    // Also replaced double-precision exp/pow with float math.
    float e = expf(-dev_OutIn[Index] / 20.0f);
    float denom = (1.0f + e) * (1.0f + e);
    dev_dOut[Index] = (dev_OutOut[Index] - dev_Expect[Index]) * (30.0f / 20.0f) * e / denom;
}
// Hidden-layer backprop error (delta):
//   dMid[j] = (sum_i dOut[i] * wMTO[i*NumMid + j]) * f'(MidIn[j])
// where f(x) = A/(1+exp(-x/B)) gives f'(x) = (A/B) * e / (1+e)^2 with e = exp(-x/B).
// Launch: total threads must equal NumMid; main uses <<<NumMid, 1>>>.
__global__ static void gpu_CalDMid(float *dev_dOut, float *dev_wMTO, float *dev_dMid, float *dev_MidIn)
{
    int Index = blockIdx.x * blockDim.x + threadIdx.x;
    float temp = 0.0f;
    for (int i = 0; i < NumOut; i++)
    {
        temp += dev_dOut[i] * dev_wMTO[i * NumMid + Index];
    }
    // BUG FIX: "(30 / 20)" was INTEGER division (== 1); the derivative scale
    // A/B must be 1.5. Float math replaces the double-precision exp/pow.
    float e = expf(-dev_MidIn[Index] / 20.0f);
    float denom = (1.0f + e) * (1.0f + e);
    dev_dMid[Index] = temp * (30.0f / 20.0f) * e / denom;
}
// Hidden->output weight update with momentum, plus the output-layer threshold
// update. Launch: <<<NumOut, NumMid>>> — one block per output unit, one thread
// per hidden unit (main launches exactly this configuration).
//   wcMTO = -lr * dOut[out] * MidOut[mid] + momentum * wcMTO   (lr=0.006, momentum=0.9)
//   tcOut =  0.002 * dOut[out]
__global__ static void gpu_CalWCMTO(float *dev_dOut, float *dev_MidOut, float *dev_wcMTO, float *dev_wMTO, float *dev_tcOut, float *dev_tOut)
{
    int outIndex = blockIdx.x;
    int midIndex = threadIdx.x;
    int Index = outIndex * blockDim.x + midIndex;
    if (outIndex < NumOut && midIndex < NumMid)
    {
        dev_wcMTO[Index] = -0.006f * dev_dOut[outIndex] * dev_MidOut[midIndex] + 0.9f * dev_wcMTO[Index];
        dev_wMTO[Index] = dev_wMTO[Index] + dev_wcMTO[Index];
    }
    // BUG FIX: originally EVERY thread in the block executed the threshold
    // read-modify-write below, so dev_tOut[outIndex] was incremented up to
    // blockDim.x times per launch (a data race). Only one thread may do it.
    if (midIndex == 0 && outIndex < NumOut)
    {
        dev_tcOut[outIndex] = 0.002f * dev_dOut[outIndex];
        dev_tOut[outIndex] = dev_tOut[outIndex] + dev_tcOut[outIndex];
    }
}
// Input->hidden weight update with momentum, plus the hidden-layer threshold
// update. Launch: <<<NumMid, NumIn>>> — one block per hidden unit, one thread
// per input unit (main launches exactly this configuration).
//   wcITM = -lr * dMid[mid] * Input[in] + momentum * wcITM   (lr=0.006, momentum=0.9)
//   tcMid =  0.002 * dMid[mid]
__global__ static void gpu_CalWCITM(float *dev_dMid, float *dev_Input, float *dev_wcITM, float *dev_wITM, float *dev_tcMid, float *dev_tMid)
{
    int midIndex = blockIdx.x;
    int inIndex = threadIdx.x;
    int Index = midIndex * blockDim.x + inIndex;
    if (inIndex < NumIn && midIndex < NumMid)
    {
        dev_wcITM[Index] = -0.006f * dev_dMid[midIndex] * dev_Input[inIndex] + 0.9f * dev_wcITM[Index];
        dev_wITM[Index] = dev_wITM[Index] + dev_wcITM[Index];
    }
    // BUG FIX: originally EVERY thread in the block executed this
    // read-modify-write, so dev_tMid[midIndex] was incremented up to
    // blockDim.x times per launch (a data race). Only one thread may do it.
    if (inIndex == 0 && midIndex < NumMid)
    {
        dev_tcMid[midIndex] = 0.002f * dev_dMid[midIndex];
        dev_tMid[midIndex] = dev_tMid[midIndex] + dev_tcMid[midIndex];
    }
}
// Output-layer threshold update: tcOut = 0.001 * dOut; tOut += tcOut.
// One thread per output unit (not launched in the visible main, which folds
// this step into gpu_CalWCMTO instead).
__global__ static void gpu_CalDCOut(float *dev_dOut, float *dev_tcOut, float *dev_tOut)
{
    int idx = blockIdx.x;
    float adjust = 0.001 * dev_dOut[idx];
    dev_tcOut[idx] = adjust;
    dev_tOut[idx] += adjust;
}
// Hidden-layer threshold update: tcMid = 0.001 * dMid; tMid += tcMid.
// One thread per hidden unit (not launched in the visible main, which folds
// this step into gpu_CalWCITM instead).
__global__ static void gpu_CalDCMid(float *dev_dMid, float *dev_tcMid, float *dev_tMid)
{
    int idx = blockIdx.x;
    float adjust = 0.001 * dev_dMid[idx];
    dev_tcMid[idx] = adjust;
    dev_tMid[idx] += adjust;
}
// Zero the hidden->output weight-adjustment matrix and the output-layer
// thresholds. Launch: <<<NumOut, NumMid>>> as in main.
// NOTE(review): this zeroes the threshold array dev_tOut itself, not the
// adjustment dev_tcOut — since main calls this before every training sample,
// it erases any learned threshold each time. Confirm that is intended.
__global__ static void gpu_ClearWCMTO(float *dev_wcMTO, float *dev_tOut)
{
    int outIndex = blockIdx.x;
    int midIndex = threadIdx.x;
    int Index = outIndex * blockDim.x + midIndex;
    dev_wcMTO[Index] = 0.0f;
    // FIX: originally every thread in the block stored to dev_tOut[outIndex];
    // the value is identical so the result was right, but the redundant
    // concurrent writes are wasted global traffic. One thread suffices.
    if (midIndex == 0)
    {
        dev_tOut[outIndex] = 0.0f;
    }
}
// Zero the input->hidden weight-adjustment matrix and the hidden-layer
// thresholds. Launch: <<<NumMid, NumIn>>> as in main.
// NOTE(review): this zeroes dev_tMid itself, not the adjustment dev_tcMid —
// main calls it before every sample, wiping learned thresholds. Confirm intent.
__global__ static void gpu_ClearWCITM(float *dev_wcITM, float *dev_tMid)
{
    int midIndex = blockIdx.x;
    int inIndex = threadIdx.x;
    int Index = midIndex * blockDim.x + inIndex;
    dev_wcITM[Index] = 0.0f;
    // FIX: store dev_tMid[midIndex] from a single thread instead of every
    // thread in the block writing the same value concurrently.
    if (inIndex == 0)
    {
        dev_tMid[midIndex] = 0.0f;
    }
}
// Mean squared error over the whole training set: runs the host-side forward
// pass (culOut) for every sample, sums the squared differences between the
// network output and the expected output, and divides by the sample count.
double culTotalDiff()
{
    double accum = 0.0;
    for (int sample = 0; sample < total; ++sample)
    {
        culOut(sample);
        for (int out = 0; out < NumOut; ++out)
        {
            double delta = OutOut[out] - datasetOut[sample][out];
            accum += delta * delta;
        }
    }
    return accum / total;
}
// Training driver: initializes the network, copies it to the device, and for
// each training sample repeats the cuBLAS forward pass + backprop kernels
// until the per-sample error drops below ERROR.
// PERF NOTE(review): the 20x slowdown vs. CPU almost certainly comes from this
// loop's structure — two cudaMemcpy calls and ~8 tiny <<<N,1>>> kernel
// launches per sample per iteration; launch/copy latency dominates. Batch the
// samples on the device and give kernels real thread blocks to fix it.
int main()
{
...
// Neural-network initialization (weights and thresholds).
InitW();
InitT();
// Device-side variable definitions; cuBLAS handles the matrix-vector products.
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
float *d_Input, *d_wITM, *d_wMTO, *d_wcITM, *d_wcMTO, *d_tMid, *d_tOut, *d_tcMid, *d_tcOut, *d_dMid, *d_dOut, *d_MidIn, *d_MidOut, *d_OutIn, *d_OutOut, *d_Expect, *d_diff;
float p1 = 1.0, p2 = 0.0;
cublasStatus_t status;
cublasHandle_t handle;
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! CUBLAS initialization error\n");
printf("输入任意字符继续......");
// NOTE(review): scanf("%d") is missing its destination argument — undefined behavior.
scanf("%d");
return EXIT_FAILURE;
}
// Device allocations for weights, adjustments, thresholds, deltas, and layer I/O.
// NOTE(review): none of the cudaMalloc/cudaMemcpy return codes below are checked.
cudaMalloc((void **)&d_Input, NumIn * sizeof(float));
cudaMalloc((void **)&d_wITM, NumIn * NumMid * sizeof(float));
cudaMalloc((void **)&d_wMTO, NumMid * NumOut * sizeof(float));
cudaMalloc((void **)&d_wcITM, NumIn * NumMid * sizeof(float));
cudaMalloc((void **)&d_wcMTO, NumMid * NumOut * sizeof(float));
cudaMalloc((void **)&d_tMid, NumMid * sizeof(float));
cudaMalloc((void **)&d_tOut, NumOut * sizeof(float));
cudaMalloc((void **)&d_tcMid, NumMid * sizeof(float));
cudaMalloc((void **)&d_tcOut, NumOut * sizeof(float));
cudaMalloc((void **)&d_dMid, NumMid * sizeof(float));
cudaMalloc((void **)&d_dOut, NumOut * sizeof(float));
cudaMalloc((void **)&d_MidIn, NumMid * sizeof(float));
cudaMalloc((void **)&d_MidOut, NumMid * sizeof(float));
cudaMalloc((void **)&d_OutIn, NumOut * sizeof(float));
cudaMalloc((void **)&d_OutOut, NumOut * sizeof(float));
cudaMalloc((void **)&d_Expect, NumOut * sizeof(float));
cudaMalloc((void **)&d_diff, sizeof(float));
cudaMemcpy(d_wITM, wITM, NumIn * NumMid * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_wMTO, wMTO, NumMid * NumOut * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_tMid, tMid, NumMid * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_tOut, tOut, NumOut * sizeof(float), cudaMemcpyHostToDevice);
int count1 = 0;
// Outer loop runs once here (i < 1); presumably meant to be multiple epochs.
for(int i = 0; i < 1; i++)
{
count1 = 0;
for(int j = 0; j < total; j++)
{
// Per-sample host->device copies — a major throughput killer in a tight loop.
cudaMemcpy(d_Input, datasetIn[j], NumIn * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Expect, datasetOut[j], NumOut * sizeof(float), cudaMemcpyHostToDevice);
// Reset momentum terms and thresholds, then forward pass:
// MidIn = Input^T * wITM (cuBLAS), activate; OutIn = MidOut * wMTO, activate.
gpu_ClearWCMTO<<<NumOut, NumMid>>>(d_wcMTO, d_tOut);
gpu_ClearWCITM<<<NumMid, NumIn>>>(d_wcITM, d_tMid);
cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, 1, NumMid, NumIn, &p1, d_Input, NumIn, d_wITM, NumIn, &p2, d_MidIn, 1);
gpu_CalMidOut<<<NumMid, 1>>>(d_MidIn, d_MidOut, d_tMid);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, NumOut, NumMid, &p1, d_MidOut, 1, d_wMTO, NumMid, &p2, d_OutIn, 1);
gpu_CalOutOut<<<NumOut, 1>>>(d_OutIn, d_OutOut, d_tOut);
cudaMemcpy(OutOut, d_OutOut, NumOut * sizeof(float), cudaMemcpyDeviceToHost);
CalDiff(j);
// Train on this single sample until its error is below ERROR — backprop,
// weight update, then another forward pass; each iteration round-trips
// the output to the host just to evaluate the loop condition.
while(diff > ERROR)
{
gpu_CalDOut<<<NumOut, 1>>>(d_OutOut, d_Expect, d_dOut, d_OutIn);
gpu_CalDMid<<<NumMid, 1>>>(d_dOut, d_wMTO, d_dMid, d_MidIn);
gpu_CalWCMTO<<<NumOut, NumMid>>>(d_dOut, d_MidOut, d_wcMTO, d_wMTO, d_tcOut, d_tOut);
gpu_CalWCITM<<<NumMid, NumIn>>>(d_dMid, d_Input, d_wcITM, d_wITM, d_tcMid, d_tMid);
cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, 1, NumMid, NumIn, &p1, d_Input, NumIn, d_wITM, NumIn, &p2, d_MidIn, 1);
gpu_CalMidOut<<<NumMid, 1>>>(d_MidIn, d_MidOut, d_tMid);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, NumOut, NumMid, &p1, d_MidOut, 1, d_wMTO, NumMid, &p2, d_OutIn, 1);
gpu_CalOutOut<<<NumOut, 1>>>(d_OutIn, d_OutOut, d_tOut);
cudaMemcpy(OutOut, d_OutOut, NumOut * sizeof(float), cudaMemcpyDeviceToHost);
CalDiff(j);
count1 ++;
}
printf("%d %lf %lf %lf\n",count1,datasetIn[j][0],datasetOut[j][0],OutOut[0]);
}
// Copy trained weights back so culTotalDiff() can run the host-side forward pass.
cudaMemcpy(wITM, d_wITM, NumMid * NumIn * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(wMTO, d_wMTO, NumOut * NumMid * sizeof(float), cudaMemcpyDeviceToHost);
printf("This is %d times training NN...\n",i);
float totaldiff = culTotalDiff();
printf("count: %d\n",count1);
printf("totaldiff: %f\n",totaldiff);
if(totaldiff < ACCURACY)
{
break;
}
}
// Stop the event timer and report elapsed GPU time in milliseconds.
// NOTE(review): device memory, the cuBLAS handle, and the events are never freed.
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("Classify Processed Time : %f\n",elapsedTime);
}