CUDA Learning Notes 2

Key points in this section
  • A macro definition for error handling
  • Parallelism in CUDA
1. Vector addition on the GPU
Here is the code:

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>
#include <iostream>
using namespace std;

#define N 100

__global__ void add_kernel(double *a, double *b, double *c) {
    int tid = blockIdx.x;   // one element per block
    if (tid < N)
    {
        c[tid] = a[tid] + b[tid];
    }
}

__global__ void value_init_kernel(double *a, double *b) {
    int tid = blockIdx.x;
    if (tid < N)
    {
        a[tid] = 1.0 * tid;          // a[i] = i
        b[tid] = 1.0 * tid * tid;    // b[i] = i*i
    }
}

int main(void)
{
    cudaError_t err1 = cudaSuccess, err2 = cudaSuccess, err3 = cudaSuccess;
    double a[N], b[N], c[N];
    double *dev_a, *dev_b, *dev_c;

    err1 = cudaMalloc((void**)&dev_a, N * sizeof(double));
    err2 = cudaMalloc((void**)&dev_b, N * sizeof(double));
    err3 = cudaMalloc((void**)&dev_c, N * sizeof(double));


    if (err1 != cudaSuccess || err2 != cudaSuccess || err3 != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device value  (error code (%s,%s,%s))!\n", cudaGetErrorString(err1), cudaGetErrorString(err2), cudaGetErrorString(err3));
        exit(EXIT_FAILURE);
    }

    value_init_kernel<<<N, 1>>>(dev_a, dev_b);       // initialize the values on the GPU
    add_kernel<<<N, 1>>>(dev_a, dev_b, dev_c);       // perform the addition on the GPU

    err1 = cudaMemcpy(a, dev_a, N * sizeof(double), cudaMemcpyDeviceToHost);
    err2 = cudaMemcpy(b, dev_b, N * sizeof(double), cudaMemcpyDeviceToHost);
    err3 = cudaMemcpy(c, dev_c, N * sizeof(double), cudaMemcpyDeviceToHost);

    if (err1 != cudaSuccess || err2 != cudaSuccess || err3 != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy device value to host value (error code (%s,%s,%s))!\n", cudaGetErrorString(err1), cudaGetErrorString(err2), cudaGetErrorString(err3));
        exit(EXIT_FAILURE);
    }


    for (int i = 0; i < N; i++)
    {
        printf("%f + %f = %f\n", a[i], b[i], c[i]);
    }

    // free the GPU memory
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);


    return 0;

}
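Note the launch configuration <<<N, 1>>>: it launches N blocks of one thread each, and every block identifies its element through blockIdx.x. This is the simplest way to get parallelism, but one thread per block underuses the hardware. A common variant (a sketch of mine, not from the original) launches multiple threads per block and computes a global index:

// Sketch: the same vector addition with 256 threads per block.
__global__ void add_kernel_threads(double *a, double *b, double *c) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;   // global thread index
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}

// Launched with enough blocks to cover all N elements:
// add_kernel_threads<<<(N + 255) / 256, 256>>>(dev_a, dev_b, dev_c);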

The code initializes the two input arrays on the GPU, adds them there, and copies the results back to the host. The error-handling code above is repeated verbatim for every CUDA call, which gets verbose; a macro definition can simplify it:

static void HandleError( cudaError_t err,
                         const char *file,
                         int line ) {
    if (err != cudaSuccess) {
        printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
                file, line );
        exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
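With this macro, each CUDA call checks itself in a single line; on failure it prints the error string together with the file name and line number, then exits. Applied to the allocation and copy calls from the vector-addition example above:

HANDLE_ERROR(cudaMalloc((void**)&dev_a, N * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(a, dev_a, N * sizeof(double), cudaMemcpyDeviceToHost));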

Finally, a fun example: the Julia set fractal. Each pixel (x, y) is mapped to a point in the complex plane, which is then iterated under z = z*z + c with the constant c = -0.8 + 0.156i. Points whose orbits escape (squared magnitude exceeding 1000) are colored according to the iteration at which they escaped; points that survive all 200 iterations are drawn black.

Julia set code:
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include "../common/book.h"   // helper headers from the book "CUDA by Example"
#include "../common/image.h"

#define DIM1 5760
#define DIM2 5760      // length of each dimension
#define iter_N 200     // maximum number of iterations

struct cuComplex {
    float r;
    float i;
    __device__ cuComplex(float a, float b) :r(a), i(b) {}
    __device__ float magnitude2(void) {
        return r*r + i*i;
    } // returns the squared magnitude of the complex number
    __device__ cuComplex operator*(const cuComplex& a) {
        return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
    }
    __device__ cuComplex operator+(const cuComplex& a) {
        return cuComplex(r + a.r, i + a.i);
    }
};

__device__ int julia(int x, int y) {
    const float scale = 1.5;
    float jx = scale * (float)(DIM1 / 2 - x) / (DIM1 / 2);
    float jy = scale * (float)(DIM2 / 2 - y) / (DIM2 / 2);

    cuComplex c(-0.8, 0.156);   // the Julia constant c
    cuComplex a(jx, jy);


    for (int i = 1; i < iter_N; i++) {
        a = a * a + c;
        if (a.magnitude2() > 1000)
            return i;
    }
    return 0;
}

__global__ void kernel(unsigned char *ptr) {
    int x = blockIdx.x;
    int y = blockIdx.y;
    int offset = x + y * gridDim.x;

    int juliaValue = julia(x, y);
    // coloring: map the escape iteration count to an RGBA value
    if (juliaValue == 0)
    {
        ptr[offset * 4 + 0] = 0;
        ptr[offset * 4 + 1] = 0;
        ptr[offset * 4 + 2] = 0;
        ptr[offset * 4 + 3] = 255;
    }

    if (juliaValue >= 1 && juliaValue < 90)
    {
        ptr[offset * 4 + 0] = (int)(255 * juliaValue / (2.0 * iter_N));
        ptr[offset * 4 + 1] = 0;
        ptr[offset * 4 + 2] = 0;
        ptr[offset * 4 + 3] = 255;
    }

    if (juliaValue >= 90 && juliaValue < 120)
    {
        ptr[offset * 4 + 0] = 255;
        ptr[offset * 4 + 1] = 255 - (int)(255 * juliaValue / (5.0 * iter_N));
        ptr[offset * 4 + 2] = 255 - (int)(255 * juliaValue / (5.0 * iter_N));
        ptr[offset * 4 + 3] = 255;
    }

    if (juliaValue >= 120 && juliaValue < 180)
    {
        ptr[offset * 4 + 0] = 10;
        ptr[offset * 4 + 1] = 215;
        ptr[offset * 4 + 2] = 200;
        ptr[offset * 4 + 3] = 255;
    }

    if (juliaValue >= 180 && juliaValue <= 255)
    {
        ptr[offset * 4 + 0] = (int)(255 * juliaValue / (1.0 * iter_N));
        ptr[offset * 4 + 1] = 0;
        ptr[offset * 4 + 2] = 0;
        ptr[offset * 4 + 3] = 255;
    }


}

struct DataBlock {
    unsigned char   *dev_bitmap;
};

int main(void) {
    DataBlock data;
    IMAGE bitmap(DIM1, DIM2);
    unsigned char *dev_bitmap;

    HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap, bitmap.image_size()));
    data.dev_bitmap = dev_bitmap;

    dim3 grid(DIM1, DIM2);   // really a DIM1 x DIM2 x 1 three-dimensional grid of blocks
    kernel<<<grid, 1>>>(dev_bitmap);
    HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap,
        bitmap.image_size(),
        cudaMemcpyDeviceToHost));
    HANDLE_ERROR(cudaFree(dev_bitmap));
    imwrite("C:/Users/Lenovo/Pictures/image/julia.png", bitmap.image);
    bitmap.show_image();

    return 0;
}
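Each pixel gets its own block here: block (x, y) renders pixel (x, y), and offset = x + y * gridDim.x is that pixel's linear index into the row-major RGBA buffer (4 bytes per pixel). Launching single-thread blocks works, but it wastes most of each warp; a common alternative (again a sketch of mine, not from the original) uses 2D thread blocks:

// Sketch: 16x16 threads per block; the grid is sized to cover the whole image.
dim3 block(16, 16);
dim3 grid2((DIM1 + block.x - 1) / block.x, (DIM2 + block.y - 1) / block.y);
// kernel<<<grid2, block>>>(dev_bitmap);

// The kernel would then compute its pixel coordinates as:
// int x = blockIdx.x * blockDim.x + threadIdx.x;
// int y = blockIdx.y * blockDim.y + threadIdx.y;
// if (x >= DIM1 || y >= DIM2) return;   // guard against the padded edge
// int offset = x + y * DIM1;            // gridDim.x no longer equals DIM1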


Final result (rendered Julia set image):
