Cuda入门
cuda剛剛推出的時候就嘗試過了, 也了解了異構計算,可惜當初還沒有想要寫博客的想法,很多積累的知識也就慢慢地淡忘了。
隨著人工智能,深度學習變得越來越熱門,越來越成熟,GPU的運算性能也越來越強大, 讓我們重新開始學習Cuda吧。
前提條件:當然你要有一塊nvidia的顯卡,我的筆記本電腦配置的是GeForce GTX 770M
首先安裝cuda toolkit。
cuda toolkit 發展到了8.0版本,正好支持visual studio 2015,終于不用折騰開發環境方面的問題了。
從官方網站下載安裝包,一路next就可以了。
運行VS2015創建項目
生成代碼如下
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector addition: c[i] = a[i] + b[i].
// Expects a single-block 1-D launch with exactly one thread per element;
// there is no bounds check, so launching extra threads reads/writes out of range.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Entry point: adds two small constant vectors on the GPU via addWithCuda
// and prints the result. Returns 0 on success, 1 on any CUDA failure.
int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Perform the element-wise addition on the device.
    cudaError_t status = addWithCuda(c, a, b, arraySize);
    if (status != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    status = cudaDeviceReset();
    if (status != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    return 0;
}
// Helper function for using CUDA to add two integer vectors in parallel.
// Copies a and b (size elements each) to the device, launches addKernel with
// a single block of `size` threads (so size must not exceed the device's
// max threads per block), and copies the sum back into c.
// Returns the first CUDA error encountered, or cudaSuccess.
// All device buffers are released on every exit path via the Error label.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc (dev_c) failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc (dev_a) failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc (dev_b) failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy (a -> dev_a) failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy (b -> dev_b) failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);

    // Kernel launches return no status directly; launch-configuration errors
    // (e.g. size > max threads per block) surface via cudaGetLastError.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the execution of the kernel.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy (dev_c -> c) failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

Error:
    // cudaFree(NULL) is a safe no-op, so freeing all three unconditionally
    // is correct even when an early failure left some pointers unallocated.
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);

    return cudaStatus;
}
運行結果
總結
- 上一篇: yarn的基本使用
- 下一篇: QTWebEngine的使用