▶ Code from the book: progressively optimizing code that renders the Julia set

● No parallelization (variables etc. manually tuned)

 #include <stdio.h>
#include <stdlib.h>
#include <openacc.h>

#define N (1024 * 8)

// compute the iteration count for a single point
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2) // two z-updates per loop pass
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2); // points that never escape (literals reconstructed)
}

int main()
{
    const int maxIter = 200;                                 // maximum iteration count (original literal lost; 200 is a placeholder)
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N; // iteration constant and step size across the frame
    int *image = (int *)malloc(sizeof(int) * N * N);        // allocated here but only used by the later versions
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter));
        fprintf(pf, "\n");
    }
    fclose(pf);
    free(image);
    //getchar();
    return 0;
}

● Output (all later versions produce the same output, so it is not repeated)
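To actually look at the fractal, the text file is easy to turn into an image. Below is a minimal sketch (not from the book; the output file name is hypothetical) that converts output.txt into a plain-text PGM file any image viewer can open:

#include <stdio.h>

#define N (1024 * 8)

int main()
{
    FILE *in = fopen("R:/output.txt", "r");  // same path as the code above
    FILE *out = fopen("R:/output.pgm", "w"); // hypothetical output file
    fprintf(out, "P2\n%d %d\n255\n", N, N);  // plain-text PGM header, 8-bit gray
    int v;
    while (fscanf(in, "%d", &v) == 1)
        fprintf(out, "%d\n", v > 255 ? 255 : v); // clamp; with the placeholder maxIter = 200 the values already fit
    fclose(in);
    fclose(out);
    return 0;
}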

● Improvement 1: parallelize the computation

 #include <stdio.h>
#include <stdlib.h>
#include <openacc.h>

#define N (1024 * 8)

#pragma acc routine seq // julia is called from device code, one sequential instance per thread
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2)
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200; // placeholder, as above
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;
    int *image = (int *)malloc(sizeof(int) * N * N);
#pragma acc data copyout(image[0:N * N]) // data region
    {
#pragma acc kernels loop independent // parallelize the loop, asserting that iterations are independent
        for (int i = 0; i < N; i++)
        {
            for (int j = 0; j < N; j++)
                image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
        }
    }
    /* // file output commented out so that Nvvp does not pick it up in the profile
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", image[i * N + j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
    */
    free(image);
    //getchar();
    return 0;
}

● Output

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc -acc -Minfo main.c -o main_acc.exe
julia:
, Generating acc routine seq
Generating Tesla code
, FMA (fused multiply-add) instruction(s) generated
, FMA (fused multiply-add) instruction(s) generated
main:
, Generating copyout(image[:])
, Loop is parallelizable
FMA (fused multiply-add) instruction(s) generated
, Loop is parallelizable
Accelerator kernel generated
Generating Tesla code
, #pragma acc loop gang, vector(4) /* blockIdx.y threadIdx.y */
, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
, FMA (fused multiply-add) instruction(s) generated

D:\Code\OpenACC\OpenACCProject\OpenACCProject>main_acc.exe
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
PGI: "acc_shutdown" not detected, performance results might be incomplete.
Please add the call "acc_shutdown(acc_device_nvidia)" to the end of your application to ensure that the performance results are complete.

Accelerator Kernel Timing data
D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c
main NVIDIA devicenum=
time(us): ,
: data region reached times
: data copyout transfers:
device time(us): total=, max=, min= avg=,
: compute region reached time
: kernel launched time
grid: [256x128] block: [32x4]
device time(us): total= max= min= avg=
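
The closing warning is easy to act on: calling acc_shutdown(acc_device_nvidia) right before the program exits lets the runtime flush its timing data (the same applies to every later version). A minimal sketch of the end of main with the call added:

    free(image);
    acc_shutdown(acc_device_nvidia); // shut the runtime down so the profiler output above is complete
    return 0;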

● Improvement 2: blocked computation; no obvious performance gain, but it prepares for asynchronous execution

 #include <stdio.h>
#include <stdlib.h>
#include <openacc.h>

#define N (1024 * 8)

#pragma acc routine seq
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2)
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200; // placeholder, as above
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;
    int *image = (int *)malloc(sizeof(int) * N * N);
#pragma acc data copyout(image[0:N * N])
    {
        const int numblock = 4; // number of blocks (4, matching the four kernel launches below)
        for (int block = 0; block < numblock; block++) // compute one block at a time
        {
            const int start = block * (N / numblock), end = start + N / numblock; // first and one-past-last row of this block
#pragma acc kernels loop independent
            for (int i = start; i < end; i++)
            {
                for (int j = 0; j < N; j++)
                    image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
            }
        }
    }
    /*
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", image[i * N + j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
    */
    free(image);
    //getchar();
    return 0;
}

● Output

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc -acc -Minfo main.c -o main_acc.exe
julia:
, Generating acc routine seq
Generating Tesla code
, FMA (fused multiply-add) instruction(s) generated
, FMA (fused multiply-add) instruction(s) generated
main:
, Generating copyout(image[:])
, Loop is parallelizable
FMA (fused multiply-add) instruction(s) generated
, Loop is parallelizable
Accelerator kernel generated
Generating Tesla code
, #pragma acc loop gang, vector(4) /* blockIdx.y threadIdx.y */
, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
, FMA (fused multiply-add) instruction(s) generated

D:\Code\OpenACC\OpenACCProject\OpenACCProject>main_acc.exe
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
PGI: "acc_shutdown" not detected, performance results might be incomplete.
Please add the call "acc_shutdown(acc_device_nvidia)" to the end of your application to ensure that the performance results are complete.

Accelerator Kernel Timing data
D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c
main NVIDIA devicenum=
time(us): ,
: data region reached times
: data copyout transfers:
device time(us): total=, max=, min= avg=,
: compute region reached times
: kernel launched times
grid: [256x128] block: [32x4]
device time(us): total= max= min= avg=
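
One caveat with the blocked loop: the start/end computation as written assumes that N is divisible by numblock (true here, since both are powers of two). If that assumption is ever dropped, the last block has to absorb the remainder; a hedged sketch:

    const int rows = N / numblock;                              // rows per block, rounded down
    const int start = block * rows;
    const int end = (block == numblock - 1) ? N : start + rows; // last block picks up any leftover rows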

● Improvement 3: blocked transfers; no obvious performance gain, but it prepares for asynchronous execution

 #include <stdio.h>
#include <stdlib.h>
#include <openacc.h>

#define N (1024 * 8)

#pragma acc routine seq
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2)
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200; // placeholder, as above
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;
    int *image = (int *)malloc(sizeof(int) * N * N);
#pragma acc data create(image[0:N * N]) // changed to create: the device copy needs no initial data from the host
    {
        const int numBlock = 4, blockSize = N * N / numBlock; // still computed in blocks (4, matching the four launches below)
        for (int block = 0; block < numBlock; block++)
        {
            const int start = block * (N / numBlock), end = start + N / numBlock;
#pragma acc kernels loop independent
            for (int i = start; i < end; i++)
            {
                for (int j = 0; j < N; j++)
                    image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
            }
#pragma acc update host(image[block * blockSize : blockSize]) // copy each block back to the host as soon as it is computed
        }
    }
    /*
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", image[i * N + j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
    */
    free(image);
    //getchar();
    return 0;
}

● Output

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc -acc -Minfo main.c -o main_acc.exe
julia:
, Generating acc routine seq
Generating Tesla code
, FMA (fused multiply-add) instruction(s) generated
, FMA (fused multiply-add) instruction(s) generated
main:
, Generating create(image[:])
, Loop is parallelizable
FMA (fused multiply-add) instruction(s) generated
, Loop is parallelizable
Accelerator kernel generated
Generating Tesla code
, #pragma acc loop gang, vector(4) /* blockIdx.y threadIdx.y */
, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
, FMA (fused multiply-add) instruction(s) generated
, Generating update self(image[block*blockSize:blockSize])

D:\Code\OpenACC\OpenACCProject\OpenACCProject>main_acc.exe
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
PGI: "acc_shutdown" not detected, performance results might be incomplete.
Please add the call "acc_shutdown(acc_device_nvidia)" to the end of your application to ensure that the performance results are complete.

Accelerator Kernel Timing data
D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c
main NVIDIA devicenum=
time(us): ,
: data region reached times
: compute region reached times
: kernel launched times
grid: [256x128] block: [32x4]
elapsed time(us): total=, max=, min= avg=,
: update directive reached times
: data copyout transfers:
device time(us): total=, max=, min= avg=,
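
The update directives now account for most of the device time, and they copy out of ordinary pageable host memory. PGI can back host allocations with pinned (page-locked) memory instead, which typically speeds up transfers and is what allows them to overlap with computation in the next step; assuming the PGI target-accelerator suboption, this is a build flag rather than a code change:

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc -acc -ta=tesla:pinned -Minfo main.c -o main_acc.exe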

● Improvement 4: asynchronous computation with two-way transfers

 #include <stdio.h>
#include <stdlib.h>
#include <openacc.h>

#define N (1024 * 8)

#pragma acc routine seq
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2)
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200; // placeholder, as above
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;
    int *image = (int *)malloc(sizeof(int) * N * N);
#pragma acc data create(image[0:N * N])
    {
        const int numBlock = 4, blockSize = N / numBlock * N;
        for (int block = 0; block < numBlock; block++)
        {
            const int start = block * (N / numBlock), end = start + N / numBlock;
#pragma acc kernels loop independent async(block + 1) // asynchronous compute, tagged with the block number
            for (int i = start; i < end; i++)
            {
                for (int j = 0; j < N; j++)
                    image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
            }
#pragma acc update host(image[block * blockSize : blockSize]) async(block + 1) // transfer each block asynchronously as soon as it is computed
        }
#pragma acc wait // drain all queues before leaving the data region
    }
    /*
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", image[i * N + j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
    */
    free(image);
    //getchar();
    return 0;
}

● Output

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc -acc -Minfo main.c -o main_acc.exe
julia:
, Generating acc routine seq
Generating Tesla code
, FMA (fused multiply-add) instruction(s) generated
, FMA (fused multiply-add) instruction(s) generated
main:
, Generating create(image[:])
, Loop is parallelizable
FMA (fused multiply-add) instruction(s) generated
, Loop is parallelizable
Accelerator kernel generated
Generating Tesla code
, #pragma acc loop gang, vector(4) /* blockIdx.y threadIdx.y */
, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
, FMA (fused multiply-add) instruction(s) generated
, Generating update self(image[block*blockSize:blockSize])

D:\Code\OpenACC\OpenACCProject\OpenACCProject>main_acc.exe
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= queue= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= queue= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= queue= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= queue= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
PGI: "acc_shutdown" not detected, performance results might be incomplete.
Please add the call "acc_shutdown(acc_device_nvidia)" to the end of your application to ensure that the performance results are complete.

Accelerator Kernel Timing data
Timing may be affected by asynchronous behavior
set PGI_ACC_SYNCHRONOUS to 1 to disable async() clauses
D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c
main NVIDIA devicenum=
time(us): ,
: data region reached times
: compute region reached times
: kernel launched times
grid: [256x128] block: [32x4]
device time(us): total= max= min= avg=
: update directive reached times
: data copyout transfers:
device time(us): total=, max=, min= avg=,
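
The single wait at the end of the data region blocks until every queue has drained. If the host wants to start consuming finished blocks earlier (for example, writing them to disk while the remaining blocks are still being computed), it can wait on one queue at a time instead. A sketch, where writeBlock is a hypothetical host-side consumer:

    for (int block = 0; block < numBlock; block++)
    {
#pragma acc wait(block + 1)                           // wait only for this block's kernel and transfer
        writeBlock(image + block * blockSize, blockSize); // hypothetical: host work overlaps the remaining queues
    }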

● Using unified memory addressing (Ubuntu only; not supported on win64)
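
Under managed memory the data and update directives become unnecessary, because malloc'ed allocations are migrated between host and device by the driver. A sketch of the compute loop under that model (assuming the PGI -ta=tesla:managed build flag; untested here, since as noted it does not work on win64):

// build with: pgcc -acc -ta=tesla:managed -Minfo main.c
int *image = (int *)malloc(sizeof(int) * N * N); // managed: visible to both host and device
#pragma acc kernels loop independent
for (int i = 0; i < N; i++)
{
    for (int j = 0; j < N; j++)
        image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
} // no data region, no update: the runtime migrates pages on demand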

● Improvement 5: multi-device version 1, using OpenMP

 #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <openacc.h>

#define N (1024 * 8)

#pragma acc routine seq
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2)
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200; // placeholder, as above
    const int numBlock = acc_get_num_devices(acc_device_nvidia), blockSize = N / numBlock * N; // query the OpenACC runtime for the number of target devices and use it as the block count
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;
    int *image = (int *)malloc(sizeof(int) * N * N);
    acc_init(acc_device_nvidia); // initialize all target devices up front
#pragma omp parallel num_threads(numBlock) // one host thread per device, each submitting work to its own device
    {
        acc_set_device_num(omp_get_thread_num(), acc_device_nvidia); // bind this thread to its target device
#pragma omp for
        for (int block = 0; block < numBlock; block++)
        {
            const int start = block * (N / numBlock), end = start + N / numBlock;
#pragma acc data copyout(image[block * blockSize : blockSize])
            {
#pragma acc kernels loop independent
                for (int i = start; i < end; i++)
                {
                    for (int j = 0; j < N; j++)
                        image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
                }
            }
        }
    }
    /*
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", image[i * N + j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
    */
    free(image);
    //getchar();
    return 0;
}

● Output

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc -acc -mp -Minfo main.c -o main_acc.exe
julia:
, Generating acc routine seq
Generating Tesla code
, FMA (fused multiply-add) instruction(s) generated
, FMA (fused multiply-add) instruction(s) generated
main:
, Parallel region activated
, Parallel loop activated with static block schedule
, Generating copyout(image[block*blockSize:blockSize])
, Loop is parallelizable
FMA (fused multiply-add) instruction(s) generated
, Loop is parallelizable
Accelerator kernel generated
Generating Tesla code
, #pragma acc loop gang, vector(4) /* blockIdx.y threadIdx.y */
, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
, FMA (fused multiply-add) instruction(s) generated
, Barrier
, Parallel region terminated

D:\Code\OpenACC\OpenACCProject\OpenACCProject>main_acc.exe
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
PGI: "acc_shutdown" not detected, performance results might be incomplete.
Please add the call "acc_shutdown(acc_device_nvidia)" to the end of your application to ensure that the performance results are complete.

Accelerator Kernel Timing data
D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c
main NVIDIA devicenum=
time(us): ,
: data region reached times
: data copyout transfers:
device time(us): total=, max=, min= avg=,
: compute region reached time
: kernel launched time
grid: [256x128] block: [32x4]
device time(us): total= max= min= avg=
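
Note that numBlock comes straight from acc_get_num_devices; on a machine with no NVIDIA device it is 0 and the divisions that follow fault. A defensive check is cheap (a sketch, not in the book's code):

    const int numBlock = acc_get_num_devices(acc_device_nvidia);
    if (numBlock < 1)
    {
        fprintf(stderr, "no NVIDIA device found\n");
        return 1;
    }
    const int blockSize = N / numBlock * N; // safe now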

● Improvement 6: multi-device version 2, adjusting the OpenMP part

 #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <openacc.h>

#define N (1024 * 8)

#pragma acc routine seq
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2)
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200; // placeholder, as above
    const int numBlock = acc_get_num_devices(acc_device_nvidia), blockSize = N / numBlock * N;
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;
    int *image = (int *)malloc(sizeof(int) * N * N);
    acc_init(acc_device_nvidia);
#pragma omp parallel for num_threads(numBlock) // the acc_set_device_num calls moved into a loop of their own
    for (int block = 0; block < numBlock; block++)
        acc_set_device_num(block, acc_device_nvidia);
#pragma omp parallel for num_threads(numBlock) // ("omp for" alone is invalid outside a parallel region, so "parallel for" is used here)
    for (int block = 0; block < numBlock; block++)
    {
        const int start = block * (N / numBlock), end = start + N / numBlock;
#pragma acc data copyout(image[block * blockSize : blockSize])
        {
#pragma acc kernels loop independent
            for (int i = start; i < end; i++)
            {
                for (int j = 0; j < N; j++)
                    image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
            }
        }
    }
    /*
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", image[i * N + j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
    */
    free(image);
    //getchar();
    return 0;
}

● Output

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc main.c -acc -Minfo -o main_acc.exe
julia:
, Generating acc routine seq
Generating Tesla code
, FMA (fused multiply-add) instruction(s) generated
, FMA (fused multiply-add) instruction(s) generated
main:
, Generating copyout(image[block*blockSize:blockSize])
, Loop is parallelizable
FMA (fused multiply-add) instruction(s) generated
, Loop is parallelizable
Accelerator kernel generated
Generating Tesla code
, #pragma acc loop gang, vector(4) /* blockIdx.y threadIdx.y */
, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
, FMA (fused multiply-add) instruction(s) generated

D:\Code\OpenACC\OpenACCProject\OpenACCProject>main_acc.exe
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
PGI: "acc_shutdown" not detected, performance results might be incomplete.
Please add the call "acc_shutdown(acc_device_nvidia)" to the end of your application to ensure that the performance results are complete.

Accelerator Kernel Timing data
D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c
main NVIDIA devicenum=
time(us): ,
: data region reached times
: data copyout transfers:
device time(us): total=, max=, min= avg=,
: compute region reached time
: kernel launched time
grid: [256x128] block: [32x4]
elapsed time(us): total=, max=, min=, avg=,
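
The timing output shows only a single kernel launch, which suggests this variant did not actually spread the work across devices: acc_set_device_num sets per-thread state, so binding devices in one parallel region and launching work in another (and compiling without -mp, as in the command above) leaves everything on the current device. The binding and the work have to share a parallel region, as in version 1:

#pragma omp parallel num_threads(numBlock)
{
    acc_set_device_num(omp_get_thread_num(), acc_device_nvidia); // per-thread binding, in the same region as the work
#pragma omp for
    for (int block = 0; block < numBlock; block++)
    {
        /* data region + kernels for this block, as in version 1 */
    }
}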
