▶ The code from the book: optimizing a Julia set renderer step by step

● No parallelization (only manual tuning of variables and the like)

#include <stdio.h>
#include <stdlib.h>
#include <openacc.h>

#define N (1024 * 8)

// Compute the iteration count for a single point
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2) // two z-updates per loop iteration
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200;                                 // maximum iteration count (200 assumed here)
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;  // iteration constant c and step size across the frame
    int *image = (int *)malloc(sizeof(int) * N * N);
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter));
        fprintf(pf, "\n");
    }
    fclose(pf);
    free(image);
    //getchar();
    return 0;
}
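Before moving on: the renderer only dumps iteration counts as text, so viewing the fractal takes one extra step. Below is a minimal converter sketch (my addition, not from the book; the file names and the linear gray mapping are illustrative, and maxIter must match the renderer's assumed value of 200):

// pgm_convert.c: turn the iteration-count dump into a grayscale PGM image.
// Assumes one row of N counts per line, as written by the programs in this post.
#include <stdio.h>
#include <stdlib.h>

#define N (1024 * 8)

int main(void)
{
    const int maxIter = 200;                // must match the renderer's maxIter
    FILE *in = fopen("R:/output.txt", "r");
    FILE *out = fopen("R:/output.pgm", "wb");
    if (!in || !out)
        return 1;
    fprintf(out, "P5\n%d %d\n255\n", N, N); // binary PGM header
    for (long k = 0; k < (long)N * N; k++)
    {
        int v;
        if (fscanf(in, "%d", &v) != 1)      // fscanf skips spaces and newlines
            return 1;
        if (v > maxIter)
            v = maxIter;                    // non-escaping points get full intensity
        fputc(v * 255 / maxIter, out);      // linear map to 8-bit gray
    }
    fclose(in);
    fclose(out);
    return 0;
}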

● Output (every later version produces the same result, so it is not shown again)

● Improvement 1: parallelize the computation

#include <stdio.h>
#include <stdlib.h>
#include <openacc.h>

#define N (1024 * 8)

#pragma acc routine seq
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2)
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200; // 200 assumed, as above
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;
    int *image = (int *)malloc(sizeof(int) * N * N);
    #pragma acc data copyout(image[0:N * N]) // data region
    {
        #pragma acc kernels loop independent // parallelize the loop, asserting that the iterations are independent
        for (int i = 0; i < N; i++)
        {
            for (int j = 0; j < N; j++)
                image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
        }
    }
    /* // file output commented out so Nvvp does not include it in the analysis
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", image[i * N + j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
    */
    free(image);
    //getchar();
    return 0;
}

● Output

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc -acc -Minfo main.c -o main_acc.exe
julia:
, Generating acc routine seq
Generating Tesla code
, FMA (fused multiply-add) instruction(s) generated
, FMA (fused multiply-add) instruction(s) generated
main:
, Generating copyout(image[:])
, Loop is parallelizable
FMA (fused multiply-add) instruction(s) generated
, Loop is parallelizable
Accelerator kernel generated
Generating Tesla code
, #pragma acc loop gang, vector(4) /* blockIdx.y threadIdx.y */
, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
, FMA (fused multiply-add) instruction(s) generated

D:\Code\OpenACC\OpenACCProject\OpenACCProject>main_acc.exe
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
PGI: "acc_shutdown" not detected, performance results might be incomplete.
Please add the call "acc_shutdown(acc_device_nvidia)" to the end of your application to ensure that the performance results are complete.

Accelerator Kernel Timing data
D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c
main NVIDIA devicenum=
time(us): ,
: data region reached times
: data copyout transfers:
device time(us): total=, max=, min= avg=,
: compute region reached time
: kernel launched time
grid: [256x128] block: [32x4]
device time(us): total= max= min= avg=
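The runtime warning above is easy to act on. A minimal sketch of the end of main with the suggested call added (acc_shutdown is a standard OpenACC runtime routine declared in openacc.h):

    free(image);
    acc_shutdown(acc_device_nvidia); // finalize the runtime so the profiler's timing data is complete
    return 0;
}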

● Improvement 2: compute in blocks; no obvious speedup, but it prepares for asynchronous execution

#include <stdio.h>
#include <stdlib.h>
#include <openacc.h>

#define N (1024 * 8)

#pragma acc routine seq
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2)
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200; // 200 assumed, as above
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;
    int *image = (int *)malloc(sizeof(int) * N * N);
    #pragma acc data copyout(image[0:N * N])
    {
        const int numblock = 4; // number of blocks (4 matches the four kernel launches below)
        for (int block = 0; block < numblock; block++) // compute one block per pass
        {
            const int start = block * (N / numblock), end = start + N / numblock; // first and one-past-last row of this block
            #pragma acc kernels loop independent
            for (int i = start; i < end; i++)
            {
                for (int j = 0; j < N; j++)
                    image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
            }
        }
    }
    /*
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", image[i * N + j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
    */
    free(image);
    //getchar();
    return 0;
}

● Output

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc -acc -Minfo main.c -o main_acc.exe
julia:
, Generating acc routine seq
Generating Tesla code
, FMA (fused multiply-add) instruction(s) generated
, FMA (fused multiply-add) instruction(s) generated
main:
, Generating copyout(image[:])
, Loop is parallelizable
FMA (fused multiply-add) instruction(s) generated
, Loop is parallelizable
Accelerator kernel generated
Generating Tesla code
, #pragma acc loop gang, vector(4) /* blockIdx.y threadIdx.y */
, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
, FMA (fused multiply-add) instruction(s) generated

D:\Code\OpenACC\OpenACCProject\OpenACCProject>main_acc.exe
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
PGI: "acc_shutdown" not detected, performance results might be incomplete.
Please add the call "acc_shutdown(acc_device_nvidia)" to the end of your application to ensure that the performance results are complete.

Accelerator Kernel Timing data
D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c
main NVIDIA devicenum=
time(us): ,
: data region reached times
: data copyout transfers:
device time(us): total=, max=, min= avg=,
: compute region reached times
: kernel launched times
grid: [256x128] block: [32x4]
device time(us): total= max= min= avg=
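One caveat worth making explicit (my addition, not from the book): start and end use integer division, so if N were not a multiple of numblock, the final N % numblock rows would be silently skipped. A cheap guard makes the assumption visible:

#include <assert.h>

    const int numblock = 4;
    assert(N % numblock == 0); // the blocked loop covers all N rows only when this holds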

● Improvement 3: transfer in blocks; no obvious speedup, but it prepares for asynchronous execution

#include <stdio.h>
#include <stdlib.h>
#include <openacc.h>

#define N (1024 * 8)

#pragma acc routine seq
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2)
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200; // 200 assumed, as above
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;
    int *image = (int *)malloc(sizeof(int) * N * N);
    #pragma acc data create(image[0:N * N]) // switched to create: the device copy needs no initial data from the host
    {
        const int numBlock = 4, blockSize = N * N / numBlock; // still computing in blocks
        for (int block = 0; block < numBlock; block++)
        {
            const int start = block * (N / numBlock), end = start + N / numBlock;
            #pragma acc kernels loop independent
            for (int i = start; i < end; i++)
            {
                for (int j = 0; j < N; j++)
                    image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
            }
            #pragma acc update host(image[block * blockSize : blockSize]) // copy each block back to the host as soon as it is computed
        }
    }
    /*
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", image[i * N + j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
    */
    free(image);
    //getchar();
    return 0;
}

● Output

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc -acc -Minfo main.c -o main_acc.exe
julia:
, Generating acc routine seq
Generating Tesla code
, FMA (fused multiply-add) instruction(s) generated
, FMA (fused multiply-add) instruction(s) generated
main:
, Generating create(image[:])
, Loop is parallelizable
FMA (fused multiply-add) instruction(s) generated
, Loop is parallelizable
Accelerator kernel generated
Generating Tesla code
, #pragma acc loop gang, vector(4) /* blockIdx.y threadIdx.y */
, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
, FMA (fused multiply-add) instruction(s) generated
, Generating update self(image[block*blockSize:blockSize])

D:\Code\OpenACC\OpenACCProject\OpenACCProject>main_acc.exe
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
PGI: "acc_shutdown" not detected, performance results might be incomplete.
Please add the call "acc_shutdown(acc_device_nvidia)" to the end of your application to ensure that the performance results are complete.

Accelerator Kernel Timing data
D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c
main NVIDIA devicenum=
time(us): ,
: data region reached times
: compute region reached times
: kernel launched times
grid: [256x128] block: [32x4]
elapsed time(us): total=, max=, min= avg=,
: update directive reached times
: data copyout transfers:
device time(us): total=, max=, min= avg=,
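The update directive relies on OpenACC's array-section syntax, where the second field is a length, not an end index. Two illustrative examples against the image array used here (the loop variable i is assumed to be in scope):

    #pragma acc update host(image[0 : N])     // the first row: N elements starting at index 0
    #pragma acc update host(image[i * N : N]) // row i: N elements starting at offset i * N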

● Improvement 4: asynchronous computation with two-way transfers

#include <stdio.h>
#include <stdlib.h>
#include <openacc.h>

#define N (1024 * 8)

#pragma acc routine seq
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2)
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200; // 200 assumed, as above
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;
    int *image = (int *)malloc(sizeof(int) * N * N);
    #pragma acc data create(image[0:N * N])
    {
        const int numBlock = 4, blockSize = N / numBlock * N;
        for (int block = 0; block < numBlock; block++)
        {
            const int start = block * (N / numBlock), end = start + N / numBlock;
            #pragma acc kernels loop independent async(block + 1) // asynchronous compute, tagged with the block number
            for (int i = start; i < end; i++)
            {
                for (int j = 0; j < N; j++)
                    image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
            }
            #pragma acc update host(image[block * blockSize : blockSize]) async(block + 1) // transfer each block asynchronously once its kernel finishes
        }
        #pragma acc wait
    }
    /*
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", image[i * N + j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
    */
    free(image);
    //getchar();
    return 0;
}

● Output

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc -acc -Minfo main.c -o main_acc.exe
julia:
, Generating acc routine seq
Generating Tesla code
, FMA (fused multiply-add) instruction(s) generated
, FMA (fused multiply-add) instruction(s) generated
main:
, Generating create(image[:])
, Loop is parallelizable
FMA (fused multiply-add) instruction(s) generated
, Loop is parallelizable
Accelerator kernel generated
Generating Tesla code
, #pragma acc loop gang, vector(4) /* blockIdx.y threadIdx.y */
, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
, FMA (fused multiply-add) instruction(s) generated
, Generating update self(image[block*blockSize:blockSize])

D:\Code\OpenACC\OpenACCProject\OpenACCProject>main_acc.exe
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= queue= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= queue= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= queue= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= queue= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
PGI: "acc_shutdown" not detected, performance results might be incomplete.
Please add the call "acc_shutdown(acc_device_nvidia)" to the end of your application to ensure that the performance results are complete.

Accelerator Kernel Timing data
Timing may be affected by asynchronous behavior
set PGI_ACC_SYNCHRONOUS to 1 to disable async() clauses
D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c
main NVIDIA devicenum=
time(us): ,
: data region reached times
: compute region reached times
: kernel launched times
grid: [256x128] block: [32x4]
device time(us): total= max= min= avg=
: update directive reached times
: data copyout transfers:
device time(us): total=, max=, min= avg=,
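Each distinct async(...) argument names its own queue (PGI maps each queue onto a CUDA stream), so the kernel for block k+1 can overlap the copy-back of block k, while operations sharing a queue still run in order, which keeps each update after its own kernel. With many blocks the queue count can be capped by reusing tags; a sketch (my variant, assuming two queues suffice to sustain the overlap):

        for (int block = 0; block < numBlock; block++)
        {
            const int start = block * (N / numBlock), end = start + N / numBlock;
            #pragma acc kernels loop independent async(block % 2 + 1) // alternate between two queues
            for (int i = start; i < end; i++)
                for (int j = 0; j < N; j++)
                    image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
            #pragma acc update host(image[block * blockSize : blockSize]) async(block % 2 + 1) // same queue as the kernel keeps the copy ordered after it
        }
        #pragma acc wait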

● Using unified memory addressing (Ubuntu only; not supported on win64)
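On Linux, PGI's managed-memory mode gives a taste of this: compiling with -ta=tesla:managed allocates malloc'd memory as CUDA unified memory, so the data and update directives can simply be dropped. A minimal sketch (the compile line and code are illustrative, not from the book):

pgcc -acc -ta=tesla:managed -Minfo main.c -o main_acc

    int *image = (int *)malloc(sizeof(int) * N * N); // becomes CUDA managed memory under -ta=tesla:managed
    #pragma acc kernels loop independent             // no data region or update directives required
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);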

● Improvement 5: multi-device version 1, using OpenMP

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <openacc.h>

#define N (1024 * 8)

#pragma acc routine seq
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2)
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200; // 200 assumed, as above
    const int numBlock = acc_get_num_devices(acc_device_nvidia), blockSize = N / numBlock * N; // query the number of target devices through the OpenACC runtime and use it as the block count
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;
    int *image = (int *)malloc(sizeof(int) * N * N);
    acc_init(acc_device_nvidia); // initialize all target devices in one go
    #pragma omp parallel num_threads(numBlock) // one host thread per target device, each dispatching its own work
    {
        acc_set_device_num(omp_get_thread_num(), acc_device_nvidia); // bind this thread to its target device
        #pragma omp for
        for (int block = 0; block < numBlock; block++)
        {
            const int start = block * (N / numBlock), end = start + N / numBlock;
            #pragma acc data copyout(image[block * blockSize : blockSize])
            {
                #pragma acc kernels loop independent
                for (int i = start; i < end; i++)
                {
                    for (int j = 0; j < N; j++)
                        image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
                }
            }
        }
    }
    /*
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", image[i * N + j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
    */
    free(image);
    //getchar();
    return 0;
}

● Output

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc -acc -mp -Minfo main.c -o main_acc.exe
julia:
, Generating acc routine seq
Generating Tesla code
, FMA (fused multiply-add) instruction(s) generated
, FMA (fused multiply-add) instruction(s) generated
main:
, Parallel region activated
, Parallel loop activated with static block schedule
, Generating copyout(image[block*blockSize:blockSize])
, Loop is parallelizable
FMA (fused multiply-add) instruction(s) generated
, Loop is parallelizable
Accelerator kernel generated
Generating Tesla code
, #pragma acc loop gang, vector(4) /* blockIdx.y threadIdx.y */
, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
, FMA (fused multiply-add) instruction(s) generated
, Barrier
, Parallel region terminated

D:\Code\OpenACC\OpenACCProject\OpenACCProject>main_acc.exe
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
PGI: "acc_shutdown" not detected, performance results might be incomplete.
Please add the call "acc_shutdown(acc_device_nvidia)" to the end of your application to ensure that the performance results are complete.

Accelerator Kernel Timing data
D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c
main NVIDIA devicenum=
time(us): ,
: data region reached times
: data copyout transfers:
device time(us): total=, max=, min= avg=,
: compute region reached time
: kernel launched time
grid: [256x128] block: [32x4]
device time(us): total= max= min= avg=
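One defensive check worth adding (my addition): acc_get_num_devices returns 0 when no NVIDIA device is visible, which would make the blockSize computation divide by zero. A small guard is cheap insurance:

    const int numBlock = acc_get_num_devices(acc_device_nvidia);
    if (numBlock < 1)
    {
        fprintf(stderr, "no NVIDIA device found\n");
        return 1;
    }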

● Improvement 6: multi-device version 2, adjusting the OpenMP setup

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <openacc.h>

#define N (1024 * 8)

#pragma acc routine seq
int julia(const float cre, const float cim, float zre, float zim, const int maxIter)
{
    float zre2 = 0.0f, zim2 = 0.0f;
    for (int iter = 0; iter < maxIter; iter += 2)
    {
        zre2 = zre * zre - zim * zim + cre, zim2 = 2.0f * zre * zim + cim;
        if (zre2 * zre2 + zim2 * zim2 > 4.0f)
            return iter;
        zre = zre2 * zre2 - zim2 * zim2 + cre, zim = 2.0f * zre2 * zim2 + cim;
        if (zre * zre + zim * zim > 4.0f)
            return iter;
    }
    return maxIter + 1 + (maxIter % 2);
}

int main()
{
    const int maxIter = 200; // 200 assumed, as above
    const int numBlock = acc_get_num_devices(acc_device_nvidia), blockSize = N / numBlock * N;
    const float cre = -0.8350, cim = -0.2321, h = 4.0f / N;
    int *image = (int *)malloc(sizeof(int) * N * N);
    acc_init(acc_device_nvidia);
    #pragma omp parallel for num_threads(numBlock) // the acc_set_device_num calls now sit in their own loop
    for (int block = 0; block < numBlock; block++)
        acc_set_device_num(block, acc_device_nvidia);
    #pragma omp parallel for num_threads(numBlock) // one thread per device again ("#pragma omp for" alone is not valid outside a parallel region)
    for (int block = 0; block < numBlock; block++)
    {
        const int start = block * (N / numBlock), end = start + N / numBlock;
        #pragma acc data copyout(image[block * blockSize : blockSize])
        {
            #pragma acc kernels loop independent
            for (int i = start; i < end; i++)
            {
                for (int j = 0; j < N; j++)
                    image[i * N + j] = julia(cre, cim, i * h - 2.0f, j * h - 2.0f, maxIter);
            }
        }
    }
    /*
    FILE *pf = fopen("R:/output.txt", "w");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            fprintf(pf, "%d ", image[i * N + j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
    */
    free(image);
    //getchar();
    return 0;
}

● Output

D:\Code\OpenACC\OpenACCProject\OpenACCProject>pgcc main.c -acc -Minfo -o main_acc.exe
julia:
, Generating acc routine seq
Generating Tesla code
, FMA (fused multiply-add) instruction(s) generated
, FMA (fused multiply-add) instruction(s) generated
main:
, Generating copyout(image[block*blockSize:blockSize])
, Loop is parallelizable
FMA (fused multiply-add) instruction(s) generated
, Loop is parallelizable
Accelerator kernel generated
Generating Tesla code
, #pragma acc loop gang, vector(4) /* blockIdx.y threadIdx.y */
, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
, FMA (fused multiply-add) instruction(s) generated

D:\Code\OpenACC\OpenACCProject\OpenACCProject>main_acc.exe
launch CUDA kernel file=D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c function=main
line= device= threadid= num_gangs= num_workers= vector_length= grid=256x128 block=32x4
PGI: "acc_shutdown" not detected, performance results might be incomplete.
Please add the call "acc_shutdown(acc_device_nvidia)" to the end of your application to ensure that the performance results are complete.

Accelerator Kernel Timing data
D:\Code\OpenACC\OpenACCProject\OpenACCProject\main.c
main NVIDIA devicenum=
time(us): ,
: data region reached times
: data copyout transfers:
device time(us): total=, max=, min= avg=,
: compute region reached time
: kernel launched time
grid: [256x128] block: [32x4]
elapsed time(us): total=, max=, min=, avg=,
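Note that the compile line for this version omits -mp, so pgcc ignores the OpenMP pragmas entirely; that is why this -Minfo output shows no "Parallel region activated" and only one kernel launch appears at run time. To actually drive multiple devices it should be compiled like version 1:

pgcc main.c -acc -mp -Minfo -o main_acc.exe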
