Reference:

https://www.cnblogs.com/devilmaycry812839668/p/15348610.html

I have recently been reading the WarpDrive code. The parts that run on the GPU are hooked up through the pycuda library, which makes it convenient to call CUDA code from a Python environment. While using it, however, I noticed one thing: the CUDA functions have to be initialized after the CUDA memory has been initialized, otherwise an error is raised.
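Before getting into the WarpDrive scripts, here is a minimal, self-contained pycuda sketch of the workflow that the WarpDrive managers wrap: create a CUDA context, move data to the device, compile the kernel, then launch it. This is plain pycuda, not WarpDrive's API, and the kernel and variable names are only illustrative; it mirrors the cuda_increment kernel used in the scripts below.

# Minimal pycuda sketch (illustrative, not WarpDrive code).
import numpy as np
import pycuda.autoinit          # importing this creates and activates a CUDA context
import pycuda.driver as drv
from pycuda.compiler import SourceModule

# Compile a small kernel equivalent to cuda_increment below.
mod = SourceModule("""
__global__ void increment(float *data, int num_agents)
{
    int env_id = blockIdx.x;
    int agent_id = threadIdx.x;
    if (agent_id < num_agents) {
        data[env_id * num_agents + agent_id] += env_id + agent_id;
    }
}
""")
increment = mod.get_function("increment")

num_envs, num_agents = 2, 3
data = np.random.rand(num_envs, num_agents).astype(np.float32)

# drv.InOut copies the array to the device, runs the kernel, and copies the result back.
increment(drv.InOut(data), np.int32(num_agents),
          block=(num_agents, 1, 1), grid=(num_envs, 1))
print(data)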

Code (this version runs correctly):

import numpy as np

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
    CUDAFunctionManager, CUDALogController, CUDASampler, CUDAEnvironmentReset
)
from warp_drive.utils.data_feed import DataFeed

source_code = """
// A function to demonstrate how to manipulate data on the GPU.
// This function increments each the random data array we pushed to the GPU before.
// Each index corresponding to (env_id, agent_id) in the array is incremented by "agent_id + env_id".
// Everything inside the if() loop runs in parallel for each agent and environment.
//
extern "C"{
    __global__ void cuda_increment(
        float* data,
        int num_agents
    )
    {
        int env_id = blockIdx.x;
        int agent_id = threadIdx.x;
        if (agent_id < num_agents){
            int array_index = env_id * num_agents + agent_id;
            int increment = env_id + agent_id;
            data[array_index] += increment;
        }
    }
}
"""

from timeit import Timer


def push_random_data_and_increment_timer(
    num_runs=1,
    num_envs=2,
    num_agents=3,
    source_code=None
):
    assert source_code is not None

    def push_random_data(num_agents, num_envs):
        # Initialize the CUDA data manager
        cuda_data_manager = CUDADataManager(
            num_agents=num_agents,
            num_envs=num_envs,
            episode_length=100
        )

        # Create random data
        random_data = np.random.rand(num_envs, num_agents)

        # Push data from host to device
        data_feed = DataFeed()
        data_feed.add_data(
            name="random_data",
            data=random_data,
        )
        data_feed.add_data(
            name="num_agents",
            data=num_agents
        )
        cuda_data_manager.push_data_to_device(data_feed)

        return cuda_data_manager

    # Initialize the CUDA function manager
    def cuda_func_init():
        cuda_function_manager = CUDAFunctionManager(
            num_agents=num_agents,  # cuda_data_manager.meta_info("n_agents"),
            num_envs=num_envs  # cuda_data_manager.meta_info("n_envs")
        )

        # Load source code and initialize function
        cuda_function_manager.load_cuda_from_source_code(
            source_code,
            default_functions_included=False
        )
        cuda_function_manager.initialize_functions(["cuda_increment"])
        increment_function = cuda_function_manager._get_function("cuda_increment")

        return cuda_function_manager, increment_function

    def increment_data(cuda_data_manager, cuda_function_manager, increment_function):
        increment_function(
            cuda_data_manager.device_data("random_data"),
            cuda_data_manager.device_data("num_agents"),
            block=cuda_function_manager.block,
            grid=cuda_function_manager.grid
        )

    # set variable
    # cuda_data_manager = push_random_data(num_agents, num_envs)
    # cuda function init
    # cuda_function_manager, increment_function = cuda_func_init()
    # cuda function run
    # increment_data(cuda_data_manager, cuda_function_manager, increment_function)

    data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(number=num_runs)

    cuda_data_manager = push_random_data(num_agents, num_envs)
    cuda_function_manager, increment_function = cuda_func_init()
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)

    print(cuda_data_manager.pull_data_from_device('random_data'))

    return {
        "data push times": data_push_time,
        "code run time": program_run_time
    }


num_runs = 1000
times = {}

for scenario in [
    (1, 1),
    (1, 100),
    (1, 1000),
    (100, 1000),
    (1000, 1000)
]:
    num_envs, num_agents = scenario
    times.update(
        {
            f"envs={num_envs}, agents={num_agents}":
                push_random_data_and_increment_timer(
                    num_runs,
                    num_envs,
                    num_agents,
                    source_code
                )
        }
    )

print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
    print(f"{key:30}: mean data push times: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s")

'''
print(cuda_data_manager._meta_info)
print(cuda_data_manager._host_data)
print(cuda_data_manager._device_data_pointer)
print(cuda_data_manager._scalar_data_list)
print(cuda_data_manager._reset_data_list)
print(cuda_data_manager._log_data_list)
print(cuda_data_manager._device_data_via_torch)
print(cuda_data_manager._shared_constants)
print(cuda_data_manager._shape)
print(cuda_data_manager._dtype)
print(tensor_on_device)
time.sleep(300)
'''

Code that raises the error:

import numpy as np

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
    CUDAFunctionManager, CUDALogController, CUDASampler, CUDAEnvironmentReset
)
from warp_drive.utils.data_feed import DataFeed

source_code = """
// A function to demonstrate how to manipulate data on the GPU.
// This function increments each the random data array we pushed to the GPU before.
// Each index corresponding to (env_id, agent_id) in the array is incremented by "agent_id + env_id".
// Everything inside the if() loop runs in parallel for each agent and environment.
//
extern "C"{
    __global__ void cuda_increment(
        float* data,
        int num_agents
    )
    {
        int env_id = blockIdx.x;
        int agent_id = threadIdx.x;
        if (agent_id < num_agents){
            int array_index = env_id * num_agents + agent_id;
            int increment = env_id + agent_id;
            data[array_index] += increment;
        }
    }
}
"""

from timeit import Timer


def push_random_data_and_increment_timer(
    num_runs=1,
    num_envs=2,
    num_agents=3,
    source_code=None
):
    assert source_code is not None

    def push_random_data(num_agents, num_envs):
        # Initialize the CUDA data manager
        cuda_data_manager = CUDADataManager(
            num_agents=num_agents,
            num_envs=num_envs,
            episode_length=100
        )

        # Create random data
        random_data = np.random.rand(num_envs, num_agents)

        # Push data from host to device
        data_feed = DataFeed()
        data_feed.add_data(
            name="random_data",
            data=random_data,
        )
        data_feed.add_data(
            name="num_agents",
            data=num_agents
        )
        cuda_data_manager.push_data_to_device(data_feed)

        return cuda_data_manager

    # Initialize the CUDA function manager
    def cuda_func_init():
        cuda_function_manager = CUDAFunctionManager(
            num_agents=num_agents,  # cuda_data_manager.meta_info("n_agents"),
            num_envs=num_envs  # cuda_data_manager.meta_info("n_envs")
        )

        # Load source code and initialize function
        cuda_function_manager.load_cuda_from_source_code(
            source_code,
            default_functions_included=False
        )
        cuda_function_manager.initialize_functions(["cuda_increment"])
        increment_function = cuda_function_manager._get_function("cuda_increment")

        return cuda_function_manager, increment_function

    def increment_data(cuda_data_manager, cuda_function_manager, increment_function):
        increment_function(
            cuda_data_manager.device_data("random_data"),
            cuda_data_manager.device_data("num_agents"),
            block=cuda_function_manager.block,
            grid=cuda_function_manager.grid
        )

    # set variable
    # cuda_data_manager = push_random_data(num_agents, num_envs)
    # cuda function init
    # cuda_function_manager, increment_function = cuda_func_init()
    # cuda function run
    # increment_data(cuda_data_manager, cuda_function_manager, increment_function)

    # data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(number=num_runs)

    cuda_function_manager, increment_function = cuda_func_init()  ###
    cuda_data_manager = push_random_data(num_agents, num_envs)  ###
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)

    print(cuda_data_manager.pull_data_from_device('random_data'))

    return {
        "data push times": 0,  # data_push_time,
        "code run time": program_run_time
    }


num_runs = 1000
times = {}

for scenario in [
    (1, 1),
    (1, 100),
    (1, 1000),
    (100, 1000),
    (1000, 1000)
]:
    num_envs, num_agents = scenario
    times.update(
        {
            f"envs={num_envs}, agents={num_agents}":
                push_random_data_and_increment_timer(
                    num_runs,
                    num_envs,
                    num_agents,
                    source_code
                )
        }
    )

print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
    print(f"{key:30}: mean data push times: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s")

'''
print(cuda_data_manager._meta_info)
print(cuda_data_manager._host_data)
print(cuda_data_manager._device_data_pointer)
print(cuda_data_manager._scalar_data_list)
print(cuda_data_manager._reset_data_list)
print(cuda_data_manager._log_data_list)
print(cuda_data_manager._device_data_via_torch)
print(cuda_data_manager._shared_constants)
print(cuda_data_manager._shape)
print(cuda_data_manager._dtype)
print(tensor_on_device)
time.sleep(300)
'''

Error message:

Traceback (most recent call last):
  File "/home/xxxxxx/warp-drive/devil_make/tutorial-1-warp_drive_basics.py", line 145, in <module>
    source_code
  File "/home/xxxxxx/warp-drive/devil_make/tutorial-1-warp_drive_basics.py", line 116, in push_random_data_and_increment_timer
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
  File "/home/xxxxxx/anaconda3/envs/warp_drive/lib/python3.7/timeit.py", line 177, in timeit
    timing = self.inner(it, self.timer)
  File "<timeit-src>", line 6, in inner
  File "/home/xxxxxx/warp-drive/devil_make/tutorial-1-warp_drive_basics.py", line 116, in <lambda>
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
  File "/home/xxxxxx/warp-drive/devil_make/tutorial-1-warp_drive_basics.py", line 97, in increment_data
    grid=cuda_function_manager.grid
  File "/home/xxxxxx/anaconda3/envs/warp_drive/lib/python3.7/site-packages/pycuda/driver.py", line 480, in function_call
    func._set_block_shape(*block)
pycuda._driver.LogicError: cuFuncSetBlockShape failed: invalid resource handle

From this we can conclude that, when using pycuda, initializing the CUDA functions without first initializing any CUDA memory leads to the following error:

Error message:

pycuda._driver.LogicError: cuFuncSetBlockShape failed: invalid resource handle
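My guess at what is going on (an assumption, not verified against the WarpDrive internals): a compiled CUDA function handle is only valid inside the CUDA context it was compiled in, and it is presumably the data manager's first push to the device that creates and activates that context. If the function manager compiles the kernel before the data side has set anything up, the launch later happens under a different (or freshly created) context, and the driver rejects the stale function handle with exactly this "invalid resource handle" error. The plain-pycuda sketch below (illustrative names, not WarpDrive code) shows the same failure mode by compiling a kernel in one context and launching it while another context is current.

# Illustrative pycuda-only sketch of the suspected failure mode (assumption, not WarpDrive code):
# a function handle compiled in context A is launched while context B is current.
import pycuda.driver as drv
from pycuda.compiler import SourceModule

drv.init()
ctx_a = drv.Device(0).make_context()       # context A becomes current
mod = SourceModule("__global__ void noop(float *x) {}")
noop = mod.get_function("noop")            # this handle belongs to context A
ctx_a.pop()                                # context A is no longer current

ctx_b = drv.Device(0).make_context()       # context B becomes current
x = drv.mem_alloc(4)                       # allocating memory works fine in context B
try:
    # Expected (on my understanding of the handle/context mismatch) to raise:
    # pycuda._driver.LogicError: cuFuncSetBlockShape failed: invalid resource handle
    noop(x, block=(1, 1, 1), grid=(1, 1))
except drv.LogicError as err:
    print(err)
finally:
    ctx_b.pop()                            # context cleanup kept minimal for this sketch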

If the CUDA memory is initialized before the CUDA functions are initialized, no error occurs:

Code:

import numpy as np

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
    CUDAFunctionManager, CUDALogController, CUDASampler, CUDAEnvironmentReset
)
from warp_drive.utils.data_feed import DataFeed

source_code = """
// A function to demonstrate how to manipulate data on the GPU.
// This function increments each the random data array we pushed to the GPU before.
// Each index corresponding to (env_id, agent_id) in the array is incremented by "agent_id + env_id".
// Everything inside the if() loop runs in parallel for each agent and environment.
//
extern "C"{
    __global__ void cuda_increment(
        float* data,
        int num_agents
    )
    {
        int env_id = blockIdx.x;
        int agent_id = threadIdx.x;
        if (agent_id < num_agents){
            int array_index = env_id * num_agents + agent_id;
            int increment = env_id + agent_id;
            data[array_index] += increment;
        }
    }
}
"""

from timeit import Timer


def push_random_data_and_increment_timer(
    num_runs=1,
    num_envs=2,
    num_agents=3,
    source_code=None
):
    assert source_code is not None

    def push_random_data(num_agents, num_envs):
        # Initialize the CUDA data manager
        cuda_data_manager = CUDADataManager(
            num_agents=num_agents,
            num_envs=num_envs,
            episode_length=100
        )

        # Create random data
        random_data = np.random.rand(num_envs, num_agents)

        # Push data from host to device
        data_feed = DataFeed()
        data_feed.add_data(
            name="random_data",
            data=random_data,
        )
        data_feed.add_data(
            name="num_agents",
            data=num_agents
        )
        cuda_data_manager.push_data_to_device(data_feed)

        return cuda_data_manager

    # Initialize the CUDA function manager
    def cuda_func_init():
        cuda_function_manager = CUDAFunctionManager(
            num_agents=num_agents,  # cuda_data_manager.meta_info("n_agents"),
            num_envs=num_envs  # cuda_data_manager.meta_info("n_envs")
        )

        # Load source code and initialize function
        cuda_function_manager.load_cuda_from_source_code(
            source_code,
            default_functions_included=False
        )
        cuda_function_manager.initialize_functions(["cuda_increment"])
        increment_function = cuda_function_manager._get_function("cuda_increment")

        return cuda_function_manager, increment_function

    def increment_data(cuda_data_manager, cuda_function_manager, increment_function):
        increment_function(
            cuda_data_manager.device_data("random_data"),
            cuda_data_manager.device_data("num_agents"),
            block=cuda_function_manager.block,
            grid=cuda_function_manager.grid
        )

    # set variable
    # cuda_data_manager = push_random_data(num_agents, num_envs)
    # cuda function init
    # cuda_function_manager, increment_function = cuda_func_init()
    # cuda function run
    # increment_data(cuda_data_manager, cuda_function_manager, increment_function)

    # data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(number=num_runs)

    cuda_data_manager = push_random_data(num_agents, num_envs)  ###
    cuda_function_manager, increment_function = cuda_func_init()  ###
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)

    print(cuda_data_manager.pull_data_from_device('random_data'))

    return {
        "data push times": '0',  # data_push_time,
        "code run time": program_run_time
    }


num_runs = 1000
times = {}

for scenario in [
    (1, 1),
    (1, 100),
    (1, 1000),
    (100, 1000),
    (1000, 1000)
]:
    num_envs, num_agents = scenario
    times.update(
        {
            f"envs={num_envs}, agents={num_agents}":
                push_random_data_and_increment_timer(
                    num_runs,
                    num_envs,
                    num_agents,
                    source_code
                )
        }
    )

print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
    print(f"{key:30}: mean data push times: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s")

'''
print(cuda_data_manager._meta_info)
print(cuda_data_manager._host_data)
print(cuda_data_manager._device_data_pointer)
print(cuda_data_manager._scalar_data_list)
print(cuda_data_manager._reset_data_list)
print(cuda_data_manager._log_data_list)
print(cuda_data_manager._device_data_via_torch)
print(cuda_data_manager._shared_constants)
print(cuda_data_manager._shape)
print(cuda_data_manager._dtype)
print(tensor_on_device)
time.sleep(300)
'''

Curiously, it does not matter how much CUDA memory is initialized before the CUDA functions are initialized: any allocation at all, no matter how small, is enough to avoid the error. That is the puzzling part of this issue, although it is consistent with the guess above that what really matters is that the first data push brings up the CUDA context, not the amount of memory involved.

For example, the following code:

import numpy as np

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
    CUDAFunctionManager, CUDALogController, CUDASampler, CUDAEnvironmentReset
)
from warp_drive.utils.data_feed import DataFeed

source_code = """
// A function to demonstrate how to manipulate data on the GPU.
// This function increments each the random data array we pushed to the GPU before.
// Each index corresponding to (env_id, agent_id) in the array is incremented by "agent_id + env_id".
// Everything inside the if() loop runs in parallel for each agent and environment.
//
extern "C"{
    __global__ void cuda_increment(
        float* data,
        int num_agents
    )
    {
        int env_id = blockIdx.x;
        int agent_id = threadIdx.x;
        if (agent_id < num_agents){
            int array_index = env_id * num_agents + agent_id;
            int increment = env_id + agent_id;
            data[array_index] += increment;
        }
    }
}
"""

from timeit import Timer


def push_random_data_and_increment_timer(
    num_runs=1,
    num_envs=2,
    num_agents=3,
    source_code=None
):
    assert source_code is not None

    def push_random_data(num_agents, num_envs):
        # Initialize the CUDA data manager
        cuda_data_manager = CUDADataManager(
            num_agents=num_agents,
            num_envs=num_envs,
            episode_length=100
        )

        # Create random data
        random_data = np.random.rand(num_envs, num_agents)

        # Push data from host to device
        data_feed = DataFeed()
        data_feed.add_data(
            name="random_data",
            data=random_data,
        )
        data_feed.add_data(
            name="num_agents",
            data=num_agents
        )
        cuda_data_manager.push_data_to_device(data_feed)

        return cuda_data_manager

    # Initialize the CUDA function manager
    def cuda_func_init():
        cuda_function_manager = CUDAFunctionManager(
            num_agents=num_agents,  # cuda_data_manager.meta_info("n_agents"),
            num_envs=num_envs  # cuda_data_manager.meta_info("n_envs")
        )

        # Load source code and initialize function
        cuda_function_manager.load_cuda_from_source_code(
            source_code,
            default_functions_included=False
        )
        cuda_function_manager.initialize_functions(["cuda_increment"])
        increment_function = cuda_function_manager._get_function("cuda_increment")

        return cuda_function_manager, increment_function

    def increment_data(cuda_data_manager, cuda_function_manager, increment_function):
        increment_function(
            cuda_data_manager.device_data("random_data"),
            cuda_data_manager.device_data("num_agents"),
            block=cuda_function_manager.block,
            grid=cuda_function_manager.grid
        )

    # set variable
    # cuda_data_manager = push_random_data(num_agents, num_envs)
    # cuda function init
    # cuda_function_manager, increment_function = cuda_func_init()
    # cuda function run
    # increment_data(cuda_data_manager, cuda_function_manager, increment_function)

    # data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(number=num_runs)

    push_random_data(1, 1)

    cuda_function_manager, increment_function = cuda_func_init()  ###
    cuda_data_manager = push_random_data(num_agents, num_envs)  ###
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)

    print(cuda_data_manager.pull_data_from_device('random_data'))

    return {
        "data push times": '0',  # data_push_time,
        "code run time": program_run_time
    }


num_runs = 1000
times = {}

for scenario in [
    (1, 1),
    (1, 100),
    (1, 1000),
    (100, 1000),
    (1000, 1000)
]:
    num_envs, num_agents = scenario
    times.update(
        {
            f"envs={num_envs}, agents={num_agents}":
                push_random_data_and_increment_timer(
                    num_runs,
                    num_envs,
                    num_agents,
                    source_code
                )
        }
    )

print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
    print(f"{key:30}: mean data push times: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s")

'''
print(cuda_data_manager._meta_info)
print(cuda_data_manager._host_data)
print(cuda_data_manager._device_data_pointer)
print(cuda_data_manager._scalar_data_list)
print(cuda_data_manager._reset_data_list)
print(cuda_data_manager._log_data_list)
print(cuda_data_manager._device_data_via_torch)
print(cuda_data_manager._shared_constants)
print(cuda_data_manager._shape)
print(cuda_data_manager._dtype)
print(tensor_on_device)
time.sleep(300)
'''

The key lines:

    push_random_data(1, 1)

    cuda_function_manager, increment_function = cuda_func_init()  ###
    cuda_data_manager = push_random_data(num_agents, num_envs)  ###
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)

The line below is the CUDA memory allocation. Even a very small allocation is enough for the subsequent CUDA function initialization to run normally; if there is no CUDA memory allocation at all before the CUDA functions are initialized, the error above is raised.

push_random_data(1, 1)

Initializing CUDA memory (the memory allocation step):

    push_random_data(1, 1)

Initializing the CUDA functions:
cuda_function_manager, increment_function = cuda_func_init() ###

Executing the CUDA function:

increment_data(cuda_data_manager, cuda_function_manager, increment_function)
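Putting the pieces together, the safe ordering, using the helper functions defined inside push_random_data_and_increment_timer in the scripts above, looks like this (it simply restates the ordering of the last working example; the initial push_random_data(1, 1) is only there to make sure some CUDA memory exists before the functions are compiled):

    # Safe ordering (inside push_random_data_and_increment_timer, using its helpers):
    push_random_data(1, 1)                                          # any allocation, however small, comes first
    cuda_function_manager, increment_function = cuda_func_init()    # now the function initialization succeeds
    cuda_data_manager = push_random_data(num_agents, num_envs)      # push the data the kernel will actually use
    increment_data(cuda_data_manager, cuda_function_manager, increment_function)   # launch the kernel
    print(cuda_data_manager.pull_data_from_device("random_data"))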
