Reference:

https://www.cnblogs.com/devilmaycry812839668/p/15348610.html

I have recently been reading the WarpDrive source code. The code that runs on the GPU is bridged to Python through the pycuda library, which makes it easy to call CUDA code from a Python environment. While experimenting with it, however, I noticed one thing: the CUDA function initialization has to happen after the CUDA memory initialization, otherwise an error is raised.
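(For context, WarpDrive's managers wrap the usual pycuda workflow: create a CUDA context, copy data to the device, compile a kernel from source, and launch it. The following is a minimal, self-contained plain-pycuda sketch of that workflow, shown only as a reference point for readers who have not used pycuda directly; it is not WarpDrive code and, on its own, it does not reproduce the ordering problem described in this post. The kernel is the same cuda_increment used throughout the post, and the sketch assumes pycuda is installed and a GPU is available.)

import numpy as np
import pycuda.autoinit            # creates the CUDA context on import
import pycuda.driver as cuda
from pycuda.compiler import SourceModule

num_envs, num_agents = 2, 3

# Device memory: allocate a buffer on the GPU and copy the host data into it.
host_data = np.random.rand(num_envs, num_agents).astype(np.float32)
device_data = cuda.mem_alloc(host_data.nbytes)
cuda.memcpy_htod(device_data, host_data)

# Compile the kernel from source and fetch the function handle.
module = SourceModule("""
__global__ void cuda_increment(float *data, int num_agents)
{
    int env_id = blockIdx.x;
    int agent_id = threadIdx.x;
    if (agent_id < num_agents) {
        data[env_id * num_agents + agent_id] += env_id + agent_id;
    }
}
""")
increment = module.get_function("cuda_increment")

# Launch: one block per environment, one thread per agent.
increment(device_data, np.int32(num_agents),
          block=(num_agents, 1, 1), grid=(num_envs, 1))

# Copy the result back and check it.
cuda.memcpy_dtoh(host_data, device_data)
print(host_data)

In this plain-pycuda sketch the relative order of the allocation and the compilation does not matter, because pycuda.autoinit has already created the context. With the WarpDrive managers, as the experiments below show, the data manager has to be created first.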

Code (this version runs correctly):

import numpy as np

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
    CUDAFunctionManager, CUDALogController, CUDASampler, CUDAEnvironmentReset
)
from warp_drive.utils.data_feed import DataFeed

source_code = """
// A function to demonstrate how to manipulate data on the GPU.
// This function increments each entry of the random data array we pushed to the GPU before.
// Each index corresponding to (env_id, agent_id) in the array is incremented by "agent_id + env_id".
// Everything inside the if() block runs in parallel for each agent and environment.
//
extern "C"{
    __global__ void cuda_increment(
        float* data,
        int num_agents
    )
    {
        int env_id = blockIdx.x;
        int agent_id = threadIdx.x;
        if (agent_id < num_agents){
            int array_index = env_id * num_agents + agent_id;
            int increment = env_id + agent_id;
            data[array_index] += increment;
        }
    }
}
"""

from timeit import Timer


def push_random_data_and_increment_timer(
    num_runs=1,
    num_envs=2,
    num_agents=3,
    source_code=None
):
    assert source_code is not None

    def push_random_data(num_agents, num_envs):
        # Initialize the CUDA data manager
        cuda_data_manager = CUDADataManager(
            num_agents=num_agents,
            num_envs=num_envs,
            episode_length=100
        )
        # Create random data
        random_data = np.random.rand(num_envs, num_agents)
        # Push data from host to device
        data_feed = DataFeed()
        data_feed.add_data(
            name="random_data",
            data=random_data,
        )
        data_feed.add_data(
            name="num_agents",
            data=num_agents
        )
        cuda_data_manager.push_data_to_device(data_feed)
        return cuda_data_manager

    # Initialize the CUDA function manager
    def cuda_func_init():
        cuda_function_manager = CUDAFunctionManager(
            num_agents=num_agents,  # cuda_data_manager.meta_info("n_agents"),
            num_envs=num_envs  # cuda_data_manager.meta_info("n_envs")
        )
        # Load source code and initialize function
        cuda_function_manager.load_cuda_from_source_code(
            source_code,
            default_functions_included=False
        )
        cuda_function_manager.initialize_functions(["cuda_increment"])
        increment_function = cuda_function_manager._get_function("cuda_increment")
        return cuda_function_manager, increment_function

    def increment_data(cuda_data_manager, cuda_function_manager, increment_function):
        increment_function(
            cuda_data_manager.device_data("random_data"),
            cuda_data_manager.device_data("num_agents"),
            block=cuda_function_manager.block,
            grid=cuda_function_manager.grid
        )

    # set variable
    # cuda_data_manager = push_random_data(num_agents, num_envs)

    # cuda function init
    # cuda_function_manager, increment_function = cuda_func_init()

    # cuda function run
    # increment_data(cuda_data_manager, cuda_function_manager, increment_function)

    # CUDA memory (the data manager) is initialized before the CUDA function manager -- this order works.
    data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(number=num_runs)
    cuda_data_manager = push_random_data(num_agents, num_envs)
    cuda_function_manager, increment_function = cuda_func_init()
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
    print(cuda_data_manager.pull_data_from_device('random_data'))
    return {
        "data push times": data_push_time,
        "code run time": program_run_time
    }


num_runs = 1000
times = {}

for scenario in [
    (1, 1),
    (1, 100),
    (1, 1000),
    (100, 1000),
    (1000, 1000)
]:
    num_envs, num_agents = scenario
    times.update(
        {
            f"envs={num_envs}, agents={num_agents}":
                push_random_data_and_increment_timer(
                    num_runs,
                    num_envs,
                    num_agents,
                    source_code
                )
        }
    )

print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
    print(f"{key:30}: mean data push times: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s")

'''
print(cuda_data_manager._meta_info)
print(cuda_data_manager._host_data)
print(cuda_data_manager._device_data_pointer)
print(cuda_data_manager._scalar_data_list)
print(cuda_data_manager._reset_data_list)
print(cuda_data_manager._log_data_list)
print(cuda_data_manager._device_data_via_torch)
print(cuda_data_manager._shared_constants)
print(cuda_data_manager._shape)
print(cuda_data_manager._dtype)
print(tensor_on_device)
time.sleep(300)
'''

Code that triggers the error:

import numpy as np

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
    CUDAFunctionManager, CUDALogController, CUDASampler, CUDAEnvironmentReset
)
from warp_drive.utils.data_feed import DataFeed

source_code = """
// A function to demonstrate how to manipulate data on the GPU.
// This function increments each entry of the random data array we pushed to the GPU before.
// Each index corresponding to (env_id, agent_id) in the array is incremented by "agent_id + env_id".
// Everything inside the if() block runs in parallel for each agent and environment.
//
extern "C"{
    __global__ void cuda_increment(
        float* data,
        int num_agents
    )
    {
        int env_id = blockIdx.x;
        int agent_id = threadIdx.x;
        if (agent_id < num_agents){
            int array_index = env_id * num_agents + agent_id;
            int increment = env_id + agent_id;
            data[array_index] += increment;
        }
    }
}
"""

from timeit import Timer


def push_random_data_and_increment_timer(
    num_runs=1,
    num_envs=2,
    num_agents=3,
    source_code=None
):
    assert source_code is not None

    def push_random_data(num_agents, num_envs):
        # Initialize the CUDA data manager
        cuda_data_manager = CUDADataManager(
            num_agents=num_agents,
            num_envs=num_envs,
            episode_length=100
        )
        # Create random data
        random_data = np.random.rand(num_envs, num_agents)
        # Push data from host to device
        data_feed = DataFeed()
        data_feed.add_data(
            name="random_data",
            data=random_data,
        )
        data_feed.add_data(
            name="num_agents",
            data=num_agents
        )
        cuda_data_manager.push_data_to_device(data_feed)
        return cuda_data_manager

    # Initialize the CUDA function manager
    def cuda_func_init():
        cuda_function_manager = CUDAFunctionManager(
            num_agents=num_agents,  # cuda_data_manager.meta_info("n_agents"),
            num_envs=num_envs  # cuda_data_manager.meta_info("n_envs")
        )
        # Load source code and initialize function
        cuda_function_manager.load_cuda_from_source_code(
            source_code,
            default_functions_included=False
        )
        cuda_function_manager.initialize_functions(["cuda_increment"])
        increment_function = cuda_function_manager._get_function("cuda_increment")
        return cuda_function_manager, increment_function

    def increment_data(cuda_data_manager, cuda_function_manager, increment_function):
        increment_function(
            cuda_data_manager.device_data("random_data"),
            cuda_data_manager.device_data("num_agents"),
            block=cuda_function_manager.block,
            grid=cuda_function_manager.grid
        )

    # set variable
    # cuda_data_manager = push_random_data(num_agents, num_envs)

    # cuda function init
    # cuda_function_manager, increment_function = cuda_func_init()

    # cuda function run
    # increment_data(cuda_data_manager, cuda_function_manager, increment_function)

    # data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(number=num_runs)

    # The CUDA function manager is initialized BEFORE any CUDA memory -- this order triggers the error below.
    cuda_function_manager, increment_function = cuda_func_init()  ###
    cuda_data_manager = push_random_data(num_agents, num_envs)  ###
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
    print(cuda_data_manager.pull_data_from_device('random_data'))
    return {
        "data push times": 0,  # data_push_time,
        "code run time": program_run_time
    }


num_runs = 1000
times = {}

for scenario in [
    (1, 1),
    (1, 100),
    (1, 1000),
    (100, 1000),
    (1000, 1000)
]:
    num_envs, num_agents = scenario
    times.update(
        {
            f"envs={num_envs}, agents={num_agents}":
                push_random_data_and_increment_timer(
                    num_runs,
                    num_envs,
                    num_agents,
                    source_code
                )
        }
    )

print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
    print(f"{key:30}: mean data push times: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s")

'''
print(cuda_data_manager._meta_info)
print(cuda_data_manager._host_data)
print(cuda_data_manager._device_data_pointer)
print(cuda_data_manager._scalar_data_list)
print(cuda_data_manager._reset_data_list)
print(cuda_data_manager._log_data_list)
print(cuda_data_manager._device_data_via_torch)
print(cuda_data_manager._shared_constants)
print(cuda_data_manager._shape)
print(cuda_data_manager._dtype)
print(tensor_on_device)
time.sleep(300)
'''

Error message:

Traceback (most recent call last):
  File "/home/xxxxxx/warp-drive/devil_make/tutorial-1-warp_drive_basics.py", line 145, in <module>
    source_code
  File "/home/xxxxxx/warp-drive/devil_make/tutorial-1-warp_drive_basics.py", line 116, in push_random_data_and_increment_timer
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
  File "/home/xxxxxx/anaconda3/envs/warp_drive/lib/python3.7/timeit.py", line 177, in timeit
    timing = self.inner(it, self.timer)
  File "<timeit-src>", line 6, in inner
  File "/home/xxxxxx/warp-drive/devil_make/tutorial-1-warp_drive_basics.py", line 116, in <lambda>
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
  File "/home/xxxxxx/warp-drive/devil_make/tutorial-1-warp_drive_basics.py", line 97, in increment_data
    grid=cuda_function_manager.grid
  File "/home/xxxxxx/anaconda3/envs/warp_drive/lib/python3.7/site-packages/pycuda/driver.py", line 480, in function_call
    func._set_block_shape(*block)
pycuda._driver.LogicError: cuFuncSetBlockShape failed: invalid resource handle

This shows that, when using pycuda, if no CUDA memory has been initialized before the CUDA function is initialized, the kernel launch fails with the error:

pycuda._driver.LogicError: cuFuncSetBlockShape failed: invalid resource handle

If CUDA memory is initialized before the CUDA function initialization, no error is raised:

Code:

import numpy as np

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
    CUDAFunctionManager, CUDALogController, CUDASampler, CUDAEnvironmentReset
)
from warp_drive.utils.data_feed import DataFeed

source_code = """
// A function to demonstrate how to manipulate data on the GPU.
// This function increments each entry of the random data array we pushed to the GPU before.
// Each index corresponding to (env_id, agent_id) in the array is incremented by "agent_id + env_id".
// Everything inside the if() block runs in parallel for each agent and environment.
//
extern "C"{
    __global__ void cuda_increment(
        float* data,
        int num_agents
    )
    {
        int env_id = blockIdx.x;
        int agent_id = threadIdx.x;
        if (agent_id < num_agents){
            int array_index = env_id * num_agents + agent_id;
            int increment = env_id + agent_id;
            data[array_index] += increment;
        }
    }
}
"""

from timeit import Timer


def push_random_data_and_increment_timer(
    num_runs=1,
    num_envs=2,
    num_agents=3,
    source_code=None
):
    assert source_code is not None

    def push_random_data(num_agents, num_envs):
        # Initialize the CUDA data manager
        cuda_data_manager = CUDADataManager(
            num_agents=num_agents,
            num_envs=num_envs,
            episode_length=100
        )
        # Create random data
        random_data = np.random.rand(num_envs, num_agents)
        # Push data from host to device
        data_feed = DataFeed()
        data_feed.add_data(
            name="random_data",
            data=random_data,
        )
        data_feed.add_data(
            name="num_agents",
            data=num_agents
        )
        cuda_data_manager.push_data_to_device(data_feed)
        return cuda_data_manager

    # Initialize the CUDA function manager
    def cuda_func_init():
        cuda_function_manager = CUDAFunctionManager(
            num_agents=num_agents,  # cuda_data_manager.meta_info("n_agents"),
            num_envs=num_envs  # cuda_data_manager.meta_info("n_envs")
        )
        # Load source code and initialize function
        cuda_function_manager.load_cuda_from_source_code(
            source_code,
            default_functions_included=False
        )
        cuda_function_manager.initialize_functions(["cuda_increment"])
        increment_function = cuda_function_manager._get_function("cuda_increment")
        return cuda_function_manager, increment_function

    def increment_data(cuda_data_manager, cuda_function_manager, increment_function):
        increment_function(
            cuda_data_manager.device_data("random_data"),
            cuda_data_manager.device_data("num_agents"),
            block=cuda_function_manager.block,
            grid=cuda_function_manager.grid
        )

    # set variable
    # cuda_data_manager = push_random_data(num_agents, num_envs)

    # cuda function init
    # cuda_function_manager, increment_function = cuda_func_init()

    # cuda function run
    # increment_data(cuda_data_manager, cuda_function_manager, increment_function)

    # data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(number=num_runs)

    # CUDA memory is initialized before the CUDA function manager -- no error is raised.
    cuda_data_manager = push_random_data(num_agents, num_envs)  ###
    cuda_function_manager, increment_function = cuda_func_init()  ###
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
    print(cuda_data_manager.pull_data_from_device('random_data'))
    return {
        "data push times": '0',  # data_push_time,
        "code run time": program_run_time
    }


num_runs = 1000
times = {}

for scenario in [
    (1, 1),
    (1, 100),
    (1, 1000),
    (100, 1000),
    (1000, 1000)
]:
    num_envs, num_agents = scenario
    times.update(
        {
            f"envs={num_envs}, agents={num_agents}":
                push_random_data_and_increment_timer(
                    num_runs,
                    num_envs,
                    num_agents,
                    source_code
                )
        }
    )

print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
    print(f"{key:30}: mean data push times: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s")

'''
print(cuda_data_manager._meta_info)
print(cuda_data_manager._host_data)
print(cuda_data_manager._device_data_pointer)
print(cuda_data_manager._scalar_data_list)
print(cuda_data_manager._reset_data_list)
print(cuda_data_manager._log_data_list)
print(cuda_data_manager._device_data_via_torch)
print(cuda_data_manager._shared_constants)
print(cuda_data_manager._shape)
print(cuda_data_manager._dtype)
print(tensor_on_device)
time.sleep(300)
'''

The curious part is that it does not matter how much memory is allocated before the CUDA function initialization: any allocation, no matter how small, prevents the error from recurring. That is what makes this issue puzzling.

The following code shows this:

import numpy as np

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
    CUDAFunctionManager, CUDALogController, CUDASampler, CUDAEnvironmentReset
)
from warp_drive.utils.data_feed import DataFeed

source_code = """
// A function to demonstrate how to manipulate data on the GPU.
// This function increments each entry of the random data array we pushed to the GPU before.
// Each index corresponding to (env_id, agent_id) in the array is incremented by "agent_id + env_id".
// Everything inside the if() block runs in parallel for each agent and environment.
//
extern "C"{
    __global__ void cuda_increment(
        float* data,
        int num_agents
    )
    {
        int env_id = blockIdx.x;
        int agent_id = threadIdx.x;
        if (agent_id < num_agents){
            int array_index = env_id * num_agents + agent_id;
            int increment = env_id + agent_id;
            data[array_index] += increment;
        }
    }
}
"""

from timeit import Timer


def push_random_data_and_increment_timer(
    num_runs=1,
    num_envs=2,
    num_agents=3,
    source_code=None
):
    assert source_code is not None

    def push_random_data(num_agents, num_envs):
        # Initialize the CUDA data manager
        cuda_data_manager = CUDADataManager(
            num_agents=num_agents,
            num_envs=num_envs,
            episode_length=100
        )
        # Create random data
        random_data = np.random.rand(num_envs, num_agents)
        # Push data from host to device
        data_feed = DataFeed()
        data_feed.add_data(
            name="random_data",
            data=random_data,
        )
        data_feed.add_data(
            name="num_agents",
            data=num_agents
        )
        cuda_data_manager.push_data_to_device(data_feed)
        return cuda_data_manager

    # Initialize the CUDA function manager
    def cuda_func_init():
        cuda_function_manager = CUDAFunctionManager(
            num_agents=num_agents,  # cuda_data_manager.meta_info("n_agents"),
            num_envs=num_envs  # cuda_data_manager.meta_info("n_envs")
        )
        # Load source code and initialize function
        cuda_function_manager.load_cuda_from_source_code(
            source_code,
            default_functions_included=False
        )
        cuda_function_manager.initialize_functions(["cuda_increment"])
        increment_function = cuda_function_manager._get_function("cuda_increment")
        return cuda_function_manager, increment_function

    def increment_data(cuda_data_manager, cuda_function_manager, increment_function):
        increment_function(
            cuda_data_manager.device_data("random_data"),
            cuda_data_manager.device_data("num_agents"),
            block=cuda_function_manager.block,
            grid=cuda_function_manager.grid
        )

    # set variable
    # cuda_data_manager = push_random_data(num_agents, num_envs)

    # cuda function init
    # cuda_function_manager, increment_function = cuda_func_init()

    # cuda function run
    # increment_data(cuda_data_manager, cuda_function_manager, increment_function)

    # data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(number=num_runs)

    # A tiny (1 env, 1 agent) data push before the function init is enough to avoid the error.
    push_random_data(1, 1)

    cuda_function_manager, increment_function = cuda_func_init()  ###
    cuda_data_manager = push_random_data(num_agents, num_envs)  ###
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
    print(cuda_data_manager.pull_data_from_device('random_data'))
    return {
        "data push times": '0',  # data_push_time,
        "code run time": program_run_time
    }


num_runs = 1000
times = {}

for scenario in [
    (1, 1),
    (1, 100),
    (1, 1000),
    (100, 1000),
    (1000, 1000)
]:
    num_envs, num_agents = scenario
    times.update(
        {
            f"envs={num_envs}, agents={num_agents}":
                push_random_data_and_increment_timer(
                    num_runs,
                    num_envs,
                    num_agents,
                    source_code
                )
        }
    )

print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
    print(f"{key:30}: mean data push times: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s")

'''
print(cuda_data_manager._meta_info)
print(cuda_data_manager._host_data)
print(cuda_data_manager._device_data_pointer)
print(cuda_data_manager._scalar_data_list)
print(cuda_data_manager._reset_data_list)
print(cuda_data_manager._log_data_list)
print(cuda_data_manager._device_data_via_torch)
print(cuda_data_manager._shared_constants)
print(cuda_data_manager._shape)
print(cuda_data_manager._dtype)
print(tensor_on_device)
time.sleep(300)
'''

The key lines:

    push_random_data(1, 1)

    cuda_function_manager, increment_function = cuda_func_init()  ###
    cuda_data_manager = push_random_data(num_agents, num_envs)  ###
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)

The line below is the CUDA memory allocation. Even a very small allocation like this is enough for the subsequent CUDA function initialization to run normally; if there is no CUDA memory allocation at all before the function initialization, the error above is raised.

push_random_data(1, 1)

CUDA memory initialization, i.e. the memory-allocation step:

    push_random_data(1, 1)

CUDA function initialization:
cuda_function_manager, increment_function = cuda_func_init() ###

CUDA function execution:

increment_data(cuda_data_manager, cuda_function_manager, increment_function)
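Putting the three steps together, the call order that worked in the experiments above can be summarized as the sketch below. It reuses the helper functions push_random_data, cuda_func_init and increment_data defined in the listings, so it belongs inside push_random_data_and_increment_timer:

    # 1. CUDA memory initialization first -- even a tiny dummy allocation is enough.
    push_random_data(1, 1)

    # 2. CUDA function initialization: compile the source and fetch the kernel handle.
    cuda_function_manager, increment_function = cuda_func_init()

    # 3. Push the real data and launch the kernel, then read the result back.
    cuda_data_manager = push_random_data(num_agents, num_envs)
    increment_data(cuda_data_manager, cuda_function_manager, increment_function)
    print(cuda_data_manager.pull_data_from_device('random_data'))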
