A First Read of the Triton Source Code
1. The core interface
def jit(
    fn: Optional[T] = None,
    *,
    version=None,
    do_not_specialize: Optional[Iterable[int]] = None,
    debug: Optional[bool] = None,
) -> Union[JITFunction[T], Callable[[T], JITFunction[T]]]:
The interface returns a JITFunction object, which inherits from KernelInterface:
class JITFunction(KernelInterface[T]):
    # omitted

class KernelInterface(Generic[T]):
    run: T
    def __getitem__(self, grid) -> T:
        return cast(T, functools.partial(cast(Callable, self.run), grid=grid))
A JITFunction is invoked with an extra grid argument, in the form fn[grid](*args, **kwargs).
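To make fn[grid](*args, **kwargs) concrete, here is a minimal vector-add sketch (the kernel itself is illustrative, not taken from the Triton source; it assumes a CUDA device):

import torch
import triton
import triton.language as tl

@triton.jit
def add_kernel(x_ptr, y_ptr, out_ptr, n_elements, BLOCK: tl.constexpr):
    pid = tl.program_id(axis=0)
    offsets = pid * BLOCK + tl.arange(0, BLOCK)
    mask = offsets < n_elements
    x = tl.load(x_ptr + offsets, mask=mask)
    y = tl.load(y_ptr + offsets, mask=mask)
    tl.store(out_ptr + offsets, x + y, mask=mask)

x = torch.randn(1024, device="cuda")
y = torch.randn(1024, device="cuda")
out = torch.empty_like(x)
# grid may be a tuple or a callable over the kernel's meta-parameters;
# __getitem__ above just binds it: functools.partial(self.run, grid=grid)
grid = lambda meta: (triton.cdiv(x.numel(), meta["BLOCK"]),)
add_kernel[grid](x, y, out, x.numel(), BLOCK=256)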
Within the JITFunction implementation, the core logic is the _make_launcher() function, which builds and exec's the following template function:
def {self.fn.__name__}({', '.join(self.arg_names)}, grid, num_warps=4, num_stages=3, extern_libs=None, stream=None, warmup=False, device=None):
    sig_key = {sig_keys},
    constexpr_key = {f'{constexpr_keys},' if len(constexpr_keys) > 0 else ()}
    spec_key = {f'{spec_keys},' if len(spec_keys) > 0 else ()}
    key = (version_key, sig_key, constexpr_key, spec_key, num_warps, num_stages, self.debug)
    if not extern_libs is None:
        key = (key, tuple(extern_libs.items()))
    assert num_warps > 0 and (num_warps & (num_warps - 1)) == 0, "num_warps must be a power of 2"
    if callable(grid):
        grid = grid({{{grid_args}}})
    grid_size = len(grid)
    grid_0 = grid[0]
    grid_1 = grid[1] if grid_size > 1 else 1
    grid_2 = grid[2] if grid_size > 2 else 1
    if device is None:
        device = get_current_device()
        set_current_device(device)
    if stream is None and not warmup:
        stream = get_cuda_stream(device)
    try:
        bin = cache[device][key]  # <-------- first, try to fetch the already-compiled bin for this key from self.cache
        if not warmup:
            bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared, stream, bin.cu_function, triton.compiler.CompiledKernel.launch_enter_hook, triton.compiler.CompiledKernel.launch_exit_hook, bin, {args})
        return bin
    # kernel not cached -- compile
    except KeyError:
        # build dict of constant values
        args = [{args}]
        all_args = {', '.join([f'{arg}' for arg in self.arg_names])},
        configs = self._get_config(*all_args),
        constants = self._make_constants(constexpr_key)
        constants.update({{i: None for i, arg in enumerate(all_args) if arg is None}})
        constants.update({{i: 1 for i in configs[0].equal_to_1}})
        # build kernel signature -- doesn't include specialized arguments
        signature = {{ i: self._type_of(_key_of(arg)) for i, arg in enumerate(all_args) if i not in self.constexprs }}
        # build stub signature -- includes arguments that are specialized
        for i, arg in constants.items():
            if callable(arg):
                raise TypeError(f"Callable constexpr at index {{i}} is not supported")
        if not self._call_hook(key, signature, device, constants, num_warps, num_stages, extern_libs, configs):  # <-------- on a cache miss, run triton.compile for the first time
            bin = triton.compile(self, signature=signature, device=device, constants=constants, num_warps=num_warps, num_stages=num_stages, extern_libs=extern_libs, configs=configs, debug=self.debug)
            if not warmup:
                bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared, stream, bin.cu_function, triton.compiler.CompiledKernel.launch_enter_hook, triton.compiler.CompiledKernel.launch_exit_hook, bin, *args)
            self.cache[device][key] = bin
            return bin
        return None
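One way to read the try/except above: everything folded into key decides whether a fresh compilation is needed. A runnable toy restatement of just the caching control flow (names are illustrative, not Triton's):

# Toy model of the launcher's per-device compilation cache.
cache: dict = {}  # device -> {key -> compiled kernel}

def get_or_compile(device, key, compile_fn):
    per_device = cache.setdefault(device, {})
    try:
        return per_device[key]      # hit: reuse the cached binary + stub
    except KeyError:
        binary = compile_fn()       # miss: stands in for triton.compile(...)
        per_device[key] = binary
        return binary

Because sig_key hashes argument dtypes and spec_key hashes specialization properties (e.g. pointer divisibility), calling the same kernel with different dtypes or alignments produces a different key and a new binary.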
Next, let's look at the implementation of the compile function. Its first key step is make_stub, which returns a so_path. This happens in two steps:
- Step 1: create a main.c file and write the CUDA-launcher code into it
- Step 2: call the _build function to compile it into a .so file, cache it, and return its path (so_path)
def make_stub(name, signature, constants):
    # name of files that are cached
    so_cache_key = make_so_cache_key(version_key(), signature, constants)
    so_cache_manager = CacheManager(so_cache_key)
    so_name = f"{name}.so"
    # retrieve stub from cache if it exists
    if not so_cache_manager.has_file(so_name):
        with tempfile.TemporaryDirectory() as tmpdir:
            src = generate_launcher(constants, signature)
            src_path = os.path.join(tmpdir, "main.c")  # <------- step 1
            with open(src_path, "w") as f:
                f.write(src)
            so = _build(name, src_path, tmpdir)  # <------- step 2
            with open(so, "rb") as f:
                so_cache_manager.put(f.read(), so_name, binary=True)
    return so_cache_manager._make_path(so_name)
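As an aside, CacheManager persists these artifacts on disk; in Triton 2.x the default location is ~/.triton/cache, overridable via the TRITON_CACHE_DIR environment variable (worth double-checking against your version). A quick way to see what gets cached:

import os

cache_dir = os.environ.get("TRITON_CACHE_DIR",
                           os.path.expanduser("~/.triton/cache"))
for root, _, files in os.walk(cache_dir):
    for name in files:
        # expect per-kernel directories holding the launcher .so plus
        # ttir/ttgir/llir/ptx/cubin artifacts and metadata
        print(os.path.join(root, name))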
Let's first see what step 1 writes into main.c. The main content is below; as the code shows, it is launch-side plumbing culminating in cuLaunchKernel(function, ...):
#include \"cuda.h\"
#include <stdbool.h>
#include <Python.h>
static inline void gpuAssert(CUresult code, const char *file, int line)
{{
if (code != CUDA_SUCCESS)
{{
const char* prefix = "Triton Error [CUDA]: ";
const char* str;
cuGetErrorString(code, &str);
char err[1024] = {{0}};
strcat(err, prefix);
strcat(err, str);
PyErr_SetString(PyExc_RuntimeError, err);
}}
}}
#define CUDA_CHECK(ans) {{ gpuAssert((ans), __FILE__, __LINE__); }}
static void _launch(int gridX, int gridY, int gridZ, int num_warps, int shared_memory, CUstream stream, CUfunction function, {arg_decls}) {{
void *params[] = {{ {', '.join(f"&arg{i}" for i in signature.keys() if i not in constants)} }};
if(gridX*gridY*gridZ > 0){{
CUDA_CHECK(cuLaunchKernel(function, gridX, gridY, gridZ, 32*num_warps, 1, 1, shared_memory, stream, params, 0)); // <-------- 注意此处的 function 应该就是我们原生的函数
}}
}}
typedef struct _DevicePtrInfo {{
CUdeviceptr dev_ptr;
bool valid;
}} DevicePtrInfo;
static inline DevicePtrInfo getPointer(PyObject *obj, int idx) {{
DevicePtrInfo ptr_info;
ptr_info.dev_ptr = 0;
ptr_info.valid = true;
if (PyLong_Check(obj)) {{
ptr_info.dev_ptr = PyLong_AsUnsignedLongLong(obj);
return ptr_info;
}}
if (obj == Py_None) {{
// valid nullptr
return ptr_info;
}}
PyObject *ptr = PyObject_GetAttrString(obj, "data_ptr");
if(ptr){{
PyObject *empty_tuple = PyTuple_New(0);
PyObject *ret = PyObject_Call(ptr, empty_tuple, NULL);
Py_DECREF(empty_tuple);
Py_DECREF(ptr);
if (!PyLong_Check(ret)) {{
PyErr_SetString(PyExc_TypeError, "data_ptr method of Pointer object must return 64-bit int");
ptr_info.valid = false;
return ptr_info;
}}
ptr_info.dev_ptr = PyLong_AsUnsignedLongLong(ret);
if(!ptr_info.dev_ptr)
return ptr_info;
uint64_t dev_ptr;
int status = cuPointerGetAttribute(&dev_ptr, CU_POINTER_ATTRIBUTE_DEVICE_POINTER, ptr_info.dev_ptr);
if (status == CUDA_ERROR_INVALID_VALUE) {{
PyErr_Format(PyExc_ValueError,
"Pointer argument (at %d) cannot be accessed from Triton (cpu tensor?)", idx);
ptr_info.valid = false;
}}
ptr_info.dev_ptr = dev_ptr;
Py_DECREF(ret); // Thanks ChatGPT!
return ptr_info;
}}
PyErr_SetString(PyExc_TypeError, "Pointer argument must be either uint64 or have data_ptr method");
return ptr_info;
}}
static PyObject* launch(PyObject* self, PyObject* args) {{
int gridX, gridY, gridZ;
uint64_t _stream;
uint64_t _function;
int num_warps;
int shared_memory;
PyObject *launch_enter_hook = NULL;
PyObject *launch_exit_hook = NULL;
PyObject *compiled_kernel = NULL;
{' '.join([f"{_extracted_type(ty)} _arg{i}; " for i, ty in signature.items()])}
if(!PyArg_ParseTuple(args, \"{format}\", &gridX, &gridY, &gridZ, &num_warps, &shared_memory, &_stream, &_function, &launch_enter_hook, &launch_exit_hook, &compiled_kernel, {', '.join(f"&_arg{i}" for i, ty in signature.items())})) {{
return NULL;
}}
if (launch_enter_hook != Py_None) {{
PyObject_CallObject(launch_enter_hook, args);
}}
// raise exception asap
{"; ".join([f"DevicePtrInfo ptr_info{i} = getPointer(_arg{i}, {i}); if (!ptr_info{i}.valid) return NULL;" if ty[0] == "*" else "" for i, ty in signature.items()])};
_launch(gridX, gridY, gridZ, num_warps, shared_memory, (CUstream)_stream, (CUfunction)_function, {', '.join(f"ptr_info{i}.dev_ptr" if ty[0]=="*" else f"_arg{i}"for i, ty in signature.items())});
if (launch_exit_hook != Py_None) {{
PyObject_CallObject(launch_exit_hook, args);
}}
if(PyErr_Occurred()) {{
return NULL;
}}
// return None
Py_INCREF(Py_None);
return Py_None;
}}
static PyMethodDef ModuleMethods[] = {{
{{"launch", launch, METH_VARARGS, "Entry point for all kernels with this signature"}}, # <-------- 与 CompiledKernel 中的 self.c_wrapper = getattr(mod, "launch") 相呼应
{{NULL, NULL, 0, NULL}} // sentinel
}};
static struct PyModuleDef ModuleDef = {{
PyModuleDef_HEAD_INIT,
\"__triton_launcher\",
NULL, //documentation
-1, //size
ModuleMethods
}};
PyMODINIT_FUNC PyInit___triton_launcher(void) {{
PyObject *m = PyModule_Create(&ModuleDef);
if(m == NULL) {{
return NULL;
}}
PyModule_AddFunctions(m, ModuleMethods);
return m;
}}
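Reading the PyArg_ParseTuple format off the stub, the positional layout that bin.c_wrapper must be called with falls out directly (a sketch annotating the call from the launcher template above):

# Format string layout: "iii"  -> gridX, gridY, gridZ
#                       "ii"   -> num_warps, shared_memory
#                       "KK"   -> _stream, _function (raw uint64 handles)
#                       "OOO"  -> launch_enter_hook, launch_exit_hook, compiled_kernel
#                       then one format code per kernel argument in `signature`
bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared, stream,
              bin.cu_function,
              triton.compiler.CompiledKernel.launch_enter_hook,
              triton.compiler.CompiledKernel.launch_exit_hook,
              bin, *args)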
Step 2 then launches a subprocess and compiles the source into a .so with the CC command plus setuptools. (Note: setuptools appears to be a fallback; it is only reached when the CC compilation fails.)
def _build(name, src, srcdir):
    cuda_lib_dirs = libcuda_dirs()
    base_dir = os.path.join(os.path.dirname(__file__), os.path.pardir)
    cuda_path = os.path.join(base_dir, "third_party", "cuda")
    cu_include_dir = os.path.join(cuda_path, "include")
    triton_include_dir = os.path.join(os.path.dirname(__file__), "include")
    cuda_header = os.path.join(cu_include_dir, "cuda.h")
    triton_cuda_header = os.path.join(triton_include_dir, "cuda.h")
    cu_include_dir = triton_include_dir
    cc = os.environ.get("CC")
    # (resolution of cc, py_include_dir and the output path `so` omitted)
    cc_cmd = [cc, src, "-O3", f"-I{cu_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", "-lcuda", "-o", so]
    cc_cmd += [f"-L{dir}" for dir in cuda_lib_dirs]
    ret = subprocess.check_call(cc_cmd)
    if ret == 0:
        return so
    # fallback on setuptools
    extra_compile_args = []
    library_dirs = cuda_lib_dirs
    include_dirs = [srcdir, cu_include_dir]
    libraries = ['cuda']
    # extra arguments
    extra_link_args = []
    # create extension module
    ext = setuptools.Extension(
        name=name,
        language='c',
        sources=[src],
        include_dirs=include_dirs,
        extra_compile_args=extra_compile_args + ['-O3'],
        extra_link_args=extra_link_args,
        library_dirs=library_dirs,
        libraries=libraries,
    )
    # build extension module
    args = ['build_ext']
    args.append('--build-temp=' + srcdir)
    args.append('--build-lib=' + srcdir)
    args.append('-q')
    args = dict(
        name=name,
        ext_modules=[ext],
        script_args=args,
    )
    with quiet():
        setuptools.setup(**args)
    return so
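Concretely, the happy path boils down to a single compiler invocation shaped like this (all paths are illustrative placeholders, not real values):

# Illustrative expansion of cc_cmd:
cc_cmd = [
    "gcc", "/tmp/tmpdir/main.c", "-O3",
    "-I/path/to/triton/include",          # cu_include_dir (bundled cuda.h)
    "-I/usr/include/python3.10",          # py_include_dir
    "-I/tmp/tmpdir",                      # srcdir
    "-shared", "-fPIC", "-lcuda",
    "-o", "/tmp/tmpdir/main.so",
    "-L/usr/lib/x86_64-linux-gnu",        # one -L per libcuda_dirs() entry
]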
Next, compile moves on to its second phase: converting the kernel through several IR stages:
def compile(fn, **kwargs):
    # ... (argument handling omitted)
    asm = dict()
    module = fn
    first_stage = list(stages.keys()).index(ext)
    # run compilation pipeline and populate metadata
    for ir, (parse, compile_kernel) in list(stages.items())[first_stage:]:
        # stage 1: ast_to_ttir
        if ir == ext:
            next_module = parse(fn)
        else:
            # subsequent stages: ttir_to_ttgir, ttgir_to_llir, llir_to_ptx, ptx_to_cubin
            next_module = compile_kernel(module)
        if ir == "llir" and "shared" not in metadata:
            metadata["shared"] = _triton.get_shared_memory_size(module)
        if ir == "ptx":
            metadata["name"] = get_kernel_name(next_module, pattern='// .globl')
        if ir == "cubin":
            asm[ir] = next_module
        else:
            asm[ir] = str(next_module)
        module = next_module
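The loop is essentially a left fold over the stage table: each stage consumes the previous module and produces the next IR, which is also recorded in asm. A runnable toy model (the stage names are real; the transformations are stand-ins):

# Toy pipeline mirroring compile()'s stage loop.
stages = {
    "ast":   lambda src: src,             # parsed by CodeGenerator in reality
    "ttir":  lambda src: f"ttir({src})",  # ast_to_ttir
    "ttgir": lambda src: f"ttgir({src})", # ttir_to_ttgir
    "llir":  lambda src: f"llir({src})",  # ttgir_to_llir
    "ptx":   lambda src: f"ptx({src})",   # llir_to_ptx
    "cubin": lambda src: f"cubin({src})", # ptx_to_cubin
}
asm = {}
module = "kernel_ast"
for ir, compile_kernel in stages.items():
    module = compile_kernel(module)
    asm[ir] = module
print(asm["cubin"])  # cubin(ptx(llir(ttgir(ttir(kernel_ast)))))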
The key IR conversions correspond to the following functions:
- ast_to_ttir: CodeGenerator(ast.NodeVisitor) does the main work and returns an mlir::ModuleOp object pybind-exported from the C++ side
- ttir_to_ttgir: the pm variable is a pybind-exported C++ mlir::PassManager object
- ttgir_to_llir: the core function gpu_to_llvmir is likewise the pybind-exported C++ function translateTritonGPUToLLVMIR
- llir_to_ptx: the core function translate_llvmir_to_ptx is implemented mainly by the C++-side translateLLVMIRToPTX
- ptx_to_cubin: the core function compile_ptx_to_cubin is implemented mainly on the C++ side
def ast_to_ttir(fn, signature, specialization, constants, debug):
    # ... (setup of arg_types / gscope / all_constants / new_attrs omitted)
    context = ir.context()
    context.load_triton()
    prototype = language.function_type([], arg_types)
    generator = CodeGenerator(context, prototype, gscope=gscope, constants=all_constants,
                              function_name=function_name, attributes=new_attrs,
                              is_kernel=True, debug=debug)
    generator.visit(fn.parse())
    ret = generator.module  # <------- created by ir.builder(context).create_module()
    # module takes ownership of the context
    ret.context = context
    return ret
def ttir_to_ttgir(mod, num_warps):
    pm = _triton.ir.pass_manager(mod.context)
    pm.add_convert_triton_to_tritongpu_pass(num_warps)
    pm.run(mod)
    return mod

def ttgir_to_llir(mod, extern_libs, arch):
    if extern_libs:
        _add_external_libs(mod, extern_libs)
    # TODO: separate tritongpu_to_llvmir for different backends
    if _is_cuda(arch):
        return _triton.translate_triton_gpu_to_llvmir(mod, arch, False)
    else:
        return _triton.translate_triton_gpu_to_llvmir(mod, 0, True)

def llir_to_ptx(mod: Any, arch: int, ptx_version: int = None) -> str:
    if ptx_version is None:
        _, cuda_version = path_to_ptxas()
        ptx_version = ptx_get_version(cuda_version)
    return _triton.translate_llvmir_to_ptx(mod, arch, ptx_version)

def ptx_to_cubin(ptx: str, arch: int):
    ptxas, _ = path_to_ptxas()
    return _triton.compile_ptx_to_cubin(ptx, ptxas, arch)
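Since the launcher returns the CompiledKernel (bin), every intermediate IR collected into asm is inspectable after a single call. A quick sketch reusing the earlier add_kernel example:

bin = add_kernel[grid](x, y, out, x.numel(), BLOCK=256)
print(bin.asm.keys())        # expect something like: ttir, ttgir, llir, ptx, cubin
print(bin.asm["ttir"])       # human-readable Triton IR
print(bin.asm["ptx"][:400])  # the PTX produced by llir_to_ptx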
For reference, here is the core C++ function behind ttgir_to_llir:
std::unique_ptr<llvm::Module>
translateTritonGPUToLLVMIR(llvm::LLVMContext *llvmContext,
                           mlir::ModuleOp module, int computeCapability,
                           bool isROCM) {
  mlir::PassManager pm(module->getContext());
  applyPassManagerCLOptions(pm);
  auto printingFlags = mlir::OpPrintingFlags();
  printingFlags.elideLargeElementsAttrs(16);
  pm.enableIRPrinting(
      /*shouldPrintBeforePass=*/nullptr,
      /*shouldPrintAfterPass=*/
      [](mlir::Pass *pass, mlir::Operation *) {
        return ::triton::tools::getBoolEnv("MLIR_ENABLE_DUMP");
      },
      /*printModuleScope=*/false,
      /*printAfterOnlyOnChange=*/true,
      /*printAfterOnlyOnFailure*/ false, llvm::dbgs(), printingFlags);
  pm.addPass(mlir::createConvertSCFToCFPass());
  pm.addPass(mlir::createConvertIndexToLLVMPass());
  pm.addPass(createConvertTritonGPUToLLVMPass(computeCapability, isROCM));
  pm.addPass(mlir::createArithToLLVMConversionPass());
  pm.addPass(mlir::createCanonicalizerPass());
  // Simplify the IR
  pm.addPass(mlir::createCSEPass());
  pm.addPass(mlir::createSymbolDCEPass());
  if (failed(pm.run(module))) {
    llvm::errs() << "Pass execution failed";
    return nullptr;
  }
  auto llvmIR = translateLLVMToLLVMIR(llvmContext, module, isROCM);
  if (!llvmIR) {
    llvm::errs() << "Translate to LLVM IR failed";
    return nullptr;
  }
  if (::triton::tools::getBoolEnv("LLVM_IR_ENABLE_DUMP")) {
    std::string mod_string;
    std::unique_ptr<llvm::raw_string_ostream> ir_ss(
        new llvm::raw_string_ostream(mod_string));
    llvmIR->print(*ir_ss, nullptr);
    std::cout << "// -----// LLVM IR Dump //----- //\n"
              << mod_string << std::endl;
  }
  return llvmIR;
}
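Both debug hooks visible above are driven by environment variables, so you can watch the pass pipeline without touching C++. Set them before the first compilation, so that the cache does not short-circuit the pipeline:

import os

# Checked by pm.enableIRPrinting above: dump IR after each MLIR pass.
os.environ["MLIR_ENABLE_DUMP"] = "1"
# Checked after translateLLVMToLLVMIR above: dump the final LLVM IR module.
os.environ["LLVM_IR_ENABLE_DUMP"] = "1"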
The core code of translateLLVMIRToPTX, in turn, is:
std::string translateLLVMIRToPTX(llvm::Module &module, int cc, int version) {
  // LLVM version in use may not officially support target hardware.
  // Supported versions for LLVM 14 are here:
  // https://github.com/llvm/llvm-project/blob/f28c006a5895fc0e329fe15fead81e37457cb1d1/clang/include/clang/Basic/BuiltinsNVPTX.def
  int maxPTX = std::min(80, version);
  int maxCC = std::min(90, cc);
  // options
  auto options = llvm::cl::getRegisteredOptions();
  auto *shortPtr =
      static_cast<llvm::cl::opt<bool> *>(options["nvptx-short-ptr"]);
  assert(shortPtr);
  shortPtr->setValue(true);
  std::string sm = cc == 90 ? "sm_90a" : "sm_" + std::to_string(cc);
  // max PTX version
  int ptxMajor = maxPTX / 10;
  int ptxMinor = maxPTX % 10;
  // create
  std::string triple = "nvptx64-nvidia-cuda";
  std::string proc = "sm_" + std::to_string(maxCC);
  std::string layout = "";
  std::string features = "";
  // std::string features = "+ptx" + std::to_string(maxPTX);
  initLLVM();
  // verify and store llvm
  llvm::legacy::PassManager pm;
  pm.add(llvm::createVerifierPass());
  pm.run(module);
  // module->print(llvm::outs(), nullptr);
  // create machine
  module.setTargetTriple(triple);
  std::string error;
  auto target =
      llvm::TargetRegistry::lookupTarget(module.getTargetTriple(), error);
  llvm::TargetOptions opt;
  opt.AllowFPOpFusion = llvm::FPOpFusion::Fast;
  opt.UnsafeFPMath = false;
  opt.NoInfsFPMath = false;
  opt.NoNaNsFPMath = true;
  llvm::TargetMachine *machine = target->createTargetMachine(
      module.getTargetTriple(), proc, features, opt, llvm::Reloc::PIC_,
      std::nullopt, llvm::CodeGenOpt::Aggressive);
  // set data layout
  if (layout.empty())
    module.setDataLayout(machine->createDataLayout());
  else
    module.setDataLayout(layout);
  // emit machine code
  std::string result;
  {
    llvm::raw_string_ostream stream(result);
    llvm::buffer_ostream pstream(stream);
    for (llvm::Function &f : module.functions())
      f.addFnAttr(llvm::Attribute::AlwaysInline);
    llvm::legacy::PassManager pass;
    // emit
    machine->addPassesToEmitFile(pass, pstream, nullptr,
                                 llvm::CodeGenFileType::CGFT_AssemblyFile);
    pass.run(module);
  }
  // post-process
  findAndReplace(result, ".version", "\n",
                 ".version " + std::to_string(ptxMajor) + "." +
                     std::to_string(ptxMinor) + "\n");
  findAndReplace(result, ".target", "\n", ".target " + sm + "\n");
  while (findAndReplace(result, "\t// begin inline asm", "\n", ""))
    ;
  while (findAndReplace(result, "\t// end inline asm", "\n", ""))
    ;
  return result;
}
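The final post-processing is plain string surgery on the emitted assembly. A Python restatement of what findAndReplace appears to do (semantics inferred from the call sites above, not from the C++ helper itself):

def find_and_replace(s: str, begin: str, end: str, replacement: str):
    """Replace the span from the first `begin` through the next `end` (inferred semantics)."""
    start = s.find(begin)
    if start == -1:
        return s, False
    stop = s.find(end, start) + len(end)
    return s[:start] + replacement + s[stop:], True

ptx = ".version 7.4\n.target sm_80\n// kernel body ...\n"
ptx, _ = find_and_replace(ptx, ".version", "\n", ".version 8.0\n")   # clamp the PTX version line
ptx, _ = find_and_replace(ptx, ".target", "\n", ".target sm_90a\n")  # rewrite the target line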
With the cubin in hand, the last step of compile is to return a CompiledKernel(fn, so_path, metadata, asm) handle object; the remaining logic lives in CompiledKernel.
Recall that in the jit launcher at the top, the statement that ultimately executes the kernel is:
bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared, stream, bin.cu_function, triton.compiler.CompiledKernel.launch_enter_hook, triton.compiler.CompiledKernel.launch_exit_hook, bin, *args)
that is, the CompiledKernel.c_wrapper function:
class CompiledKernel:
    def __init__(self, fn, so_path, metadata, asm):
        # initialize launcher
        import importlib.util
        spec = importlib.util.spec_from_file_location("__triton_launcher", so_path)
        mod = importlib.util.module_from_spec(spec)
        self.fn = fn
        spec.loader.exec_module(mod)
        self.c_wrapper = getattr(mod, "launch")  # <-------- here it is
The so_path here is exactly the path of the launcher .so compiled in make_stub.
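Put differently, CompiledKernel just imports the stub as a regular CPython extension module. The equivalent standalone snippet (the so_path argument is a hypothetical cache path):

import importlib.util

def load_launcher(so_path: str):
    # Mirror CompiledKernel.__init__: import the stub .so as a module
    spec = importlib.util.spec_from_file_location("__triton_launcher", so_path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod.launch  # the METH_VARARGS "launch" registered in ModuleMethods

# launch = load_launcher("/path/to/cache/somehash/add_kernel.so")  # hypothetical path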