1. The core interface

def jit(
    fn: Optional[T] = None,
    *,
    version=None,
    do_not_specialize: Optional[Iterable[int]] = None,
    debug: Optional[bool] = None,
) -> Union[JITFunction[T], Callable[[T], JITFunction[T]]]:

The interface returns a JITFunction object, which inherits from KernelInterface:

class JITFunction(KernelInterface[T]):
    ...  # omitted

class KernelInterface(Generic[T]):
    run: T

    def __getitem__(self, grid) -> T:
        return cast(T, functools.partial(cast(Callable, self.run), grid=grid))

A JITFunction is invoked with an extra grid argument, in the form fn[grid](*args, **kwargs): indexing with [grid] binds grid onto run via functools.partial, as shown above.
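To make the calling convention concrete, here is a minimal usage sketch (the add_kernel body is illustrative, not taken from the text):

import torch
import triton
import triton.language as tl

@triton.jit
def add_kernel(x_ptr, y_ptr, out_ptr, n_elements, BLOCK: tl.constexpr):
    pid = tl.program_id(axis=0)
    offsets = pid * BLOCK + tl.arange(0, BLOCK)
    mask = offsets < n_elements
    x = tl.load(x_ptr + offsets, mask=mask)
    y = tl.load(y_ptr + offsets, mask=mask)
    tl.store(out_ptr + offsets, x + y, mask=mask)

x = torch.randn(4096, device="cuda")
y = torch.randn(4096, device="cuda")
out = torch.empty_like(x)
# grid can be a tuple, or a callable that receives the launch kwargs:
grid = lambda meta: (triton.cdiv(x.numel(), meta["BLOCK"]),)
add_kernel[grid](x, y, out, x.numel(), BLOCK=1024)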

Within the JITFunction implementation, the core logic lives in _make_launcher(), which builds and exec's the following templated launcher function:

def {self.fn.__name__}({', '.join(self.arg_names)}, grid, num_warps=4, num_stages=3, extern_libs=None, stream=None, warmup=False, device=None):
    sig_key = {sig_keys},
    constexpr_key = {f'{constexpr_keys},' if len(constexpr_keys) > 0 else ()}
    spec_key = {f'{spec_keys},' if len(spec_keys) > 0 else ()}
    key = (version_key, sig_key, constexpr_key, spec_key, num_warps, num_stages, self.debug)
    if not extern_libs is None:
        key = (key, tuple(extern_libs.items()))
    assert num_warps > 0 and (num_warps & (num_warps - 1)) == 0, "num_warps must be a power of 2"
    if callable(grid):
        grid = grid({{{grid_args}}})
    grid_size = len(grid)
    grid_0 = grid[0]
    grid_1 = grid[1] if grid_size > 1 else 1
    grid_2 = grid[2] if grid_size > 2 else 1
    if device is None:
        device = get_current_device()
        set_current_device(device)
    if stream is None and not warmup:
        stream = get_cuda_stream(device)
    try:
        bin = cache[device][key]  # <-- first, try to fetch a binary already compiled for this key from self.cache
        if not warmup:
            bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared, stream, bin.cu_function, triton.compiler.CompiledKernel.launch_enter_hook, triton.compiler.CompiledKernel.launch_exit_hook, bin, {args})
        return bin
    # kernel not cached -- compile
    except KeyError:
        # build dict of constant values
        args = [{args}]
        all_args = {', '.join([f'{arg}' for arg in self.arg_names])},
        configs = self._get_config(*all_args),
        constants = self._make_constants(constexpr_key)
        constants.update({{i: None for i, arg in enumerate(all_args) if arg is None}})
        constants.update({{i: 1 for i in configs[0].equal_to_1}})
        # build kernel signature -- doesn't include specialized arguments
        signature = {{ i: self._type_of(_key_of(arg)) for i, arg in enumerate(all_args) if i not in self.constexprs }}
        # build stub signature -- includes arguments that are specialized
        for i, arg in constants.items():
            if callable(arg):
                raise TypeError(f"Callable constexpr at index {{i}} is not supported")
        if not self._call_hook(key, signature, device, constants, num_warps, num_stages, extern_libs, configs):  # <-- on a cache miss, invoke triton.compile for the first time
            bin = triton.compile(self, signature=signature, device=device, constants=constants, num_warps=num_warps, num_stages=num_stages, extern_libs=extern_libs, configs=configs, debug=self.debug)
            if not warmup:
                bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared, stream, bin.cu_function, triton.compiler.CompiledKernel.launch_enter_hook, triton.compiler.CompiledKernel.launch_exit_hook, bin, *args)
            self.cache[device][key] = bin
            return bin
        return None
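Stripped of the templating, the control flow reduces to straightforward per-device memoization. A minimal sketch (illustrative names, not Triton's actual code):

from collections import defaultdict

cache = defaultdict(dict)  # device -> {key: compiled binary}

def launch(device, key, compile_fn, run_fn):
    try:
        bin = cache[device][key]   # hit: reuse the compiled binary
    except KeyError:
        bin = compile_fn()         # miss: compile once ...
        cache[device][key] = bin   # ... and memoize for later launches
    run_fn(bin)
    return bin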

Next, let's look at the implementation of the compile function. Its first key step is make_stub, which returns a so_path. This splits into two steps:

  • step 1: create a main.c file and write the CUDA-launcher code into it
  • step 2: call the _build function to compile it into a .so, cache it, and return its so_path

def make_stub(name, signature, constants):
    # name of files that are cached
    so_cache_key = make_so_cache_key(version_key(), signature, constants)
    so_cache_manager = CacheManager(so_cache_key)
    so_name = f"{name}.so"
    # retrieve stub from cache if it exists
    if not so_cache_manager.has_file(so_name):
        with tempfile.TemporaryDirectory() as tmpdir:
            src = generate_launcher(constants, signature)
            src_path = os.path.join(tmpdir, "main.c")  # <------- step 1
            with open(src_path, "w") as f:
                f.write(src)
            so = _build(name, src_path, tmpdir)  # <------- step 2
            with open(so, "rb") as f:
                so_cache_manager.put(f.read(), so_name, binary=True)
    return so_cache_manager._make_path(so_name)
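Note that the stub cache key depends only on the Triton version, the argument signature, and the constants, so one launcher .so can be shared by every kernel with the same calling convention. A rough sketch of the idea behind make_so_cache_key (simplified and hypothetical, not Triton's exact code):

import hashlib

def so_cache_key_sketch(version_hash, signature, constants):
    # all pointer types hash alike: the stub cares about the launch ABI, not the kernel body
    sig = "".join("ptr" if v.startswith("*") else v for v in signature.values())
    return hashlib.md5(f"{version_hash}-{sig}{constants}".encode("utf-8")).hexdigest()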

Let's first look at what step 1 writes into main.c. The main content is below; as the code shows, this is launch-side plumbing that ultimately calls cuLaunchKernel(function, ...):

#include \"cuda.h\"
#include <stdbool.h>
#include <Python.h> static inline void gpuAssert(CUresult code, const char *file, int line)
{{
if (code != CUDA_SUCCESS)
{{
const char* prefix = "Triton Error [CUDA]: ";
const char* str;
cuGetErrorString(code, &str);
char err[1024] = {{0}};
strcat(err, prefix);
strcat(err, str);
PyErr_SetString(PyExc_RuntimeError, err);
}}
}} #define CUDA_CHECK(ans) {{ gpuAssert((ans), __FILE__, __LINE__); }} static void _launch(int gridX, int gridY, int gridZ, int num_warps, int shared_memory, CUstream stream, CUfunction function, {arg_decls}) {{
void *params[] = {{ {', '.join(f"&arg{i}" for i in signature.keys() if i not in constants)} }};
if(gridX*gridY*gridZ > 0){{
CUDA_CHECK(cuLaunchKernel(function, gridX, gridY, gridZ, 32*num_warps, 1, 1, shared_memory, stream, params, 0)); // <-------- 注意此处的 function 应该就是我们原生的函数
}}
}} typedef struct _DevicePtrInfo {{
CUdeviceptr dev_ptr;
bool valid;
}} DevicePtrInfo; static inline DevicePtrInfo getPointer(PyObject *obj, int idx) {{
DevicePtrInfo ptr_info;
ptr_info.dev_ptr = 0;
ptr_info.valid = true;
if (PyLong_Check(obj)) {{
ptr_info.dev_ptr = PyLong_AsUnsignedLongLong(obj);
return ptr_info;
}}
if (obj == Py_None) {{
// valid nullptr
return ptr_info;
}}
PyObject *ptr = PyObject_GetAttrString(obj, "data_ptr");
if(ptr){{
PyObject *empty_tuple = PyTuple_New(0);
PyObject *ret = PyObject_Call(ptr, empty_tuple, NULL);
Py_DECREF(empty_tuple);
Py_DECREF(ptr);
if (!PyLong_Check(ret)) {{
PyErr_SetString(PyExc_TypeError, "data_ptr method of Pointer object must return 64-bit int");
ptr_info.valid = false;
return ptr_info;
}}
ptr_info.dev_ptr = PyLong_AsUnsignedLongLong(ret);
if(!ptr_info.dev_ptr)
return ptr_info;
uint64_t dev_ptr;
int status = cuPointerGetAttribute(&dev_ptr, CU_POINTER_ATTRIBUTE_DEVICE_POINTER, ptr_info.dev_ptr);
if (status == CUDA_ERROR_INVALID_VALUE) {{
PyErr_Format(PyExc_ValueError,
"Pointer argument (at %d) cannot be accessed from Triton (cpu tensor?)", idx);
ptr_info.valid = false;
}}
ptr_info.dev_ptr = dev_ptr;
Py_DECREF(ret); // Thanks ChatGPT!
return ptr_info;
}}
PyErr_SetString(PyExc_TypeError, "Pointer argument must be either uint64 or have data_ptr method");
return ptr_info;
}} static PyObject* launch(PyObject* self, PyObject* args) {{
int gridX, gridY, gridZ;
uint64_t _stream;
uint64_t _function;
int num_warps;
int shared_memory;
PyObject *launch_enter_hook = NULL;
PyObject *launch_exit_hook = NULL;
PyObject *compiled_kernel = NULL;
{' '.join([f"{_extracted_type(ty)} _arg{i}; " for i, ty in signature.items()])}
if(!PyArg_ParseTuple(args, \"{format}\", &gridX, &gridY, &gridZ, &num_warps, &shared_memory, &_stream, &_function, &launch_enter_hook, &launch_exit_hook, &compiled_kernel, {', '.join(f"&_arg{i}" for i, ty in signature.items())})) {{
return NULL;
}} if (launch_enter_hook != Py_None) {{
PyObject_CallObject(launch_enter_hook, args);
}} // raise exception asap
{"; ".join([f"DevicePtrInfo ptr_info{i} = getPointer(_arg{i}, {i}); if (!ptr_info{i}.valid) return NULL;" if ty[0] == "*" else "" for i, ty in signature.items()])};
_launch(gridX, gridY, gridZ, num_warps, shared_memory, (CUstream)_stream, (CUfunction)_function, {', '.join(f"ptr_info{i}.dev_ptr" if ty[0]=="*" else f"_arg{i}"for i, ty in signature.items())}); if (launch_exit_hook != Py_None) {{
PyObject_CallObject(launch_exit_hook, args);
}} if(PyErr_Occurred()) {{
return NULL;
}}
// return None
Py_INCREF(Py_None);
return Py_None;
}} static PyMethodDef ModuleMethods[] = {{
{{"launch", launch, METH_VARARGS, "Entry point for all kernels with this signature"}}, # <-------- 与 CompiledKernel 中的 self.c_wrapper = getattr(mod, "launch") 相呼应
{{NULL, NULL, 0, NULL}} // sentinel
}}; static struct PyModuleDef ModuleDef = {{
PyModuleDef_HEAD_INIT,
\"__triton_launcher\",
NULL, //documentation
-1, //size
ModuleMethods
}}; PyMODINIT_FUNC PyInit___triton_launcher(void) {{
PyObject *m = PyModule_Create(&ModuleDef);
if(m == NULL) {{
return NULL;
}}
PyModule_AddFunctions(m, ModuleMethods);
return m;
}}

Step 2 then spawns a subprocess that compiles the source into a .so with the CC command. (Note: setuptools appears to be a fallback strategy, reached only if the CC compilation fails.)

def _build(name, src, srcdir):
    cuda_lib_dirs = libcuda_dirs()
    base_dir = os.path.join(os.path.dirname(__file__), os.path.pardir)
    cuda_path = os.path.join(base_dir, "third_party", "cuda")

    cu_include_dir = os.path.join(cuda_path, "include")
    triton_include_dir = os.path.join(os.path.dirname(__file__), "include")
    cuda_header = os.path.join(cu_include_dir, "cuda.h")
    triton_cuda_header = os.path.join(triton_include_dir, "cuda.h")
    cu_include_dir = triton_include_dir
    # (definitions of `so` and `py_include_dir`, plus gcc/clang auto-detection, elided here)
    cc = os.environ.get("CC")
    cc_cmd = [cc, src, "-O3", f"-I{cu_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", "-lcuda", "-o", so]
    cc_cmd += [f"-L{dir}" for dir in cuda_lib_dirs]
    ret = subprocess.check_call(cc_cmd)
    if ret == 0:
        return so
    # fallback on setuptools
    extra_compile_args = []
    library_dirs = cuda_lib_dirs
    include_dirs = [srcdir, cu_include_dir]
    libraries = ['cuda']
    # extra arguments
    extra_link_args = []
    # create extension module
    ext = setuptools.Extension(
        name=name,
        language='c',
        sources=[src],
        include_dirs=include_dirs,
        extra_compile_args=extra_compile_args + ['-O3'],
        extra_link_args=extra_link_args,
        library_dirs=library_dirs,
        libraries=libraries,
    )
    # build extension module
    args = ['build_ext']
    args.append('--build-temp=' + srcdir)
    args.append('--build-lib=' + srcdir)
    args.append('-q')
    args = dict(
        name=name,
        ext_modules=[ext],
        script_args=args,
    )
    with quiet():
        setuptools.setup(**args)
    return so

The second phase of compile then runs: the conversion pipeline across several IR stages:

def compile(xxx):
    # ... (omitted)
    asm = dict()
    module = fn
    first_stage = list(stages.keys()).index(ext)
    # run compilation pipeline and populate metadata
    for ir, (parse, compile_kernel) in list(stages.items())[first_stage:]:
        # stage 1: ast_to_ttir
        if ir == ext:
            next_module = parse(fn)
        else:
            # stages 2, 3, 4
            next_module = compile_kernel(module)
        if ir == "llir" and "shared" not in metadata:
            metadata["shared"] = _triton.get_shared_memory_size(module)
        if ir == "ptx":
            metadata["name"] = get_kernel_name(next_module, pattern='// .globl')
        if ir == "cubin":
            asm[ir] = next_module
        else:
            asm[ir] = str(next_module)
        module = next_module
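The asm dict populated here ends up on the CompiledKernel handle, so in Triton 2.x you can inspect every intermediate IR after a launch. A quick sketch, reusing the add_kernel example from earlier:

compiled = add_kernel[(4,)](x, y, out, x.numel(), BLOCK=1024)  # the launcher returns the CompiledKernel bin
print(list(compiled.asm.keys()))   # expect entries such as 'ttir', 'ttgir', 'llir', 'ptx', 'cubin'
print(compiled.asm["ptx"][:300])   # first lines of the generated PTX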

The IR conversions at each major stage map to the following functions:

  • ast_to_ttir: CodeGenerator(ast.NodeVisitor) does the heavy lifting and returns a pybind'd C++ mlir::ModuleOp object
  • ttir_to_ttgir: the pm variable is a pybind'd C++ mlir::PassManager object
  • ttgir_to_llir: the core function gpu_to_llvmir is likewise the pybind'd C++ translateTritonGPUToLLVMIR function
  • llir_to_ptx: the core function translate_llvmir_to_ptx is implemented mainly by the C++ translateLLVMIRToPTX function
  • ptx_to_cubin: the core function compile_ptx_to_cubin is implemented mainly on the C++ side
def ast_to_ttir(fn, signature, specialization, constants, debug):
    context = ir.context()
    context.load_triton()
    prototype = language.function_type([], arg_types)
    generator = CodeGenerator(context, prototype, gscope=gscope, constants=all_constants,
                              function_name=function_name, attributes=new_attrs,
                              is_kernel=True, debug=debug)
    generator.visit(fn.parse())
    ret = generator.module  # <------- created by ir.builder(context).create_module()
    # module takes ownership of the context
    ret.context = context
    return ret

def ttir_to_ttgir(mod, num_warps):
    pm = _triton.ir.pass_manager(mod.context)
    pm.add_convert_triton_to_tritongpu_pass(num_warps)
    pm.run(mod)
    return mod

def ttgir_to_llir(mod, extern_libs, arch):
    if extern_libs:
        _add_external_libs(mod, extern_libs)
    # TODO: separate tritongpu_to_llvmir for different backends
    if _is_cuda(arch):
        return _triton.translate_triton_gpu_to_llvmir(mod, arch, False)
    else:
        return _triton.translate_triton_gpu_to_llvmir(mod, 0, True)

def llir_to_ptx(mod: Any, arch: int, ptx_version: int = None) -> str:
    if ptx_version is None:
        _, cuda_version = path_to_ptxas()
        ptx_version = ptx_get_version(cuda_version)
    return _triton.translate_llvmir_to_ptx(mod, arch, ptx_version)

def ptx_to_cubin(ptx: str, arch: int):
    ptxas, _ = path_to_ptxas()
    return _triton.compile_ptx_to_cubin(ptx, ptxas, arch)

As an extra, here is the core function behind ttgir_to_llir:

std::unique_ptr<llvm::Module>
translateTritonGPUToLLVMIR(llvm::LLVMContext *llvmContext,
                           mlir::ModuleOp module, int computeCapability,
                           bool isROCM) {
  mlir::PassManager pm(module->getContext());
  applyPassManagerCLOptions(pm);
  auto printingFlags = mlir::OpPrintingFlags();
  printingFlags.elideLargeElementsAttrs(16);
  pm.enableIRPrinting(
      /*shouldPrintBeforePass=*/nullptr,
      /*shouldPrintAfterPass=*/
      [](mlir::Pass *pass, mlir::Operation *) {
        return ::triton::tools::getBoolEnv("MLIR_ENABLE_DUMP");
      },
      /*printModuleScope=*/false,
      /*printAfterOnlyOnChange=*/true,
      /*printAfterOnlyOnFailure*/ false, llvm::dbgs(), printingFlags);

  pm.addPass(mlir::createConvertSCFToCFPass());
  pm.addPass(mlir::createConvertIndexToLLVMPass());
  pm.addPass(createConvertTritonGPUToLLVMPass(computeCapability, isROCM));
  pm.addPass(mlir::createArithToLLVMConversionPass());
  pm.addPass(mlir::createCanonicalizerPass());
  // Simplify the IR
  pm.addPass(mlir::createCSEPass());
  pm.addPass(mlir::createSymbolDCEPass());

  if (failed(pm.run(module))) {
    llvm::errs() << "Pass execution failed";
    return nullptr;
  }

  auto llvmIR = translateLLVMToLLVMIR(llvmContext, module, isROCM);
  if (!llvmIR) {
    llvm::errs() << "Translate to LLVM IR failed";
    return nullptr;
  }

  if (::triton::tools::getBoolEnv("LLVM_IR_ENABLE_DUMP")) {
    std::string mod_string;
    std::unique_ptr<llvm::raw_string_ostream> ir_ss(
        new llvm::raw_string_ostream(mod_string));
    llvmIR->print(*ir_ss, nullptr);
    std::cout << "// -----// LLVM IR Dump //----- //\n"
              << mod_string << std::endl;
  }
  return llvmIR;
}
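Both dump hooks above are driven by environment variables read via getBoolEnv, so you can watch the IR evolve without touching the code; set them in the environment (or in Python before triggering the first compilation):

import os

# Print the module after each MLIR pass in the pipeline above
os.environ["MLIR_ENABLE_DUMP"] = "1"
# Print the final LLVM IR produced by translateLLVMToLLVMIR
os.environ["LLVM_IR_ENABLE_DUMP"] = "1"
# ... then launch a kernel in the same process to trigger compilation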

The core of translateLLVMIRToPTX is:

std::string translateLLVMIRToPTX(llvm::Module &module, int cc, int version) {
  // LLVM version in use may not officially support target hardware.
  // Supported versions for LLVM 14 are here:
  // https://github.com/llvm/llvm-project/blob/f28c006a5895fc0e329fe15fead81e37457cb1d1/clang/include/clang/Basic/BuiltinsNVPTX.def
  int maxPTX = std::min(80, version);
  int maxCC = std::min(90, cc);
  // options
  auto options = llvm::cl::getRegisteredOptions();
  auto *shortPtr =
      static_cast<llvm::cl::opt<bool> *>(options["nvptx-short-ptr"]);
  assert(shortPtr);
  shortPtr->setValue(true);
  std::string sm = cc == 90 ? "sm_90a" : "sm_" + std::to_string(cc);
  // max PTX version
  int ptxMajor = maxPTX / 10;
  int ptxMinor = maxPTX % 10;
  // create
  std::string triple = "nvptx64-nvidia-cuda";
  std::string proc = "sm_" + std::to_string(maxCC);
  std::string layout = "";
  std::string features = "";
  // std::string features = "+ptx" + std::to_string(maxPTX);
  initLLVM();
  // verify and store llvm
  llvm::legacy::PassManager pm;
  pm.add(llvm::createVerifierPass());
  pm.run(module);
  // module->print(llvm::outs(), nullptr);

  // create machine
  module.setTargetTriple(triple);
  std::string error;
  auto target =
      llvm::TargetRegistry::lookupTarget(module.getTargetTriple(), error);
  llvm::TargetOptions opt;
  opt.AllowFPOpFusion = llvm::FPOpFusion::Fast;
  opt.UnsafeFPMath = false;
  opt.NoInfsFPMath = false;
  opt.NoNaNsFPMath = true;
  llvm::TargetMachine *machine = target->createTargetMachine(
      module.getTargetTriple(), proc, features, opt, llvm::Reloc::PIC_,
      std::nullopt, llvm::CodeGenOpt::Aggressive);
  // set data layout
  if (layout.empty())
    module.setDataLayout(machine->createDataLayout());
  else
    module.setDataLayout(layout);
  // emit machine code
  std::string result;
  {
    llvm::raw_string_ostream stream(result);
    llvm::buffer_ostream pstream(stream);
    for (llvm::Function &f : module.functions())
      f.addFnAttr(llvm::Attribute::AlwaysInline);
    llvm::legacy::PassManager pass;
    // emit
    machine->addPassesToEmitFile(pass, pstream, nullptr,
                                 llvm::CodeGenFileType::CGFT_AssemblyFile);
    pass.run(module);
  }
  // post-process
  findAndReplace(result, ".version", "\n",
                 ".version " + std::to_string(ptxMajor) + "." +
                     std::to_string(ptxMinor) + "\n");
  findAndReplace(result, ".target", "\n", ".target " + sm + "\n");
  while (findAndReplace(result, "\t// begin inline asm", "\n", ""))
    ;
  while (findAndReplace(result, "\t// end inline asm", "\n", ""))
    ;
  return result;
}
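To make the post-processing concrete: with cc == 90 and version >= 80, maxPTX is clamped to 80 (so ptxMajor.ptxMinor is 8.0) and sm becomes "sm_90a", so findAndReplace rewrites the header of the emitted PTX to:

.version 8.0
.target sm_90a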

At this point we have the cubin, and the last step of the compile function is to return the compiled handle object CompiledKernel(fn, so_path, metadata, asm); the remaining logic is implemented by CompiledKernel.

Back in the jit launcher at the very beginning, the statement that ultimately performs the launch is:

bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared, stream, bin.cu_function, triton.compiler.CompiledKernel.launch_enter_hook, triton.compiler.CompiledKernel.launch_exit_hook, bin, *args)

That is, the CompiledKernel.c_wrapper function:

class CompiledKernel:

    def __init__(self, fn, so_path, metadata, asm):
        # initialize launcher
        import importlib.util
        spec = importlib.util.spec_from_file_location("__triton_launcher", so_path)
        mod = importlib.util.module_from_spec(spec)
        self.fn = fn
        spec.loader.exec_module(mod)
        self.c_wrapper = getattr(mod, "launch")  # <-------- this is the one

The so_path here is the path to the launcher .so built in make_stub; importing it gives us the launch entry point, so every subsequent fn[grid](...) call that hits the cache goes straight through c_wrapper into cuLaunchKernel.
