Hadoop Production Configuration Files V2
Tuned configuration files for a production cluster: Apache Hadoop 2.7.3 with NameNode HA and ResourceManager HA. Only the HDFS and YARN configuration files themselves are covered; GC settings and other per-role tuning are not included. Use them as a reference or as-is, keeping in mind they are not necessarily optimal for every cluster.
core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://flashHadoopDev</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:///app/hadoop/tmp/</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>VECS02907:2181,VECS02908:2181,VECS02909:2181</value>
</property>
<property>
<name>io.compression.codecs</name>
<value>org.apache.hadoop.io.compress.SnappyCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.Lz4Codec</value>
</property>
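<!--
Snappy and LZ4 rely on native libraries; if libhadoop or libsnappy is missing, jobs that
request these codecs fail. A quick sanity check, assuming the hadoop CLI is on the PATH:
  hadoop checknative -a
-->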
<property>
<name>fs.trash.interval</name>
<value>2880</value>
</property>
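<!--
fs.trash.interval is in minutes, so 2880 keeps deleted files in each user's .Trash for
48 hours. Two related shell operations, shown as a sketch (the path is a placeholder):
  hadoop fs -rm -skipTrash /path/to/file   (delete immediately, bypassing trash)
  hadoop fs -expunge                       (purge trash checkpoints older than the interval)
-->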
<!-- <property>
<name>net.topology.script.file.name</name>
<value>/apps/hadoop-conf/rack.sh</value>
</property>
-->
<!-- Timeout for the HealthMonitor's NameNode health-check RPC; default 50000 ms, raised here to 5 minutes -->
<property>
<name>ha.health-monitor.rpc-timeout.ms</name>
<value>300000</value>
</property>
<!-- ZooKeeper session timeout for ZKFC failover; default 5000 ms, raised here to 3 minutes -->
<property>
<name>ha.zookeeper.session-timeout.ms</name>
<value>180000</value>
</property>
<property>
<name>hadoop.proxyuser.deploy.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.deploy.groups</name>
<value>*</value>
</property>
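<!--
The two proxyuser entries let the deploy account impersonate any user from any host, which is
typical for a deployment or scheduling service. On a cluster with simple authentication,
impersonation can be spot-checked over WebHDFS (the target user is a placeholder):
  curl "http://VECS02907:50070/webhdfs/v1/tmp?op=LISTSTATUS&user.name=deploy&doas=testuser"
-->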
</configuration>
hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.nameservices</name>
<value>flashHadoopDev</value>
</property>
<!-- flashHadoopDev -->
<property>
<name>dfs.ha.namenodes.flashHadoopDev</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.flashHadoopDev.nn1</name>
<value>VECS02907:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.flashHadoopDev.nn2</name>
<value>VECS02908:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.flashHadoopDev.nn1</name>
<value>VECS02907:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.flashHadoopDev.nn2</name>
<value>VECS02908:50070</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.flashHadoopDev</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
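<!--
ConfiguredFailoverProxyProvider lets clients resolve the active NameNode through the
flashHadoopDev nameservice. Operators can inspect or move the active role using the nn1/nn2
IDs declared above:
  hdfs haadmin -getServiceState nn1
  hdfs haadmin -failover nn1 nn2   (transfer the active role to nn2)
-->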
<property>
<name>dfs.namenode.shared.edits.dir.flashHadoopDev</name>
<value>qjournal://VECS02907:8485;VECS02908:8485;VECS02909:8485/flashHadoopDev</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:///data1/data/flashHadoopDev/namenode/,file:///data2/data/flashHadoopDev/namenode/</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:///data1/HDATA/dfs/local,
file:///data2/HDATA/dfs/local,
file:///data3/HDATA/dfs/local,
file:///data4/HDATA/dfs/local,
file:///data5/HDATA/dfs/local,
file:///data6/HDATA/dfs/local,
file:///data7/HDATA/dfs/local,
file:///data8/HDATA/dfs/local</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/data1/data/flashHadoopDev/journal</value>
</property>
<property>
<name>dfs.qjournal.start-segment.timeout.ms</name>
<value>60000</value>
</property>
<property>
<name>dfs.qjournal.prepare-recovery.timeout.ms</name>
<value>240000</value>
</property>
<property>
<name>dfs.qjournal.accept-recovery.timeout.ms</name>
<value>240000</value>
</property>
<property>
<name>dfs.qjournal.finalize-segment.timeout.ms</name>
<value>240000</value>
</property>
<property>
<name>dfs.qjournal.select-input-streams.timeout.ms</name>
<value>60000</value>
</property>
<property>
<name>dfs.qjournal.get-journal-state.timeout.ms</name>
<value>240000</value>
</property>
<property>
<name>dfs.qjournal.new-epoch.timeout.ms</name>
<value>240000</value>
</property>
<property>
<name>dfs.qjournal.write-txns.timeout.ms</name>
<value>60000</value>
</property>
<property>
<name>dfs.namenode.acls.enabled</name>
<value>true</value>
<description>Enable POSIX-style ACL support on the NameNode.</description>
</property>
<!-- Adjust the following to match your environment -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hdfs/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
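<!--
Automatic failover needs the failover znode created once in the ZooKeeper quorum configured
in core-site.xml before the ZKFC daemons start; the usual one-time initialization is:
  hdfs zkfc -formatZK
-->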
<property>
<name>dfs.permissions.superusergroup</name>
<value>hadoop</value>
</property>
<property>
<name>dfs.datanode.max.transfer.threads</name>
<value>16384</value>
</property>
<property>
<name>dfs.hosts.exclude</name>
<value>/app/hadoop/etc/hadoop/exclude.list</value>
<description> List of nodes to decommission </description>
</property>
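<!--
Typical decommission flow: append the DataNode's hostname to exclude.list, then have the
NameNode re-read it (the hostname below is hypothetical):
  echo VECS02910 >> /app/hadoop/etc/hadoop/exclude.list
  hdfs dfsadmin -refreshNodes
-->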
<property>
<name>dfs.datanode.fsdataset.volume.choosing.policy</name>
<value>org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy</value>
</property>
<property>
<name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-threshold</name>
<value>10737418240</value>
</property>
<property>
<name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction</name>
<value>0.75</value>
</property>
<!-- 2018.06.19 disk parameter change: reserve 1.4 TB (1400 GiB) per volume -->
<property>
<name>dfs.datanode.du.reserved</name>
<value>1503238553600</value>
<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use. </description>
</property>
<property>
<name>dfs.datanode.failed.volumes.tolerated</name>
<value>1</value>
<description>The number of volumes that are allowed to fail before a datanode stops offering service. By default any volume failure will cause a datanode to shutdown. </description>
</property>
<property>
<name>dfs.client.read.shortcircuit.streams.cache.size</name>
<value>1000</value>
</property>
<property>
<name>dfs.client.read.shortcircuit.streams.cache.expiry.ms</name>
<value>10000</value>
</property>
<property>
<name>dfs.client.read.shortcircuit</name>
<value>true</value>
</property>
<property>
<name>dfs.domain.socket.path</name>
<value>/var/run/hadoop-hdfs/dn_socket</value>
</property>
<property>
<name>dfs.client.read.shortcircuit.skip.checksum</name>
<value>false</value>
</property>
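<!--
Short-circuit reads require the directory holding dfs.domain.socket.path to exist before the
DataNode starts. A minimal preparation sketch, assuming the DataNode runs as user hdfs in
group hadoop:
  mkdir -p /var/run/hadoop-hdfs
  chown hdfs:hadoop /var/run/hadoop-hdfs
  chmod 750 /var/run/hadoop-hdfs
-->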
<!-- 128 MB block size; dfs.block.size is the deprecated alias of dfs.blocksize -->
<property>
<name>dfs.blocksize</name>
<value>134217728</value>
</property>
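<!--
To confirm the block size the cluster actually resolves (including deprecation mapping):
  hdfs getconf -confKey dfs.blocksize
-->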
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.handler.count</name>
<value>200</value>
</property>
<property>
<name>dfs.datanode.handler.count</name>
<value>40</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
<value>false</value>
</property>
</configuration>
yarn-site.xml
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<property>
<description>Where to aggregate logs to.</description>
<name>yarn.nodemanager.remote-app-log-dir</name>
<value>hdfs://flashHadoopDev/tmp/logs</value>
</property>
<property>
<name>yarn.nodemanager.remote-app-log-dir-suffix</name>
<value>logs</value>
</property>
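<!--
With aggregation enabled, logs for finished applications land under
hdfs://flashHadoopDev/tmp/logs and can be fetched without visiting individual NodeManagers
(the application ID is a placeholder):
  yarn logs -applicationId application_1234567890123_0001
-->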
<property>
<description>Classpath for typical applications.</description>
<name>yarn.application.classpath</name>
<value>
$HADOOP_CONF_DIR,
$HADOOP_COMMON_HOME/*,$HADOOP_COMMON_HOME/lib/*,
$HADOOP_HDFS_HOME/*,$HADOOP_HDFS_HOME/lib/*,
$HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,
$HADOOP_YARN_HOME/*,$HADOOP_YARN_HOME/lib/*,
$HADOOP_COMMON_HOME/share/hadoop/common/*,
$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
$HADOOP_COMMON_HOME/share/hadoop/hdfs/*,
$HADOOP_COMMON_HOME/share/hadoop/hdfs/lib/*,
$HADOOP_COMMON_HOME/share/hadoop/mapreduce/*,
$HADOOP_COMMON_HOME/share/hadoop/mapreduce/lib/*,
$HADOOP_COMMON_HOME/share/hadoop/yarn/*,
$HADOOP_COMMON_HOME/share/hadoop/yarn/lib/*
</value>
</property>
<!-- resourcemanager config -->
<property>
<name>yarn.resourcemanager.connect.retry-interval.ms</name>
<value>2000</value>
</property>
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>FLASH_YARN_DEV</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>VECS02907</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>VECS02908</value>
</property>
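<!--
Which ResourceManager currently holds the active role can be checked with the rm1/rm2 IDs
declared above:
  yarn rmadmin -getServiceState rm1
  yarn rmadmin -getServiceState rm2
-->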
<!-- CapacityScheduler -->
<property>
<name>yarn.resourcemanager.scheduler.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
</property>
<!-- CapacityScheduler End-->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.app.mapreduce.am.scheduler.connection.wait.interval-ms</name>
<value>5000</value>
</property>
<!-- File listing NodeManagers to decommission. -->
<property>
<name>yarn.resourcemanager.nodes.exclude-path</name>
<value>/app/hadoop/etc/hadoop/yarn.exclude</value>
<final>true</final>
</property>
<!-- ZKRMStateStore config -->
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>VECS02908:2181,VECS02907:2181,VECS02909:2181</value>
</property>
<property>
<name>yarn.resourcemanager.zk.state-store.address</name>
<value>VECS02908:2181,VECS02907:2181,VECS02909:2181</value>
</property>
<!-- applications manager interface -->
<!-- Address clients use to submit applications to the RM -->
<property>
<name>yarn.resourcemanager.address.rm1</name>
<value>VECS02907:23140</value>
</property>
<property>
<name>yarn.resourcemanager.address.rm2</name>
<value>VECS02908:23140</value>
</property>
<!-- scheduler interface -->
<!-- Address ApplicationMasters use to request resources from the RM scheduler -->
<property>
<name>yarn.resourcemanager.scheduler.address.rm1</name>
<value>VECS02907:23130</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rm2</name>
<value>VECS02908:23130</value>
</property>
<!-- RM admin interface -->
<property>
<name>yarn.resourcemanager.admin.address.rm1</name>
<value>VECS02907:23141</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rm2</name>
<value>VECS02908:23141</value>
</property>
<!-- RM resource-tracker interface: NodeManagers send heartbeats and receive work here -->
<property>
<name>yarn.resourcemanager.resource-tracker.address.rm1</name>
<value>VECS02907:23125</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address.rm2</name>
<value>VECS02908:23125</value>
</property>
<!-- RM web application interface -->
<property>
<name>yarn.resourcemanager.webapp.address.rm1</name>
<value>VECS02907:8088</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm2</name>
<value>VECS02908:8088</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.https.address.rm1</name>
<value>VECS02907:23189</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.https.address.rm2</name>
<value>VECS02908:23189</value>
</property>
<property>
<name>yarn.log.server.url</name>
<value>http://VECS02909:19888/jobhistory/logs</value>
</property>
<property>
<name>yarn.web-proxy.address</name>
<value>VECS02907:54315</value>
</property>
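<!--
Because yarn.web-proxy.address is set explicitly, the web application proxy does not run
embedded in the ResourceManager and must be started as its own daemon on VECS02907, e.g.:
  $HADOOP_HOME/sbin/yarn-daemon.sh start proxyserver
-->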
<!-- Node Manager Configs -->
<property>
<description>Address where the localizer IPC is.</description>
<name>yarn.nodemanager.localizer.address</name>
<value>0.0.0.0:23344</value>
</property>
<property>
<description>NM Webapp address.</description>
<name>yarn.nodemanager.webapp.address</name>
<value>0.0.0.0:8042</value>
</property>
<property>
<name>yarn.nodemanager.local-dirs</name>
<value>file:///data8/HDATA/yarn/local,
file:///data7/HDATA/yarn/local,
file:///data6/HDATA/yarn/local,
file:///data5/HDATA/yarn/local,
file:///data4/HDATA/yarn/local,
file:///data3/HDATA/yarn/local,
file:///data2/HDATA/yarn/local,
file:///data1/HDATA/yarn/local</value>
</property>
<property>
<name>yarn.nodemanager.log-dirs</name>
<value>file:///data8/HDATA/yarn/logs,
file:///data7/HDATA/yarn/logs,
file:///data6/HDATA/yarn/logs,
file:///data5/HDATA/yarn/logs,
file:///data4/HDATA/yarn/logs,
file:///data3/HDATA/yarn/logs,
file:///data2/HDATA/yarn/logs,
file:///data1/HDATA/yarn/logs</value>
</property>
<property>
<name>yarn.nodemanager.delete.debug-delay-sec</name>
<value>1200</value>
</property>
<property>
<name>mapreduce.shuffle.port</name>
<value>23080</value>
</property>
<property>
<name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
<value>true</value>
</property>
<!-- tuning -->
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>20480</value>
</property>
<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>8</value>
</property>
<!-- tuning yarn container -->
<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>2048</value>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>8192</value>
</property>
<!-- Note: increment-allocation-mb is a FairScheduler setting; the CapacityScheduler configured above rounds requests up to multiples of the minimum allocation instead -->
<property>
<name>yarn.scheduler.increment-allocation-mb</name>
<value>512</value>
</property>
<property>
<name>yarn.scheduler.fair.allow-undeclared-pools</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.vmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.pmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.vmem-pmem-ratio</name>
<value>2.1</value>
<description>Ratio between virtual memory to physical memory when setting memory limits for containers</description>
</property>
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>1209600</value>
</property>
<!-- Newly added features: YARN node labels -->
<property>
<name>yarn.node-labels.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.node-labels.fs-store.root-dir</name>
<value>hdfs://flashHadoopDev/yarn/yarn-node-labels/</value>
</property>
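<!--
Labels must be registered with the ResourceManager before nodes can be mapped to them; a
sketch with a hypothetical label and host:
  yarn rmadmin -addToClusterNodeLabels "highmem"
  yarn rmadmin -replaceLabelsOnNode "VECS02910=highmem"
-->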
<!-- timeline server -->
<property>
<name>yarn.timeline-service.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.timeline-service.generic-application-history.enabled</name>
<value>true</value>
</property>
</configuration>
mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>VECS02909:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>VECS02909:19888</value>
</property>
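<!--
The JobHistory server on VECS02909 (the endpoint yarn.log.server.url in yarn-site.xml points
at) is started as a separate daemon:
  $HADOOP_HOME/sbin/mr-jobhistory-daemon.sh start historyserver
-->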
<property>
<name>yarn.app.mapreduce.am.staging-dir</name>
<value>/user</value>
</property>
<!-- tuning mapreduce -->
<property>
<name>mapreduce.map.memory.mb</name>
<value>2048</value>
</property>
<property>
<name>mapreduce.map.java.opts</name>
<value>-Xmx1536m -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=15 -XX:CMSInitiatingOccupancyFraction=70 -Dfile.encoding=UTF-8</value>
</property>
<property>
<name>mapreduce.reduce.memory.mb</name>
<value>6144</value>
</property>
<property>
<name>mapreduce.reduce.java.opts</name>
<value>-Xmx4608m -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=15 -XX:CMSInitiatingOccupancyFraction=70 -Dfile.encoding=UTF-8</value>
</property>
<property>
<name>mapreduce.map.cpu.vcores</name>
<value>1</value>
</property>
<property>
<name>mapreduce.reduce.cpu.vcores</name>
<value>2</value>
</property>
<property>
<name>mapreduce.cluster.local.dir</name>
<value>file:///data8/HDATA/mapred/local,
file:///data7/HDATA/mapred/local,
file:///data6/HDATA/mapred/local,
file:///data5/HDATA/mapred/local,
file:///data4/HDATA/mapred/local,
file:///data3/HDATA/mapred/local,
file:///data2/HDATA/mapred/local,
file:///data1/HDATA/mapred/local</value>
</property>
<!--map and shuffle and reduce turning -->
<property>
<name>mapreduce.task.io.sort.mb</name>
<value>300</value>
</property>
<!-- 300 MB sort buffer (30 * 10 = io.sort.mb); the buffer is allocated from the map task heap -->
<property>
<name>mapreduce.jobhistory.max-age-ms</name>
<value>1296000000</value>
<source>mapred-default.xml</source>
</property>
<property>
<name>mapreduce.jobhistory.joblist.cache.size</name>
<value>200000</value>
<source>mapred-default.xml</source>
</property>
<property>
<name>mapreduce.input.fileinputformat.input.dir.recursive</name>
<value>true</value>
</property>
</configuration>
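After all four files are distributed and the daemons restarted, a minimal end-to-end smoke test (the jar path assumes a stock 2.7.3 layout):
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar pi 10 100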