Hadoop Production Configuration Files V2
Production configuration tuning for Apache Hadoop 2.7.3 with NameNode HA and ResourceManager HA. Only the HDFS and YARN configuration files themselves are covered; per-role tuning such as GC settings is not included. The files can be used as a reference or applied directly, but they are not necessarily optimal for every cluster.
core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://flashHadoopDev</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:///app/hadoop/tmp/</value>
</property>
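<!-- 128 KB (131072 bytes) read/write buffer used for I/O on sequence files and streams -->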
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>VECS02907:2181,VECS02908:2181,VECS02909:2181</value>
</property>
<property>
<name>io.compression.codecs</name>
<value>org.apache.hadoop.io.compress.SnappyCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.Lz4Codec</value>
</property>
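<!-- Trash retention: deleted files stay in .Trash for 2880 minutes (48 hours) before being purged -->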
<property>
<name>fs.trash.interval</name>
<value>2880</value>
</property>
<!-- <property>
<name>net.topology.script.file.name</name>
<value>/apps/hadoop-conf/rack.sh</value>
</property>
-->
<!-- Timeout for the HealthMonitor's NameNode health-check RPC; raised from the default to 5 minutes (300000 ms) -->
<property>
<name>ha.health-monitor.rpc-timeout.ms</name>
<value>300000</value>
</property>
<!-- ZooKeeper session timeout used by the ZKFC for failover; raised from the default to 3 minutes (180000 ms) -->
<property>
<name>ha.zookeeper.session-timeout.ms</name>
<value>180000</value>
</property>
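<!-- Allow the deploy user to impersonate any user from any host, as is typically needed by gateway services that submit jobs on behalf of other users -->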
<property>
<name>hadoop.proxyuser.deploy.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.deploy.groups</name>
<value>*</value>
</property>
</configuration>
hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.nameservices</name>
<value>flashHadoopDev</value>
</property>
<!-- flashHadoopDev -->
<property>
<name>dfs.ha.namenodes.flashHadoopDev</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.flashHadoopDev.nn1</name>
<value>VECS02907:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.flashHadoopDev.nn2</name>
<value>VECS02908:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.flashHadoopDev.nn1</name>
<value>VECS02907:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.flashHadoopDev.nn2</name>
<value>VECS02908:50070</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.flashHadoopDev</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir.flashHadoopDev</name>
<value>qjournal://VECS02907:8485;VECS02908:8485;VECS02909:8485/flashHadoopDev</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:///data1/data/flashHadoopDev/namenode/,file:///data2/data/flashHadoopDev/namenode/</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:///data1/HDATA/dfs/local,
file:///data2/HDATA/dfs/local,
file:///data3/HDATA/dfs/local,
file:///data4/HDATA/dfs/local,
file:///data5/HDATA/dfs/local,
file:///data6/HDATA/dfs/local,
file:///data7/HDATA/dfs/local,
file:///data8/HDATA/dfs/local</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/data1/data/flashHadoopDev/journal</value>
</property>
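<!-- QJM timeouts raised above the defaults so that brief JournalNode or network stalls do not cause the active NameNode to abort -->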
<property>
<name>dfs.qjournal.start-segment.timeout.ms</name>
<value>60000</value>
</property>
<property>
<name>dfs.qjournal.prepare-recovery.timeout.ms</name>
<value>240000</value>
</property>
<property>
<name>dfs.qjournal.accept-recovery.timeout.ms</name>
<value>240000</value>
</property>
<property>
<name>dfs.qjournal.finalize-segment.timeout.ms</name>
<value>240000</value>
</property>
<property>
<name>dfs.qjournal.select-input-streams.timeout.ms</name>
<value>60000</value>
</property>
<property>
<name>dfs.qjournal.get-journal-state.timeout.ms</name>
<value>240000</value>
</property>
<property>
<name>dfs.qjournal.new-epoch.timeout.ms</name>
<value>240000</value>
</property>
<property>
<name>dfs.qjournal.write-txns.timeout.ms</name>
<value>60000</value>
</property>
<property>
<name>dfs.namenode.acls.enabled</name>
<value>true</value>
<description>Enable POSIX-style ACL support on the NameNode.</description>
</property>
<!-- Adjust the fencing settings below to match your environment -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hdfs/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.permissions.superusergroup</name>
<value>hadoop</value>
</property>
<property>
<name>dfs.datanode.max.transfer.threads</name>
<value>16384</value>
</property>
<property>
<name>dfs.hosts.exclude</name>
<value>/app/hadoop/etc/hadoop/exclude.list</value>
<description> List of nodes to decommission </description>
</property>
<property>
<name>dfs.datanode.fsdataset.volume.choosing.policy</name>
<value>org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy</value>
</property>
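<!-- Volumes whose free space differs by less than 10 GB (10737418240 bytes) are considered balanced; 75% of new block allocations go to the volumes with more free space -->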
<property>
<name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-threshold</name>
<value>10737418240</value>
</property>
<property>
<name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction</name>
<value>0.75</value>
</property>
<!-- 2018-06-19 disk parameter change: reserve about 1.4 T (1503238553600 bytes = 1400 GiB) of non-DFS space per volume -->
<property>
<name>dfs.datanode.du.reserved</name>
<value>1503238553600</value>
<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use. </description>
</property>
<property>
<name>dfs.datanode.failed.volumes.tolerated</name>
<value>1</value>
<description>The number of volumes that are allowed to fail before a datanode stops offering service. By default any volume failure will cause a datanode to shutdown. </description>
</property>
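<!-- Short-circuit local reads: clients on the same host read block files directly via the domain socket configured below; requires the native libhadoop library -->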
<property>
<name>dfs.client.read.shortcircuit.streams.cache.size</name>
<value>1000</value>
</property>
<property>
<name>dfs.client.read.shortcircuit.streams.cache.expiry.ms</name>
<value>10000</value>
</property>
<property>
<name>dfs.client.read.shortcircuit</name>
<value>true</value>
</property>
<property>
<name>dfs.domain.socket.path</name>
<value>/var/run/hadoop-hdfs/dn_socket</value>
</property>
<property>
<name>dfs.client.read.shortcircuit.skip.checksum</name>
<value>false</value>
</property>
<property>
<name>dfs.block.size</name>
<value>134217728</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
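<!-- RPC handler threads: 200 on the NameNode, 40 on each DataNode; a common rule of thumb scales the NameNode count with the logarithm of the number of DataNodes -->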
<property>
<name>dfs.namenode.handler.count</name>
<value>200</value>
</property>
<property>
<name>dfs.datanode.handler.count</name>
<value>40</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
<value>false</value>
</property>
</configuration>
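With the HA settings above, client applications never address either NameNode directly; they resolve the logical nameservice flashHadoopDev through the configured failover proxy provider. A minimal Java sketch, assuming core-site.xml and hdfs-site.xml are on the client classpath (the class name and the /tmp path are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HaClientSketch {
    public static void main(String[] args) throws Exception {
        // Loads core-site.xml / hdfs-site.xml from the classpath, so fs.defaultFS
        // resolves to hdfs://flashHadoopDev and NameNode failover is handled by
        // ConfiguredFailoverProxyProvider rather than by a fixed host name.
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            // Example path only; any HDFS path is resolved the same way.
            boolean exists = fs.exists(new Path("/tmp"));
            System.out.println("/tmp exists: " + exists);
        }
    }
}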
yarn-site.xml
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<property>
<description>Where to aggregate logs to.</description>
<name>yarn.nodemanager.remote-app-log-dir</name>
<value>hdfs://flashHadoopDev/tmp/logs</value>
</property>
<property>
<name>yarn.nodemanager.remote-app-log-dir-suffix</name>
<value>logs</value>
</property>
<property>
<description>Classpath for typical applications.</description>
<name>yarn.application.classpath</name>
<value>
$HADOOP_CONF_DIR,
$HADOOP_COMMON_HOME/*,$HADOOP_COMMON_HOME/lib/*,
$HADOOP_HDFS_HOME/*,$HADOOP_HDFS_HOME/lib/*,
$HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,
$HADOOP_YARN_HOME/*,$HADOOP_YARN_HOME/lib/*,
$HADOOP_COMMON_HOME/share/hadoop/common/*,
$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
$HADOOP_COMMON_HOME/share/hadoop/hdfs/*,
$HADOOP_COMMON_HOME/share/hadoop/hdfs/lib/*,
$HADOOP_COMMON_HOME/share/hadoop/mapreduce/*,
$HADOOP_COMMON_HOME/share/hadoop/mapreduce/lib/*,
$HADOOP_COMMON_HOME/share/hadoop/yarn/*,
$HADOOP_COMMON_HOME/share/hadoop/yarn/lib/*
</value>
</property>
<!-- resourcemanager config -->
<property>
<name>yarn.resourcemanager.connect.retry-interval.ms</name>
<value>2000</value>
</property>
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>FLASH_YARN_DEV</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>VECS02907</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>VECS02908</value>
</property>
<!-- CapacityScheduler -->
<property>
<name>yarn.resourcemanager.scheduler.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
</property>
<!-- CapacityScheduler End-->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.app.mapreduce.am.scheduler.connection.wait.interval-ms</name>
<value>5000</value>
</property>
<!-- File listing NodeManagers to be decommissioned -->
<property>
<name>yarn.resourcemanager.nodes.exclude-path</name>
<value>/app/hadoop/etc/hadoop/yarn.exclude</value>
<final>true</final>
</property>
<!-- ZKRMStateStore config -->
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>VECS02908:2181,VECS02907:2181,VECS02909:2181</value>
</property>
<property>
<name>yarn.resourcemanager.zk.state-store.address</name>
<value>VECS02908:2181,VECS02907:2181,VECS02909:2181</value>
</property>
<!-- applications manager interface -->
<!-- Address clients use to submit applications and other requests to the RM -->
<property>
<name>yarn.resourcemanager.address.rm1</name>
<value>VECS02907:23140</value>
</property>
<property>
<name>yarn.resourcemanager.address.rm2</name>
<value>VECS02908:23140</value>
</property>
<!-- scheduler interface -->
<!-- Address through which ApplicationMasters request resources from the RM scheduler -->
<property>
<name>yarn.resourcemanager.scheduler.address.rm1</name>
<value>VECS02907:23130</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rm2</name>
<value>VECS02908:23130</value>
</property>
<!-- RM admin interface -->
<property>
<name>yarn.resourcemanager.admin.address.rm1</name>
<value>VECS02907:23141</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rm2</name>
<value>VECS02908:23141</value>
</property>
<!-- RM resource-tracker interface: NodeManagers report heartbeats to the RM and pick up work here -->
<property>
<name>yarn.resourcemanager.resource-tracker.address.rm1</name>
<value>VECS02907:23125</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address.rm2</name>
<value>VECS02908:23125</value>
</property>
<!-- RM web application interface -->
<property>
<name>yarn.resourcemanager.webapp.address.rm1</name>
<value>VECS02907:8088</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm2</name>
<value>VECS02908:8088</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.https.address.rm1</name>
<value>VECS02907:23189</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.https.address.rm2</name>
<value>VECS02908:23189</value>
</property>
<property>
<name>yarn.log.server.url</name>
<value>http://VECS02909:19888/jobhistory/logs</value>
</property>
<property>
<name>yarn.web-proxy.address</name>
<value>VECS02907:54315</value>
</property>
<!-- Node Manager Configs -->
<property>
<description>Address where the localizer IPC is.</description>
<name>yarn.nodemanager.localizer.address</name>
<value>0.0.0.0:23344</value>
</property>
<property>
<description>NM Webapp address.</description>
<name>yarn.nodemanager.webapp.address</name>
<value>0.0.0.0:8042</value>
</property>
<property>
<name>yarn.nodemanager.local-dirs</name>
<value>file:///data8/HDATA/yarn/local,
file:///data7/HDATA/yarn/local,
file:///data6/HDATA/yarn/local,
file:///data5/HDATA/yarn/local,
file:///data4/HDATA/yarn/local,
file:///data3/HDATA/yarn/local,
file:///data2/HDATA/yarn/local,
file:///data1/HDATA/yarn/local</value>
</property>
<property>
<name>yarn.nodemanager.log-dirs</name>
<value>file:///data8/HDATA/yarn/logs,
file:///data7/HDATA/yarn/logs,
file:///data6/HDATA/yarn/logs,
file:///data5/HDATA/yarn/logs,
file:///data4/HDATA/yarn/logs,
file:///data3/HDATA/yarn/logs,
file:///data2/HDATA/yarn/logs,
file:///data1/HDATA/yarn/logs</value>
</property>
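<!-- Keep each application's localized files and container logs on local disk for 1200 seconds (20 minutes) after it finishes, to ease debugging -->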
<property>
<name>yarn.nodemanager.delete.debug-delay-sec</name>
<value>1200</value>
</property>
<property>
<name>mapreduce.shuffle.port</name>
<value>23080</value>
</property>
<property>
<name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
<value>true</value>
</property>
<!-- tuning -->
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>20480</value>
</property>
<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>8</value>
</property>
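<!-- Each NodeManager offers 20 GB of memory and 8 vcores to YARN; with the 2 GB minimum container size below, that is at most 10 containers per node -->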
<!-- tuning yarn container -->
<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>2048</value>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>8192</value>
</property>
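<!-- Note: yarn.scheduler.increment-allocation-mb and yarn.scheduler.fair.allow-undeclared-pools are Fair Scheduler properties and are not used by the CapacityScheduler configured above -->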
<property>
<name>yarn.scheduler.increment-allocation-mb</name>
<value>512</value>
</property>
<property>
<name>yarn.scheduler.fair.allow-undeclared-pools</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.vmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.pmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.vmem-pmem-ratio</name>
<value>2.1</value>
<description>Ratio between virtual memory to physical memory when setting memory limits for containers</description>
</property>
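<!-- Aggregated application logs are retained for 1209600 seconds (14 days) -->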
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>1209600</value>
</property>
<!-- Newly added features -->
<property>
<name>yarn.node-labels.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.node-labels.fs-store.root-dir</name>
<value>hdfs://flashHadoopDev/yarn/yarn-node-labels/</value>
</property>
<!-- timeline server -->
<property>
<name>yarn.timeline-service.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.timeline-service.generic-application-history.enabled</name>
<value>true</value>
</property>
</configuration>
mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>VECS02909:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>VECS02909:19888</value>
</property>
<property>
<name>yarn.app.mapreduce.am.staging-dir</name>
<value>/user</value>
</property>
<!-- tuning mapreduce -->
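<!-- Heap (-Xmx) is set to 75% of the container size: 1536 MB of 2048 MB for map tasks, 4608 MB of 6144 MB for reduce tasks -->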
<property>
<name>mapreduce.map.memory.mb</name>
<value>2048</value>
</property>
<property>
<name>mapreduce.map.java.opts</name>
<value>-Xmx1536m -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=15 -XX:CMSInitiatingOccupancyFraction=70 -Dfile.encoding=UTF-8</value>
</property>
<property>
<name>mapreduce.reduce.memory.mb</name>
<value>6144</value>
</property>
<property>
<name>mapreduce.reduce.java.opts</name>
<value>-Xmx4608m -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=15 -XX:CMSInitiatingOccupancyFraction=70 -Dfile.encoding=UTF-8</value>
</property>
<property>
<name>mapreduce.map.cpu.vcores</name>
<value>1</value>
</property>
<property>
<name>mapreduce.reduce.cpu.vcores</name>
<value>2</value>
</property>
<property>
<name>mapreduce.cluster.local.dir</name>
<value>file:///data8/HDATA/mapred/local,
file:///data7/HDATA/mapred/local,
file:///data6/HDATA/mapred/local,
file:///data5/HDATA/mapred/local,
file:///data4/HDATA/mapred/local,
file:///data3/HDATA/mapred/local,
file:///data2/HDATA/mapred/local,
file:///data1/HDATA/mapred/local</value>
</property>
<!--map and shuffle and reduce turning -->
<property>
<name>mapreduce.task.io.sort.mb</name>
<value>300</value>
</property>
<!-- 300 MB (30*10) in-memory sort buffer per task -->
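<!-- Job history files are kept for 1296000000 ms (15 days); the job list cache holds up to 200000 entries -->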
<property>
<name>mapreduce.jobhistory.max-age-ms</name>
<value>1296000000</value>
</property>
<property>
<name>mapreduce.jobhistory.joblist.cache.size</name>
<value>200000</value>
</property>
<property>
<name>mapreduce.input.fileinputformat.input.dir.recursive</name>
<value>true</value>
</property>
</configuration>
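The memory and heap settings above are cluster-wide defaults; individual jobs can override the same keys at submission time. A minimal Java sketch, where the job name and the override values are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobOverrideSketch {
    public static void main(String[] args) throws Exception {
        // Picks up mapred-site.xml / yarn-site.xml from the classpath.
        Configuration conf = new Configuration();
        // Per-job overrides of the site-wide tuning values; keep -Xmx at roughly
        // 75% of the container size, matching the ratio used in mapred-site.xml.
        conf.set("mapreduce.map.memory.mb", "4096");
        conf.set("mapreduce.map.java.opts", "-Xmx3072m");
        Job job = Job.getInstance(conf, "illustrative-override-job");
        // ... set mapper/reducer classes, input/output paths, then submit with:
        // job.waitForCompletion(true);
    }
}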