Hadoop Production Configuration Files V2
Production configuration tuning for Apache Hadoop 2.7.3 with NameNode HA and ResourceManager HA. This covers only the HDFS and YARN configuration files themselves, not GC or other per-role tuning. Provided for reference or direct use; it is not necessarily optimal.
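As a quick sanity check on the files below, the resolved values can be printed with the plain Hadoop Configuration API. A minimal sketch, assuming the site files live under /app/hadoop/etc/hadoop/ (the directory referenced by dfs.hosts.exclude later in this post) and the Hadoop 2.7.3 client jars are on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class ConfCheck {
  public static void main(String[] args) {
    // Load only the site files, without the bundled *-default.xml,
    // so the output shows exactly what these files set.
    Configuration conf = new Configuration(false);
    conf.addResource(new Path("/app/hadoop/etc/hadoop/core-site.xml"));
    conf.addResource(new Path("/app/hadoop/etc/hadoop/hdfs-site.xml"));
    System.out.println("fs.defaultFS     = " + conf.get("fs.defaultFS"));
    System.out.println("dfs.nameservices = " + conf.get("dfs.nameservices"));
    System.out.println("nn1 rpc-address  = " + conf.get("dfs.namenode.rpc-address.flashHadoopDev.nn1"));
  }
}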
core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://flashHadoopDev</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>file:///app/hadoop/tmp/</value>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
  <value>VECS02907:2181,VECS02908:2181,VECS02909:2181</value>
  </property>
  <property>
    <name>io.compression.codecs</name>
    <value>org.apache.hadoop.io.compress.SnappyCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.Lz4Codec</value>
  </property>
  <property>
    <name>fs.trash.interval</name>
    <value>2880</value>
  </property>
 <!-- <property>
    <name>net.topology.script.file.name</name>
    <value>/apps/hadoop-conf/rack.sh</value>
  </property>
-->
  <!-- RPC timeout for the HealthMonitor's NameNode health check (default 45000 ms), raised to 5 minutes -->
  <property>
    <name>ha.health-monitor.rpc-timeout.ms</name>
    <value>300000</value>
  </property>
  <!-- ZooKeeper session timeout used by the ZKFC (default 5000 ms), raised to 3 minutes -->
  <property>
    <name>ha.zookeeper.session-timeout.ms</name>
    <value>180000</value>
  </property>
  <property>
    <name>hadoop.proxyuser.deploy.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.deploy.groups</name>
    <value>*</value>
  </property>
</configuration>
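The hadoop.proxyuser.deploy.* entries above let the deploy user impersonate any user from any host. A minimal sketch of how that impersonation looks from client code; the target user alice is hypothetical, and the process must actually run as deploy with the site files on the classpath:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();  // picks up core-site.xml / hdfs-site.xml from the classpath
    // Real (authenticated) user: must be 'deploy' for the proxyuser rules above to apply.
    UserGroupInformation realUser = UserGroupInformation.getCurrentUser();
    // Hypothetical impersonated user.
    UserGroupInformation proxy = UserGroupInformation.createProxyUser("alice", realUser);
    proxy.doAs((PrivilegedExceptionAction<Void>) () -> {
      FileSystem fs = FileSystem.get(conf);    // all HDFS calls in here run as 'alice'
      System.out.println(fs.exists(new Path("/user/alice")));
      return null;
    });
  }
}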
hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>flashHadoopDev</value>
  </property>
  <!-- flashHadoopDev -->
  <property>
    <name>dfs.ha.namenodes.flashHadoopDev</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.flashHadoopDev.nn1</name>
    <value>VECS02907:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.flashHadoopDev.nn2</name>
    <value>VECS02908:8020</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.flashHadoopDev.nn1</name>
    <value>VECS02907:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.flashHadoopDev.nn2</name>
    <value>VECS02908:50070</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.flashHadoopDev</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir.flashHadoopDev</name>
    <value>qjournal://VECS02907:8485;VECS02908:8485;VECS02909:8485/flashHadoopDev</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///data1/data/flashHadoopDev/namenode/,file:///data2/data/flashHadoopDev/namenode/</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:///data1/HDATA/dfs/local,
           file:///data2/HDATA/dfs/local,
           file:///data3/HDATA/dfs/local,
           file:///data4/HDATA/dfs/local,
           file:///data5/HDATA/dfs/local,
           file:///data6/HDATA/dfs/local,
           file:///data7/HDATA/dfs/local,
           file:///data8/HDATA/dfs/local</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/data1/data/flashHadoopDev/journal</value>
  </property>
  <property>
    <name>dfs.qjournal.start-segment.timeout.ms</name>
    <value>60000</value>
  </property>
  <property>
    <name>dfs.qjournal.prepare-recovery.timeout.ms</name>
    <value>240000</value>
  </property>
  <property>
    <name>dfs.qjournal.accept-recovery.timeout.ms</name>
    <value>240000</value>
  </property>
  <property>
    <name>dfs.qjournal.finalize-segment.timeout.ms</name>
    <value>240000</value>
    </property>
  <property>
    <name>dfs.qjournal.select-input-streams.timeout.ms</name>
    <value>60000</value>
    </property>
  <property>
    <name>dfs.qjournal.get-journal-state.timeout.ms</name>
    <value>240000</value>
  </property>
  <property>
    <name>dfs.qjournal.new-epoch.timeout.ms</name>
    <value>240000</value>
  </property>
  <property>
    <name>dfs.qjournal.write-txns.timeout.ms</name>
    <value>60000</value>
  </property>
  <property>
    <name>dfs.namenode.acls.enabled</name>
    <value>true</value>
    <description>Number of replication for each chunk.</description>
  </property>
  <!-- Adjust the fencing configuration below to match your actual environment -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/hdfs/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.permissions.superusergroup</name>
    <value>hadoop</value>
  </property>
  <property>
    <name>dfs.datanode.max.transfer.threads</name>
    <value>16384</value>
  </property>
  <property>
    <name>dfs.hosts.exclude</name>
    <value>/app/hadoop/etc/hadoop/exclude.list</value>
    <description> List of nodes to decommission </description>
  </property>
  <property>
    <name>dfs.datanode.fsdataset.volume.choosing.policy</name>
    <value>org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy</value>
  </property>
  <property>
    <name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-threshold</name>
    <value>10737418240</value>
  </property>
  <property>
    <name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction</name>
    <value>0.75</value>
</property>
  <!-- 2018-06-19 disk parameter change: reserve ~1.4 TB per volume for non-DFS use -->
  <property>
    <name>dfs.datanode.du.reserved</name>
    <value>1503238553600</value>
    <description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.</description>
  </property>
  <property>
    <name>dfs.datanode.failed.volumes.tolerated</name>
    <value>1</value>
    <description>The number of volumes that are allowed to fail before a DataNode stops offering service. By default any volume failure will cause a DataNode to shut down.</description>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
    <value>1000</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit.streams.cache.expiry.ms</name>
    <value>10000</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.domain.socket.path</name>
    <value>/var/run/hadoop-hdfs/dn_socket</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit.skip.checksum</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.block.size</name>
    <value>134217728</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.namenode.handler.count</name>
    <value>200</value>
  </property>
  <property>
    <name>dfs.datanode.handler.count</name>
    <value>40</value>
  </property>
  <property>
     <name>dfs.webhdfs.enabled</name>
     <value>true</value>
  </property>
  <property>
     <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
     <value>false</value>
  </property>
</configuration>
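A quick way to confirm that dfs.block.size and dfs.replication from this file are what clients actually see is to ask the FileSystem object directly. A minimal sketch, assuming core-site.xml and hdfs-site.xml are on the classpath and /tmp exists in HDFS:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsDefaults {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();       // loads core-site.xml and hdfs-site.xml
    FileSystem fs = FileSystem.get(conf);           // resolves hdfs://flashHadoopDev via the failover proxy provider
    Path p = new Path("/tmp");                      // any existing path
    System.out.println("block size  : " + fs.getDefaultBlockSize(p));    // expect 134217728
    System.out.println("replication : " + fs.getDefaultReplication(p));  // expect 3
    fs.close();
  }
}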
yarn-site.xml
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<configuration>
  <!-- Site specific YARN configuration properties -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <property>
    <description>Where to aggregate logs to.</description>
    <name>yarn.nodemanager.remote-app-log-dir</name>
    <value>hdfs://flashHadoopDev/tmp/logs</value>
  </property>
  <property>
    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
    <value>logs</value>
  </property>
  <property>
    <description>Classpath for typical applications.</description>
    <name>yarn.application.classpath</name>
    <value>
      $HADOOP_CONF_DIR,
      $HADOOP_COMMON_HOME/*,$HADOOP_COMMON_HOME/lib/*,
      $HADOOP_HDFS_HOME/*,$HADOOP_HDFS_HOME/lib/*,
      $HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,
      $HADOOP_YARN_HOME/*,$HADOOP_YARN_HOME/lib/*,
      $HADOOP_COMMON_HOME/share/hadoop/common/*,
      $HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
      $HADOOP_COMMON_HOME/share/hadoop/hdfs/*,
      $HADOOP_COMMON_HOME/share/hadoop/hdfs/lib/*,
      $HADOOP_COMMON_HOME/share/hadoop/mapreduce/*,
      $HADOOP_COMMON_HOME/share/hadoop/mapreduce/lib/*,
      $HADOOP_COMMON_HOME/share/hadoop/yarn/*,
      $HADOOP_COMMON_HOME/share/hadoop/yarn/lib/*
     </value>
  </property>
  <!-- resourcemanager config -->
  <property>
    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
    <value>2000</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>FLASH_YARN_DEV</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>VECS02907</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>VECS02908</value>
  </property>
<!-- CapacityScheduler -->
  <property>
      <name>yarn.resourcemanager.scheduler.class</name>
      <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
  </property>
<!-- CapacityScheduler End-->
  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.scheduler.connection.wait.interval-ms</name>
    <value>5000</value>
  </property>
  <!-- Exclude file listing NodeManagers to be decommissioned -->
  <property>
    <name>yarn.resourcemanager.nodes.exclude-path</name>
    <value>/app/hadoop/etc/hadoop/yarn.exclude</value>
    <final>true</final>
  </property>
  <!-- ZKRMStateStore config -->
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>VECS02908:2181,VECS02907:2181,VECS02909:2181</value>
  </property>
  <property>
    <name>yarn.resourcemanager.zk.state-store.address</name>
    <value>VECS02908:2181,VECS02907:2181,VECS02909:2181</value>
  </property>
  <!-- applications manager interface -->
  <!-- Clients submit applications and application operations to the RM at this address -->
  <property>
    <name>yarn.resourcemanager.address.rm1</name>
    <value>VECS02907:23140</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address.rm2</name>
    <value>VECS02908:23140</value>
  </property>
  <!-- scheduler interface -->
  <!-- Scheduler address: ApplicationMasters request and release resources here -->
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm1</name>
    <value>VECS02907:23130</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm2</name>
    <value>VECS02908:23130</value>
  </property>
  <!-- RM admin interface -->
  <property>
    <name>yarn.resourcemanager.admin.address.rm1</name>
    <value>VECS02907:23141</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address.rm2</name>
    <value>VECS02908:23141</value>
  </property>
  <!-- RM resource-tracker interface: NodeManagers register, send heartbeats, and receive commands here -->
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
    <value>VECS02907:23125</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
    <value>VECS02908:23125</value>
  </property>
  <!-- RM web application interface -->
  <property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>VECS02907:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>VECS02908:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.https.address.rm1</name>
    <value>VECS02907:23189</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.https.address.rm2</name>
    <value>VECS02908:23189</value>
  </property>
  <property>
    <name>yarn.log.server.url</name>
    <value>http://VECS02909:19888/jobhistory/logs</value>
  </property>
  <property>
    <name>yarn.web-proxy.address</name>
    <value>VECS02907:54315</value>
  </property>
  <!-- Node Manager Configs -->
  <property>
    <description>Address where the localizer IPC is.</description>
    <name>yarn.nodemanager.localizer.address</name>
    <value>0.0.0.0:23344</value>
  </property>
  <property>
    <description>NM Webapp address.</description>
    <name>yarn.nodemanager.webapp.address</name>
    <value>0.0.0.0:8042</value>
  </property>
  <property>
    <name>yarn.nodemanager.local-dirs</name>
    <value>file:///data8/HDATA/yarn/local,
           file:///data7/HDATA/yarn/local,
           file:///data6/HDATA/yarn/local,
           file:///data5/HDATA/yarn/local,
           file:///data4/HDATA/yarn/local,
           file:///data3/HDATA/yarn/local,
           file:///data2/HDATA/yarn/local,
           file:///data1/HDATA/yarn/local</value>
  </property>
  <property>
    <name>yarn.nodemanager.log-dirs</name>
    <value>file:///data8/HDATA/yarn/logs,
           file:///data7/HDATA/yarn/logs,
           file:///data6/HDATA/yarn/logs,
           file:///data5/HDATA/yarn/logs,
           file:///data4/HDATA/yarn/logs,
           file:///data3/HDATA/yarn/logs,
           file:///data2/HDATA/yarn/logs,
           file:///data1/HDATA/yarn/logs</value>
  </property>
  <property>
    <name>yarn.nodemanager.delete.debug-delay-sec</name>
    <value>1200</value>
  </property>
  <property>
    <name>mapreduce.shuffle.port</name>
    <value>23080</value>
  </property>
  <property>
    <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
    <value>true</value>
  </property>
  <!-- tuning -->
  <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>20480</value>
  </property>
  <property>
    <name>yarn.nodemanager.resource.cpu-vcores</name>
    <value>8</value>
  </property>
  <!-- tuning yarn container -->
  <property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>2048</value>
  </property>
  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>8192</value>
  </property>
  <property>
    <name>yarn.scheduler.increment-allocation-mb</name>
    <value>512</value>
  </property>
  <property>
    <name>yarn.scheduler.fair.allow-undeclared-pools</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.nodemanager.pmem-check-enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>2.1</value>
    <description>Ratio of virtual memory to physical memory when setting memory limits for containers</description>
  </property>
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>1209600</value>
</property>
<!-- New feature: YARN node labels -->
  <property>
    <name>yarn.node-labels.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.node-labels.fs-store.root-dir</name>
    <value>hdfs://flashHadoopDev/yarn/yarn-node-labels/</value>
  </property>
<!-- timeline server -->
 <property>
   <name>yarn.timeline-service.enabled</name>
   <value>true</value>
 </property>
 <property>
   <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
   <value>true</value>
 </property>
 <property>
   <name>yarn.timeline-service.generic-application-history.enabled</name>
   <value>true</value>
 </property>
</configuration>
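With RM HA enabled, clients built on YarnClient fail over between rm1 and rm2 automatically using the addresses above. A minimal sketch that lists the running NodeManagers and their advertised resources (20480 MB / 8 vcores per node with the tuning above), assuming yarn-site.xml is on the classpath:

import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnCheck {
  public static void main(String[] args) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();  // reads yarn-site.xml from the classpath
    YarnClient client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();
    // The client retries against rm1/rm2 as configured by yarn.resourcemanager.ha.rm-ids.
    for (NodeReport node : client.getNodeReports(NodeState.RUNNING)) {
      System.out.println(node.getNodeId() + "  " + node.getCapability());
    }
    client.stop();
  }
}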
mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>VECS02909:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>VECS02909:19888</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.staging-dir</name>
    <value>/user</value>
  </property>
  <!-- tuning  mapreduce -->
  <property>
    <name>mapreduce.map.memory.mb</name>
    <value>2048</value>
  </property>
  <property>
    <name>mapreduce.map.java.opts</name>
    <value>-Xmx1536m -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=15 -XX:CMSInitiatingOccupancyFraction=70 -Dfile.encoding=UTF-8</value>
  </property>
  <property>
    <name>mapreduce.reduce.memory.mb</name>
    <value>6144</value>
  </property>
  <property>
    <name>mapreduce.reduce.java.opts</name>
    <value>-Xmx4608m -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=15 -XX:CMSInitiatingOccupancyFraction=70 -Dfile.encoding=UTF-8</value>
  </property>
  <property>
    <name>mapreduce.map.cpu.vcores</name>
    <value>1</value>
  </property>
  <property>
    <name>mapreduce.reduce.cpu.vcores</name>
    <value>2</value>
  </property>
  <property>
      <name>mapreduce.cluster.local.dir</name>
      <value>file:///data8/HDATA/mapred/local,
             file:///data7/HDATA/mapred/local,
             file:///data6/HDATA/mapred/local,
             file:///data5/HDATA/mapred/local,
             file:///data4/HDATA/mapred/local,
             file:///data3/HDATA/mapred/local,
             file:///data2/HDATA/mapred/local,
             file:///data1/HDATA/mapred/local</value>
     </property>
<!--map and shuffle and reduce turning -->
  <property>
      <name>mapreduce.task.io.sort.mb</name>
      <value>300</value>
  </property>
  <!-- sort buffer per task: mapreduce.task.io.sort.mb = 300 MB -->
  <property>
    <name>mapreduce.jobhistory.max-age-ms</name>
    <value>1296000000</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.joblist.cache.size</name>
    <value>200000</value>
</property>
  <property>
    <name>mapreduce.input.fileinputformat.input.dir.recursive</name>
    <value>true</value>
</property>
</configuration>
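The map/reduce heap sizes above are kept at roughly 75% of the container size (map: -Xmx1536m in a 2048 MB container; reduce: -Xmx4608m in a 6144 MB container), leaving headroom for non-heap memory. A minimal sketch that prints what a submitted job would actually use, assuming mapred-site.xml and yarn-site.xml are on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class MemCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();        // loads mapred-site.xml from the classpath
    Job job = Job.getInstance(conf, "mem-check");    // per-job overrides would go on job.getConfiguration()
    Configuration jc = job.getConfiguration();
    System.out.println("map container    : " + jc.get("mapreduce.map.memory.mb") + " MB, opts " + jc.get("mapreduce.map.java.opts"));
    System.out.println("reduce container : " + jc.get("mapreduce.reduce.memory.mb") + " MB, opts " + jc.get("mapreduce.reduce.java.opts"));
  }
}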