# 如果之前没有安装jdk和zookeeper,安装了的请直接跳过 (skip the steps you have already done)

# https://www.oracle.com/technetwork/java/javase/downloads/java-archive-javase8-2177648.html

# ================================================================== Install JDK

# Create the target directory and unpack the JDK tarball into it
mkdir -p /usr/java
tar -zxvf ~/jdk-8u111-linux-x64.tar.gz -C /usr/java
# The tarball is a regular file: -f (no prompt) is enough, -r was unnecessary
rm -f ~/jdk-8u111-linux-x64.tar.gz

# http://archive.apache.org/dist/zookeeper/

# ================================================================== Install ZooKeeper
# A ZooKeeper ensemble needs at least 3 servers; deploy ZooKeeper on every one of them

tar -zxvf ~/zookeeper-3.4.12.tar.gz -C /usr/local
# Regular file: plain rm -f is enough, -r was unnecessary
rm -f ~/zookeeper-3.4.12.tar.gz

# http://archive.apache.org/dist/hadoop/core/hadoop-2.7.6/

# ================================================================== Install Hadoop

tar -zxvf ~/hadoop-2.7.6.tar.gz -C /usr/local
# Regular file: plain rm -f is enough, -r was unnecessary
rm -f ~/hadoop-2.7.6.tar.gz

# Configure environment variables

# ================================================================== node1 node2 node3

vi /etc/profile

# Append below the existing "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL" line.
# NOTE: the original had HADOOP_HOME and PATH fused onto one line (scraping artifact);
# they are separate export statements.
export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ================================================================== node1

# Apply the new environment variables
source /etc/profile
# Verify the result
echo $HADOOP_HOME

# hadoop: configure hadoop-env.sh
vi $HADOOP_HOME/etc/hadoop/hadoop-env.sh

export JAVA_HOME=/usr/java/jdk1.8.0_111
#export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib:$HADOOP_HOME/lib/native"
export HADOOP_OPTS="-Djava.library.path=/usr/local/hadoop-2.7.6/lib:/usr/local/hadoop-2.7.6/lib/native"
export HADOOP_PID_DIR=/usr/local/hadoop-2.7.6/pids
# export HADOOP_OPTS="$HADOOP_OPTS -Duser.timezone=GMT+08"

# hadoop: configure mapred-env.sh
vi $HADOOP_HOME/etc/hadoop/mapred-env.sh

export JAVA_HOME=/usr/java/jdk1.8.0_111
export HADOOP_MAPRED_PID_DIR=/usr/local/hadoop-2.7.6/pids

# hadoop: configure yarn-env.sh — change JAVA_HEAP_MAX=-Xmx3072m to 3G
vi $HADOOP_HOME/etc/hadoop/yarn-env.sh

export JAVA_HOME=/usr/java/jdk1.8.0_111
JAVA_HEAP_MAX=3G
# YARN_OPTS="$YARN_OPTS -Duser.timezone=GMT+08"

# List the slave nodes
vi $HADOOP_HOME/etc/hadoop/slaves

node1
node2
node3

# Create the directories referenced by the config files
# (-p: create parents, no error if they already exist)
mkdir -p $HADOOP_HOME/{tmp,dfs/name,dfs/data,logs,journal/data,yarn/local,yarn/logs}

# core-site.xml

vi $HADOOP_HOME/etc/hadoop/core-site.xml

<configuration>
  <!-- Default filesystem URI; "appcluster" must match dfs.nameservices in hdfs-site.xml -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://appcluster</value>
  </property>
  <!-- ZooKeeper quorum used for HA coordination -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>node1:2181,node2:2181,node3:2181</value>
  </property>
  <!-- ZooKeeper session timeout (ms); NOTE(review): 2000 ms is aggressive, the usual default is 5000+ -->
  <property>
    <name>ha.zookeeper.session-timeout.ms</name>
    <value>2000</value>
  </property>
  <!-- Failover-controller CLI check RPC timeout (ms) -->
  <property>
    <name>ha.failover-controller.cli-check.rpc-timeout.ms</name>
    <value>60000</value>
  </property>
  <!-- IPC client connect timeout (ms) -->
  <property>
    <name>ipc.client.connect.timeout</name>
    <value>20000</value>
  </property>
  <!-- Hadoop temporary directory -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/hadoop-2.7.6/tmp</value>
  </property>
  <!-- Service-level authorization on; authentication stays "simple" (no Kerberos) -->
  <property>
    <name>hadoop.security.authorization</name>
    <value>true</value>
  </property>
  <property>
    <name>hadoop.security.authentication</name>
    <value>simple</value>
  </property>
  <!-- Proxy-user (impersonation) rules for users "super" and "root" -->
  <property>
    <name>hadoop.proxyuser.super.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.super.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.root.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.root.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.native.lib</name>
    <value>true</value>
    <description>Should native hadoop libraries, if present, be used.</description>
  </property>
</configuration>

# hdfs-site.xml

vi $HADOOP_HOME/etc/hadoop/hdfs-site.xml

<configuration>
  <!-- NameNode metadata directory; with multiple disks, list several comma-separated paths -->
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/usr/local/hadoop-2.7.6/dfs/name</value>
  </property>
  <!-- DataNode block storage directory -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/usr/local/hadoop-2.7.6/dfs/data</value>
  </property>
  <!-- Replication factor (default is 3) -->
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
  <!-- HDFS nameservice id; must match fs.defaultFS in core-site.xml -->
  <property>
    <name>dfs.nameservices</name>
    <value>appcluster</value>
  </property>
  <!-- The nameservice has two NameNodes: nn1 and nn2 -->
  <property>
    <name>dfs.ha.namenodes.appcluster</name>
    <value>nn1,nn2</value>
  </property>
  <!-- RPC addresses of nn1 / nn2 -->
  <property>
    <name>dfs.namenode.rpc-address.appcluster.nn1</name>
    <value>node1:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.appcluster.nn2</name>
    <value>node2:8020</value>
  </property>
  <!-- HTTP (web UI) addresses of nn1 / nn2 -->
  <property>
    <name>dfs.namenode.http-address.appcluster.nn1</name>
    <value>node1:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.appcluster.nn2</name>
    <value>node2:50070</value>
  </property>
  <!-- Dedicated service RPC addresses of nn1 / nn2 -->
  <property>
    <name>dfs.namenode.servicerpc-address.appcluster.nn1</name>
    <value>node1:53310</value>
  </property>
  <property>
    <name>dfs.namenode.servicerpc-address.appcluster.nn2</name>
    <value>node2:53310</value>
  </property>
  <!-- Where NameNode edits are written on the JournalNode quorum -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://node1:8485;node2:8485;node3:8485/appcluster</value>
  </property>
  <!-- Local directory where each JournalNode stores its data -->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/usr/local/hadoop-2.7.6/journal/data</value>
  </property>
  <!-- Enable automatic NameNode failover -->
  <property>
    <name>dfs.ha.automatic-failover.enabled.appcluster</name>
    <value>true</value>
  </property>
  <!-- Client-side proxy provider used to find the active NameNode -->
  <property>
    <name>dfs.client.failover.proxy.provider.appcluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <!-- Fencing methods, one per line; shell(/bin/true) is the fallback if ssh fails -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>
sshfence
shell(/bin/true)
    </value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.connect-timeout</name>
    <value>30000</value>
  </property>
  <!-- Allow web (REST) access to HDFS.
       NOTE(review): the original file declared this property twice; kept once. -->
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
  <!-- Disable HDFS permission checking.
       NOTE(review): the original used the misspelled key "dfs.permissions.enable"
       (silently ignored) plus the deprecated "dfs.permissions"; the correct key is
       "dfs.permissions.enabled". -->
  <property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
  </property>
</configuration>

# mapred-site.xml

cp $HADOOP_HOME/etc/hadoop/mapred-site.xml.template $HADOOP_HOME/etc/hadoop/mapred-site.xml
vi $HADOOP_HOME/etc/hadoop/mapred-site.xml

<configuration>
<!-- Run MapReduce jobs on YARN -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>

# yarn-site.xml

vi $HADOOP_HOME/etc/hadoop/yarn-site.xml

<configuration>
  <!-- NOTE(review): this is an HDFS key, not a YARN one; it belongs in hdfs-site.xml
       and is harmless but ineffective here. Kept to preserve the original content. -->
  <property>
    <name>dfs.ha.automatic-failover.enabled.appcluster</name>
    <value>true</value>
  </property>
  <!-- Retry interval (ms) when reconnecting to a lost ResourceManager -->
  <property>
    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
    <value>2000</value>
  </property>
  <!-- Enable ResourceManager HA (default is false) -->
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>
  <!-- Logical ids of the two ResourceManagers -->
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>node1:2181,node2:2181,node3:2181</value>
  </property>
  <!-- Enable automatic RM failover -->
  <property>
    <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>node1</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>node2</value>
  </property>
  <!--
    rm1 on node1, rm2 on node2.
    IMPORTANT: when copying this file to the other machine, this value MUST be
    changed there (to rm2 on node2).
  -->
  <property>
    <name>yarn.resourcemanager.ha.id</name>
    <value>rm1</value>
    <description>If we want to launch more than one RM in single node, we need this configuration</description>
  </property>
  <!-- Enable RM state recovery -->
  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>
  <!-- ZooKeeper connection for the RM state store -->
  <property>
    <name>yarn.resourcemanager.zk-state-store.address</name>
    <value>node1:2181,node2:2181,node3:2181</value>
  </property>
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>node1:2181,node2:2181,node3:2181</value>
  </property>
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>appcluster-yarn</value>
  </property>
  <!-- AM wait interval (ms) when the scheduler connection is lost -->
  <property>
    <name>yarn.app.mapreduce.am.scheduler.connection.wait.interval-ms</name>
    <value>5000</value>
  </property>
  <!-- Per-RM service addresses for rm1 / rm2 -->
  <property>
    <name>yarn.resourcemanager.address.rm1</name>
    <value>node1:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address.rm2</name>
    <value>node2:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm1</name>
    <value>node1:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm2</name>
    <value>node2:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>node1:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>node2:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
    <value>node1:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
    <value>node2:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address.rm1</name>
    <value>node1:8033</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address.rm2</name>
    <value>node2:8033</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.admin.address.rm1</name>
    <value>node1:23142</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.admin.address.rm2</name>
    <value>node2:23142</value>
  </property>
  <!-- NodeManager shuffle service for MapReduce -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <name>yarn.nodemanager.local-dirs</name>
    <value>/usr/local/hadoop-2.7.6/yarn/local</value>
  </property>
  <property>
    <name>yarn.nodemanager.log-dirs</name>
    <value>/usr/local/hadoop-2.7.6/yarn/logs</value>
  </property>
  <property>
    <name>mapreduce.shuffle.port</name>
    <value>23080</value>
  </property>
  <!-- Client-side failover proxy provider -->
  <property>
    <name>yarn.client.failover-proxy-provider</name>
    <value>org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.automatic-failover.zk-base-path</name>
    <value>/yarn-leader-election</value>
    <description>Optional setting. The default value is /yarn-leader-election</description>
  </property>
  <!-- NodeManager resources and scheduler limits -->
  <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>2048</value>
  </property>
  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>2048</value>
  </property>
  <property>
    <name>yarn.nodemanager.resource.cpu-vcores</name>
    <value>1</value>
  </property>
</configuration>

# Change the Hadoop log directory

vi $HADOOP_HOME/etc/hadoop/log4j.properties

hadoop.log.dir=/usr/local/hadoop-2.7.6/logs

# ================================================================== node1
# Copy the fully configured Hadoop tree to the other nodes
scp -r $HADOOP_HOME node2:/usr/local/
scp -r $HADOOP_HOME node3:/usr/local/

# ================================================================== node2 node3

# Apply the environment variables
source /etc/profile # verify the result
echo $HADOOP_HOME

# ================================================================== node2
# On node2 the ResourceManager id must be rm2 (node1 keeps rm1)
vi $HADOOP_HOME/etc/hadoop/yarn-site.xml

<property>
<name>yarn.resourcemanager.ha.id</name>
<value>rm2</value>
</property>

shutdown -h now
# VM snapshot: "before hadoop cluster"

# ================================================================== First-time startup

# 1. Start ZooKeeper on all nodes
# ================================================================== node1 node2 node3
zkServer.sh start
zkServer.sh status
# zkServer.sh stop
# $ZOOKEEPER_HOME/bin/zkServer.sh start
# $ZOOKEEPER_HOME/bin/zkServer.sh status
# $ZOOKEEPER_HOME/bin/zkServer.sh stop

# 2. Format the HA state in ZooKeeper; run on the NameNode that will become active
# ================================================================== node1
$HADOOP_HOME/bin/hdfs zkfc -formatZK
# Verify with: $ZOOKEEPER_HOME/bin/zkCli.sh
# ls /
# ls /hadoop-ha

# 3. Start the JournalNode cluster
# ================================================================== node1 node2 node3
$HADOOP_HOME/sbin/hadoop-daemon.sh start journalnode
# jps should show: QuorumPeerMain, JournalNode, Jps

# 4. Format the cluster NameNode
# ================================================================== node1
$HADOOP_HOME/bin/hdfs namenode -format
# If this is NOT the first format, manually delete the old NameNode/DataNode data
# first, otherwise the NameNode ID and DataNode ID will no longer match:
# rm -rf $HADOOP_HOME/dfs/name/* && rm -rf $HADOOP_HOME/dfs/data/*
# $HADOOP_HOME/bin/hdfs namenode -format
# (For HDFS federation — several HDFS clusters working together — use:
#  hdfs namenode -format -clusterId [clusterID])

# 5. Start the NameNode
# ================================================================== node1
$HADOOP_HOME/sbin/hadoop-daemon.sh start namenode
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager

# 6. Sync NameNode metadata from node1 to node2: bootstrap the standby
#    NameNode from the active one
# ================================================================== node2
$HADOOP_HOME/bin/hdfs namenode -bootstrapStandby
$HADOOP_HOME/sbin/hadoop-daemon.sh start namenode
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager

# 7. Start all DataNodes
# ================================================================== node1
$HADOOP_HOME/sbin/hadoop-daemons.sh start datanode

# 8. Start YARN
# ================================================================== node1
$HADOOP_HOME/sbin/start-yarn.sh

# 9. Start ZooKeeperFailoverController on node1 and node2 only
#    (node3 is a pure DataNode, so no zkfc there)
# ================================================================== node1 node2
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# 10. Verify that automatic HA failover works
# kill <NameNode pid>, then restart it with:
# $HADOOP_HOME/sbin/hadoop-daemon.sh start namenode

# After startup, check the processes running on each node
# node1
jps
#2850 NodeManager
#2963 DFSZKFailoverController
#3027 Jps
#2374 NameNode
#2679 ResourceManager
#2523 DataNode
#2126 QuorumPeerMain
#2255 JournalNode

# node2
jps
#2209 JournalNode
#2674 DataNode
#2850 NodeManager
#2134 QuorumPeerMain
#2424 ResourceManager
#2973 DFSZKFailoverController
#3021 Jps
#2318 NameNode

# node3
jps
#2546 Jps
#2131 QuorumPeerMain
#2278 DataNode
#2200 JournalNode
#2446 NodeManager

# Web UIs:
# http://node1:50070/dfshealth.html#tab-overview
# http://node2:50070/dfshealth.html#tab-overview
# http://node1:8088/cluster/nodes

# ================================================================== Stopping the cluster

# ================================================================== node1
# Stop every running Hadoop daemon
$HADOOP_HOME/sbin/stop-all.sh

# ================================================================== node1 node2 node3
# Stop ZooKeeper
zkServer.sh stop

# ================================================================== node1
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

# ================================================================== node2
$HADOOP_HOME/sbin/yarn-daemon.sh stop resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

# ================================================================== Subsequent startup sequence

# ================================================================== node1 node2 node3
# Start ZooKeeper
zkServer.sh start
zkServer.sh status

# ================================================================== node1
# Start all Hadoop daemons.
# NOTE: the original had these two commands fused onto one line, which would have
# passed the zkfc command as bogus arguments to start-all.sh.
$HADOOP_HOME/sbin/start-all.sh
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ================================================================== node2
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

hadoop dfsadmin -safemode get
# Force-leave safe mode if needed:
# hadoop dfsadmin -safemode leave

# ================================================================== Stopping the cluster

# ================================================================== node1
# Stop every running Hadoop daemon
$HADOOP_HOME/sbin/stop-all.sh

# ================================================================== node1 node2 node3
# Stop ZooKeeper
zkServer.sh stop

# ================================================================== node1
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

# ================================================================== node2
$HADOOP_HOME/sbin/yarn-daemon.sh stop resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

shutdown -h now
# VM snapshot: "hadoop cluster"

# Hold off on simple authentication until the cluster is verified working

# ================================================================== simple authentication
vi $HADOOP_HOME/etc/hadoop/core-site.xml

<property>
<name>hadoop.http.filter.initializers</name>
<value>org.apache.hadoop.security.AuthenticationFilterInitializer</value>
</property>
<property>
<name>hadoop.http.authentication.type</name>
<value>simple</value>
</property>
<property>
<name>hadoop.http.authentication.token.validity</name>
<value>3600</value>
</property>
<property>
<name>hadoop.http.authentication.signature.secret.file</name>
<value>/usr/local/hadoop-2.7.6/hadoop-http-auth-signature-secret</value>
</property>
<property>
<name>hadoop.http.authentication.cookie.domain</name>
<value></value>
</property>
<property>
<name>hadoop.http.authentication.simple.anonymous.allowed</name>
<value>false</value>
</property>

# Create the secret file under $HADOOP_HOME referenced by the config above
echo "hadoop" > $HADOOP_HOME/hadoop-http-auth-signature-secret

# Distribute the updated config and the secret file to the other nodes
scp -r $HADOOP_HOME/etc/hadoop/core-site.xml node2:$HADOOP_HOME/etc/hadoop/core-site.xml
scp -r $HADOOP_HOME/etc/hadoop/core-site.xml node3:$HADOOP_HOME/etc/hadoop/core-site.xml
scp -r $HADOOP_HOME/hadoop-http-auth-signature-secret node2:$HADOOP_HOME/hadoop-http-auth-signature-secret
scp -r $HADOOP_HOME/hadoop-http-auth-signature-secret node3:$HADOOP_HOME/hadoop-http-auth-signature-secret

# ================================================================== Subsequent startup sequence

# ================================================================== node1 node2 node3

# Start ZooKeeper
zkServer.sh start
zkServer.sh status

# ================================================================== node1

# Start all Hadoop daemons (the original fused these two commands onto one line)
$HADOOP_HOME/sbin/start-all.sh
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ================================================================== node2

$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

hadoop dfsadmin -safemode get

# Force-leave safe mode if needed
# hadoop dfsadmin -safemode leave

# Web access (user.name is passed as a query parameter)
# http://node1:50070?user.name=hadoop
# http://node2:50070?user.name=hadoop
# http://node1:8088/cluster/nodes?user.name=hadoop

shutdown -h now
# VM snapshot: "hadoop with simple auth"

# ==================================================================Kerberos 认证

# 后续整理好后开放

hadoop生态搭建(3节点)-04.hadoop配置的更多相关文章

  1. hadoop生态搭建(3节点)-10.spark配置

    # https://www.scala-lang.org/download/2.12.4.html# ================================================= ...

  2. hadoop生态搭建(3节点)-17.sqoop配置_单节点

    # ==================================================================安装 sqoop tar -zxvf ~/sqoop-1.4.7 ...

  3. hadoop生态搭建(3节点)

    软件:CentOS-7    VMware12    SSHSecureShellClient shell工具:Xshell 规划 vm网络配置 01.基础配置 02.ssh配置 03.zookeep ...

  4. Hadoop环境搭建|第二篇:hadoop环境搭建

    硬件配置:1台NameNode节点.2台DataNode节点 一.Linux环境配置 这里我只配置NameNode节点,DataNode节点的操作相同. 1.1.修改主机名 命令:vi /etc/sy ...

  5. 【Hadoop】搭建完全分布式的hadoop

    博客已转移,请借一步说话! http://www.weixuehao.com/archives/577 下面博文已更新,请移步 ↑ 用于测试,我用4台虚拟机搭建成了hadoop结构 我用了两个台式机. ...

  6. 【Hadoop】搭建完全分布式的hadoop【转】

    转自:http://www.cnblogs.com/laov/p/3421479.html 下面博文已更新,请移步 ↑ 用于测试,我用4台虚拟机搭建成了hadoop结构 我用了两个台式机.一个xp系统 ...

  7. 安装高可用Hadoop生态 (三) 安装Hadoop

    3.    安装Hadoop 3.1. 解压程序 ※ 3台服务器分别执行 .tar.gz -C/opt/cloud/packages /opt/cloud/bin/hadoop /etc/hadoop ...

  8. hadoop生态搭建(3节点)-08.kafka配置

    如果之前没有安装jdk和zookeeper,安装了的请直接跳过 # https://www.oracle.com/technetwork/java/javase/downloads/java-arch ...

  9. hadoop生态搭建(3节点)-09.flume配置

    # http://archive.apache.org/dist/flume/1.8.0/# ===================================================== ...

随机推荐

  1. 【起航计划ObjC 001】印第安老斑鸠ObjC的幻想 ---- Ubuntu下安装并使用Obj-C

    如何在最新版本的 Ubuntu下(14.10)来安装.编译Objective-C? Ubuntu已经有了对Objective-C的编译器(gobjc)的安装,因此安装gobjc的步骤可省,如果你用的U ...

  2. css 字体样式设置大全

    css样式大全(整理版)   字体属性:(font) 大小 {font-size: x-large;}(特大) xx-small;(极小) 一般中文用不到,只要用数值就可以,单位:PX.PD 样式 { ...

  3. koa2获取用户ip

    调用下面方法即可获取 // koa2 中 req 为 ctx.req const getUserIp = (req) => { return req.headers['x-forwarded-f ...

  4. js实现base64编码与解码(原生js)

    一直以来很多人使用到 JavaScript 进行 base64 编码解码时都是使用的 Base64.js,但事实上,浏览器很早就原生支持 base64 的编码与解码了 以前的方式 编码: <ja ...

  5. Linux 配置 ss

    Linux 配置 Shadowsocks 标签(空格分隔): ss VPS 1.首先安装 sudo pip install shadowsocks 2.然后在指定位置新建shadowsocks.jso ...

  6. STC12C5A60S2 51单片机最小系统

                                                                                    STC12C5A60S2 一.根据芯片文 ...

  7. Git软件的学习

    第一部分:我的git地址是https://github.com/ZHU19007/gitLearning 第二部分:我对git的认识 一.Git是一款免费.开源的分布式版本控制工具.而Github是用 ...

  8. Linux下elk安装配置

    安装jdkJDK版本大于1.8 elk下载地址:https://www.elastic.co/products注意:elk三个版本都要保持一致. rpm -ivh elasticsearch-5.4. ...

  9. Android(java)学习笔记7:多线程程序练习

    需求: 某电影院目前正在上映贺岁大片,共有100张票,而它有3个售票窗口售票,请设计一个程序模拟该电影院售票. 两种方式实现 A:继承Thread类 B:实现Runnable接 1. 首先我们利用方式 ...

  10. python:部分内置函数与匿名函数

    一.内置函数 1,数据类型:int,bool .......... 2,数据结构:dict,list,tuple,set,str 3,reversed--保留原列表,返回一个反序的迭代器 revers ...