ubuntu12.04+hadoop2.2.0+zookeeper3.4.5+hbase0.96.2+hive0.13.1 pseudo-distributed environment deployment
root@m1:/home/hadoop# sudo apt-get install oracle-java7-installer
root@m1:/home/hadoop# sudo vi /etc/environment
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/lib/jvm/java-7-oracle/bin"
CLASSPATH="/usr/lib/jvm/java-7-oracle/lib"
JAVA_HOME="/usr/lib/jvm/java-7-oracle"
JRE_HOME="/usr/lib/jvm/java-7-oracle/jre"
root@m1:/home/hadoop# sudo update-alternatives --install /usr/bin/java java /usr/lib/jvm/java-7-oracle/bin/java 300
root@m1:/home/hadoop# sudo update-alternatives --install /usr/bin/javac javac /usr/lib/jvm/java-7-oracle/bin/javac 300
root@m1:/home/hadoop# sudo update-alternatives --config java
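Before moving on, it is worth a quick sanity check that this JDK is now the default. A minimal check, assuming the paths above:

root@m1:/home/hadoop# source /etc/environment
root@m1:/home/hadoop# java -version

The reported version should be the Oracle 1.7 build (the logs later in this walkthrough show 1.7.0_65).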



root@m1:/home/hadoop# sudo apt-get install ssh openssh-server
root@m1:/home/hadoop# scp -r root@m2:/root/.ssh/id_rsa.pub ~/.ssh/m2.pub
root@m1:/home/hadoop# scp -r root@s1:/root/.ssh/id_rsa.pub ~/.ssh/s1.pub
root@m1:/home/hadoop# scp -r root@s2:/root/.ssh/id_rsa.pub ~/.ssh/s2.pub
root@m1:/home/hadoop# cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
root@m1:/home/hadoop# cat ~/.ssh/m2.pub >> ~/.ssh/authorized_keys
root@m1:/home/hadoop# cat ~/.ssh/s1.pub >> ~/.ssh/authorized_keys
root@m1:/home/hadoop# cat ~/.ssh/s2.pub >> ~/.ssh/authorized_keys
root@m1:/home/hadoop# scp -r ~/.ssh/authorized_keys root@m2:~/.ssh/
root@m1:/home/hadoop# scp -r ~/.ssh/authorized_keys root@s1:~/.ssh/
root@m1:/home/hadoop# scp -r ~/.ssh/authorized_keys root@s2:~/.ssh/
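The scp/cat sequence above assumes every host already has an RSA key pair under /root/.ssh. If a node does not, here is a hedged sketch of generating one and then verifying passwordless login from m1:

root@m1:/home/hadoop# ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
root@m1:/home/hadoop# ssh m2 date

The second command should print the date without prompting for a password once the authorized_keys files are in place on all four hosts.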
root@m1:/home/hadoop/zookeeper-3.4.5/conf# vi zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/home/hadoop/zookeeper-3.4.5/data
dataLogDir=/home/hadoop/zookeeper-3.4.5/logs
server.1=m1:2888:3888
server.2=m2:2888:3888
server.3=s1:2888:3888
server.4=s2:2888:3888
# the port at which the clients will connect
clientPort=2181
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
root@m1:/home/hadoop/zookeeper-3.4.5/conf# scp -r /home/hadoop/zookeeper-3.4.5 root@m2:/home/hadoop
root@m1:/home/hadoop/zookeeper-3.4.5/conf# scp -r /home/hadoop/zookeeper-3.4.5 root@s1:/home/hadoop
root@m1:/home/hadoop/zookeeper-3.4.5/conf# scp -r /home/hadoop/zookeeper-3.4.5 root@s2:/home/hadoop
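zoo.cfg declares four servers, so each host also needs a myid file under dataDir whose number matches its server.N line. That step is not shown in the transcript above, so here is a sketch (run the matching line on the matching host, creating the data directory first if it does not exist):

root@m1:/home/hadoop# echo 1 > /home/hadoop/zookeeper-3.4.5/data/myid
root@m2:/home/hadoop# echo 2 > /home/hadoop/zookeeper-3.4.5/data/myid
root@s1:/home/hadoop# echo 3 > /home/hadoop/zookeeper-3.4.5/data/myid
root@s2:/home/hadoop# echo 4 > /home/hadoop/zookeeper-3.4.5/data/myid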
root@m1:/home/hadoop/hadoop-2.2.0/etc/hadoop# vi hadoop-env.sh
export JAVA_HOME=/usr/lib/jvm/java-7-oracle
#export JAVA_HOME=${JAVA_HOME}
root@m1:/home/hadoop/hadoop-2.2.0/etc/hadoop# vi yarn-env.sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# User for YARN daemons
export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
export JAVA_HOME=/usr/lib/jvm/java-7-oracle
root@m1:/home/hadoop/hadoop-2.2.0/etc/hadoop# vi hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>m1,m2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.m1</name>
    <value>m1:9000</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.m2</name>
    <value>m2:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.m1</name>
    <value>m1:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.m2</name>
    <value>m2:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://m1:8485;m2:8485/mycluster</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled.mycluster</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.mycluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/home/hadoop/hadoop-2.2.0/tmp/journal</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.permissions</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
  </property>
</configuration>
root@m1:/home/hadoop/hadoop-2.2.0/etc/hadoop# vi mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
    <description>Execution framework set to Hadoop YARN.</description>
  </property>
</configuration>
root@m1:/home/hadoop/hadoop-2.2.0/etc/hadoop# vi core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster</value>
  </property>
  <property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>m1:2181,m2:2181,s1:2181,s2:2181</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/hadoop/hadoop-2.2.0/tmp</value>
    <description></description>
  </property>
</configuration>
root@m1:/home/hadoop/hadoop-2.2.0/etc/hadoop# vi yarn-site.xml
<?xml version="1.0"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<configuration>

<!-- Site specific YARN configuration properties -->

  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>m1</value>
  </property>
</configuration>
root@m1:/home/hadoop/hadoop-2.2.0/etc/hadoop# vi slaves
s1
s2
root@m1:/home/hadoop# /home/hadoop/zookeeper-3.4.5/bin/zkServer.sh start
JMX enabled by default
Using config: /home/hadoop/zookeeper-3.4.5/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
root@m1:/home/hadoop# /home/hadoop/zookeeper-3.4.5/bin/zkServer.sh status
JMX enabled by default
Using config: /home/hadoop/zookeeper-3.4.5/bin/../conf/zoo.cfg
Mode: follower
root@m1:/home/hadoop#
root@s1:/home/hadoop# /home/hadoop/zookeeper-3.4.5/bin/zkServer.sh start
JMX enabled by default
Using config: /home/hadoop/zookeeper-3.4.5/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
root@s1:/home/hadoop# /home/hadoop/zookeeper-3.4.5/bin/zkServer.sh status
JMX enabled by default
Using config: /home/hadoop/zookeeper-3.4.5/bin/../conf/zoo.cfg
Mode: leader
root@s1:/home/hadoop#
root@m1:/home/hadoop# /home/hadoop/zookeeper-3.4.5/bin/zkCli.sh
Connecting to localhost:2181
2014-07-27 00:27:16,621 [myid:] - INFO [main:Environment@100] - Client environment:zookeeper.version=3.4.5-1392090, built on 09/30/2012 17:52 GMT
2014-07-27 00:27:16,628 [myid:] - INFO [main:Environment@100] - Client environment:host.name=m1
2014-07-27 00:27:16,628 [myid:] - INFO [main:Environment@100] - Client environment:java.version=1.7.0_65
2014-07-27 00:27:16,629 [myid:] - INFO [main:Environment@100] - Client environment:java.vendor=Oracle Corporation
2014-07-27 00:27:16,629 [myid:] - INFO [main:Environment@100] - Client environment:java.home=/usr/lib/jvm/java-7-oracle/jre
2014-07-27 00:27:16,630 [myid:] - INFO [main:Environment@100] - Client environment:java.class.path=/home/hadoop/zookeeper-3.4.5/bin/../build/classes:/home/hadoop/zookeeper-3.4.5/bin/../build/lib/*.jar:/home/hadoop/zookeeper-3.4.5/bin/../lib/slf4j-log4j12-1.6.1.jar:/home/hadoop/zookeeper-3.4.5/bin/../lib/slf4j-api-1.6.1.jar:/home/hadoop/zookeeper-3.4.5/bin/../lib/netty-3.2.2.Final.jar:/home/hadoop/zookeeper-3.4.5/bin/../lib/log4j-1.2.15.jar:/home/hadoop/zookeeper-3.4.5/bin/../lib/jline-0.9.94.jar:/home/hadoop/zookeeper-3.4.5/bin/../zookeeper-3.4.5.jar:/home/hadoop/zookeeper-3.4.5/bin/../src/java/lib/*.jar:/home/hadoop/zookeeper-3.4.5/bin/../conf:/usr/lib/jvm/java-7-oracle/lib
2014-07-27 00:27:16,630 [myid:] - INFO [main:Environment@100] - Client environment:java.library.path=:/usr/local/lib:/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
2014-07-27 00:27:16,631 [myid:] - INFO [main:Environment@100] - Client environment:java.io.tmpdir=/tmp
2014-07-27 00:27:16,631 [myid:] - INFO [main:Environment@100] - Client environment:java.compiler=<NA>
2014-07-27 00:27:16,632 [myid:] - INFO [main:Environment@100] - Client environment:os.name=Linux
2014-07-27 00:27:16,632 [myid:] - INFO [main:Environment@100] - Client environment:os.arch=amd64
2014-07-27 00:27:16,632 [myid:] - INFO [main:Environment@100] - Client environment:os.version=3.11.0-15-generic
2014-07-27 00:27:16,633 [myid:] - INFO [main:Environment@100] - Client environment:user.name=root
2014-07-27 00:27:16,633 [myid:] - INFO [main:Environment@100] - Client environment:user.home=/root
2014-07-27 00:27:16,634 [myid:] - INFO [main:Environment@100] - Client environment:user.dir=/home/hadoop
2014-07-27 00:27:16,636 [myid:] - INFO [main:ZooKeeper@438] - Initiating client connection, connectString=localhost:2181 sessionTimeout=30000 watcher=org.apache.zookeeper.ZooKeeperMain$MyWatcher@19b1ebe5
Welcome to ZooKeeper!
2014-07-27 00:27:16,672 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@966] - Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error)
2014-07-27 00:27:16,685 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@849] - Socket connection established to localhost/127.0.0.1:2181, initiating session
JLine support is enabled
2014-07-27 00:27:16,719 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1207] - Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x147737cd5d30000, negotiated timeout = 30000

WATCHER::

WatchedEvent state:SyncConnected type:None path:null

[zk: localhost:2181(CONNECTED) 0] ls /
[zookeeper]
[zk: localhost:2181(CONNECTED) 1]
root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/bin/hdfs zkfc -formatZK
14/07/27 00:31:59 INFO tools.DFSZKFailoverController: Failover controller configured for NameNode NameNode at m1/192.168.1.50:9000
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:zookeeper.version=3.4.5-1392090, built on 09/30/2012 17:52 GMT
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:host.name=m1
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:java.version=1.7.0_65
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:java.vendor=Oracle Corporation
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:java.home=/usr/lib/jvm/java-7-oracle/jre
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:java.class.path=/home/hadoop/hadoop-2.2.0/etc/hadoop:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/guava-11.0.2.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-codec-1.4.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/hadoop-annotations-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/slf4j-api-1.7.5.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-net-3.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/paranamer-2.3.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/snappy-java-1.0.4.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jasper-compiler-5.5.23.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-math-2.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-lang-2.5.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/servlet-api-2.5.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-logging-1.1.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/log4j-1.2.17.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jasper-runtime-5.5.23.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/mockito-all-1.8.5.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/hadoop-auth-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jackson-core-asl-1.8.8.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-digester-1.8.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jsp-api-2.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jersey-core-1.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jettison-1.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/xmlenc-0.52.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/netty-3.6.2.Final.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-httpclient-3.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-io-2.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jsch-0.1.42.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-compress-1.4.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jackson-jaxrs-1.8.8.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jackson-mapper-asl-1.8.8.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/junit-4.8.2.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jetty-6.1.26.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-collections-3.2.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jsr305-1.3.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jackson-xc-1.8.8.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/asm-3.2.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jersey-json-1.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/stax-api-1.0.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jets3t-0.6.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/avro-1.7.4.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-el-1.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-cli-1.2.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/commons-configuration-1.6.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jersey-server-1.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jetty-util-6.1.26.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/activation-1.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jaxb-api-2.2.2.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/slf4j-log4j12-1.7.5.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/zookeeper-3.4.5.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/xz-1.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/hadoop-nfs-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/hadoop-common-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/common/hadoop-common-2.2.0-tests.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/guava-11.0.2.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/commons-codec-1.4.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/commons-lang-2.5.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/commons-logging-1.1.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/log4j-1.2.17.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/jasper-runtime-5.5.23.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/jackson-core-asl-1.8.8.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/jsp-api-2.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/jersey-core-1.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/commons-io-2.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/jackson-mapper-asl-1.8.8.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/jetty-6.1.26.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/jsr305-1.3.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/asm-3.2.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/commons-el-1.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/commons-cli-1.2.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/jersey-server-1.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/commons-daemon-1.0.13.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/lib/jetty-util-6.1.26.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/hadoop-hdfs-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/hadoop-hdfs-nfs-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/hdfs/hadoop-hdfs-2.2.0-tests.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/hamcrest-core-1.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/hadoop-annotations-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/paranamer-2.3.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/snappy-java-1.0.4.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/log4j-1.2.17.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/jackson-core-asl-1.8.8.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/guice-3.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/jersey-core-1.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/netty-3.6.2.Final.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/junit-4.10.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/commons-io-2.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/jackson-mapper-asl-1.8.8.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/javax.inject-1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/aopalliance-1.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/asm-3.2.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/avro-1.7.4.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/jersey-server-1.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/lib/xz-1.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-client-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-server-tests-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-common-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-server-common-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-api-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-site-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/hamcrest-core-1.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/hadoop-annotations-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/jackson-core-asl-1.8.8.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/guice-3.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/junit-4.10.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/commons-io-2.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.8.8.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/javax.inject-1.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/asm-3.2.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/lib/xz-1.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.2.0-tests.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.2.0.jar:/home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.2.0.jar:/home/hadoop/hadoop-2.2.0/contrib/capacity-scheduler/*.jar
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:java.library.path=/home/hadoop/hadoop-2.2.0/lib/native
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:java.io.tmpdir=/tmp
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:java.compiler=<NA>
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:os.name=Linux
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:os.arch=amd64
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:os.version=3.11.0-15-generic
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:user.name=root
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:user.home=/root
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Client environment:user.dir=/home/hadoop
14/07/27 00:32:00 INFO zookeeper.ZooKeeper: Initiating client connection, connectString=m1:2181,m2:2181,s1:2181,s2:2181 sessionTimeout=5000 watcher=org.apache.hadoop.ha.ActiveStandbyElector$WatcherWithClientRef@5990054a
14/07/27 00:32:00 INFO zookeeper.ClientCnxn: Opening socket connection to server m1/192.168.1.50:2181. Will not attempt to authenticate using SASL (unknown error)
14/07/27 00:32:00 INFO zookeeper.ClientCnxn: Socket connection established to m1/192.168.1.50:2181, initiating session
14/07/27 00:32:00 INFO zookeeper.ClientCnxn: Session establishment complete on server m1/192.168.1.50:2181, sessionid = 0x147737cd5d30001, negotiated timeout = 5000
===============================================
The configured parent znode /hadoop-ha/mycluster already exists.
Are you sure you want to clear all failover information from
ZooKeeper?
WARNING: Before proceeding, ensure that all HDFS services and
failover controllers are stopped!
===============================================
Proceed formatting /hadoop-ha/mycluster? (Y or N) 14/07/27 00:32:00 INFO ha.ActiveStandbyElector: Session connected.
y
14/07/27 00:32:13 INFO ha.ActiveStandbyElector: Recursively deleting /hadoop-ha/mycluster from ZK...
14/07/27 00:32:13 INFO ha.ActiveStandbyElector: Successfully deleted /hadoop-ha/mycluster from ZK.
14/07/27 00:32:13 INFO ha.ActiveStandbyElector: Successfully created /hadoop-ha/mycluster in ZK.
14/07/27 00:32:13 INFO zookeeper.ClientCnxn: EventThread shut down
14/07/27 00:32:13 INFO zookeeper.ZooKeeper: Session: 0x147737cd5d30001 closed
root@m1:/home/hadoop#
root@m1:/home/hadoop# /home/hadoop/zookeeper-3.4.5/bin/zkCli.sh
[zk: localhost:2181(CONNECTED) 0] ls /
[hadoop-ha, zookeeper]
[zk: localhost:2181(CONNECTED) 1]
root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/sbin/hadoop-daemon.sh start journalnode
starting journalnode, logging to /home/hadoop/hadoop-2.2.0/logs/hadoop-root-journalnode-m1.out
root@m1:/home/hadoop# jps
2884 JournalNode
2553 QuorumPeerMain
2922 Jps
root@m1:/home/hadoop#
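Since dfs.namenode.shared.edits.dir points at qjournal://m1:8485;m2:8485/mycluster, a JournalNode must also be running on m2 before the NameNode is formatted. The transcript does not show that step; it is the same command on the other host:

root@m2:/home/hadoop# /home/hadoop/hadoop-2.2.0/sbin/hadoop-daemon.sh start journalnode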
root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/bin/hdfs namenode -format
root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/bin/hdfs namenode -format -clusterId m1
root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/sbin/hadoop-daemon.sh start namenode
After running the command, browse http://m1:50070/dfshealth.jsp to see m1's status:

root@m2:/home/hadoop# /home/hadoop/hadoop-2.2.0/bin/hdfs namenode -bootstrapStandby
root@m2:/home/hadoop# /home/hadoop/hadoop-2.2.0/sbin/hadoop-daemon.sh start namenode
Browse http://m2:50070/dfshealth.jsp to see m2's status. At this point both web pages show m1 and m2 in the standby state.

root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/sbin/hadoop-daemons.sh start datanode
s2: starting datanode, logging to /home/hadoop/hadoop-2.2.0/logs/hadoop-root-datanode-s2.out
s1: starting datanode, logging to /home/hadoop/hadoop-2.2.0/logs/hadoop-root-datanode-s1.out
root@m1:/home/hadoop#
root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/sbin/start-yarn.sh
starting yarn daemons
starting resourcemanager, logging to /home/hadoop/hadoop-2.2.0/logs/yarn-root-resourcemanager-m1.out
s1: starting nodemanager, logging to /home/hadoop/hadoop-2.2.0/logs/yarn-root-nodemanager-s1.out
s2: starting nodemanager, logging to /home/hadoop/hadoop-2.2.0/logs/yarn-root-nodemanager-s2.out
root@m1:/home/hadoop#
Then browse http://m1:8088/cluster to see the result.

root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/sbin/hadoop-daemon.sh start zkfc
starting zkfc, logging to /home/hadoop/hadoop-2.2.0/logs/hadoop-root-zkfc-m1.out
root@m1:/home/hadoop#
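Once a ZKFC is running next to each NameNode (run the same command on m2), ZooKeeper elects one of the two as active. A quick way to confirm the election from the command line, using the NameNode IDs from hdfs-site.xml (which of the two ends up active can vary):

root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/bin/hdfs haadmin -getServiceState m1
root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/bin/hdfs haadmin -getServiceState m2

One should now report active and the other standby.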

root@m1:/home/hadoop/hadoop-2.2.0/bin# /home/hadoop/hadoop-2.2.0/bin/hdfs dfs -ls /
Found 2 items
drwx------   - root supergroup          0 2014-07-17 23:54 /tmp
drwxr-xr-x   - lion supergroup          0 2014-07-21 00:40 /user
root@m1:/home/hadoop/hadoop-2.2.0/bin# /home/hadoop/hadoop-2.2.0/bin/hdfs dfs -mkdir /input
root@m1:/home/hadoop/hadoop-2.2.0/bin# /home/hadoop/hadoop-2.2.0/bin/hdfs dfs -ls /
Found 3 items
drwxr-xr-x   - root supergroup          0 2014-07-27 01:20 /input
drwx------   - root supergroup          0 2014-07-17 23:54 /tmp
drwxr-xr-x   - lion supergroup          0 2014-07-21 00:40 /user
root@m1:/home/hadoop/hadoop-2.2.0/bin# /home/hadoop/hadoop-2.2.0/bin/hdfs dfs -ls /input
root@m1:/home/hadoop/hadoop-2.2.0/bin# /home/hadoop/hadoop-2.2.0/bin/hdfs dfs -put hadoop.cmd /input
root@m1:/home/hadoop/hadoop-2.2.0/bin# /home/hadoop/hadoop-2.2.0/bin/hdfs dfs -ls /input
Found 1 items
-rw-r--r--   3 root supergroup       7530 2014-07-27 01:20 /input/hadoop.cmd
root@m1:/home/hadoop/hadoop-2.2.0/bin#
root@m1:/home/hadoop/hadoop-2.2.0/bin# /home/hadoop/hadoop-2.2.0/bin/hadoop jar /home/hadoop/hadoop-2.2.0/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.2.0.jar wordcount /input /output
14/07/27 01:22:41 INFO client.RMProxy: Connecting to ResourceManager at m1/192.168.1.50:8032
14/07/27 01:22:43 INFO input.FileInputFormat: Total input paths to process : 1
14/07/27 01:22:44 INFO mapreduce.JobSubmitter: number of splits:1
14/07/27 01:22:44 INFO Configuration.deprecation: user.name is deprecated. Instead, use mapreduce.job.user.name
14/07/27 01:22:44 INFO Configuration.deprecation: mapred.jar is deprecated. Instead, use mapreduce.job.jar
14/07/27 01:22:44 INFO Configuration.deprecation: mapred.output.value.class is deprecated. Instead, use mapreduce.job.output.value.class
14/07/27 01:22:44 INFO Configuration.deprecation: mapreduce.combine.class is deprecated. Instead, use mapreduce.job.combine.class
14/07/27 01:22:44 INFO Configuration.deprecation: mapreduce.map.class is deprecated. Instead, use mapreduce.job.map.class
14/07/27 01:22:44 INFO Configuration.deprecation: mapred.job.name is deprecated. Instead, use mapreduce.job.name
14/07/27 01:22:44 INFO Configuration.deprecation: mapreduce.reduce.class is deprecated. Instead, use mapreduce.job.reduce.class
14/07/27 01:22:44 INFO Configuration.deprecation: mapred.input.dir is deprecated. Instead, use mapreduce.input.fileinputformat.inputdir
14/07/27 01:22:44 INFO Configuration.deprecation: mapred.output.dir is deprecated. Instead, use mapreduce.output.fileoutputformat.outputdir
14/07/27 01:22:44 INFO Configuration.deprecation: mapred.map.tasks is deprecated. Instead, use mapreduce.job.maps
14/07/27 01:22:44 INFO Configuration.deprecation: mapred.output.key.class is deprecated. Instead, use mapreduce.job.output.key.class
14/07/27 01:22:44 INFO Configuration.deprecation: mapred.working.dir is deprecated. Instead, use mapreduce.job.working.dir
14/07/27 01:22:45 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1406394452186_0001
14/07/27 01:22:46 INFO impl.YarnClientImpl: Submitted application application_1406394452186_0001 to ResourceManager at m1/192.168.1.50:8032
14/07/27 01:22:46 INFO mapreduce.Job: The url to track the job: http://m1:8088/proxy/application_1406394452186_0001/
14/07/27 01:22:46 INFO mapreduce.Job: Running job: job_1406394452186_0001
14/07/27 01:23:10 INFO mapreduce.Job: Job job_1406394452186_0001 running in uber mode : false
14/07/27 01:23:10 INFO mapreduce.Job:  map 0% reduce 0%
14/07/27 01:23:31 INFO mapreduce.Job:  map 100% reduce 0%
14/07/27 01:23:48 INFO mapreduce.Job:  map 100% reduce 100%
14/07/27 01:23:48 INFO mapreduce.Job: Job job_1406394452186_0001 completed successfully
14/07/27 01:23:49 INFO mapreduce.Job: Counters: 43
        File System Counters
                FILE: Number of bytes read=6574
                FILE: Number of bytes written=175057
                FILE: Number of read operations=0
                FILE: Number of large read operations=0
                FILE: Number of write operations=0
                HDFS: Number of bytes read=7628
                HDFS: Number of bytes written=5088
                HDFS: Number of read operations=6
                HDFS: Number of large read operations=0
                HDFS: Number of write operations=2
        Job Counters
                Launched map tasks=1
                Launched reduce tasks=1
                Data-local map tasks=1
                Total time spent by all maps in occupied slots (ms)=18062
                Total time spent by all reduces in occupied slots (ms)=14807
        Map-Reduce Framework
                Map input records=240
                Map output records=827
                Map output bytes=9965
                Map output materialized bytes=6574
                Input split bytes=98
                Combine input records=827
                Combine output records=373
                Reduce input groups=373
                Reduce shuffle bytes=6574
                Reduce input records=373
                Reduce output records=373
                Spilled Records=746
                Shuffled Maps =1
                Failed Shuffles=0
                Merged Map outputs=1
                GC time elapsed (ms)=335
                CPU time spent (ms)=2960
                Physical memory (bytes) snapshot=270057472
                Virtual memory (bytes) snapshot=1990762496
                Total committed heap usage (bytes)=136450048
        Shuffle Errors
                BAD_ID=0
                CONNECTION=0
                IO_ERROR=0
                WRONG_LENGTH=0
                WRONG_MAP=0
                WRONG_REDUCE=0
        File Input Format Counters
                Bytes Read=7530
        File Output Format Counters
                Bytes Written=5088
root@m1:/home/hadoop/hadoop-2.2.0/bin#
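To look at the word counts the job produced, list and cat the output directory (part-r-00000 is the default name MapReduce gives a single reducer's output file):

root@m1:/home/hadoop/hadoop-2.2.0/bin# /home/hadoop/hadoop-2.2.0/bin/hdfs dfs -ls /output
root@m1:/home/hadoop/hadoop-2.2.0/bin# /home/hadoop/hadoop-2.2.0/bin/hdfs dfs -cat /output/part-r-00000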
root@m1:/home/hadoop/hadoop-2.2.0/bin# jps
5492 Jps
2884 JournalNode
4375 DFSZKFailoverController
2553 QuorumPeerMain
3898 NameNode
4075 ResourceManager
root@m1:/home/hadoop/hadoop-2.2.0/bin# kill -9 3898
root@m1:/home/hadoop/hadoop-2.2.0/bin# jps
2884 JournalNode
4375 DFSZKFailoverController
2553 QuorumPeerMain
4075 ResourceManager
5627 Jps
root@m1:/home/hadoop/hadoop-2.2.0/bin#
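With the active NameNode on m1 killed, ZKFC should fail over to m2. A hedged way to verify, and a sketch of bringing m1 back afterwards (it rejoins as standby):

root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/bin/hdfs haadmin -getServiceState m2
root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/sbin/hadoop-daemon.sh start namenode

The first command should now report active.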


root@m1:/home/hadoop/hbase-0.96.2-hadoop2/conf# vi hbase-env.sh
#
#/**
# * Copyright 2007 The Apache Software Foundation
# *
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements.  See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership.  The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License.  You may obtain a copy of the License at
# *
# *     http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */

# Set environment variables here.

# This script sets variables multiple times over the course of starting an hbase process,
# so try to keep things idempotent unless you want to take an even deeper look
# into the startup scripts (bin/hbase, etc.)

# The java implementation to use.  Java 1.6 required.
export JAVA_HOME=/usr/lib/jvm/java-7-oracle

# Extra Java CLASSPATH elements.  Optional.
# export HBASE_CLASSPATH=

# The maximum amount of heap to use, in MB. Default is 1000.
# export HBASE_HEAPSIZE=1000

# Extra Java runtime options.
# Below are what we set by default.  May only work with SUN JVM.
# For more on why as well as other possible settings,
# see http://wiki.apache.org/hadoop/PerformanceTuning
export HBASE_OPTS="-XX:+UseConcMarkSweepGC"

# Uncomment one of the below three options to enable java garbage collection logging for the server-side processes.

# This enables basic gc logging to the .out file.
# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"

# This enables basic gc logging to its own file.
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"

# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"

# Uncomment one of the below three options to enable java garbage collection logging for the client processes.

# This enables basic gc logging to the .out file.
# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"

# This enables basic gc logging to its own file.
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"

# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"

# Uncomment below if you intend to use the EXPERIMENTAL off heap cache.
# export HBASE_OPTS="$HBASE_OPTS -XX:MaxDirectMemorySize="
# Set hbase.offheapcache.percentage in hbase-site.xml to a nonzero value.

# Uncomment and adjust to enable JMX exporting
# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
#
# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101"
# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102"
# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
# export HBASE_REST_OPTS="$HBASE_REST_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10105"

# File naming hosts on which HRegionServers will run.  $HBASE_HOME/conf/regionservers by default.
# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers

# Uncomment and adjust to keep all the Region Server pages mapped to be memory resident
#HBASE_REGIONSERVER_MLOCK=true
#HBASE_REGIONSERVER_UID="hbase"

# File naming hosts on which backup HMaster will run.  $HBASE_HOME/conf/backup-masters by default.
# export HBASE_BACKUP_MASTERS=${HBASE_HOME}/conf/backup-masters

# Extra ssh options.  Empty by default.
# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"

# Where log files are stored.  $HBASE_HOME/logs by default.
# export HBASE_LOG_DIR=${HBASE_HOME}/logs

# Enable remote JDWP debugging of major HBase processes. Meant for Core Developers
# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070"
# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071"
# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072"
# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8073"

# A string representing this instance of hbase. $USER by default.
# export HBASE_IDENT_STRING=$USER

# The scheduling priority for daemon processes.  See 'man nice'.
# export HBASE_NICENESS=10

# The directory where pid files are stored. /tmp by default.
# export HBASE_PID_DIR=/var/hadoop/pids

# Seconds to sleep between slave commands.  Unset by default.  This
# can be useful in large clusters, where, e.g., slave rsyncs can
# otherwise arrive faster than the master can service them.
# export HBASE_SLAVE_SLEEP=0.1

# Tell HBase whether it should manage it's own instance of Zookeeper or not.
export HBASE_MANAGES_ZK=false
# When this is false, HBase uses the standalone ZooKeeper ensemble deployed earlier; set it to true to use the ZooKeeper instance bundled with HBase.

# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the
# RFA appender. Please refer to the log4j.properties file to see more details on this appender.
# In case one needs to do log rolling on a date change, one should set the environment property
# HBASE_ROOT_LOGGER to "<DESIRED_LOG LEVEL>,DRFA".
# For example:
# HBASE_ROOT_LOGGER=INFO,DRFA
# The reason for changing default to RFA is to avoid the boundary case of filling out disk space as
# DRFA doesn't put any cap on the log size. Please refer to HBase-5655 for more context.
root@m1:/home/hadoop/hbase-0.96.2-hadoop2/conf# vi hbase-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
  <property>
    <!-- The directory shared by region servers, where HBase persists its data.
         The URL must be fully qualified and include the filesystem scheme. -->
    <name>hbase.rootdir</name>
    <value>hdfs://mycluster/hbase</value><!-- This must match the nameservice configured in Hadoop's core-site.xml -->
  </property>
  <property>
    <!-- HBase run mode: false is standalone, true is distributed. When false,
         HBase and ZooKeeper run in the same JVM. Default: false -->
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.tmp.dir</name>
    <value>/home/hadoop/hbase-0.96.2-hadoop2/tmp</value>
  </property>
  <property>
    <!-- This is intentional: only the port is configured here, so that multiple HMasters can be run -->
    <name>hbase.master</name>
    <value>60000</value>
  </property>
  <property>
    <!-- The ZooKeeper ensemble -->
    <name>hbase.zookeeper.quorum</name>
    <value>m1,m2,s1,s2</value>
  </property>
  <property>
    <!-- The ZooKeeper client port. If hbase.zookeeper.property.clientPort is not set,
         HBase assumes a default port, which may not be the port your ZooKeeper ensemble actually serves. -->
    <name>hbase.zookeeper.property.clientPort</name>
    <value>2181</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/home/hadoop/zookeeper-3.4.5/data</value>
  </property>
</configuration>
root@m1:/home/hadoop/hbase-0.96.2-hadoop2/conf# vi regionservers
s1
s2
root@m1:/home/hadoop/hbase-0.96.2-hadoop2/conf# ll
total 40
drwxr-xr-x 2 root root  4096 Jul 27 09:15 ./
drwxr-xr-x 9 root root  4096 Jul 20 21:40 ../
-rw-r--r-- 1 root staff 1026 Mar 25 06:29 hadoop-metrics2-hbase.properties
-rw-r--r-- 1 root staff 4023 Mar 25 06:29 hbase-env.cmd
-rw-r--r-- 1 root staff 7129 Jul 27 08:58 hbase-env.sh
-rw-r--r-- 1 root staff 2257 Mar 25 06:29 hbase-policy.xml
-rw-r--r-- 1 root staff 2550 Jul 27 09:10 hbase-site.xml
-rw-r--r-- 1 root staff 3451 Mar 25 06:29 log4j.properties
-rw-r--r-- 1 root staff    6 Jul 20 21:38 regionservers
root@m1:/home/hadoop/hbase-0.96.2-hadoop2/conf# ln -s /home/hadoop/hadoop-2.2.0/etc/hadoop/hdfs-site.xml hdfs-site.xml
root@m1:/home/hadoop/hbase-0.96.2-hadoop2/conf# ll
total 40
drwxr-xr-x 2 root root  4096 Jul 27 09:16 ./
drwxr-xr-x 9 root root  4096 Jul 20 21:40 ../
-rw-r--r-- 1 root staff 1026 Mar 25 06:29 hadoop-metrics2-hbase.properties
-rw-r--r-- 1 root staff 4023 Mar 25 06:29 hbase-env.cmd
-rw-r--r-- 1 root staff 7129 Jul 27 08:58 hbase-env.sh
-rw-r--r-- 1 root staff 2257 Mar 25 06:29 hbase-policy.xml
-rw-r--r-- 1 root staff 2550 Jul 27 09:10 hbase-site.xml
lrwxrwxrwx 1 root root    50 Jul 27 09:16 hdfs-site.xml -> /home/hadoop/hadoop-2.2.0/etc/hadoop/hdfs-site.xml*
-rw-r--r-- 1 root staff 3451 Mar 25 06:29 log4j.properties
-rw-r--r-- 1 root staff    6 Jul 20 21:38 regionservers
root@m1:/home/hadoop/hbase-0.96.2-hadoop2/conf#
root@m1:/home/hadoop/hbase-0.96.2-hadoop2/lib# ls | grep hadoop
hadoop-annotations-2.2.0.jar
hadoop-auth-2.2.0.jar
hadoop-client-2.2.0.jar
hadoop-common-2.2.0.jar
hadoop-hdfs-2.2.0.jar
hadoop-hdfs-2.2.0-tests.jar
hadoop-mapreduce-client-app-2.2.0.jar
hadoop-mapreduce-client-common-2.2.0.jar
hadoop-mapreduce-client-core-2.2.0.jar
hadoop-mapreduce-client-jobclient-2.2.0.jar
hadoop-mapreduce-client-jobclient-2.2.0-tests.jar
hadoop-mapreduce-client-shuffle-2.2.0.jar
hadoop-yarn-api-2.2.0.jar
hadoop-yarn-client-2.2.0.jar
hadoop-yarn-common-2.2.0.jar
hadoop-yarn-server-common-2.2.0.jar
hadoop-yarn-server-nodemanager-2.2.0.jar
hbase-client-0.96.2-hadoop2.jar
hbase-common-0.96.2-hadoop2.jar
hbase-common-0.96.2-hadoop2-tests.jar
hbase-examples-0.96.2-hadoop2.jar
hbase-hadoop2-compat-0.96.2-hadoop2.jar
hbase-hadoop-compat-0.96.2-hadoop2.jar
hbase-it-0.96.2-hadoop2.jar
hbase-it-0.96.2-hadoop2-tests.jar
hbase-prefix-tree-0.96.2-hadoop2.jar
hbase-protocol-0.96.2-hadoop2.jar
hbase-server-0.96.2-hadoop2.jar
hbase-server-0.96.2-hadoop2-tests.jar
hbase-shell-0.96.2-hadoop2.jar
hbase-testing-util-0.96.2-hadoop2.jar
hbase-thrift-0.96.2-hadoop2.jar
root@m1:/home/hadoop/hbase-0.96.2-hadoop2/lib#
root@m1:/home/hadoop/hbase-0.96.2-hadoop2/lib# scp -r /home/hadoop/hbase-0.96.2-hadoop2 root@m2:/home/hadoop
root@m1:/home/hadoop/hbase-0.96.2-hadoop2/lib# scp -r /home/hadoop/hbase-0.96.2-hadoop2 root@s1:/home/hadoop
root@m1:/home/hadoop/hbase-0.96.2-hadoop2/lib# scp -r /home/hadoop/hbase-0.96.2-hadoop2 root@s2:/home/hadoop
root@m1:/home/hadoop# /home/hadoop/hbase-0.96.2-hadoop2/bin/start-hbase.sh
starting master, logging to /home/hadoop/hbase-0.96.2-hadoop2/bin/../logs/hbase-root-master-m1.out
s1: starting regionserver, logging to /home/hadoop/hbase-0.96.2-hadoop2/bin/../logs/hbase-root-regionserver-s1.out
s2: starting regionserver, logging to /home/hadoop/hbase-0.96.2-hadoop2/bin/../logs/hbase-root-regionserver-s2.out
root@m1:/home/hadoop# jps
6688 NameNode
7540 HMaster
2884 JournalNode
4375 DFSZKFailoverController
2553 QuorumPeerMain
7769 Jps
4075 ResourceManager
root@m1:/home/hadoop#

root@m1:/home/hadoop# /home/hadoop/hbase-0.96.2-hadoop2/bin/hbase shell
2014-07-27 09:31:07,601 INFO [main] Configuration.deprecation: hadoop.native.lib is deprecated. Instead, use io.native.lib.available
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 0.96.2-hadoop2, r1581096, Mon Mar 24 16:03:18 PDT 2014

hbase(main):001:0> list
TABLE
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/home/hadoop/hbase-0.96.2-hadoop2/lib/slf4j-log4j12-1.6.4.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
0 row(s) in 2.8030 seconds

=> []
hbase(main):002:0> version
0.96.2-hadoop2, r1581096, Mon Mar 24 16:03:18 PDT 2014
hbase(main):003:0> status
2 servers, 0 dead, 1.0000 average load
hbase(main):004:0> create 'test_idoall_org','uid','name'
0 row(s) in 0.5800 seconds

=> Hbase::Table - test_idoall_org
hbase(main):005:0> list
TABLE
test_idoall_org
1 row(s) in 0.0320 seconds

=> ["test_idoall_org"]
hbase(main):006:0> put 'test_idoall_org','10086','name:idoall','idoallvalue'
0 row(s) in 0.1090 seconds

hbase(main):009:0> get 'test_idoall_org','10086'
COLUMN                CELL
 name:idoall          timestamp=1406424831473, value=idoallvalue
1 row(s) in 0.0450 seconds
hbase(main):010:0> scan 'test_idoall_org'
ROW                   COLUMN+CELL
 10086                column=name:idoall, timestamp=1406424831473, value=idoallvalue
1 row(s) in 0.0620 seconds
hbase(main):011:0>
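If you want to clean the test table up afterwards, remember that an HBase table has to be disabled before it can be dropped:

hbase(main):011:0> disable 'test_idoall_org'
hbase(main):012:0> drop 'test_idoall_org'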
root@m2:/home/hadoop# /home/hadoop/hbase-0.96.2-hadoop2/bin/hbase-daemon.sh start master
starting master, logging to /home/hadoop/hbase-0.96.2-hadoop2/bin/../logs/hbase-root-master-m2.out
root@m2:/home/hadoop#



root@m1:/home/hadoop# jps
6688 NameNode
7540 HMaster
2884 JournalNode
8645 Jps
4375 DFSZKFailoverController
2553 QuorumPeerMain
4075 ResourceManager
root@m1:/home/hadoop# kill -9 7540
root@m1:/home/hadoop# jps
6688 NameNode
2884 JournalNode
4375 DFSZKFailoverController
2553 QuorumPeerMain
4075 ResourceManager
8655 HMaster
8719 Jps
root@m1:/home/hadoop#


root@m1:/home/hadoop# mysql -uroot -p123456
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 36
Server version: 5.5.22-0ubuntu1 (Ubuntu)

Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> grant all on *.* to 'root'@'%' identified by '123456' WITH GRANT OPTION;
Query OK, 0 rows affected (0.00 sec)

mysql> flush privileges;
Query OK, 0 rows affected (0.00 sec)

mysql> quit
Bye
root@m1:/home/hadoop/hive-0.13.1/conf# cp hive-env.sh.template hive-env.sh
root@m1:/home/hadoop/hive-0.13.1/conf# cp hive-default.xml.template hive-site.xml
root@m1:/home/hadoop/hive-0.13.1/conf# vi hive-env.sh
HADOOP_HOME=/home/hadoop/hadoop-2.2.0
root@m1:/home/hadoop/hive-0.13.1/conf# vi hive-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements. See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->

<configuration>
  <property>
    <!-- Hive's default data file location, usually an HDFS-writable path -->
    <name>hive.metastore.warehouse.dir</name>
    <value>hdfs://mycluster/user/hive/warehouse</value>
  </property>
  <property>
    <!-- HDFS path used to store the execution plans for the different map/reduce
         stages and the intermediate output of those stages -->
    <name>hive.exec.scratchdir</name>
    <value>hdfs://mycluster/user/hive/scratchdir</value>
  </property>
  <property>
    <!-- Directory for Hive's live query logs; if this value is empty,
         no live query logs are created -->
    <name>hive.querylog.location</name>
    <value>/home/hadoop/hive-0.13.1/logs</value>
  </property>
  <property>
    <!-- JDBC connection string; the default is jdbc:derby:;databaseName=metastore_db;create=true -->
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://m1:3306/hiveMeta?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <!-- JDBC driver; the default is org.apache.derby.jdbc.EmbeddedDriver -->
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>123456</value>
  </property>
  <property>
    <!-- When users define their own UDFs or SerDes, those plugin jars must be placed here; no default value. -->
    <!-- This is the critical part of the HBase integration, so be very careful: the value of
         hive.aux.jars.path must not contain any spaces, carriage returns or line breaks;
         write it all on a single line, or you will hit all kinds of errors. -->
    <name>hive.aux.jars.path</name>
    <value>file:///home/hadoop/hive-0.13.1/lib/hbase-hadoop-compat-0.96.2-hadoop2.jar,file:///home/hadoop/hive-0.13.1/lib/hbase-hadoop2-compat-0.96.2-hadoop2.jar,file:///home/hadoop/hive-0.13.1/lib/hive-hbase-handler-0.13.1.jar,file:///home/hadoop/hive-0.13.1/lib/protobuf-java-2.5.0.jar,file:///home/hadoop/hive-0.13.1/lib/hbase-client-0.96.2-hadoop2.jar,file:///home/hadoop/hive-0.13.1/lib/hbase-common-0.96.2-hadoop2.jar,file:///home/hadoop/hive-0.13.1/lib/hbase-protocol-0.96.2-hadoop2.jar,file:///home/hadoop/hive-0.13.1/lib/hbase-server-0.96.2-hadoop2.jar,file:///home/hadoop/hive-0.13.1/lib/zookeeper-3.4.5.jar,file:///home/hadoop/hive-0.13.1/lib/guava-11.0.2.jar,file:///home/hadoop/hive-0.13.1/lib/htrace-core-2.04.jar</value>
  </property>
  <property>
    <!-- The list of ZooKeeper servers to talk to; empty by default. This is only needed
         for read/write locks: leaving hive.zookeeper.quorum unset prevents concurrent
         Hive QL requests from executing and can lead to data anomalies. -->
    <name>hive.zookeeper.quorum</name>
    <value>m1,m2,s1,s2</value>
  </property>
</configuration>
root@m1:/home/hadoop# cp /home/hadoop/hbase-0.96.2-hadoop2/lib/protobuf-java-2.5.0.jar /home/hadoop/hive-0.13.1/lib
root@m1:/home/hadoop# cp /home/hadoop/hbase-0.96.2-hadoop2/lib/hbase-client-0.96.2-hadoop2.jar /home/hadoop/hive-0.13.1/lib
root@m1:/home/hadoop# cp /home/hadoop/hbase-0.96.2-hadoop2/lib/hbase-common-0.96.2-hadoop2.jar /home/hadoop/hive-0.13.1/lib
root@m1:/home/hadoop# cp /home/hadoop/hbase-0.96.2-hadoop2/lib/hbase-protocol-0.96.2-hadoop2.jar /home/hadoop/hive-0.13.1/lib
root@m1:/home/hadoop# cp /home/hadoop/hbase-0.96.2-hadoop2/lib/hbase-server-0.96.2-hadoop2.jar /home/hadoop/hive-0.13.1/lib
root@m1:/home/hadoop# cp /home/hadoop/hbase-0.96.2-hadoop2/lib/hbase-hadoop2-compat-0.96.2-hadoop2.jar /home/hadoop/hive-0.13.1/lib
root@m1:/home/hadoop# cp /home/hadoop/hbase-0.96.2-hadoop2/lib/hbase-hadoop-compat-0.96.2-hadoop2.jar /home/hadoop/hive-0.13.1/lib
root@m1:/home/hadoop# cp /home/hadoop/hbase-0.96.2-hadoop2/lib/htrace-core-2.04.jar /home/hadoop/hive-0.13.1/lib
root@m1:/home/hadoop# cp /home/hadoop/zookeeper-3.4.5/dist-maven/zookeeper-3.4.5.jar /home/hadoop/hive-0.13.1/lib
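One jar the transcript above does not copy is the MySQL JDBC driver that javax.jdo.option.ConnectionDriverName relies on; Hive cannot reach the MySQL metastore without it on its classpath. A sketch of the step, where the connector's location and version are assumptions to adapt to your system:

root@m1:/home/hadoop# cp /usr/share/java/mysql-connector-java-5.1.x-bin.jar /home/hadoop/hive-0.13.1/lib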
root@m1:/home/hadoop/hive-0.13.1/conf# vi /home/hadoop/hive-0.13.1/testdata001.dat
12306,mname,yname
10086,myidoall,youidoall

/home/hadoop/hadoop-2.2.0/bin/hadoop fs -mkdir -p /user/hive/warehouse
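To confirm the test file and the warehouse directory are in place before moving on (a quick check, not from the original run):

root@m1:/home/hadoop# cat /home/hadoop/hive-0.13.1/testdata001.dat
root@m1:/home/hadoop# /home/hadoop/hadoop-2.2.0/bin/hadoop fs -ls /user/hive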
root@m1:/home/hadoop# /home/hadoop/hive-0.13.1/bin/hive
14/07/27 11:17:35 INFO Configuration.deprecation: mapred.reduce.tasks is deprecated. Instead, use mapreduce.job.reduces
14/07/27 11:17:35 INFO Configuration.deprecation: mapred.min.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize
14/07/27 11:17:35 INFO Configuration.deprecation: mapred.reduce.tasks.speculative.execution is deprecated. Instead, use mapreduce.reduce.speculative
14/07/27 11:17:35 INFO Configuration.deprecation: mapred.min.split.size.per.node is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.node
14/07/27 11:17:35 INFO Configuration.deprecation: mapred.input.dir.recursive is deprecated. Instead, use mapreduce.input.fileinputformat.input.dir.recursive
14/07/27 11:17:35 INFO Configuration.deprecation: mapred.min.split.size.per.rack is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.rack
14/07/27 11:17:35 INFO Configuration.deprecation: mapred.max.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.maxsize
14/07/27 11:17:35 INFO Configuration.deprecation: mapred.committer.job.setup.cleanup.needed is deprecated. Instead, use mapreduce.job.committer.setup.cleanup.needed
Logging initialized using configuration in jar:file:/home/hadoop/hive-0.13.1/lib/hive-common-0.13.1.jar!/hive-log4j.properties
hive> show databases;
OK
default
Time taken: 0.464 seconds, Fetched: 1 row(s)
hive> create database testidoall;
OK
Time taken: 0.279 seconds
hive> show databases;
OK
default
testidoall
Time taken: 0.021 seconds, Fetched: 2 row(s)
hive> use testidoall;
OK
Time taken: 0.039 seconds
hive> create external table testtable(uid int,myname string,youname string) row format delimited fields terminated by ',' location '/user/hive/warehouse/testtable';
OK
Time taken: 0.205 seconds
hive> LOAD DATA LOCAL INPATH '/home/hadoop/hive-0.13.1/testdata001.dat' OVERWRITE INTO TABLE testtable;
Copying data from file:/home/hadoop/hive-0.13.1/testdata001.dat
Copying file: file:/home/hadoop/hive-0.13.1/testdata001.dat
Loading data to table testidoall.testtable
rmr: DEPRECATED: Please use 'rm -r' instead.
Deleted hdfs://mycluster/user/hive/warehouse/testtable
Table testidoall.testtable stats: [numFiles=0, numRows=0, totalSize=0, rawDataSize=0]
OK
Time taken: 0.77 seconds
hive> select * from testtable;
OK
12306    mname      yname
10086    myidoall   youidoall
Time taken: 0.279 seconds, Fetched: 2 row(s)
hive>
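To see where the external table's data actually lives, ask Hive to describe it (a quick check; output omitted here):

hive> describe formatted testtable;

The Location field should point at hdfs://mycluster/user/hive/warehouse/testtable, matching the LOCATION clause used above.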
root@m1:/home/hadoop# /home/hadoop/hive-0.13.1/bin/hive
14/07/27 11:33:53 INFO Configuration.deprecation: mapred.reduce.tasks is deprecated. Instead, use mapreduce.job.reduces
14/07/27 11:33:53 INFO Configuration.deprecation: mapred.min.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize
14/07/27 11:33:53 INFO Configuration.deprecation: mapred.reduce.tasks.speculative.execution is deprecated. Instead, use mapreduce.reduce.speculative
14/07/27 11:33:53 INFO Configuration.deprecation: mapred.min.split.size.per.node is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.node
14/07/27 11:33:53 INFO Configuration.deprecation: mapred.input.dir.recursive is deprecated. Instead, use mapreduce.input.fileinputformat.input.dir.recursive
14/07/27 11:33:53 INFO Configuration.deprecation: mapred.min.split.size.per.rack is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.rack
14/07/27 11:33:53 INFO Configuration.deprecation: mapred.max.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.maxsize
14/07/27 11:33:53 INFO Configuration.deprecation: mapred.committer.job.setup.cleanup.needed is deprecated. Instead, use mapreduce.job.committer.setup.cleanup.needed
Logging initialized using configuration in jar:file:/home/hadoop/hive-0.13.1/lib/hive-common-0.13.1.jar!/hive-log4j.properties
hive> show databases;
OK
default
testidoall
Time taken: 0.45 seconds, Fetched: 2 row(s)
hive> use testidoall;
OK
Time taken: 0.021 seconds
hive> show tables;
OK
testtable
Time taken: 0.032 seconds, Fetched: 1 row(s)
hive> CREATE TABLE hive2hbase_idoall(key int, value string) STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf1:val") TBLPROPERTIES ("hbase.table.name" = "hive2hbase_idoall");
OK
Time taken: 2.332 seconds
hive> show tables;
OK
hive2hbase_idoall
testtable
Time taken: 0.036 seconds, Fetched: 2 row(s)
hive>
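In the column mapping, ":key" binds Hive's key column to the HBase row key, and "cf1:val" binds the value column to the val qualifier in column family cf1. The resulting schema can be inspected afterwards (a quick check; output omitted):

hive> describe hive2hbase_idoall;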
hive> create table hive2hbase_idoall_middle(foo int,bar string) row format delimited fields terminated by ',';
OK
Time taken: 0.086 seconds
hive> show tables;
OK
hive2hbase_idoall
hive2hbase_idoall_middle
testtable
Time taken: 0.03 seconds, Fetched: 3 row(s)
hive> load data local inpath '/home/hadoop/hive-0.13.1/testdata001.dat' overwrite into table hive2hbase_idoall_middle;
Copying data from file:/home/hadoop/hive-0.13.1/testdata001.dat
Copying file: file:/home/hadoop/hive-0.13.1/testdata001.dat
Loading data to table testidoall.hive2hbase_idoall_middle
rmr: DEPRECATED: Please use 'rm -r' instead.
Deleted hdfs://mycluster/user/hive/warehouse/testidoall.db/hive2hbase_idoall_middle
Table testidoall.hive2hbase_idoall_middle stats: [numFiles=1, numRows=0, totalSize=43, rawDataSize=0]
OK
Time taken: 0.683 seconds
hive>

Note that testdata001.dat contains three comma-separated fields per line, while the middle table declares only two columns (foo, bar); Hive silently drops the third field on load.
hive> insert overwrite table hive2hbase_idoall select * from hive2hbase_idoall_middle;
Total jobs = 1
Launching Job 1 out of 1
Number of reduce tasks is set to 0 since there's no reduce operator
Starting Job = job_1406394452186_0002, Tracking URL = http://m1:8088/proxy/application_1406394452186_0002/
Kill Command = /home/hadoop/hadoop-2.2.0/bin/hadoop job -kill job_1406394452186_0002
Hadoop job information for Stage-0: number of mappers: 1; number of reducers: 0
2014-07-27 11:44:11,491 Stage-0 map = 0%, reduce = 0%
2014-07-27 11:44:22,684 Stage-0 map = 100%, reduce = 0%, Cumulative CPU 1.51 sec
MapReduce Total cumulative CPU time: 1 seconds 510 msec
Ended Job = job_1406394452186_0002
MapReduce Jobs Launched:
Job 0: Map: 1 Cumulative CPU: 1.51 sec HDFS Read: 288 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 1 seconds 510 msec
OK
Time taken: 25.613 seconds
hive> select * from hive2hbase_idoall;
OK
10086    myidoall
12306    mname
Time taken: 0.179 seconds, Fetched: 2 row(s)
hive> select * from hive2hbase_idoall_middle;
OK
12306    mname
10086    myidoall
Time taken: 0.088 seconds, Fetched: 2 row(s)
hive>

The HBase-backed table returns 10086 before 12306 because HBase stores rows sorted lexicographically by row key, while the plain Hive table returns rows in file order.
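A point lookup against the HBase-backed table also works from Hive (a sketch, not from the original session; key is an int in this table definition):

hive> select value from hive2hbase_idoall where key = 12306;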
root@m1:/home/hadoop# /home/hadoop/hbase-0.96.2-hadoop2/bin/hbase shell
2014-07-27 11:47:14,454 INFO [main] Configuration.deprecation: hadoop.native.lib is deprecated. Instead, use io.native.lib.available
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 0.96.2-hadoop2, r1581096, Mon Mar 24 16:03:18 PDT 2014

hbase(main):001:0> list
TABLE
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/home/hadoop/hbase-0.96.2-hadoop2/lib/slf4j-log4j12-1.6.4.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
hive2hbase_idoall
test_idoall_org
2 row(s) in 2.9480 seconds

=> ["hive2hbase_idoall", "test_idoall_org"]
hbase(main):002:0> scan "hive2hbase_idoall"
ROW                  COLUMN+CELL
 10086               column=cf1:val, timestamp=1406432660860, value=myidoall
 12306               column=cf1:val, timestamp=1406432660860, value=mname
2 row(s) in 0.0540 seconds

hbase(main):003:0> get "hive2hbase_idoall",'12306'
COLUMN               CELL
 cf1:val             timestamp=1406432660860, value=mname
1 row(s) in 0.0110 seconds

hbase(main):004:0>
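The row count can be double-checked directly in the shell as well (a quick check, not from the original session):

hbase(main):004:0> count 'hive2hbase_idoall'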
root@m1:/home/hadoop# /home/hadoop/hbase-0.96.2-hadoop2/bin/hbase shell
2014-07-27 11:54:25,844 INFO [main] Configuration.deprecation: hadoop.native.lib is deprecated. Instead, use io.native.lib.available
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 0.96.2-hadoop2, r1581096, Mon Mar 24 16:03:18 PDT 2014

hbase(main):001:0> create 'hbase2hive_idoall','gid','info'
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/home/hadoop/hbase-0.96.2-hadoop2/lib/slf4j-log4j12-1.6.4.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/home/hadoop/hadoop-2.2.0/share/hadoop/common/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
0 row(s) in 3.4970 seconds

=> Hbase::Table - hbase2hive_idoall
hbase(main):002:0> put 'hbase2hive_idoall','3344520','info:time','20140704'
0 row(s) in 0.1020 seconds

hbase(main):003:0> put 'hbase2hive_idoall','3344520','info:address','HK'
0 row(s) in 0.0090 seconds

hbase(main):004:0> scan 'hbase2hive_idoall'
ROW                  COLUMN+CELL
 3344520             column=info:address, timestamp=1406433302317, value=HK
 3344520             column=info:time, timestamp=1406433297567, value=20140704
1 row(s) in 0.0330 seconds

hbase(main):005:0>
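A single-row fetch shows the same two cells (a quick check, not from the original session):

hbase(main):005:0> get 'hbase2hive_idoall','3344520'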
root@m1:/home/hadoop# /home/hadoop/hive-0.13.1/bin/hive
14/07/27 11:57:20 INFO Configuration.deprecation: mapred.reduce.tasks is deprecated. Instead, use mapreduce.job.reduces
14/07/27 11:57:20 INFO Configuration.deprecation: mapred.min.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize
14/07/27 11:57:20 INFO Configuration.deprecation: mapred.reduce.tasks.speculative.execution is deprecated. Instead, use mapreduce.reduce.speculative
14/07/27 11:57:20 INFO Configuration.deprecation: mapred.min.split.size.per.node is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.node
14/07/27 11:57:20 INFO Configuration.deprecation: mapred.input.dir.recursive is deprecated. Instead, use mapreduce.input.fileinputformat.input.dir.recursive
14/07/27 11:57:20 INFO Configuration.deprecation: mapred.min.split.size.per.rack is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.rack
14/07/27 11:57:20 INFO Configuration.deprecation: mapred.max.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.maxsize
14/07/27 11:57:20 INFO Configuration.deprecation: mapred.committer.job.setup.cleanup.needed is deprecated. Instead, use mapreduce.job.committer.setup.cleanup.needed
Logging initialized using configuration in jar:file:/home/hadoop/hive-0.13.1/lib/hive-common-0.13.1.jar!/hive-log4j.properties
hive> show databases;
OK
default
testidoall
Time taken: 0.449 seconds, Fetched: 2 row(s)
hive> use testidoall;
OK
Time taken: 0.02 seconds
hive> show tables;
OK
hive2hbase_idoall
hive2hbase_idoall_middle
testtable
Time taken: 0.026 seconds, Fetched: 3 row(s)
hive> create external table hbase2hive_idoall (key string, gid map<string,string>) STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' WITH SERDEPROPERTIES ("hbase.columns.mapping" = "info:") TBLPROPERTIES ("hbase.table.name" = "hbase2hive_idoall");
OK
Time taken: 1.696 seconds
hive> show tables;
OK
hbase2hive_idoall
hive2hbase_idoall
hive2hbase_idoall_middle
testtable
Time taken: 0.034 seconds, Fetched: 4 row(s)
hive> select * from hbase2hive_idoall;
OK
3344520    {"address":"HK","time":"20140704"}
Time taken: 0.701 seconds, Fetched: 1 row(s)
hive>
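Since the whole info family is mapped to the gid map column, individual qualifiers can be pulled out with map indexing (a sketch, not from the original session):

hive> select key, gid['address'], gid['time'] from hbase2hive_idoall;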
If anything goes wrong along the way, DEBUG output on the console makes the root cause much easier to find. For the Hadoop side:

root@m1:/home/hadoop# export HADOOP_ROOT_LOGGER=DEBUG,console
And for Hive itself, start the CLI with its root logger set to DEBUG on the console:

root@m1:/home/hadoop# /home/hadoop/hive-0.13.1/bin/hive --hiveconf hive.root.logger=DEBUG,console
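Once debugging is done, the Hadoop logger can be put back in the same shell (a sketch; INFO,console is the usual default):

root@m1:/home/hadoop# export HADOOP_ROOT_LOGGER=INFO,console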
If the MySQL instance backing the metastore gets into a broken state, it can be removed completely and reinstalled (note that this deletes all existing MySQL data):

rm /var/lib/mysql/ -R
rm /etc/mysql/ -R
apt-get autoremove mysql* --purge
apt-get remove apparmor
apt-get install mysql-server mysql-client mysql-common
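After the reinstall, confirm the server answers before pointing Hive at it again (a quick check; the password is whatever was chosen during installation):

root@m1:/home/hadoop# mysql -uroot -p -e "show databases;"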
If apt itself gets stuck on corrupted dpkg update files during any of the installs above, clearing them and updating again usually recovers it:

sudo rm /var/lib/dpkg/updates/*
sudo apt-get update
sudo apt-get upgrade