Hadoop Ecosystem Setup (3 Nodes) - 17. Sqoop Configuration (Single Node)
# ================================================================== Install Sqoop
tar -zxvf ~/sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz
mv ~/sqoop-1.4.7.bin__hadoop-2.6.0 /usr/local/sqoop-1.4.7
# Environment variables
# ==================================================================node1
vi /etc/profile

# Add the following below the line "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"
export JAVA_HOME=/usr/java/jdk1.8.0_111
export JRE_HOME=/usr/java/jdk1.8.0_111/jre
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0
export STORM_HOME=/usr/local/storm-1.1.0
export REDIS_HOME=/usr/local/redis-4.0.2
export ERLANG_HOME=/usr/local/erlang
export RABBITMQ_HOME=/usr/local/rabbitmq_server-3.7.5
export MONGODB_HOME=/usr/local/mongodb-3.4.5
export NGINX_HOME=/usr/local/nginx
export CATALINA_BASE=/usr/local/tomcat
export CATALINA_HOME=/usr/local/tomcat
export TOMCAT_HOME=/usr/local/tomcat
export KEEPALIVED_HOME=/usr/local/keepalived
export ELASTICSEARCH_HOME=/usr/local/elasticsearch-6.2.4
export LOGSTASH_HOME=/usr/local/logstash-6.2.4
export KIBANA_HOME=/usr/local/kibana-6.2.4
export SQOOP_HOME=/usr/local/sqoop-1.4.7

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$STORM_HOME/bin:$REDIS_HOME/bin:$ERLANG_HOME/bin:$RABBITMQ_HOME/ebin:$RABBITMQ_HOME/sbin:$MONGODB_HOME/bin:$NGINX_HOME/sbin:$CATALINA_HOME/bin:$KEEPALIVED_HOME/sbin:$ELASTICSEARCH_HOME/bin:$LOGSTASH_HOME/bin:$KIBANA_HOME/bin:$SQOOP_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
# Make the environment variables take effect
source /etc/profile

# Check the result
echo $SQOOP_HOME
cp -a $SQOOP_HOME/conf/sqoop-env-template.sh $SQOOP_HOME/conf/sqoop-env.sh
vi $SQOOP_HOME/conf/sqoop-env.sh

# Set path to where bin/hadoop is available
export HADOOP_COMMON_HOME=${HADOOP_HOME}

# Set path to where hadoop-*-core.jar is available
export HADOOP_MAPRED_HOME=${HADOOP_HOME}

# Set the path to where bin/hbase is available
#export HBASE_HOME=${HBASE_HOME}

# Set the path to where bin/hive is available
export HIVE_HOME=${HIVE_HOME}

# Set the path for where zookeeper config dir is
#export ZOOCFGDIR=

# Copy the MySQL JDBC driver and the Hive exec jar into Sqoop's lib directory
cp -a ~/mysql-connector-java-5.1.46.jar $SQOOP_HOME/lib/
cp -a $HIVE_HOME/lib/hive-exec-2.1.1.jar $SQOOP_HOME/lib/

# Verify the installation
sqoop-version
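# Optional sanity check (my own addition, not in the original steps): confirm both jars landed in Sqoop's lib directory.
ls $SQOOP_HOME/lib | grep -E 'mysql-connector|hive-exec'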
mysql -u root -p

> create database sqooptest character set utf8;
> create user 'sqoop'@'%' identified by 'Sqoop-123';
> grant all privileges on *.* to 'sqoop'@'%';
> flush privileges;
> show databases;
> quit;

# Log in to MySQL as user sqoop
mysql -u sqoop -p
Enter password: Sqoop-123

> use sqooptest;
> create table emp(id INT NOT NULL PRIMARY KEY, name VARCHAR(20), age INT);
> insert into emp(id, name, age) values(1, 'zhangsan', 11);
> insert into emp(id, name, age) values(2, 'lisi', 12);
> insert into emp(id, name, age) values(3, '王五', 13);
> create table emp_add(id INT NOT NULL PRIMARY KEY, name VARCHAR(20), age INT, sex VARCHAR(20));
> insert into emp_add(id, name, age, sex) values(1, 'zhangsan', 11, '男');
> insert into emp_add(id, name, age, sex) values(2, 'lisi', 12, '男');
> insert into emp_add(id, name, age, sex) values(3, '王五', 13, '女');
> insert into emp_add(id, name, age, sex) values(4, 'liuliu', 11, '男');
> show tables;
> select * from emp;
> select * from emp_add;
> quit;
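# Optional connectivity check (my own addition, not in the original steps): verify the JDBC URL and the new
# credentials with Sqoop before running any import. No MapReduce job is needed for these commands.
sqoop list-databases --connect jdbc:mysql://node1:3306/ --username sqoop --password Sqoop-123
sqoop list-tables --connect jdbc:mysql://node1:3306/sqooptest --username sqoop --password Sqoop-123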
# Start Hadoop
# ==================================================================node1 node2 node3
zkServer.sh start

# ==================================================================node1
# Start all Hadoop processes
$HADOOP_HOME/sbin/start-all.sh
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ==================================================================node2
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# Check safe mode status; force-leave it if necessary
hadoop dfsadmin -safemode get
# hadoop dfsadmin -safemode leave

# Web access
# http://node1:50070?user.name=hadoop
# http://node2:50070?user.name=hadoop
# http://node1:8088/cluster/nodes?user.name=hadoop
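# Optional check (my own addition): run jps on node1/node2/node3 and make sure the daemons your cluster is
# configured to run are up (e.g. NameNode, DataNode, ResourceManager, NodeManager, QuorumPeerMain,
# DFSZKFailoverController) before launching any Sqoop job.
jps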
# Import table data into HDFS
# ==================================================================node1
sqoop import \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop \
--password Sqoop-123 \
--table emp \
--m 1
# If the error below is reported, adjust $HADOOP_HOME/etc/hadoop/yarn-site.xml as follows
# ERROR tool.ImportTool: Import failed: java.io.IOException: org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException:
# Invalid resource request, requested memory < 0, or requested memory > max configured, requestedMemory=1536, maxMemory=1024
# ==================================================================node1 node2 node3
vi $HADOOP_HOME/etc/hadoop/yarn-site.xml

<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>2048</value>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>2048</value>
</property>
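# Instead of editing yarn-site.xml by hand on every node, the change can be pushed from node1 (a sketch, assuming
# passwordless SSH between the nodes, as set up earlier in this series, and identical HADOOP_HOME paths):
scp $HADOOP_HOME/etc/hadoop/yarn-site.xml node2:$HADOOP_HOME/etc/hadoop/
scp $HADOOP_HOME/etc/hadoop/yarn-site.xml node3:$HADOOP_HOME/etc/hadoop/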
# Restart YARN
# ==================================================================node1
$HADOOP_HOME/sbin/start-yarn.sh
# Import table data into HDFS
sqoop import \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop \
--password Sqoop-123 \
--table emp \
--m 1

# Check the imported data
hadoop fs -ls /user/root/emp
hadoop fs -cat /user/root/emp/part-m-00000

# Import a table into a specified HDFS directory
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--target-dir /sqooptest/table_emp/queryresult \
--table emp --num-mappers 1

# Check the imported data
hadoop fs -ls /sqooptest/table_emp/queryresult
hadoop fs -cat /sqooptest/table_emp/queryresult/part-m-00000
# Import a relational table into Hive
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp --hive-import \
--num-mappers 1
# If the following error is reported, delete /user/root/emp first
# ERROR tool.ImportTool: Import failed: org.apache.hadoop.mapred.FileAlreadyExistsException: Output directory hdfs://appcluster/user/root/emp already exists
hadoop fs -rmr /user/root/emp
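# (Note, my addition: -rmr is deprecated in Hadoop 2.x; hadoop fs -rm -r /user/root/emp is the current form.)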
# Re-run the import of the relational table into Hive
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp --hive-import \
--num-mappers 1
# Start Hive
hive
# Check the imported data
> show tables;
> select * from emp;
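# Optional check (my own addition): the Hive import also writes the data under Hive's warehouse directory on HDFS.
# This assumes the default warehouse location /user/hive/warehouse; the exact file names may differ.
hadoop fs -ls /user/hive/warehouse/emp
hadoop fs -cat /user/hive/warehouse/emp/*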
# Import a table into a specified Hive database and table
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp \
--delete-target-dir \
--fields-terminated-by '\t' \
--hive-import \
--hive-database sqooptest \
--hive-table hive_emp \
--num-mappers 1
# Error: the Hive database sqooptest must be created first, otherwise the import fails with:
# FAILED: SemanticException [Error 10072]: Database does not exist: sqooptest
# Create it in the Hive CLI:
> create database sqooptest;
> show databases;
# Import the table into the specified Hive database and table again
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp \
--delete-target-dir \
--fields-terminated-by '\t' \
--hive-import \
--hive-database sqooptest \
--hive-table hive_emp \
--num-mappers 1

# Verify in the Hive CLI
> use sqooptest;
> show tables;
> select * from hive_emp;
# Import a subset of table data with a WHERE clause
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp_add \
--where "age =11" \
--target-dir /sqooptest/table_emp/queryresult2 \
--num-mappers 1

# Check the imported data
hadoop fs -ls /sqooptest/table_emp/queryresult2
hadoop fs -cat /sqooptest/table_emp/queryresult2/part-m-00000
# Import on demand with a free-form query
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--query 'select id,name,age from emp WHERE id>=2 and $CONDITIONS' \
--split-by id \
--fields-terminated-by '\t' \
--target-dir /sqooptest/table_emp/queryresult3 \
--num-mappers 1

# Check the imported data
hadoop fs -cat /sqooptest/table_emp/queryresult3/part-m-00000
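# A sketch (my own addition): with more than one mapper, Sqoop uses --split-by to partition the free-form query
# across map tasks. The target directory name below is illustrative.
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--query 'select id,name,age from emp WHERE $CONDITIONS' \
--split-by id \
--fields-terminated-by '\t' \
--target-dir /sqooptest/table_emp/queryresult3_m2 \
--num-mappers 2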
# Incremental import
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp \
--incremental append \
--check-column id \
--last-value 2 \
--fields-terminated-by '\t' \
--target-dir /sqooptest/table_emp/queryresult4 \
--num-mappers 1

# Check the imported data
hadoop fs -ls /sqooptest/table_emp/queryresult4/
hadoop fs -cat /sqooptest/table_emp/queryresult4/part-m-00000
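# A sketch (my own addition): the incremental import can be saved as a Sqoop job, so the --last-value bookkeeping
# is tracked automatically between runs. The job name emp_incr is illustrative; by default --exec prompts for the
# database password again unless a password file or the metastore password option is configured.
sqoop job --create emp_incr -- import \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp \
--incremental append \
--check-column id \
--last-value 2 \
--fields-terminated-by '\t' \
--target-dir /sqooptest/table_emp/queryresult4 \
--num-mappers 1

sqoop job --exec emp_incr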
# Exporting data with Sqoop
# Check the HDFS data to be exported
hadoop fs -ls /sqooptest/table_emp/queryresult
hadoop fs -cat /sqooptest/table_emp/queryresult/part-m-00000
# 1. First, manually create the target table in MySQL
mysql -u sqoop -p
Enter password: Sqoop-123

> use sqooptest;
> CREATE TABLE employee(id INT NOT NULL PRIMARY KEY, name VARCHAR(20), age INT);
> show tables;
> select * from employee;
# 2. Then run the export command
sqoop export \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table employee \
--export-dir /sqooptest/table_emp/queryresult/

# Verify the table from the MySQL command line
> select * from employee;
> quit;
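# A sketch (my own addition): re-running the plain export fails on duplicate primary keys; with --update-key and
# --update-mode allowinsert the export becomes an upsert instead (allowinsert support depends on the connector;
# it works with the MySQL connector as far as I know).
sqoop export \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table employee \
--update-key id \
--update-mode allowinsert \
--export-dir /sqooptest/table_emp/queryresult/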
shutdown -h now