# ================================================================== Install Sqoop

# NOTE(review): tar extracts into the current directory — assumes this is run from ~
tar -zxvf ~/sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz
mv ~/sqoop-1.4.7.bin__hadoop-2.6.0 /usr/local/sqoop-1.4.7

# Environment variables

# ================================================================== node1

vi /etc/profile

# Add the following below the line: export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL

export JAVA_HOME=/usr/java/jdk1.8.0_111
export JRE_HOME=/usr/java/jdk1.8.0_111/jre
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0
export STORM_HOME=/usr/local/storm-1.1.0
export REDIS_HOME=/usr/local/redis-4.0.2
export ERLANG_HOME=/usr/local/erlang
export RABBITMQ_HOME=/usr/local/rabbitmq_server-3.7.5
export MONGODB_HOME=/usr/local/mongodb-3.4.5
export NGINX_HOME=/usr/local/nginx
export CATALINA_BASE=/usr/local/tomcat
export CATALINA_HOME=/usr/local/tomcat
export TOMCAT_HOME=/usr/local/tomcat
export KEEPALIVED_HOME=/usr/local/keepalived
export ELASTICSEARCH_HOME=/usr/local/elasticsearch-6.2.4
export LOGSTASH_HOME=/usr/local/logstash-6.2.4
export KIBANA_HOME=/usr/local/kibana-6.2.4
export SQOOP_HOME=/usr/local/sqoop-1.4.7

# NOTE: the following three exports were fused onto two lines in the original,
# which would have exported a variable literally named "export".
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$STORM_HOME/bin:$REDIS_HOME/bin:$ERLANG_HOME/bin:$RABBITMQ_HOME/ebin:$RABBITMQ_HOME/sbin:$MONGODB_HOME/bin:$NGINX_HOME/sbin:$CATALINA_HOME/bin:$KEEPALIVED_HOME/sbin:$ELASTICSEARCH_HOME/bin:$LOGSTASH_HOME/bin:$KIBANA_HOME/bin:$SQOOP_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
# Reload the environment variables
source /etc/profile

# Verify the configuration
echo $SQOOP_HOME

# Create sqoop-env.sh from the shipped template, then edit it
cp -a $SQOOP_HOME/conf/sqoop-env-template.sh $SQOOP_HOME/conf/sqoop-env.sh
vi $SQOOP_HOME/conf/sqoop-env.sh

# Set path to where bin/hadoop is available
export HADOOP_COMMON_HOME=${HADOOP_HOME}
# Set path to where hadoop-*-core.jar is available
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
# Set the path to where bin/hbase is available
#export HBASE_HOME=${HBASE_HOME}
# Set the path to where bin/hive is available
export HIVE_HOME=${HIVE_HOME}
# Set the path for where zookeper config dir is
#export ZOOCFGDIR=

# Copy the MySQL JDBC driver and the Hive exec jar into Sqoop's lib directory.
# NOTE: in the original text the mysql-connector cp command was swallowed
# into the "#export ZOOCFGDIR=" comment and would never have run.
cp -a ~/mysql-connector-java-5.1.46.jar $SQOOP_HOME/lib/
cp -a $HIVE_HOME/lib/hive-exec-2.1.1.jar $SQOOP_HOME/lib/

# Verify the installation
sqoop-version
# Create the test database, the sqoop user, and the source tables as root
mysql -u root -p

> create database sqooptest character set utf8 ;
> create user 'sqoop'@'%' identified by 'Sqoop-123';
> grant all privileges on *.* to 'sqoop'@'%';
> flush privileges;
> show databases;
> quit;

# Log in to MySQL as user sqoop

mysql -u sqoop -p

Enter password: Sqoop-123

> use sqooptest;

> create table emp(id INT NOT NULL PRIMARY KEY, name VARCHAR(20), age INT);

> insert into emp(id, name, age)values(1, 'zhangsan', 11);
insert into emp(id, name, age)values(2, 'lisi', 12);
insert into emp(id, name, age)values(3, '王五', 13);

> create table emp_add(id INT NOT NULL PRIMARY KEY, name VARCHAR(20), age INT, sex VARCHAR(20));

> insert into emp_add(id, name, age, sex)values(1, 'zhangsan', 11, '男');
insert into emp_add(id, name, age, sex)values(2, 'lisi', 12, '男');
insert into emp_add(id, name, age, sex)values(3, '王五', 13, '女');
insert into emp_add(id, name, age, sex)values(4,'liuliu', 11, '男');

> show tables;
> select * from emp;
select * from emp_add;
> quit;

# Start Hadoop

# ================================================================== node1 node2 node3
zkServer.sh start

# ================================================================== node1
# Start all Hadoop daemons
$HADOOP_HOME/sbin/start-all.sh
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ================================================================== node2
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# Check whether HDFS is in safe mode
hadoop dfsadmin -safemode get
# Force-leave safe mode if needed
# hadoop dfsadmin -safemode leave

# Web UIs
# http://node1:50070?user.name=hadoop
# http://node2:50070?user.name=hadoop
# http://node1:8088/cluster/nodes?user.name=hadoop

# Import table data into HDFS (default target dir: /user/<user>/<table>)

# ================================================================== node1

sqoop import \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop \
--password Sqoop-123 \
--table emp \
--m 1

# If the error below is reported, adjust $HADOOP_HOME/etc/hadoop/yarn-site.xml
# ERROR tool.ImportTool: Import failed: java.io.IOException: org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException:
# Invalid resource request, requested memory < 0, or requested memory > max configured, requestedMemory=1536, maxMemory=1024

# ================================================================== node1 node2 node3
vi $HADOOP_HOME/etc/hadoop/yarn-site.xml

<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>2048</value>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>2048</value>
</property>

# Restart YARN

# ================================================================== node1
$HADOOP_HOME/sbin/start-yarn.sh

# Import table data into HDFS
sqoop import \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop \
--password Sqoop-123 \
--table emp \
--m 1

# Inspect the imported data
hadoop fs -ls /user/root/emp
hadoop fs -cat /user/root/emp/part-m-00000

# Import a table into a specific HDFS directory
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--target-dir /sqooptest/table_emp/queryresult \
--table emp --num-mappers 1

# Inspect the imported data
hadoop fs -ls /sqooptest/table_emp/queryresult
hadoop fs -cat /sqooptest/table_emp/queryresult/part-m-00000

# Import a relational table into Hive

sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp --hive-import \
--num-mappers 1

# If the error below is reported, delete /user/root/emp first

# ERROR tool.ImportTool: Import failed: org.apache.hadoop.mapred.FileAlreadyExistsException: Output directory hdfs://appcluster/user/root/emp already exists

# (-rm -r replaces the deprecated -rmr form)
hadoop fs -rm -r /user/root/emp

# Re-run the Hive import

sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp --hive-import \
--num-mappers 1

# Start Hive

hive
# Inspect the imported data (in the hive shell)
> show tables;
> select * from emp;

# 导入表到HIVE指定库指定表

sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp \
--delete-target-dir \
--fields-terminated-by '\t' \
--hive-import \
--hive-database sqooptest \
--hive-table hive_emp \
--num-mappers 1 

# 报错 hive的库sqooptest,必须先建立。否则会报:FAILED: SemanticException [Error 10072]: Database does not exist: sqooptest

> create database sqooptest;

> show databases;

# 导入表到HIVE指定库指定表

sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp \
--delete-target-dir \
--fields-terminated-by '\t' \
--hive-import \
--hive-database sqooptest \
--hive-table hive_emp \
--num-mappers 1 > use sqooptest; > show tables; > select * from hive_emp;

# Import a subset of table data — WHERE-clause import

sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp_add \
--where "age =11" \
--target-dir /sqooptest/table_emp/queryresult2 \
--num-mappers 1

# Inspect the imported data
hadoop fs -ls /sqooptest/table_emp/queryresult2
hadoop fs -cat /sqooptest/table_emp/queryresult2/part-m-00000

# Free-form query import ($CONDITIONS placeholder is required by Sqoop;
# single quotes keep the shell from expanding it)

sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--query 'select id,name,age from emp WHERE id>=2 and $CONDITIONS' \
--split-by id \
--fields-terminated-by '\t' \
--target-dir /sqooptest/table_emp/queryresult3 \
--num-mappers 1

# Inspect the imported data
hadoop fs -cat /sqooptest/table_emp/queryresult3/part-m-00000

# Incremental import — append rows whose id is greater than --last-value

sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp \
--incremental append \
--check-column id \
--last-value 2 \
--fields-terminated-by '\t' \
--target-dir /sqooptest/table_emp/queryresult4 \
--num-mappers 1

# Inspect the imported data
hadoop fs -ls /sqooptest/table_emp/queryresult4/
hadoop fs -cat /sqooptest/table_emp/queryresult4/part-m-00000

# Exporting data with Sqoop (HDFS -> MySQL)

# Inspect the HDFS source data

hadoop fs -ls /sqooptest/table_emp/queryresult
hadoop fs -cat /sqooptest/table_emp/queryresult/part-m-00000

# 1. First, manually create the target table in MySQL

mysql -u sqoop -p

Enter password: Sqoop-123

> use sqooptest;

> CREATE TABLE employee(id INT NOT NULL PRIMARY KEY,name VARCHAR(20),age INT);

> show tables;

> select * from employee;

# 2. Then run the export command

sqoop export \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table employee \
--export-dir /sqooptest/table_emp/queryresult/

# Verify in the mysql command line
> select * from employee;
> quit;

shutdown -h now
# sqoop
# sqoop

hadoop生态搭建(3节点)-17.sqoop配置_单节点的更多相关文章

  1. hadoop生态搭建(3节点)-05.mysql配置_单节点

    # ==================================================================node1 # ======================== ...

  2. hadoop生态搭建(3节点)

    软件:CentOS-7    VMware12    SSHSecureShellClient shell工具:Xshell 规划 vm网络配置 01.基础配置 02.ssh配置 03.zookeep ...

  3. js 节点 document html css 表单节点操作

    js 节点 document html css 表单节点操作 节点操作:访问.属性.创建 (1)节点的访问:firstChild.lastChild.childNodes.parentChild(父子 ...

  4. hadoop生态搭建(3节点)-13.mongodb配置

    # 13.mongodb配置_副本集_认证授权# ==================================================================安装 mongod ...

  5. hadoop生态搭建(3节点)-12.rabbitmq配置

    # 安装 需要相关包# ==================================================================node1 node2 node3 yum ...

  6. hadoop生态搭建(3节点)-08.kafka配置

    如果之前没有安装jdk和zookeeper,安装了的请直接跳过 # https://www.oracle.com/technetwork/java/javase/downloads/java-arch ...

  7. hadoop生态搭建(3节点)-04.hadoop配置

    如果之前没有安装jdk和zookeeper,安装了的请直接跳过 # https://www.oracle.com/technetwork/java/javase/downloads/java-arch ...

  8. hadoop生态搭建(3节点)-10.spark配置

    # https://www.scala-lang.org/download/2.12.4.html# ================================================= ...

  9. hadoop生态搭建(3节点)-15.Nginx_Keepalived_Tomcat配置

    # Nginx+Tomcat搭建高可用服务器名称 预装软件 IP地址Nginx服务器 Nginx1 192.168.6.131Nginx服务器 Nginx2 192.168.6.132 # ===== ...

随机推荐

  1. SQL Server ->> 关于SQL Server Agent Job执行步骤时的用户上下文(User Context)问题

    这是最近项目相关和自己感兴趣的一个问题:SQL Server Agent Job有几种方法可以以特定用户上下文去执行任务步骤的? 这个事情需要分几种情况来说,因为对于不同类型的任务步骤,SQL Ser ...

  2. IT装B小技巧

    1.编写简单的关机脚本 新建一个文本文档,将代码复制上去,将后缀改成bat,双击运行 @echo off shutdown -s -t 2.语音播报 新建一个文本文档,将代码复制上去,将后缀改成vbs ...

  3. 【Leetcode】【Medium】Maximum Subarray

    Find the contiguous subarray within an array (containing at least one number) which has the largest ...

  4. 使用UIWebView中html标签显示富文本

    使用UIWebView中html标签显示富文本 用UIWebView来渲染文本并期望达到富文本的效果开销很大哦! Work 本人此处直接加载自定义字体"新蒂小丸子体",源码不公开, ...

  5. Redis设计与实现读后感

    看了一下时间,现在是2018年8月22日14:28,看完最后一页内容之后,我简短的停留了一下,任思绪翻飞. redis设计与实现大概看了有12天左右,12天前,我的心里很乱,整个人都处于一种焦虑不安的 ...

  6. 在ubuntu16.04上安装eclipse

     在ubuntu16.04上安装eclipse 一.下载     首先我们需要安装jdk1.8及其以上,然后从官网:https://www.eclipse.org/downloads/上下载,需要注意 ...

  7. Maven高级应用--编译全模块包-dist包

    1. 在需要生成dist包的模块级别,新建文件夹xxx-xxxx-dist 2. 进入目录,新建pom.xml,建议copy 3. dependencies节点,把要编译成全局包的应用引入进来 < ...

  8. ApiServer_YiChat apache项目布置过程

    1.复制文件到   /var/www/  文件夹下 2.配置项目目录 3.修改/var/www/api/public 文件夹下的隐藏文件  .htaccess     增加‘?’号 4.打开/etc/ ...

  9. linux 安装pip 和python3

    前言: python3应该是python的趋势所在,当然目前争议也比较大,这篇随笔的主要目的是记录在linux6.4下搭建python3环境的过程 以及碰到的问题和解决过程. 另外,如果本机安装了py ...

  10. nodejs+postgis实现搜周边

    利用nodejs搭建服务器,并连接PostgreSQL数据库,利用前端传过来的中心点坐标和搜索半径,进行空间查询,实现简单的搜周边,下面是实现流程和nodejs的代码: app.post('/tose ...