http://www.cnblogs.com/iLoveMyD/p/4281534.html

February 9, 2015, 14:36:38

# find <directory> -type f -name "*.c" | xargs grep "<strings>"

<directory> is the directory to search; if it is the current directory, it can be omitted.
-type f means match regular files only.
-name "*.c" limits the search to C source files so that binaries are skipped; omit it to search every file.
<strings> is the string you are looking for.

A real example: stopping the Hadoop daemons failed with an unexplained error, so the same find | xargs grep pattern was used to track down where the offending variables are referenced.

Stopping secondary namenodes [bigdata-server-02]
Last login: Thu Dec 21 17:18:39 CST 2017 on pts/0
ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAMES were defined. Aborting.
Stopping nodemanagers
Last login: Thu Dec 21 17:18:42 CST 2017 on pts/0
Stopping resourcemanager
Last login: Thu Dec 21 17:18:46 CST 2017 on pts/0
[root@bigdata-server-02 hadoop]# vim etc/hadoop/hadoop-env.sh
[root@bigdata-server-02 hadoop]# find . -type f | xargs grep HADOOP_WORKER
./sbin/workers.sh:#   HADOOP_WORKERS    File naming remote hosts.
./sbin/workers.sh:#   HADOOP_WORKER_SLEEP Seconds to sleep between spawning remote commands.
grep: ./share/hadoop/yarn/webapps/ui2/assets/images/datatables/Sorting: No such file or directory
grep: icons.psd: No such file or directory
./share/doc/hadoop/hadoop-project-dist/hadoop-common/UnixShellAPI.html:<p>Connect to ${HADOOP_WORKERS} or ${HADOOP_WORKER_NAMES} and execute command.</p>
./share/doc/hadoop/hadoop-project-dist/hadoop-common/UnixShellAPI.html:<p>Connect to ${HADOOP_WORKER_NAMES} and execute command under the environment which does not support pdsh.</p>
./bin/hadoop:if [[ ${HADOOP_WORKER_MODE} = true ]]; then
./bin/yarn:if [[ ${HADOOP_WORKER_MODE} = true ]]; then
./bin/mapred:if [[ ${HADOOP_WORKER_MODE} = true ]]; then
./bin/hdfs:if [[ ${HADOOP_WORKER_MODE} = true ]]; then
./etc/hadoop/hadoop-env.sh:#export HADOOP_WORKERS="${HADOOP_CONF_DIR}/workers"
./etc/hadoop/hadoop-user-functions.sh.example:#  tmpslvnames=$(echo "${HADOOP_WORKER_NAMES}" | tr ' ' '\n' )
./libexec/hadoop-config.cmd:  set HADOOP_WORKERS=%HADOOP_CONF_DIR%\%2
./libexec/hadoop-config.sh:hadoop_deprecate_envvar HADOOP_SLAVES HADOOP_WORKERS
./libexec/hadoop-config.sh:hadoop_deprecate_envvar HADOOP_SLAVE_NAMES HADOOP_WORKER_NAMES
./libexec/hadoop-config.sh:hadoop_deprecate_envvar HADOOP_SLAVE_SLEEP HADOOP_WORKER_SLEEP
./libexec/yarn-config.sh:  hadoop_deprecate_envvar YARN_SLAVES HADOOP_WORKERS
./libexec/hadoop-functions.sh:    HADOOP_WORKERS="${workersfile}"
./libexec/hadoop-functions.sh:    HADOOP_WORKERS="${HADOOP_CONF_DIR}/${workersfile}"
./libexec/hadoop-functions.sh:## @description  Connect to ${HADOOP_WORKERS} or ${HADOOP_WORKER_NAMES}
./libexec/hadoop-functions.sh:  if [[ -n "${HADOOP_WORKERS}" && -n "${HADOOP_WORKER_NAMES}" ]] ; then
./libexec/hadoop-functions.sh:    hadoop_error "ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAMES were defined. Aborting."
./libexec/hadoop-functions.sh:  elif [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
./libexec/hadoop-functions.sh:    if [[ -n "${HADOOP_WORKERS}" ]]; then
./libexec/hadoop-functions.sh:      worker_file=${HADOOP_WORKERS}
./libexec/hadoop-functions.sh:    if [[ -z "${HADOOP_WORKER_NAMES}" ]] ; then
./libexec/hadoop-functions.sh:      tmpslvnames=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,)
./libexec/hadoop-functions.sh:    if [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
./libexec/hadoop-functions.sh:      HADOOP_WORKER_NAMES=$(sed 's/#.*$//;/^$/d' "${worker_file}")
./libexec/hadoop-functions.sh:## @description  Connect to ${HADOOP_WORKER_NAMES} and execute command
./libexec/hadoop-functions.sh:  local workers=(${HADOOP_WORKER_NAMES})
./libexec/hadoop-functions.sh:        HADOOP_WORKER_NAMES="$1"
./libexec/hadoop-functions.sh:        HADOOP_WORKER_MODE=true
[root@bigdata-server-02 hadoop]#
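
The grep output explains the error: hadoop-functions.sh aborts whenever both HADOOP_WORKERS and HADOOP_WORKER_NAMES are set, so the fix is to make sure only one of them is defined (for example, leave the HADOOP_WORKERS export in etc/hadoop/hadoop-env.sh commented out, as it is by default).

The two "grep: ... No such file or directory" lines are a separate, harmless problem: xargs splits its input on whitespace, so a file actually named "Sorting icons.psd" reaches grep as two bogus paths. A null-delimited pipeline avoids this; a minimal sketch of the same search:

# find . -type f -print0 | xargs -0 grep HADOOP_WORKER

With GNU grep, a recursive grep sidesteps xargs entirely:

# grep -r HADOOP_WORKER .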

[root@hadoop3 logs]# cat hadoop-root-namenode-hadoop3.log

2017-12-29 15:06:50,183 INFO org.apache.hadoop.http.HttpServer2: addJerseyResourcePackage: packageName=org.apache.hadoop.hdfs.server.namenode.web.resources;org.apache.hadoop.hdfs.web.resources, pathSpec=/webhdfs/v1/*
2017-12-29 15:06:50,190 INFO org.apache.hadoop.http.HttpServer2: HttpServer.start() threw a non Bind IOException
java.net.BindException: Port in use: 0.0.0.0:9870
    at org.apache.hadoop.http.HttpServer2.constructBindException(HttpServer2.java:1133)
    at org.apache.hadoop.http.HttpServer2.bindForSinglePort(HttpServer2.java:1155)
    at org.apache.hadoop.http.HttpServer2.openListeners(HttpServer2.java:1214)
    at org.apache.hadoop.http.HttpServer2.start(HttpServer2.java:1069)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer.start(NameNodeHttpServer.java:173)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.startHttpServer(NameNode.java:888)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:724)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:950)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:929)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1653)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1720)
Caused by: java.net.BindException: Address already in use
    at sun.nio.ch.Net.bind0(Native Method)
    at sun.nio.ch.Net.bind(Net.java:433)
    at sun.nio.ch.Net.bind(Net.java:425)
    at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:223)
    at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
    at org.eclipse.jetty.server.ServerConnector.open(ServerConnector.java:317)
    at org.apache.hadoop.http.HttpServer2.bindListener(HttpServer2.java:1120)
    at org.apache.hadoop.http.HttpServer2.bindForSinglePort(HttpServer2.java:1151)
    ... 9 more
2017-12-29 15:06:50,192 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Stopping NameNode metrics system...

But this port was never configured anywhere!
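
A setting that never appears in your own config files can still be active as a built-in default. One quick check is hdfs getconf, which prints the effective value of a key, default or overridden (assuming the hdfs client is on the PATH):

# hdfs getconf -confKey dfs.namenode.http-address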

Searching for the string with find

[root@hadoop3 hadoop]# find . -type f | xargs grep 9870
grep: ./share/hadoop/yarn/webapps/ui2/assets/images/datatables/Sorting: No such file or directory
grep: icons.psd: No such file or directory
./share/doc/hadoop/hadoop-yarn/hadoop-yarn-registry/apidocs/org/apache/hadoop/registry/client/types/AddressTypes.html: ["namenode.example.org", "9870"]
./share/doc/hadoop/api/org/apache/hadoop/registry/client/types/AddressTypes.html: ["namenode.example.org", "9870"]
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml: <value>0.0.0.0:9870</value>
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HdfsUserGuide.html:<p>NameNode and DataNode each run an internal web server in order to display basic information about the current status of the cluster. With the default configuration, the NameNode front page is at <tt>http://namenode-name:9870/</tt>. It lists the DataNodes in the cluster and basic statistics of the cluster. The web interface can also be used to browse the file system (using “Browse the file system” link on the NameNode front page).</p></div>
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html: &lt;value&gt;machine1.example.com:9870&lt;/value&gt;
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html: &lt;value&gt;machine2.example.com:9870&lt;/value&gt;
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html: &lt;value&gt;machine3.example.com:9870&lt;/value&gt;
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithNFS.html: &lt;value&gt;machine1.example.com:9870&lt;/value&gt;
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithNFS.html: &lt;value&gt;machine2.example.com:9870&lt;/value&gt;
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithNFS.html: &lt;value&gt;machine3.example.com:9870&lt;/value&gt;
./share/doc/hadoop/hadoop-project-dist/hadoop-common/release/3.0.0-alpha1/CHANGES.3.0.0-alpha1.html:<td align="left"> <a class="externalLink" href="https://issues.apache.org/jira/browse/HDFS-9870">HDFS-9870</a> </td>
./share/doc/hadoop/hadoop-project-dist/hadoop-common/release/3.0.0-alpha1/RELEASENOTES.3.0.0-alpha1.html:<p>The patch updates the HDFS default HTTP/RPC ports to non-ephemeral ports. The changes are listed below: Namenode ports: 50470 –&gt; 9871, 50070 –&gt; 9870, 8020 –&gt; 9820 Secondary NN ports: 50091 –&gt; 9869, 50090 –&gt; 9868 Datanode ports: 50020 –&gt; 9867, 50010 –&gt; 9866, 50475 –&gt; 9865, 50075 –&gt; 9864</p><hr />
./share/doc/hadoop/hadoop-project-dist/hadoop-common/release/2.8.0/CHANGES.2.8.0.html:<td align="left"> <a class="externalLink" href="https://issues.apache.org/jira/browse/HDFS-9870">HDFS-9870</a> </td>
./share/doc/hadoop/hadoop-project-dist/hadoop-common/CommandsManual.html:<pre class="source">$ bin/hadoop daemonlog -setlevel 127.0.0.1:9870 org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG
./share/doc/hadoop/hadoop-project-dist/hadoop-common/SingleCluster.html:<li>NameNode - <tt>http://localhost:9870/</tt></li>
./share/doc/hadoop/hadoop-project-dist/hadoop-common/ClusterSetup.html:<td align="left"> Default HTTP port is 9870. </td></tr>
./logs/hadoop-root-namenode-hadoop3.log:2017-12-29 15:06:50,085 INFO org.apache.hadoop.hdfs.DFSUtil: Starting Web-server for hdfs at: http://0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:java.net.BindException: Port in use: 0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:java.net.BindException: Port in use: 0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:2017-12-29 15:06:50,193 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1: java.net.BindException: Port in use: 0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:2017-12-29 15:23:48,931 INFO org.apache.hadoop.hdfs.DFSUtil: Starting Web-server for hdfs at: http://0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:java.net.BindException: Port in use: 0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:java.net.BindException: Port in use: 0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:2017-12-29 15:23:49,035 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1: java.net.BindException: Port in use: 0.0.0.0:9870
[root@hadoop3 hadoop]# xlc
Stopping namenodes on [hadoop3]
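
"Port in use" almost always means another process, typically a NameNode left over from an earlier start, is still listening on 9870. The owner of the port can be identified before restarting (assuming ss or lsof is available):

# ss -lntp | grep 9870
# lsof -i :9870

Stopping the stale daemon, as above, releases the port.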

9870 turns out to be the default:

./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml: <value>0.0.0.0:9870</value>

<property>
  <name>dfs.namenode.http-address</name>
  <value>0.0.0.0:9870</value>
  <description>
    The address and the base port where the dfs namenode web ui will listen on.
  </description>
</property>
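
To move the web UI off 9870, override the key in etc/hadoop/hdfs-site.xml instead of editing hdfs-default.xml. A sketch, using the pre-Hadoop-3 default 50070 as an arbitrary example value:

<property>
  <name>dfs.namenode.http-address</name>
  <value>0.0.0.0:50070</value>
</property>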

Searching for 9870 in vim

/9870

?9870

(/ searches forward, ? searches backward; n and N jump to the next and previous match.)
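
To search across many files without leaving vim, :vimgrep collects matches into the quickfix list (a sketch; the **/*.xml glob is just an example):

:vimgrep /9870/ **/*.xml
:copen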

