1. $HADOOP_HOME/bin/hadoop

#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The Hadoop command script
#
# Environment Variables
#
#   JAVA_HOME        The java implementation to use.  Overrides JAVA_HOME.
#
#   HADOOP_CLASSPATH Extra Java CLASSPATH entries.
#
#   HADOOP_USER_CLASSPATH_FIRST      When defined, the HADOOP_CLASSPATH is
#                                    added in the beginning of the global
#                                    classpath. Can be defined, for example,
#                                    by doing
#                                    export HADOOP_USER_CLASSPATH_FIRST=true
#
#   HADOOP_HEAPSIZE  The maximum amount of heap to use, in MB.
#                    Default is 1000.
#
#   HADOOP_OPTS      Extra Java runtime options.
#
#   HADOOP_NAMENODE_OPTS       These options are added to HADOOP_OPTS
#   HADOOP_CLIENT_OPTS         when the respective command is run.
#   HADOOP_{COMMAND}_OPTS etc  HADOOP_JT_OPTS applies to JobTracker
#                              for e.g.  HADOOP_CLIENT_OPTS applies to
#                              more than one command (fs, dfs, fsck,
#                              dfsadmin etc)
#
#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_HOME}/conf.
#
#   HADOOP_ROOT_LOGGER The root appender. Default is INFO,console
#
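# (Annotation, not part of the original script) A quick sketch of how the
# environment variables documented above might be set before invoking the
# command; the extra jar path below is made up for illustration:
#
#   export HADOOP_HEAPSIZE=2000                       # max heap in MB, instead of the 1000 MB default
#   export HADOOP_CLASSPATH=/opt/jars/my-extra.jar    # hypothetical extra classpath entry
#   export HADOOP_USER_CLASSPATH_FIRST=true           # prepend HADOOP_CLASSPATH instead of appending it
#   hadoop fs -ls /                                   # any hadoop command then picks these up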

bin=`dirname "$0"`      # (1) Get the directory this script lives in ($HADOOP_HOME/bin)
bin=`cd "$bin"; pwd`    # (2) cd into that directory and resolve it to an absolute path

if [ -e "$bin"/../libexec/hadoop-config.sh ]; then
  # (3) Source hadoop-config.sh to set up the configuration
  . "$bin"/../libexec/hadoop-config.sh
else
  . "$bin"/hadoop-config.sh
fi

cygwin=false
case "`uname`" in
CYGWIN*) cygwin=true;;
esac

# if no args specified, show usage
if [ $# = 0 ]; then
echo "Usage: hadoop [--config confdir] COMMAND"
echo "where COMMAND is one of:"
echo " namenode -format format the DFS filesystem"
echo " secondarynamenode run the DFS secondary namenode"
echo " namenode run the DFS namenode"
echo " datanode run a DFS datanode"
echo " dfsadmin run a DFS admin client"
echo " mradmin run a Map-Reduce admin client"
echo " fsck run a DFS filesystem checking utility"
echo " fs run a generic filesystem user client"
echo " balancer run a cluster balancing utility"
echo " fetchdt fetch a delegation token from the NameNode"
echo " jobtracker run the MapReduce job Tracker node"
echo " pipes run a Pipes job"
echo " tasktracker run a MapReduce task Tracker node"
echo " historyserver run job history servers as a standalone daemon"
echo " job manipulate MapReduce jobs"
echo " queue get information regarding JobQueues"
echo " version print the version"
echo " jar <jar> run a jar file"
echo " distcp <srcurl> <desturl> copy file or directories recursively"
echo " archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
echo " classpath prints the class path needed to get the"
echo " Hadoop jar and the required libraries"
echo " daemonlog get/set the log level for each daemon"
echo " or"
echo " CLASSNAME run the class named CLASSNAME"
echo "Most commands print help when invoked w/o parameters."exit1fi# get arguments
COMMAND=$1
shift

# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
  HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
  HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
  HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
  starting_secure_dn="true"
fi

# some Java parameters
if [ "$JAVA_HOME" != "" ]; then
  #echo "run java in $JAVA_HOME"
  JAVA_HOME=$JAVA_HOME
fi

if [ "$JAVA_HOME" = "" ]; then
  echo "Error: JAVA_HOME is not set."
  exit 1
fi

JAVA=$JAVA_HOME/bin/java
JAVA_HEAP_MAX=-Xmx1000m

# check envvars which might override default args
if [ "$HADOOP_HEAPSIZE" != "" ]; then
  #echo "run with heapsize $HADOOP_HEAPSIZE"
  JAVA_HEAP_MAX="-Xmx""$HADOOP_HEAPSIZE""m"
  #echo $JAVA_HEAP_MAX
fi

# (3) Build up the CLASSPATH
# CLASSPATH initially contains $HADOOP_CONF_DIR
CLASSPATH="${HADOOP_CONF_DIR}"
if [ "$HADOOP_USER_CLASSPATH_FIRST" != "" ] && [ "$HADOOP_CLASSPATH" != "" ]; then
  CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
fi
CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar

# for developers, add Hadoop classes to CLASSPATH
if [ -d "$HADOOP_HOME/build/classes" ]; then
  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes
fi
if [ -d "$HADOOP_HOME/build/webapps" ]; then
  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build
fi
if [ -d "$HADOOP_HOME/build/test/classes" ]; then
  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes
fi
if [ -d "$HADOOP_HOME/build/tools" ]; then
  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/tools
fi

# so that filenames w/ spaces are handled correctly in loops below
IFS=

# for releases, add core hadoop jar & webapps to CLASSPATH
if [ -e $HADOOP_PREFIX/share/hadoop/hadoop-core-* ]; then
  # binary layout
  if [ -d "$HADOOP_PREFIX/share/hadoop/webapps" ]; then
    CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/share/hadoop
  fi
  for f in $HADOOP_PREFIX/share/hadoop/hadoop-core-*.jar; do
    CLASSPATH=${CLASSPATH}:$f;
  done
  # add libs to CLASSPATH
  for f in $HADOOP_PREFIX/share/hadoop/lib/*.jar; do
    CLASSPATH=${CLASSPATH}:$f;
  done
  for f in $HADOOP_PREFIX/share/hadoop/lib/jsp-2.1/*.jar; do
    CLASSPATH=${CLASSPATH}:$f;
  done
  for f in $HADOOP_PREFIX/share/hadoop/hadoop-tools-*.jar; do
    TOOL_PATH=${TOOL_PATH}:$f;
  done
else
# tarball layout
if [ -d "$HADOOP_HOME/webapps" ]; then
CLASSPATH=${CLASSPATH}:$HADOOP_HOME
fi
for f in $HADOOP_HOME/hadoop-core-*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done # add libs to CLASSPATH
for f in $HADOOP_HOME/lib/*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done if [ -d "$HADOOP_HOME/build/ivy/lib/Hadoop/common" ]; then
for f in $HADOOP_HOME/build/ivy/lib/Hadoop/common/*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done
fi for f in $HADOOP_HOME/lib/jsp-2.1/*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done for f in $HADOOP_HOME/hadoop-tools-*.jar; do
TOOL_PATH=${TOOL_PATH}:$f;
done
for f in $HADOOP_HOME/build/hadoop-tools-*.jar; do
TOOL_PATH=${TOOL_PATH}:$f;
done
fi # add user-specified CLASSPATH last
if [ "$HADOOP_USER_CLASSPATH_FIRST" = "" ] && [ "$HADOOP_CLASSPATH" != "" ]; then
CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
fi # default log directory & file
if [ "$HADOOP_LOG_DIR" = "" ]; then
HADOOP_LOG_DIR="$HADOOP_HOME/logs"
fi
if [ "$HADOOP_LOGFILE" = "" ]; then
HADOOP_LOGFILE='hadoop.log'
fi # default policy file for service-level authorization
if [ "$HADOOP_POLICYFILE" = "" ]; then
HADOOP_POLICYFILE="hadoop-policy.xml"
fi # restore ordinary behaviour
unset IFS # figure out which class to run //(4)根据不同命令确定CLASS
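# (Annotation, not part of the script) The "classpath" command handled first in
# the dispatch below simply echoes the CLASSPATH assembled above, which makes it
# easy to check what the loops actually picked up:
#
#   hadoop classpath | tr ':' '\n'    # print one classpath entry per line

# figure out which class to run    # (4) Select the class to execute based on COMMAND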
if [ "$COMMAND" = "classpath" ] ; then
if $cygwin; then
CLASSPATH=`cygpath -p -w "$CLASSPATH"`
fi
echo $CLASSPATH
exit
elif [ "$COMMAND" = "namenode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
elif [ "$COMMAND" = "secondarynamenode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
elif [ "$COMMAND" = "datanode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
if [ "$starting_secure_dn" = "true" ]; then
HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS"
else
HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS"
fi
elif [ "$COMMAND" = "fs" ] ; then
CLASS=org.apache.hadoop.fs.FsShell
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "dfs" ] ; then
CLASS=org.apache.hadoop.fs.FsShell
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "dfsadmin" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "mradmin" ] ; then
CLASS=org.apache.hadoop.mapred.tools.MRAdmin
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "fsck" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.DFSck
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "balancer" ] ; then
CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
elif [ "$COMMAND" = "fetchdt" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
elif [ "$COMMAND" = "jobtracker" ] ; then
CLASS=org.apache.hadoop.mapred.JobTracker
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOBTRACKER_OPTS"
elif [ "$COMMAND" = "historyserver" ] ; then
CLASS=org.apache.hadoop.mapred.JobHistoryServer
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOB_HISTORYSERVER_OPTS"
elif [ "$COMMAND" = "tasktracker" ] ; then
CLASS=org.apache.hadoop.mapred.TaskTracker
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_TASKTRACKER_OPTS"
elif [ "$COMMAND" = "job" ] ; then
CLASS=org.apache.hadoop.mapred.JobClient
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "queue" ] ; then
CLASS=org.apache.hadoop.mapred.JobQueueClient
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "pipes" ] ; then
CLASS=org.apache.hadoop.mapred.pipes.Submitter
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "version" ] ; then
CLASS=org.apache.hadoop.util.VersionInfo
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "jar" ] ; then
CLASS=org.apache.hadoop.util.RunJar
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "distcp" ] ; then
CLASS=org.apache.hadoop.tools.DistCp
CLASSPATH=${CLASSPATH}:${TOOL_PATH}
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "daemonlog" ] ; then
CLASS=org.apache.hadoop.log.LogLevel
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "archive" ] ; then
CLASS=org.apache.hadoop.tools.HadoopArchives
CLASSPATH=${CLASSPATH}:${TOOL_PATH}
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "sampler" ] ; then
CLASS=org.apache.hadoop.mapred.lib.InputSampler
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
else
CLASS=$COMMAND
fi # cygwin path translation
if $cygwin; then
  CLASSPATH=`cygpath -p -w "$CLASSPATH"`
  HADOOP_HOME=`cygpath -w "$HADOOP_HOME"`
  HADOOP_LOG_DIR=`cygpath -w "$HADOOP_LOG_DIR"`
  TOOL_PATH=`cygpath -p -w "$TOOL_PATH"`
fi

# Determine the JAVA_PLATFORM    # (5) Detect the platform string for this system
JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} -Xmx32m ${HADOOP_JAVA_PLATFORM_OPTS} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"`

if [ "$JAVA_PLATFORM" = "Linux-amd64-64" ]; then
  JSVC_ARCH="amd64"
else
  JSVC_ARCH="i386"
fi
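# (Annotation, not part of the script) To see the exact platform string computed
# above -- and hence which native-library directory is chosen below -- the same
# class the script invokes can be run by hand through the CLASSNAME fall-through:
#
#   hadoop org.apache.hadoop.util.PlatformName

# setup 'java.library.path' for native-hadoop code if necessary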
JAVA_LIBRARY_PATH=''
if [ -d "${HADOOP_HOME}/build/native" -o -d "${HADOOP_HOME}/lib/native" -o -e "${HADOOP_PREFIX}/lib/libhadoop.a" ]; then if [ -d "$HADOOP_HOME/build/native" ]; then
JAVA_LIBRARY_PATH=${HADOOP_HOME}/build/native/${JAVA_PLATFORM}/lib
fi if [ -d "${HADOOP_HOME}/lib/native" ]; then
if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
else
JAVA_LIBRARY_PATH=${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
fi
fi if [ -e "${HADOOP_PREFIX}/lib/libhadoop.a" ]; then
JAVA_LIBRARY_PATH=${HADOOP_PREFIX}/lib
fi
fi # cygwin path translation
if $cygwin; then
JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
fi HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_HOME"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}" #turn security logger on the namenode and jobtracker only
if [ $COMMAND = "namenode" ] || [ $COMMAND = "jobtracker" ]; then
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,DRFAS}"
else
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
fi if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" //(6)设定使用hadoop本地库
fi
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE" # Check to see if we should start a secure datanode
if [ "$starting_secure_dn" = "true" ]; then
if [ "$HADOOP_PID_DIR" = "" ]; then
HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
else
HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
fi exec "$HADOOP_HOME/libexec/jsvc.${JSVC_ARCH}" -Dproc_$COMMAND -outfile "$HADOOP_LOG_DIR/jsvc.out" \
-errfile "$HADOOP_LOG_DIR/jsvc.err" \
-pidfile "$HADOOP_SECURE_DN_PID" \
-nodetach \
-user "$HADOOP_SECURE_DN_USER" \
-cp "$CLASSPATH" \
$JAVA_HEAP_MAX $HADOOP_OPTS \
org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter "$@"
else
# run it
exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@" //运行具体的java class fi

2. $HADOOP_HOME/bin/hadoop-config.sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# included in all the hadoop scripts with source command
# should not be executable directly
# also should not be passed any arguments, since we need original $*

# resolve links - $0 may be a softlink
this="${BASH_SOURCE-$0}"
common_bin=$(cd -P -- "$(dirname -- "$this")" && pwd -P)
script="$(basename -- "$this")"
this="$common_bin/$script"

# convert relative path to absolute path
config_bin=`dirname "$this"`
script=`basename "$this"`
config_bin=`cd "$config_bin"; pwd`
this="$config_bin/$script"

# the root of the Hadoop installation
export HADOOP_PREFIX=`dirname "$this"`/..

# check to see if the conf dir is given as an optional argument
if [ $# -gt 1 ]
then
  if [ "--config" = "$1" ]
  then
    shift
    confdir=$1
    shift
    HADOOP_CONF_DIR=$confdir
  fi
fi

# Allow alternate conf dir location.
if [ -e "${HADOOP_PREFIX}/conf/hadoop-env.sh" ]; then
  DEFAULT_CONF_DIR="conf"
else
  DEFAULT_CONF_DIR="etc/hadoop"
fi
HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_PREFIX/$DEFAULT_CONF_DIR}"

# check to see it is specified whether to use the slaves or the
# masters file
if [ $# -gt 1 ]
then
  if [ "--hosts" = "$1" ]
  then
    shift
    slavesfile=$1
    shift
    export HADOOP_SLAVES="${HADOOP_CONF_DIR}/$slavesfile"
  fi
fi

if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
  # (2) Source hadoop-env.sh to apply the user's configuration settings
  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi

if [ "$HADOOP_HOME_WARN_SUPPRESS" = "" ] && [ "$HADOOP_HOME" != "" ]; then
  echo "Warning: \$HADOOP_HOME is deprecated." 1>&2
  echo 1>&2
fi

# Newer versions of glibc use an arena memory allocator that causes virtual
# memory usage to explode. This interacts badly with the many threads that
# we use in Hadoop. Tune the variable down to prevent vmem explosion.
export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}

export HADOOP_HOME=${HADOOP_PREFIX}
export HADOOP_HOME_WARN_SUPPRESS=1
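As the argument handling above shows, an alternate configuration directory can be supplied per invocation. A small usage sketch (the directory path is hypothetical):

    hadoop --config /opt/hadoop/conf.staging fs -ls /

bin/hadoop sources this file before it parses the command, so "--config" is consumed here, HADOOP_CONF_DIR is set accordingly, and the remaining arguments ("fs -ls /") are what the dispatch in bin/hadoop sees.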

3. $HADOOP_HOME/conf/hadoop-env.sh

In summary, when the hadoop command runs it sources hadoop-config.sh and then hadoop-env.sh, in that order, to set up its configuration and parameters. It then hands the assembled configuration and JVM options to a Java class for execution, with the command-line argument determining which class is run and therefore what behavior you get.
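One way to confirm this ordering on a live installation is to trace the script with bash's execution tracing and filter for the two files of interest (a quick check, assuming a standard layout):

    bash -x $HADOOP_HOME/bin/hadoop version 2>&1 | grep -E 'hadoop-(config|env)\.sh'

The trace should show hadoop-config.sh being sourced first, followed by ${HADOOP_CONF_DIR}/hadoop-env.sh, before the final exec of the Java class.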
