1. Create a kafka directory at the filesystem root (do this on service1, service2, and service3):
[root@localhost /]# mkdir kafka
 
2. Upload kafka_2.9.2-0.8.1.1.tgz to the /software directory on service1 (for example via Xshell).
3. Remote-copy /software/kafka_2.9.2-0.8.1.1.tgz from service1 to service2 and service3:
[root@localhost software]# scp -r /software/kafka_2.9.2-0.8.1.1.tgz root@192.168.2.212:/software/
[root@localhost software]# scp -r /software/kafka_2.9.2-0.8.1.1.tgz root@192.168.2.213:/software/
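To confirm the archive arrived intact, you can optionally compare checksums on all three servers (the hashes should match):
[root@localhost software]# md5sum /software/kafka_2.9.2-0.8.1.1.tgz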
 
4. Copy /software/kafka_2.9.2-0.8.1.1.tgz into the /kafka/ directory (run on service1, service2, and service3):
[root@localhost software]# cp /software/kafka_2.9.2-0.8.1.1.tgz /kafka/
 
5. Extract kafka_2.9.2-0.8.1.1.tgz (run on service1, service2, and service3):
[root@localhost /]# cd /kafka/
[root@localhost kafka]# tar -zxvf kafka_2.9.2-0.8.1.1.tgz
 
 
6. Create the Kafka message directory, which log.dirs will point at later (create on service1, service2, and service3):
[root@localhost kafka]# mkdir kafkaLogs
 
7. Edit the Kafka configuration file (configure on service1, service2, and service3):
[root@localhost /]# cd /kafka/kafka_2.9.2-0.8.1.1/
[root@localhost kafka_2.9.2-0.8.1.1]# cd config/
[root@localhost config]# ls
consumer.properties  log4j.properties  producer.properties  server.properties  test-log4j.properties  tools-log4j.properties  zookeeper.properties
[root@localhost config]# vi server.properties
 
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults
 
############################# Server Basics #############################
 
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0  -- unique identifier for this broker; every broker in the cluster must use a different value
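For example, use 0 on service1, 1 on service2, and 2 on service3. A hypothetical one-liner to set it on service2 from the shell:
  sed -i 's/^broker.id=.*/broker.id=1/' /kafka/kafka_2.9.2-0.8.1.1/config/server.properties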
 
############################# Socket Server Settings #############################
 
# The port the socket server listens on
port=19092  -- the TCP port this broker exposes to clients; the default is 9092
 
# Hostname the broker will bind to. If not set, the server will bind to all interfaces
host.name=192.168.2.213  -- commented out by default; enable it and set it to this server's own IP on each broker. If it is unset, the broker resolves client hostnames through DNS, and every failed DNS lookup leaks a file handle. That failure rate is not negligible: Kafka can process 100,000+ messages per second per topic partition, so even a one-in-ten-thousand DNS failure rate leaks roughly 10 handles per second, quickly exhausting the Linux open-file limit and causing errors. Binding to an IP skips DNS resolution entirely.
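Since leaked handles count against the OS open-file limit, it is also worth checking (and if necessary raising) that limit on each broker; a minimal sketch, assuming the Kafka process inherits the shell's limit:
  ulimit -n        # show the current open-file limit
  ulimit -n 65536  # raise it for this session; edit /etc/security/limits.conf to make it permanent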
 
 
# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured.  Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
#advertised.host.name=<hostname routable by clients>
 
# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
#advertised.port=<port accessible by clients>
 
# The number of threads handling network requests
num.network.threads=2   -- number of threads handling network requests; usually left at the default
 
# The number of threads doing disk I/O
num.io.threads=8  -- number of threads doing disk I/O; must be at least the number of directories in log.dirs
 
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=1048576  -- send buffer (SO_SNDBUF) used by the socket server; outgoing messages are staged here and sent in batches
 
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=1048576  -- receive buffer (SO_RCVBUF) used by the socket server; incoming data accumulates here before being written out
 
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600   -- maximum size of a single request sent to Kafka; must not exceed the JVM heap size
 
 
############################# Log Basics #############################
 
# A comma separated list of directories under which to store log files
log.dirs=/kafka/kafkaLogs  -- multiple directories can be separated with commas
 
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=2  -- default number of partitions per topic
 
############################# Log Flush Policy #############################
 
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
#    1. Durability: Unflushed data may be lost if you are not using replication.
#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
 
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
 
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
 
############################# Log Retention Policy #############################
 
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
 
# The minimum age of a log file to be eligible for deletion
log.retention.hours=168
 
message.max.bytes=5048576   -- maximum size of a single message the broker will accept
default.replication.factor=2  -- default replication factor; the default of 1 keeps no backup copy, which is unsafe, so we set 2 so that if one replica's broker fails, the partition can still be served from the other replica
replica.fetch.max.bytes=5048576 -- maximum number of bytes fetched per replication request; must be at least message.max.bytes, otherwise oversized messages can never be replicated
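A quick sanity check that these size settings are identical on every broker (plain grep, nothing Kafka-specific):
  grep -E '^(message.max.bytes|replica.fetch.max.bytes)' /kafka/kafka_2.9.2-0.8.1.1/config/server.properties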
 
 
# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
#log.retention.bytes=1073741824
 
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=536870912  -- maximum size of a log segment file; once reached, a new segment is rolled
 
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=60000
 
# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
log.cleaner.enable=false  -- log compaction is not used
 
############################# Zookeeper #############################
 
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=192.168.2.211:2181,192.168.2.212:2181,192.168.2.213:2181   -- ZooKeeper connection string listing all three nodes
 
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=1000000
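Before starting the brokers, it is worth confirming that every ZooKeeper node is reachable; one common check is ZooKeeper's "ruok" four-letter command (assuming nc/netcat is installed):
  echo ruok | nc 192.168.2.211 2181   # a healthy node answers "imok"; repeat for .212 and .213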
 
8. Start the Kafka service (on all three servers):
[root@localhost bin]# ./kafka-server-start.sh -daemon ../config/server.properties
[root@localhost bin]# jps
27413 Kafka
27450 Jps
17884 QuorumPeerMain
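If the Kafka process does not show up in jps, the server log usually explains why; the startup scripts also honor the KAFKA_HEAP_OPTS environment variable if you need a larger heap (for instance, to keep socket.request.max.bytes safely below it):
[root@localhost bin]# tail -n 50 ../logs/server.log
[root@localhost bin]# export KAFKA_HEAP_OPTS="-Xms2g -Xmx2g"   # optional; takes effect on the next start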
 
9. Verify the Kafka cluster by creating a test topic:
[root@localhost bin]# ./kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 1 --topic test
Created topic "test".
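You can then check that the partition has elected a leader and that both replicas are in sync:
[root@localhost bin]# ./kafka-topics.sh --describe --zookeeper localhost:2181 --topic test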
 
10. Start a console producer on service1:
[root@localhost bin]# ./kafka-console-producer.sh --broker-list 192.168.2.211:9092 --topic test
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
 
 
11. Start a console consumer on service2:
[root@localhost bin]# ./kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
 
12. Send the message hello jeesz by typing it into the producer session started in step 10:
[root@localhost bin]# ./kafka-console-producer.sh --broker-list 192.168.2.211:9092 --topic test
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
hello jeesz
 
13. The message is received by the consumer:
[root@localhost bin]# ./kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.

hello jeesz
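As a final check, you can list every topic registered in ZooKeeper, and stop a broker cleanly with the bundled script when needed:
[root@localhost bin]# ./kafka-topics.sh --list --zookeeper localhost:2181
[root@localhost bin]# ./kafka-server-stop.sh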
