broker.id=0
num.network.threads=9
num.io.threads=24
socket.send.buffer.bytes=102400
listeners=PLAINTEXT://:9092
port=9092
host.name=
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/home/service/var/kafka
num.partitions=12
offsets.topic.replication.factor=2
transaction.state.log.min.isr=1
log.retention.hours=72
log.retention.check.interval.ms=300000
zookeeper.connect=10.12.176.3:2181,10.12.172.32:2181,10.12.174.14:2181/security-kafka
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=3000
log.cleaner.enable=true
delete.topic.enable=true
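Note that zookeeper.connect above ends with the chroot path /security-kafka, so every client and admin tool must pass the same suffix. A minimal sanity check that the broker cluster is reachable, assuming Kafka's bin/ scripts are at hand (the topic name here is illustrative):
sh kafka-topics.sh --zookeeper 10.12.176.3:2181,10.12.172.32:2181,10.12.174.14:2181/security-kafka --create --topic smoke-test --partitions 12 --replication-factor 2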

 
1. Contents of the addrep_cpd-app-down.json file:
{"version":1, "partitions":[
{"topic":"cpd-app-down","partition":0,"replicas":[1,2]},
{"topic":"cpd-app-down","partition":1,"replicas":[2,3]},
{"topic":"cpd-app-down","partition":2,"replicas":[3,4]},
{"topic":"cpd-app-down","partition":3,"replicas":[4,5]},
{"topic":"cpd-app-down","partition":4,"replicas":[5,6]},
{"topic":"cpd-app-down","partition":5,"replicas":[6,0]},
{"topic":"cpd-app-down","partition":6,"replicas":[0,1]},
{"topic":"cpd-app-down","partition":7,"replicas":[1,2]},
{"topic":"cpd-app-down","partition":8,"replicas":[2,3]},
{"topic":"cpd-app-down","partition":9,"replicas":[3,4]},
{"topic":"cpd-app-down","partition":10,"replicas":[4,5]},
{"topic":"cpd-app-down","partition":11,"replicas":[5,6]},
{"topic":"cpd-app-down","partition":12,"replicas":[6,0]},
{"topic":"cpd-app-down","partition":13,"replicas":[0,1]}
] }
2. Run kafka-reassign-partitions.sh to load the JSON file and start the reassignment:
sh kafka-reassign-partitions.sh --zookeeper 10.6.72.38:2181,10.6.72.8:2181 --reassignment-json-file ../config/addrep_cpd-app-down.json --execute
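Once the reassignment has been started, the same JSON file can be used to check its progress with the tool's --verify mode, assuming the same ZooKeeper ensemble:
sh kafka-reassign-partitions.sh --zookeeper 10.6.72.38:2181,10.6.72.8:2181 --reassignment-json-file ../config/addrep_cpd-app-down.json --verify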
broker.id=
listeners=PLAINTEXT://10.32.104.37:9092
num.network.threads=
num.io.threads=
socket.send.buffer.bytes=
socket.receive.buffer.bytes=
socket.request.max.bytes=
log.dirs=/var/data/kafka
num.partitions=
num.recovery.threads.per.data.dir=
log.retention.hours=
log.segment.bytes=
log.retention.check.interval.ms=
log.cleaner.enable=true
zookeeper.connect=10.32.106.42:2181,10.32.114.34:2181,10.32.104.37:2181
zookeeper.connection.timeout.ms=
delete.topic.enable=true
transaction.state.log.min.isr=
default.replication.factor=
1. Create topics-to-move.json and fill in the topic information:
{"topics":
[{"topic": "TestSing"}],
"version":1
}
2. Generate the plan (in JSON) for migrating the topic to the new brokers:
sh bin/kafka-reassign-partitions.sh --zookeeper 10.32.106.42:2181 --topics-to-move-json-file topics-to-move.json --broker-list "3,4,5" --generate
3. Save the proposed assignment printed by --generate (here as config/testsing.json), then run the script with that JSON file to start the migration:
sh bin/kafka-reassign-partitions.sh --zookeeper 10.32.106.42:2181 --reassignment-json-file config/testsing.json --execute
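Once the migration finishes, it can be confirmed by describing the topic; the replica list of each partition should now reference brokers 3, 4 and 5:
sh bin/kafka-topics.sh --zookeeper 10.32.106.42:2181 --describe --topic TestSing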
Splitting Kafka logs by file size: in log4j.properties, replace the time-based DailyRollingFileAppender with a size-based RollingFileAppender:
#log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
#log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
log4j.appender.kafkaAppender.MaxFileSize=500MB
log4j.appender.kafkaAppender.MaxBackupIndex=5

The complete log4j.properties, with size-based rolling applied to every appender:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
log4j.appender.kafkaAppender.MaxFileSize=500MB
log4j.appender.kafkaAppender.MaxBackupIndex=5
log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stateChangeAppender=org.apache.log4j.RollingFileAppender
log4j.appender.stateChangeAppender.MaxFileSize=500MB
log4j.appender.stateChangeAppender.MaxBackupIndex=5
log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.requestAppender=org.apache.log4j.RollingFileAppender
log4j.appender.requestAppender.MaxFileSize=500MB
log4j.appender.requestAppender.MaxBackupIndex=5
log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.cleanerAppender=org.apache.log4j.RollingFileAppender
log4j.appender.cleanerAppender.MaxFileSize=500MB
log4j.appender.cleanerAppender.MaxBackupIndex=5
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.controllerAppender=org.apache.log4j.RollingFileAppender
log4j.appender.controllerAppender.MaxFileSize=500MB
log4j.appender.controllerAppender.MaxBackupIndex=5
log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.authorizerAppender=org.apache.log4j.RollingFileAppender
log4j.appender.authorizerAppender.MaxFileSize=500MB
log4j.appender.authorizerAppender.MaxBackupIndex=5
log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.logger.kafka=INFO, kafkaAppender
log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
log4j.additivity.kafka.network.RequestChannel$=false
log4j.logger.kafka.request.logger=WARN, requestAppender
log4j.additivity.kafka.request.logger=false
log4j.logger.kafka.controller=INFO, controllerAppender
log4j.additivity.kafka.controller=false
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
log4j.additivity.kafka.log.LogCleaner=false
log4j.logger.state.change.logger=INFO, stateChangeAppender
log4j.additivity.state.change.logger=false
log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender
log4j.additivity.kafka.authorizer.logger=false
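log4j.properties is read only at broker startup, so the broker must be restarted for size-based rolling to take effect. A minimal sketch, assuming a standard tarball layout; the LOG_DIR path is illustrative (kafka-run-class.sh passes it to the JVM as kafka.logs.dir, which supplies the ${kafka.logs.dir} used above):
export LOG_DIR=/var/log/kafka
bin/kafka-server-stop.sh
bin/kafka-server-start.sh -daemon config/server.properties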

 
