# Licensed to the Apache Software Foundation (ASF) under one
  # or more contributor license agreements. See the NOTICE file
  # distributed with this work for additional information
  # regarding copyright ownership. The ASF licenses this file
  # to you under the Apache License, Version 2.0 (the
  # "License"); you may not use this file except in compliance
  # with the License. You may obtain a copy of the License at
  #
  # http://www.apache.org/licenses/LICENSE-2.0
  #
  # Unless required by applicable law or agreed to in writing, software
  # distributed under the License is distributed on an "AS IS" BASIS,
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
   
   
  ########### These all have default values as shown
  ########### Additional configuration goes into storm.yaml
   
  java.library.path: "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/lib64"
   
  ### storm.* configs are general configurations
  # the local dir is where jars are kept
  storm.local.dir: "storm-local"
  storm.log4j2.conf.dir: "log4j2"
  storm.zookeeper.servers:
  - "localhost"
  storm.zookeeper.port: 2181
  storm.zookeeper.root: "/storm"
  storm.zookeeper.session.timeout: 20000
  storm.zookeeper.connection.timeout: 15000
  storm.zookeeper.retry.times: 5
  storm.zookeeper.retry.interval: 1000
  storm.zookeeper.retry.intervalceiling.millis: 30000
  storm.zookeeper.auth.user: null
  storm.zookeeper.auth.password: null
  storm.exhibitor.port: 8080
  storm.exhibitor.poll.uripath: "/exhibitor/v1/cluster/list"
  storm.cluster.mode: "distributed" # can be distributed or local
  storm.local.mode.zmq: false
  storm.thrift.transport: "org.apache.storm.security.auth.SimpleTransportPlugin"
  storm.thrift.socket.timeout.ms: 600000
  storm.principal.tolocal: "org.apache.storm.security.auth.DefaultPrincipalToLocal"
  storm.group.mapping.service: "org.apache.storm.security.auth.ShellBasedGroupsMapping"
  storm.group.mapping.service.params: null
  storm.messaging.transport: "org.apache.storm.messaging.netty.Context"
  storm.nimbus.retry.times: 5
  storm.nimbus.retry.interval.millis: 2000
  storm.nimbus.retry.intervalceiling.millis: 60000
  storm.auth.simple-white-list.users: []
  storm.cluster.state.store: "org.apache.storm.cluster.ZKStateStorageFactory"
  storm.meta.serialization.delegate: "org.apache.storm.serialization.GzipThriftSerializationDelegate"
  storm.codedistributor.class: "org.apache.storm.codedistributor.LocalFileSystemCodeDistributor"
  storm.workers.artifacts.dir: "workers-artifacts"
  storm.health.check.dir: "healthchecks"
  storm.health.check.timeout.ms: 5000
  storm.disable.symlinks: false
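
# Example (illustrative): in a real cluster, the storm.* settings most commonly overridden in
# storm.yaml are the ZooKeeper ensemble and the local working directory. Hostnames and paths
# below are placeholders, not recommendations; the keys are the ones defined above.
# storm.zookeeper.servers:
#     - "zk1.example.com"
#     - "zk2.example.com"
#     - "zk3.example.com"
# storm.zookeeper.port: 2181
# storm.local.dir: "/var/storm"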
   
  ### nimbus.* configs are for the master
nimbus.seeds: ["localhost"]
  nimbus.thrift.port: 6627
  nimbus.thrift.threads: 64
  nimbus.thrift.max_buffer_size: 1048576
  nimbus.childopts: "-Xmx1024m"
  nimbus.task.timeout.secs: 30
  nimbus.supervisor.timeout.secs: 60
  nimbus.monitor.freq.secs: 10
  nimbus.cleanup.inbox.freq.secs: 600
  nimbus.inbox.jar.expiration.secs: 3600
  nimbus.code.sync.freq.secs: 120
  nimbus.task.launch.secs: 120
  nimbus.file.copy.expiration.secs: 600
  nimbus.topology.validator: "org.apache.storm.nimbus.DefaultTopologyValidator"
  topology.min.replication.count: 1
  topology.max.replication.wait.time.sec: 60
  nimbus.credential.renewers.freq.secs: 600
  nimbus.queue.size: 100000
  scheduler.display.resource: false
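
# Example (illustrative): for a highly-available master, list every Nimbus host in nimbus.seeds
# and give the daemon a larger heap. Hostnames and sizes are placeholders.
# nimbus.seeds: ["nimbus1.example.com", "nimbus2.example.com"]
# nimbus.childopts: "-Xmx2048m"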
   
### ui.* configs are for the Storm UI daemon (typically run on the master node)
  ui.host: 0.0.0.0
  ui.port: 8080
  ui.childopts: "-Xmx768m"
  ui.actions.enabled: true
  ui.filter: null
  ui.filter.params: null
  ui.users: null
  ui.header.buffer.bytes: 4096
  ui.http.creds.plugin: org.apache.storm.security.auth.DefaultHttpCredentialsPlugin
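
# Example (illustrative): ui.port defaults to 8080, which can collide with other web services on
# the same host (note that storm.exhibitor.port above also defaults to 8080). A common override
# simply moves the UI to a free port of your choice; 8744 here is only a placeholder.
# ui.port: 8744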
   
  logviewer.port: 8000
  logviewer.childopts: "-Xmx128m"
  logviewer.cleanup.age.mins: 10080
  logviewer.appender.name: "A1"
  logviewer.max.sum.worker.logs.size.mb: 4096
  logviewer.max.per.worker.logs.size.mb: 2048
   
  logs.users: null
   
  drpc.port: 3772
  drpc.worker.threads: 64
  drpc.max_buffer_size: 1048576
  drpc.queue.size: 128
  drpc.invocations.port: 3773
  drpc.invocations.threads: 64
  drpc.request.timeout.secs: 600
  drpc.childopts: "-Xmx768m"
  drpc.http.port: 3774
  drpc.https.port: -1
  drpc.https.keystore.password: ""
  drpc.https.keystore.type: "JKS"
  drpc.http.creds.plugin: org.apache.storm.security.auth.DefaultHttpCredentialsPlugin
  drpc.authorizer.acl.filename: "drpc-auth-acl.yaml"
  drpc.authorizer.acl.strict: false
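
# Example (illustrative): when DRPC request volume grows, the thread pools, heap, and request
# timeout above are the usual knobs. Values below are placeholders, not tuned recommendations.
# drpc.worker.threads: 128
# drpc.childopts: "-Xmx1024m"
# drpc.request.timeout.secs: 300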
   
  transactional.zookeeper.root: "/transactional"
  transactional.zookeeper.servers: null
  transactional.zookeeper.port: null
   
  ## blobstore configs
  supervisor.blobstore.class: "org.apache.storm.blobstore.NimbusBlobStore"
  supervisor.blobstore.download.thread.count: 5
  supervisor.blobstore.download.max_retries: 3
  supervisor.localizer.cache.target.size.mb: 10240
  supervisor.localizer.cleanup.interval.ms: 30000
   
  nimbus.blobstore.class: "org.apache.storm.blobstore.LocalFsBlobStore"
  nimbus.blobstore.expiration.secs: 600
   
  storm.blobstore.inputstream.buffer.size.bytes: 65536
  client.blobstore.class: "org.apache.storm.blobstore.NimbusBlobStore"
  storm.blobstore.replication.factor: 3
  # For secure mode we would want to change this config to true
  storm.blobstore.acl.validation.enabled: false
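
# Example (illustrative): as the comment above says, a secure cluster should turn on ACL
# validation for the blobstore. Placeholder override for storm.yaml:
# storm.blobstore.acl.validation.enabled: true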
   
  ### supervisor.* configs are for node supervisors
# Define the number of workers that can run on this machine. Each worker is assigned a port to use for communication
  supervisor.slots.ports:
  - 6700
  - 6701
  - 6702
  - 6703
  supervisor.childopts: "-Xmx256m"
  supervisor.run.worker.as.user: false
#how long the supervisor will wait for a worker process to start before considering the launch failed
supervisor.worker.start.timeout.secs: 120
#how long without a heartbeat before the supervisor considers a worker dead and tries to restart it
supervisor.worker.timeout.secs: 30
#how many seconds to sleep before shutting down a worker's threads
supervisor.worker.shutdown.sleep.secs: 3
#how frequently the supervisor checks on the status of the processes it's monitoring and restarts them if necessary
  supervisor.monitor.frequency.secs: 3
  #how frequently the supervisor heartbeats to the cluster state (for nimbus)
  supervisor.heartbeat.frequency.secs: 5
  supervisor.enable: true
  supervisor.supervisors: []
  supervisor.supervisors.commands: []
  supervisor.memory.capacity.mb: 4096.0
#By convention 1 CPU core corresponds to about 100; this can be adjusted if needed.
# Using 100 makes it simple to set the desired value to the measured capacity of a
# single-threaded bolt.
  supervisor.cpu.capacity: 400.0
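
# Example (illustrative) sizing for an 8-core worker node with 16 GB of RAM, following the
# convention above (CPU capacity = 100 per core). Ports and sizes are placeholders only.
# supervisor.slots.ports:
#     - 6700
#     - 6701
#     - 6702
#     - 6703
# supervisor.memory.capacity.mb: 12288.0
# supervisor.cpu.capacity: 800.0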
   
  ### worker.* configs are for task workers
  worker.heap.memory.mb: 768
  worker.childopts: "-Xmx%HEAP-MEM%m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump"
  worker.gc.childopts: ""
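
# Note: %HEAP-MEM% in worker.childopts is substituted at worker launch with the heap size assigned
# to that worker (worker.heap.memory.mb by default, or the amount computed by the resource-aware
# scheduler), so -Xmx normally does not need to be hard-coded here. Example (illustrative) override
# that keeps the substitution but switches the collector to G1:
# worker.childopts: "-Xmx%HEAP-MEM%m -XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump"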
   
  # Unlocking commercial features requires a special license from Oracle.
  # See http://www.oracle.com/technetwork/java/javase/terms/products/index.html
  # For this reason, profiler features are disabled by default.
  worker.profiler.enabled: false
  worker.profiler.childopts: "-XX:+UnlockCommercialFeatures -XX:+FlightRecorder"
  worker.profiler.command: "flight.bash"
  worker.heartbeat.frequency.secs: 1
   
# how often workers poll to check whether dynamic log levels should be reset (e.g. from DEBUG back to INFO)
  worker.log.level.reset.poll.secs: 30
   
  # control how many worker receiver threads we need per worker
  topology.worker.receiver.thread.count: 1
   
  task.heartbeat.frequency.secs: 3
  task.refresh.poll.secs: 10
  task.credentials.poll.secs: 30
  task.backpressure.poll.secs: 30
   
# backpressure is disabled by default
  topology.backpressure.enable: false
  backpressure.disruptor.high.watermark: 0.9
  backpressure.disruptor.low.watermark: 0.4
  backpressure.znode.timeout.secs: 30
  backpressure.znode.update.freq.secs: 15
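
# Example (illustrative): enabling the ZooKeeper-based backpressure mechanism together with the
# disruptor watermarks it reacts to. These show the shape of an override, not tuning advice.
# topology.backpressure.enable: true
# backpressure.disruptor.high.watermark: 0.9
# backpressure.disruptor.low.watermark: 0.4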
   
  zmq.threads: 1
  zmq.linger.millis: 5000
  zmq.hwm: 0
   
   
  storm.messaging.netty.server_worker_threads: 1
  storm.messaging.netty.client_worker_threads: 1
  storm.messaging.netty.buffer_size: 5242880 #5MB buffer
# Since nimbus.task.launch.secs and supervisor.worker.start.timeout.secs are both 120s, other workers
# should also wait at least that long before giving up on connecting to a peer worker. The reconnection
# period also needs to be longer than storm.zookeeper.session.timeout (default 20s), so that the
# reconnection can be aborted once the target worker is known to be dead.
  storm.messaging.netty.max_retries: 300
  storm.messaging.netty.max_wait_ms: 1000
  storm.messaging.netty.min_wait_ms: 100
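
# Rough arithmetic for the defaults above (assuming each retry waits close to max_wait_ms once the
# backoff has ramped up): 300 retries x 1000 ms is roughly 300 s (about 5 minutes) of reconnection
# attempts, comfortably above the 120 s launch/start timeouts and the 20 s ZooKeeper session timeout
# mentioned in the comment.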
   
# If the Netty messaging layer is busy (the Netty internal buffer is not writable), the Netty client
# will batch as many messages as possible, up to storm.messaging.netty.transfer.batch.size bytes;
# otherwise it flushes each message as soon as possible to reduce latency.
storm.messaging.netty.transfer.batch.size: 262144
# Sets the backlog value used when the channel binds to a local address
  storm.messaging.netty.socket.backlog: 500
   
# By default, Netty SASL authentication is disabled. Users can override this and set it to true for a specific topology.
  storm.messaging.netty.authentication: false
   
  # Default plugin to use for automatic network topology discovery
  storm.network.topography.plugin: org.apache.storm.networktopography.DefaultRackDNSToSwitchMapping
   
# default number of seconds the group mapping service will cache a user's groups
  storm.group.mapping.service.cache.duration.secs: 120
   
### topology.* configs are per-topology defaults (each topology can override them)
  topology.enable.message.timeouts: true
  topology.debug: false
  topology.workers: 1
  topology.acker.executors: null
  topology.eventlogger.executors: 0
  topology.tasks: null
  # maximum amount of time a message has to complete before it's considered failed
  topology.message.timeout.secs: 30
  topology.multilang.serializer: "org.apache.storm.multilang.JsonSerializer"
  topology.shellbolt.max.pending: 100
  topology.skip.missing.kryo.registrations: false
  topology.max.task.parallelism: null
  topology.max.spout.pending: null
  topology.state.synchronization.timeout.secs: 60
  topology.stats.sample.rate: 0.05
  topology.builtin.metrics.bucket.size.secs: 60
  topology.fall.back.on.java.serialization: true
  topology.worker.childopts: null
  topology.worker.logwriter.childopts: "-Xmx64m"
  topology.executor.receive.buffer.size: 1024 #batched
  topology.executor.send.buffer.size: 1024 #individual messages
  topology.transfer.buffer.size: 1024 # batched
  topology.tick.tuple.freq.secs: null
  topology.worker.shared.thread.pool.size: 4
  topology.spout.wait.strategy: "org.apache.storm.spout.SleepSpoutWaitStrategy"
  topology.sleep.spout.wait.strategy.time.ms: 1
  topology.error.throttle.interval.secs: 10
  topology.max.error.report.per.interval: 5
  topology.kryo.factory: "org.apache.storm.serialization.DefaultKryoFactory"
  topology.tuple.serializer: "org.apache.storm.serialization.types.ListDelegateSerializer"
  topology.trident.batch.emit.interval.millis: 500
  topology.testing.always.try.serialize: false
  topology.classpath: null
  topology.environment: null
  topology.bolts.outgoing.overflow.buffer.enable: false
  topology.disruptor.wait.timeout.millis: 1000
  topology.disruptor.batch.size: 100
  topology.disruptor.batch.timeout.millis: 1
  topology.disable.loadaware.messaging: false
  topology.state.checkpoint.interval.ms: 1000
  topology.localityaware.higher.bound.percent: 0.8
  topology.localityaware.lower.bound.percent: 0.2
  topology.serialized.message.size.metrics: false
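
# Example (illustrative): the topology.* values here are cluster-wide defaults; individual topologies
# typically override a handful of them at submit time. Putting the same keys in storm.yaml changes the
# default for every topology, e.g. (placeholder values):
# topology.workers: 4
# topology.max.spout.pending: 1000
# topology.message.timeout.secs: 60
# topology.acker.executors: 4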
   
  # Configs for Resource Aware Scheduler
# Topology priority describes the importance of a topology, starting from 0 in decreasing order of
# importance (i.e. 0 is the highest priority; importance decreases as the priority number increases).
# Recommended range is 0-29, but no hard limit is enforced.
  topology.priority: 29
  topology.component.resources.onheap.memory.mb: 128.0
  topology.component.resources.offheap.memory.mb: 0.0
  topology.component.cpu.pcore.percent: 10.0
  topology.worker.max.heap.size.mb: 768.0
  topology.scheduler.strategy: "org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy"
  resource.aware.scheduler.priority.strategy: "org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy"
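
# Example (illustrative): with the resource-aware scheduler in use, per-component resource requests
# and the worker heap cap are the main knobs. Placeholder values, not recommendations:
# topology.component.resources.onheap.memory.mb: 256.0
# topology.component.cpu.pcore.percent: 25.0
# topology.worker.max.heap.size.mb: 1024.0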
   
  blacklist.scheduler.tolerance.time.secs: 300
  blacklist.scheduler.tolerance.count: 3
  blacklist.scheduler.resume.time.secs: 1800
  blacklist.scheduler.reporter: "org.apache.storm.scheduler.blacklist.reporters.LogReporter"
  blacklist.scheduler.strategy: "org.apache.storm.scheduler.blacklist.strategies.DefaultBlacklistStrategy"
   
  dev.zookeeper.path: "/tmp/dev-storm-zookeeper"
   
  pacemaker.servers: []
  pacemaker.port: 6699
  pacemaker.base.threads: 10
  pacemaker.max.threads: 50
  pacemaker.thread.timeout: 10
  pacemaker.childopts: "-Xmx1024m"
  pacemaker.auth.method: "NONE"
  pacemaker.kerberos.users: []
  pacemaker.thrift.message.size.max: 10485760
   
  #default storm daemon metrics reporter plugins
  storm.daemon.metrics.reporter.plugins:
  - "org.apache.storm.daemon.metrics.reporters.JmxPreparableReporter"
   
  # configuration of cluster metrics consumer
  storm.cluster.metrics.consumer.publish.interval.secs: 60
   
  storm.resource.isolation.plugin: "org.apache.storm.container.cgroup.CgroupManager"
# Also determines whether the unit tests for cgroups run.
# If storm.resource.isolation.plugin.enable is set to false, the cgroup unit tests will not run.
  storm.resource.isolation.plugin.enable: false
  storm.cgroup.memory.enforcement.enable: false
   
  # Configs for CGroup support
  storm.cgroup.hierarchy.dir: "/cgroup/storm_resources"
  storm.cgroup.resources:
  - "cpu"
  - "memory"
  storm.cgroup.hierarchy.name: "storm"
  storm.supervisor.cgroup.rootdir: "storm"
  storm.cgroup.cgexec.cmd: "/bin/cgexec"
  storm.cgroup.memory.limit.tolerance.margin.mb: 0.0
  storm.supervisor.memory.limit.tolerance.margin.mb: 128.0
  storm.supervisor.hard.memory.limit.multiplier: 2.0
  storm.supervisor.hard.memory.limit.overage.mb: 2024
  storm.supervisor.low.memory.threshold.mb: 1024
  storm.supervisor.medium.memory.threshold.mb: 1536
  storm.supervisor.medium.memory.grace.period.ms: 30000
  storm.topology.classpath.beginning.enabled: false
worker.metrics:
    "CGroupMemory": "org.apache.storm.metric.cgroup.CGroupMemoryUsage"
    "CGroupMemoryLimit": "org.apache.storm.metric.cgroup.CGroupMemoryLimit"
    "CGroupCpu": "org.apache.storm.metric.cgroup.CGroupCpu"
    "CGroupCpuGuarantee": "org.apache.storm.metric.cgroup.CGroupCpuGuarantee"
   
  num.stat.buckets: 20
