1. #Nginx所用用户和组,window下不指定
  2. #user nobody;
  3.  
  4. #工作的子进程数量(通常等于CPU数量或者2倍于CPU)
  5. worker_processes 2;
  6.  
  7. #错误日志存放路径
  8. #error_log logs/error.log;
  9. #error_log logs/error.log notice;
  10. error_log logs/error.log info;
  11.  
  12. #指定pid存放文件
  13. pid logs/nginx.pid;
  14.  
  15. #master_process off; # 简化调试 此指令不得用于生产环境
  16. #daemon off; # 简化调试 此指令可以用到生产环境
  17.  
  18. #最大文件描述符
  19. worker_rlimit_nofile 51200;
  20.  
  21. events {
  22. #使用网络IO模型linux建议epoll,FreeBSD建议采用kqueue,window下不指定。
  23. #use epoll;
  24.  
  25. #允许最大连接数
  26. worker_connections 2048;
  27. }
  28.  
  29. # load modules compiled as Dynamic Shared Object (DSO)
  30. #
  31. #dso {
  32. # load ngx_http_fastcgi_module.so;
  33. # load ngx_http_rewrite_module.so;
  34. #}
  35.  
  36. http {
  37. include mime.types;
  38.  
  39. #反向代理配置
  40. include proxy.conf;
  41. include gzip.conf;
  42.  
  43. default_type application/octet-stream;
  44.  
  45. #定义日志格式
  46. log_format main '$remote_addr - $remote_user [$time_local] $request '
  47. '"$status" $body_bytes_sent "$http_referer" '
  48. '"$http_user_agent" "$http_x_forwarded_for"';
  49.  
  50. #access_log off;
  51. access_log logs/access.log main;
  52.  
  53. client_header_timeout 3m;
  54. client_body_timeout 3m;
  55. send_timeout 3m;
  56.  
  57. client_header_buffer_size 1k;
  58. large_client_header_buffers 4 4k;
  59.  
  60. #允许客户端请求的最大单文件字节数
  61. client_max_body_size 10m;
  62.  
  63. #缓冲区代理缓冲用户端请求的最大字节数,
  64. client_body_buffer_size 128k;
  65.  
  66. client_body_in_single_buffer on;
  67.  
  68. sendfile on;
  69. #tcp_nopush on;
  70. #tcp_nodelay on;
  71.  
  72. keepalive_timeout 65;
  73.  
  74. ##for genarate uuid
  75. #lua_package_path 'uuid4.lua';
  76. #init_by_lua '
  77. # uuid4 = require "uuid4"
  78. #';
  79.  
  80. upstream upstream_test{
  81. server 127.0.0.1:8080;
  82.  
  83. #ip_hash;
  84. keepalive 30;
  85.  
  86. ## tengine config
  87. #check interval=300 rise=10 fall=10 timeout=100 type=http port=80;
  88. #check_http_send "GET / HTTP/1.0\r\n\r\n";
  89. #check_http_expect_alive http_2xx http_3xx;
  90.  
  91. ## tengine config
  92. #session_sticky cookie=cookieTest mode=insert;
  93. }
  94.  
  95. server {
  96. listen 80;
  97. server_name localhost somename alias another.alias;
  98.  
  99. location / {
  100. autoindex on; #允许目录浏览
  101. autoindex_exact_size off; #显示文件大概大小
  102. autoindex_localtime on; #显示的文件时间为文件的服务器时间,off则为GMT时间
  103. limit_rate_after 10m; #10m之后下载速度为10k
  104. limit_rate 10k;
  105.  
  106. root html;
  107. index index.html index.htm;
  108. }
  109.  
  110. location /proxy {
  111. proxy_pass http://upstream_test;
  112. }
  113.  
  114. location ^~ /resources/ {
  115. alias /data/resources/;
  116. access_log off;
  117. expires 30d;
  118. }
  119.  
  120. ## for images、html static files
  121. location ~* ^/static/.*\.(jpg|jpeg|gif|png|html|htm|swf)$ {
  122. root /data/static/;
  123. access_log off;
  124. expires 30d;
  125. }
  126.  
  127. ## for js、css static files
  128. location ~* \.(js|css)$ {
  129. root /data/static/;
  130. access_log off;
  131. expires 1d;
  132. }
  133.  
  134. ## nginx lua example
  135.  
  136. #location /test-uuid{
  137. # set_by_lua $uuid 'return uuid4.getUUID() ';
  138.  
  139. # echo $uuid;
  140. #}
  141.  
  142. #location /test-io{
  143. # set_by_lua $uuid '
  144. # local t = io.popen("cat /data/test")
  145. # return t:read("*all")
  146. # ';
  147.  
  148. # echo $uuid;
  149. #}
  150.  
  151. #location /inline_concat {
  152. # MIME type determined by default_type:
  153. # default_type 'text/plain';
  154.  
  155. # set $a "hello";
  156. # set $b "world";
  157. # inline Lua script
  158. # set_by_lua $res "return ngx.arg[1]..ngx.arg[2]" $a $b;
  159. # echo $res;
  160. #}
  161.  
  162. #location /lua_test{
  163. # default_type 'text/plain';
  164. # content_by_lua '
  165. # if jit then
  166. # ngx.say(jit.version)
  167. # else
  168. # ngx.say(_VERSION)
  169. # end
  170. # ';
  171. #}
  172.  
  173. location /nginx_status {
  174. stub_status on;
  175. access_log off;
  176. allow 127.0.0.1;#设置为可访问该状态信息的ip
  177. deny all;
  178. }
  179.  
  180. ## tengine upstream status
  181. #location /upstream_status {
  182. # check_status;
  183.  
  184. # access_log off;
  185. # allow 127.0.0.1;
  186. # deny all;
  187. #}
  188.  
  189. error_page 500 502 503 504 /50x.html;
  190. location = /50x.html {
  191. root html;
  192. }
  193. }
  194.  
  195. ## rewrite example
  196. #server {
  197. # listen 8090;
  198. # server_name 127.0.0.1;
  199.  
  200. # location / {
  201. # rewrite ^(.*) http://125.76.215.230/test/ last;
  202. # }
  203. #}
  204.  
  205. # HTTPS server
  206. #
  207. #server {
  208. # listen 443 ssl;
  209. # server_name localhost;
  210.  
  211. # ssl_certificate cert.pem;
  212. # ssl_certificate_key cert.key;
  213.  
  214. # ssl_session_cache shared:SSL:1m;
  215. # ssl_session_timeout 5m;
  216.  
  217. # ssl_ciphers HIGH:!aNULL:!MD5;
  218. # ssl_prefer_server_ciphers on;
  219.  
  220. # location / {
  221. # root html;
  222. # index index.html index.htm;
  223. # }
  224. #}
  225. }

#mysql配置

  1. [client]
  2. port = 3306
  3. socket = /var/lib/mysql/mysql.sock
  4.  
  5. [mysqld]
  6. datadir=/var/lib/mysql
  7. socket=/var/lib/mysql/mysql.sock
  8. user = mysql
  9. port = 3306
  10. pid-file = /var/lib/mysql/mysql.pid
  11. #配置此项可以追踪sql执行记录
  12. log=存放日志的路径/mysql-sql.log
  13. ##以下为开启主从的必要配置
  14. server-id = 1
  15. binlog-do-db=db_nameA #指定对db_nameA记录二进制日志
  16. binlog-ignore-db=db_nameB #指定不对db_nameB记录二进制日志
  17. log_bin = /var/lib/mysql/log/mysql-bin.log
  18. expire_logs_days = 30
  19. character-set-server = utf8mb4
  20. default-storage-engine = InnoDB
  21. #thread connection
  22. max_connections = 1024
  23. max_connect_errors = 1024
  24. # Try number of CPU's*2 for thread_concurrency
  25. thread_concurrency = 8
  26. thread_cache_size = 256
  27. #*network
  28. skip-name-resolve
  29. max_allowed_packet = 1M
  30. #buffer&cache
  31. table_open_cache = 4096
  32. sort_buffer_size = 256K
  33. join_buffer_size = 256K
  34. #query cache
  35. query_cache_limit = 4M
  36. query_cache_size = 4M
  37. query_cache_type = 1
  38. #temptable
  39. tmp_table_size = 64M
  40. max_heap_table_size = 64M
  41.  
  42. #MyISAM
  43. key_buffer_size = 8M
  44. read_buffer_size = 1M
  45. read_rnd_buffer_size = 256K
  46.  
  47. #Innodb
  48. innodb_log_file_size = 256M
  49. innodb_log_files_in_group = 2
  50. innodb_status_file = 1
  51.  
  52. innodb_additional_mem_pool_size = 32M
  53. innodb_buffer_pool_size = 5G
  54. innodb_data_file_path = ibdata1:1G:autoextend
  55. innodb_file_per_table = 1
  56.  
  57. #innodb_additional_mem_pool_size = 32M #与上方第52-55行重复,已注释(重复键的取值依赖解析顺序,易造成混淆)
  58. #innodb_buffer_pool_size = 5G #与上方重复,已注释
  59. #innodb_data_file_path = ibdata1:1G:autoextend #与上方重复,已注释
  60. #innodb_file_per_table = 1 #与上方重复,已注释
  61.  
  62. innodb_force_recovery = 0
  63. #innodb_table_locks
  64. innodb_thread_concurrency = 8
  65.  
  66. innodb_flush_log_at_trx_commit = 2
  67.  
  68. #innodb_force_recovery = 0 #与上方第62-66行重复,已注释(重复键的取值依赖解析顺序,易造成混淆)
  69. ##innodb_table_locks
  70. #innodb_thread_concurrency = 8 #与上方重复,已注释
  71.  
  72. #innodb_flush_log_at_trx_commit = 2 #与上方重复,已注释
  73.  
  74. #slow log
  75. slow_query_log=1
  76. long_query_time=1
  77. slow_query_log_file=/var/lib/mysql/log/slow.log
  78.  
  79. [mysqld_safe]
  80. #error log
  81. log-error = /var/log/mysqld.log
  82. pid-file = /var/lib/mysql/mysql.pid
  83. open-files-limit = 40960
  84.  
  85. [mysqldump]
  86. quick
  87. max_allowed_packet = 48M
  88.  
  89. [mysql]
  90. no-auto-rehash
  91. # Remove the next comment character if you are not familiar with SQL
  92. #safe-updates
  93. default-character-set=utf8mb4 #与服务端character-set-server = utf8mb4保持一致,避免客户端乱码
  94.  
  95. [isamchk]
  96. key_buffer = 128M
  97. sort_buffer_size = 128M
  98. read_buffer = 2M
  99. write_buffer = 2M
  100.  
  101. [myisamchk]
  102. key_buffer = 128M
  103. sort_buffer_size = 128M
  104. read_buffer = 2M
  105. write_buffer = 2M

##redis配置

  1. # By default Redis does not run as a daemon. Use 'yes' if you need it.
  2. # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
  3. # 设置此选项使得redis以守护进程方式运行
  4. daemonize no
  5.  
  6. # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
  7. # default. You can specify a custom pid file location here.
  8. # 以守护进程运行时,pid的存放路径
  9. pidfile /var/run/redis.pid
  10.  
  11. # Accept connections on the specified port, default is 6379.
  12. # If port 0 is specified Redis will not listen on a TCP socket.
  13. #端口
  14. port 6379
  15.  
  16. # If you want you can bind a single interface, if the bind option is not
  17. # specified all the interfaces will listen for incoming connections.
  18. #指定Redis可接收请求的IP地址,不设置将处理所有请求,建议生产环境中设置
  19. # bind 127.0.0.1
  20.  
  21. # Close the connection after a client is idle for N seconds (0 to disable)
  22. #客户端连接的超时时间,单位为秒,超时后会关闭连接
  23. timeout 0
  24.  
  25. # Specify the log file name. Also 'stdout' can be used to force
  26. # Redis to log on the standard output. Note that if you use standard
  27. # output for logging but daemonize, logs will be sent to /dev/null
  28. #配置 log 文件地址,默认打印在命令行终端的窗口上
  29. logfile stdout
  30.  
  31. # Set the number of databases. The default database is DB 0, you can select
  32. # a different one on a per-connection basis using SELECT <dbid> where
  33. # dbid is a number between 0 and 'databases'-1
  34. #设置数据库的个数,可以使用 SELECT <dbid>命令来切换数据库。默认使用的数据库是 0
  35. databases 16
  36.  
  37. #
  38. # Save the DB on disk:
  39. #
  40. # save <seconds> <changes>
  41. #
  42. # Will save the DB if both the given number of seconds and the given
  43. # number of write operations against the DB occurred.
  44. #
  45. # In the example below the behaviour will be to save:
  46. # after 900 sec (15 min) if at least 1 key changed
  47. # after 300 sec (5 min) if at least 10 keys changed
  48. # after 60 sec if at least 10000 keys changed
  49. #
  50. # Note: you can disable saving at all commenting all the "save" lines.
  51. #设置 Redis 进行数据库镜像的频率。
  52. #900秒之内有1个keys发生变化时
  53. #300秒之内有10个keys发生变化时
  54. #60秒之内有10000个keys发生变化时
  55. save 900 1
  56. save 300 10
  57. save 60 10000
  58.  
  59. # Compress string objects using LZF when dump .rdb databases?
  60. # For default that's set to 'yes' as it's almost always a win.
  61. # If you want to save some CPU in the saving child set it to 'no' but
  62. # the dataset will likely be bigger if you have compressible values or keys.
  63. #在进行镜像备份时,是否进行压缩
  64. rdbcompression yes
  65.  
  66. # The filename where to dump the DB
  67. #镜像备份文件的文件名
  68. dbfilename dump.rdb
  69.  
  70. # The working directory.
  71. #
  72. # The DB will be written inside this directory, with the filename specified
  73. # above using the 'dbfilename' configuration directive.
  74. #
  75. # Also the Append Only File will be created inside this directory.
  76. #
  77. # Note that you must specify a directory here, not a file name.
  78. #数据库镜像备份的文件放置的路径。这里的路径跟文件名要分开配置是因为 Redis 在进行备份时,先会将当前数据库的状态写入到一个临时文件中,等备份完成时,再把该该临时文件替换为上面所指定的文件,
  79. #而这里的临时文件和上面所配置的备份文件都会放在这个指定的路径当中
  80. dir ./
  81.  
  82. # Master-Slave replication. Use slaveof to make a Redis instance a copy of
  83. # another Redis server. Note that the configuration is local to the slave
  84. # so for example it is possible to configure the slave to save the DB with a
  85. # different interval, or to listen to another port, and so on.
  86. #设置该数据库为其他数据库的从数据库
  87. # slaveof <masterip> <masterport>
  88.  
  89. # If the master is password protected (using the "requirepass" configuration
  90. # directive below) it is possible to tell the slave to authenticate before
  91. # starting the replication synchronization process, otherwise the master will
  92. # refuse the slave request.
  93. #指定与主数据库连接时需要的密码验证
  94. # masterauth <master-password>
  95.  
  96. # Require clients to issue AUTH <PASSWORD> before processing any other
  97. # commands. This might be useful in environments in which you do not trust
  98. # others with access to the host running redis-server.
  99. #
  100. # This should stay commented out for backward compatibility and because most
  101. # people do not need auth (e.g. they run their own servers).
  102. #
  103. # Warning: since Redis is pretty fast an outside user can try up to
  104. # 150k passwords per second against a good box. This means that you should
  105. # use a very strong password otherwise it will be very easy to break.
  106. #设置客户端连接后进行任何其他指定前需要使用的密码。
  107. #警告:redis速度相当快,一个外部的用户可以在一秒钟进行150K次的密码尝试,你需要指定非常非常强大的密码来防止暴力破解。
  108.  
  109. # requirepass foobared
  110. # Set the max number of connected clients at the same time. By default there
  111. # is no limit, and it's up to the number of file descriptors the Redis process
  112. # is able to open. The special value '0' means no limits.
  113. # Once the limit is reached Redis will close all the new connections sending
  114. # an error 'max number of clients reached'.
  115. #限制同时连接的客户数量。当连接数超过这个值时,redis 将不再接收其他连接请求,客户端尝试连接时将收到 error 信息
  116. # maxclients 128
  117.  
  118. # Don't use more memory than the specified amount of bytes.
  119. # When the memory limit is reached Redis will try to remove keys
  120. # accordingly to the eviction policy selected (see maxmemory-policy).
  121. #
  122. # If Redis can't remove keys according to the policy, or if the policy is
  123. # set to 'noeviction', Redis will start to reply with errors to commands
  124. # that would use more memory, like SET, LPUSH, and so on, and will continue
  125. # to reply to read-only commands like GET.
  126. #
  127. # This option is usually useful when using Redis as an LRU cache, or to set
  128. # an hard memory limit for an instance (using the 'noeviction' policy).
  129. # 当内存达到设置上限时,内存的淘汰策略
  130. # maxmemory-policy [volatile-lru or volatile-ttl or volatile-random or allkeys-lru or allkeys-random]
  131.  
  132. # WARNING: If you have slaves attached to an instance with maxmemory on,
  133. # the size of the output buffers needed to feed the slaves are subtracted
  134. # from the used memory count, so that network problems / resyncs will
  135. # not trigger a loop where keys are evicted, and in turn the output
  136. # buffer of slaves is full with DELs of keys evicted triggering the deletion
  137. # of more keys, and so forth until the database is completely emptied.
  138. #
  139. # In short… if you have slaves attached it is suggested that you set a lower
  140. # limit for maxmemory so that there is some free RAM on the system for slave
  141. # output buffers (but this is not needed if the policy is 'noeviction').
  142. #设置redis能够使用的最大内存。当内存满了的时候,如果还接收到set命令,redis将先尝试剔除设置过expire信息的key,而不管该key的过期时间还没有到达。
  143. #在删除时,将按照过期时间进行删除,最早将要被过期的key将最先被删除。如果带有expire信息的key都删光了,那么将返回错误。
  144. #这样,redis将不再接收写请求,只接收get请求。maxmemory的设置比较适合于把redis当作于类似memcached 的缓存来使用
  145. # maxmemory <bytes>
  146.  
  147. # By default Redis asynchronously dumps the dataset on disk. If you can live
  148. # with the idea that the latest records will be lost if something like a crash
  149. # happens this is the preferred way to run Redis. If instead you care a lot
  150. # about your data and don't want to that a single record can get lost you should
  151. # enable the append only mode: when this mode is enabled Redis will append
  152. # every write operation received in the file appendonly.aof. This file will
  153. # be read on startup in order to rebuild the full dataset in memory.
  154. #
  155. # Note that you can have both the async dumps and the append only file if you
  156. # like (you have to comment the "save" statements above to disable the dumps).
  157. # Still if append only mode is enabled Redis will load the data from the
  158. # log file at startup ignoring the dump.rdb file.
  159. #
  160. # IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
  161. # log file in background when it gets too big.
  162. #默认情况下,redis 会在后台异步的把数据库镜像备份到磁盘,但是该备份是非常耗时的,而且备份也不能很频繁,如果发生诸如拉闸限电、拔插头等状况,那么将造成比较大范围的数据丢失。
  163. #所以redis提供了另外一种更加高效的数据库备份及灾难恢复方式。
  164. #开 启append only 模式之后,redis 会把所接收到的每一次写操作请求都追加到appendonly.aof 文件中,当redis重新启动时,会从该文件恢复出之前的状态。
  165. #但是这样会造成 appendonly.aof 文件过大,所以redis还支持了BGREWRITEAOF 指令,对appendonly.aof进行重新整理
  166. appendonly no
  167.  
  168. # The fsync() call tells the Operating System to actually write data on disk
  169. # instead to wait for more data in the output buffer. Some OS will really flush
  170. # data on disk, some other OS will just try to do it ASAP.
  171. #
  172. # Redis supports three different modes:
  173. #
  174. # no: don't fsync, just let the OS flush the data when it wants. Faster.
  175. # always: fsync after every write to the append only log . Slow, Safest.
  176. # everysec: fsync only if one second passed since the last fsync. Compromise.
  177. #
  178. # The default is "everysec" that's usually the right compromise between
  179. # speed and data safety. It's up to you to understand if you can relax this to
  180. # "no" that will let the operating system flush the output buffer when
  181. # it wants, for better performances (but if you can live with the idea of
  182. # some data loss consider the default persistence mode that's snapshotting),
  183. # or on the contrary, use "always" that's very slow but a bit safer than
  184. # everysec.
  185. #
  186. # If unsure, use "everysec".
  187. #设置对 appendonly.aof 文件进行同步的频率。always 表示每次有写操作都进行同步,everysec 表示对写操作进行累积,每秒同步一次。
  188. # appendfsync always
  189. appendfsync everysec
  190. # appendfsync no
  191.  
  192. # Virtual Memory allows Redis to work with datasets bigger than the actual
  193. # amount of RAM needed to hold the whole dataset in memory.
  194. # In order to do so very used keys are taken in memory while the other keys
  195. # are swapped into a swap file, similarly to what operating systems do
  196. # with memory pages.
  197. #
  198. # To enable VM just set 'vm-enabled' to yes, and set the following three
  199. # VM parameters accordingly to your needs.
  200. #是否开启虚拟内存支持。因为 redis 是一个内存数据库,而且当内存满的时候,无法接收新的写请求,所以在redis2.0中,提供了虚拟内存的支持。
  201. #但是需要注意的是,redis中,所有的key都会放在内存中,在内存不够时,只会把value 值放入交换区。
  202. #这样保证了虽然使用虚拟内存,但性能基本不受影响,同时,你需要注意的是你要把vm-max-memory设置到足够来放下你的所有的key
  203. vm-enabled no
  204. # vm-enabled yes
  205.  
  206. # This is the path of the Redis swap file. As you can guess, swap files
  207. # can't be shared by different Redis instances, so make sure to use a swap
  208. # file for every redis process you are running. Redis will complain if the
  209. # swap file is already in use.
  210. #
  211. # The best kind of storage for the Redis swap file (that's accessed at random)
  212. # is a Solid State Disk (SSD).
  213. #
  214. # *** WARNING *** if you are using a shared hosting the default of putting
  215. # the swap file under /tmp is not secure. Create a dir with access granted
  216. # only to Redis user and configure Redis to create the swap file there.
  217. #设置虚拟内存的交换文件路径
  218. vm-swap-file /tmp/redis.swap
  219.  
  220. # vm-max-memory configures the VM to use at max the specified amount of
  221. # RAM. Everything that does not fit will be swapped on disk *if* possible, that
  222. # is, if there is still enough contiguous space in the swap file.
  223. #
  224. # With vm-max-memory 0 the system will swap everything it can. Not a good
  225. # default, just specify the max amount of RAM you can in bytes, but it's
  226. # better to leave some margin. For instance specify an amount of RAM
  227. # that's more or less between 60 and 80% of your free RAM.
  228. #这里设置开启虚拟内存之后,redis将使用的最大物理内存的大小。默认为0,redis将把他所有的能放到交换文件的都放到交换文件中,以尽量少的使用物理内存。
  229. #在生产环境下,需要根据实际情况设置该值,最好不要使用默认的 0
  230. vm-max-memory 0
  231.  
  232. # Redis swap files is split into pages. An object can be saved using multiple
  233. # contiguous pages, but pages can't be shared between different objects.
  234. # So if your page is too big, small objects swapped out on disk will waste
  235. # a lot of space. If you page is too small, there is less space in the swap
  236. # file (assuming you configured the same number of total swap file pages).
  237. #
  238. # If you use a lot of small objects, use a page size of 64 or 32 bytes.
  239. # If you use a lot of big objects, use a bigger page size.
  240. # If unsure, use the default
  241. #设置虚拟内存的页大小,如果你的 value 值比较大,比如说你要在 value 中放置博客、新闻之类的所有文章内容,就设大一点,如果要放置的都是很小的内容,那就设小一点
  242. vm-page-size 32
  243.  
  244. # Number of total memory pages in the swap file.
  245. # Given that the page table (a bitmap of free/used pages) is taken in memory,
  246. # every 8 pages on disk will consume 1 byte of RAM.
  247. #
  248. # The total swap size is vm-page-size * vm-pages
  249. #
  250. # With the default of 32-bytes memory pages and 134217728 pages Redis will
  251. # use a 4 GB swap file, that will use 16 MB of RAM for the page table.
  252. #
  253. # It's better to use the smallest acceptable value for your application,
  254. # but the default is large in order to work in most conditions.
  255. #设置交换文件的总的 page 数量,需要注意的是,page table信息会放在物理内存中,每8个page 就会占据RAM中的 1 个 byte。
  256. #总的虚拟内存大小 = vm-page-size * vm-pages
  257. vm-pages 134217728
  258.  
  259. # Max number of VM I/O threads running at the same time.
  260. # This threads are used to read/write data from/to swap file, since they
  261. # also encode and decode objects from disk to memory or the reverse, a bigger
  262. # number of threads can help with big objects even if they can't help with
  263. # I/O itself as the physical device may not be able to couple with many
  264. # reads/writes operations at the same time.
  265. #
  266. # The special value of 0 turn off threaded I/O and enables the blocking
  267. # Virtual Memory implementation.
  268. #设置 VM IO 同时使用的线程数量。
  269. vm-max-threads 4
  270.  
  271. # Hashes are encoded in a special way (much more memory efficient) when they
  272. # have at max a given number of elements, and the biggest element does not
  273. # exceed a given threshold. You can configure this limits with the following
  274. # configuration directives.
  275. #redis 2.0 中引入了 hash 数据结构。
  276. #hash 中包含超过指定元素个数并且最大的元素当没有超过临界时,hash 将以zipmap(又称为 small hash大大减少内存使用)来存储,这里可以设置这两个临界值
  277. hash-max-zipmap-entries 512
  278. hash-max-zipmap-value 64
  279.  
  280. # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
  281. # order to help rehashing the main Redis hash table (the one mapping top-level
  282. # keys to values). The hash table implementation redis uses (see dict.c)
  283. # performs a lazy rehashing: the more operation you run into an hash table
  284. # that is rehashing, the more rehashing "steps" are performed, so if the
  285. # server is idle the rehashing is never complete and some more memory is used
  286. # by the hash table.
  287. #
  288. # The default is to use this millisecond 10 times every second in order to
  289. # active rehashing the main dictionaries, freeing memory when possible.
  290. #
  291. # If unsure:
  292. # use "activerehashing no" if you have hard latency requirements and it is
  293. # not a good thing in your environment that Redis can reply form time to time
  294. # to queries with 2 milliseconds delay.
  295. #
  296. # use "activerehashing yes" if you don't have such hard requirements but
  297. # want to free memory asap when possible.
  298. #开启之后,redis 将在每 100 毫秒时使用 1 毫秒的 CPU 时间来对 redis 的 hash 表进行重新 hash,可以降低内存的使用。
  299. #当你的使用场景中,有非常严格的实时性需要,不能够接受 Redis 时不时的对请求有 2 毫秒的延迟的话,把这项配置为 no。
  300. #如果没有这么严格的实时性要求,可以设置为 yes,以便能够尽可能快的释放内存
  301. activerehashing yes

##proxy.conf

  1. #后端的Web服务器可以通过X-Forwarded-For获取用户真实IP
  2. proxy_set_header Host $host;
  3. proxy_set_header X-Real-IP $remote_addr;
  4. proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
  5.  
  6. #nginx跟后端服务器连接超时时间(代理连接超时)
  7. proxy_connect_timeout 90;
  8.  
  9. #连接成功后,后端服务器响应时间(代理接收超时)
  10. proxy_read_timeout 90;
  11.  
  12. #nginx发送数据到后端服务器超时时间(代理发送超时)
  13. proxy_send_timeout 600;
  14.  
  15. #设置代理服务器(nginx)保存用户头信息的缓冲区大小
  16. proxy_buffer_size 4k;
  17.  
  18. #proxy_buffers缓冲区,网页平均在32k以下的话,这样设置
  19. proxy_buffers 4 32k;
  20.  
  21. #高负荷下缓冲大小(proxy_buffers*2)
  22. proxy_busy_buffers_size 64k;
  23.  
  24. #设定缓存文件夹大小,大于这个值,将从upstream服务器传
  25. proxy_temp_file_write_size 64k;
  26.  
  27. #为了支持新的upstream keepalive选项
  28. proxy_http_version 1.1;
  29. proxy_set_header Connection "";

 ##gzip.conf

  1. gzip on;
  2. gzip_comp_level 7;
  3. gzip_min_length 1100; #需要压缩的最小长度
  4. gzip_buffers 4 8k;
  5. gzip_types text/plain application/javascript text/css text/xml application/x-httpd-php; #指定需要压缩的文件类型
  6. output_buffers 1 32k;
  7. postpone_output 1460;

  

转载:https://github.com/superhj1987/awesome-config/blob/master/nginx/nginx.conf

nginx_mysql_redis配置的更多相关文章

  1. 配置android sdk 环境

    1:下载adnroid sdk安装包 官方下载地址无法打开,没有vpn,使用下面这个地址下载,地址:http://www.android-studio.org/

  2. Android Studio配置 AndroidAnnotations——Hi_博客 Android App 开发笔记

    以前用Eclicps 用习惯了现在 想学学 用Android Studio 两天的钻研终于 在我电脑上装了一个Android Studio 并完成了AndroidAnnotations 的配置. An ...

  3. react-router 组件式配置与对象式配置小区别

    1. react-router 对象式配置 和 组件式配置    组件式配置(Redirect) ----对应---- 对象式配置(onEnter钩子) IndexRedirect -----对应-- ...

  4. 总结:Mac前端开发环境的搭建(配置)

    新年新气象,在2016年的第一天,我入手了人生中第一台自己的电脑(大一时好友赠送的电脑在一次无意中烧坏了主板,此后便不断借用别人的或者网站的).macbook air,身上已无分文...接下来半年的房 ...

  5. Android Studio 多个编译环境配置 多渠道打包 APK输出配置

    看完这篇你学到什么: 熟悉gradle的构建配置 熟悉代码构建环境的目录结构,你知道的不仅仅是只有src/main 开发.生成环境等等环境可以任意切换打包 多渠道打包 APK输出文件配置 需求 一般我 ...

  6. Virtual Box配置CentOS7网络(图文教程)

    之前很多次安装CentOS7虚拟机,每次配置网络在网上找教程,今天总结一下,全图文配置,方便以后查看. Virtual Box可选的网络接入方式包括: NAT 网络地址转换模式(NAT,Network ...

  7. [linux]阿里云主机的免登陆安全SSH配置与思考

    公司服务器使用的第三方云端服务,即阿里云,而本地需要经常去登录到服务器做相应的配置工作,鉴于此,每次登录都要使用密码是比较烦躁的,本着极速思想,我们需要配置我们的免登陆. 一 理论概述 SSH介绍 S ...

  8. nginx配置反向代理或跳转出现400问题处理记录

    午休完上班后,同事说测试站点访问接口出现400 Bad Request  Request Header Or Cookie Too Large提示,心想还好是测试服务器出现问题,影响不大,不过也赶紧上 ...

  9. Swift3.0服务端开发(一) 完整示例概述及Perfect环境搭建与配置(服务端+iOS端)

    本篇博客算是一个开头,接下来会持续更新使用Swift3.0开发服务端相关的博客.当然,我们使用目前使用Swift开发服务端较为成熟的框架Perfect来实现.Perfect框架是加拿大一个创业团队开发 ...

随机推荐

  1. NFR

    你NFR了吗? NFR,即非功能性需求 (Non -Functional Requirements) ,即系统能够完成所期望的工作的性能与质量.具体包括如下内容: – 效率: 软件实现其功能所需要的计 ...

  2. [Erlang 0129] Erlang 杂记 VI

    把之前阅读资料的时候记下的东西,整理了一下. Adding special-purpose processor support to the Erlang VM   P23 简单介绍了Erlang C ...

  3. MTU(Maximum transmission unit) 最大传输单元

    最大传输单元(Maximum transmission unit),以太网MTU为1500. 不同网络MTU如下: 如果最大报文数据大小(MSS)超过MTU,则会引起分片操作.   路径MTU: 网路 ...

  4. S5PV210_串行通信

    1.universal asynchronous reciver and transmitter 通用异步收发器 2.transmitter:由发送缓冲区和发送移位器构成.发送信息时,首先将信息编码( ...

  5. async & await 的前世今生

    async 和 await 出现在C# 5.0之后,给并行编程带来了不少的方便,特别是当在MVC中的Action也变成async之后,有点开始什么都是async的味道了.但是这也给我们编程埋下了一些隐 ...

  6. 用 ElementTree 在 Python 中解析 XML

    用 ElementTree 在 Python 中解析 XML 原文: http://eli.thegreenplace.net/2012/03/15/processing-xml-in-python- ...

  7. Jenkins插件安装和系统配置

    前面我们只是把Jenkins部署在Tomcat中了,下面来看看Jenkins中的插件和一些基础的系统配置. 1.用户管理 我们一般的项目组肯定是由多名成员组成的,如何向Jenkins添加我们的成员呢? ...

  8. day7_subprocess模块和面向对象,反射

    常用subprocess方法示例 #执行命令,返回命令执行状态 , 0 or 非0>>> retcode = subprocess.call(["ls", &qu ...

  9. Canvas电子签名和游戏化

    今天一天的时间都在做包团报价的无流程原型设计,一方面参考了其他系统,一方面整理先在系统中不合理的部分,规范了报价元素的分类.梳理了意向需求,其实原来粗略的放了一个模板进去是听不靠谱的.客户的要求-&g ...

  10. 基于Visual Studio Code搭建Golang开发调试环境【非转载】

    由于对Docker+kubernetes的使用及持续关注,要理解这个平台的原理,势必需要对golang有一定的理解,基于此开始利用业余时间学习go,基础语法看完之后,搭建开发环境肯定是第一步,虽然能g ...