mongodb_server.py

#! /bin/env python
# -*- coding:utf8 -*-
# Collect MongoDB metrics (serverStatus / replSetGetStatus / sharding balancer
# state) for pushing to an Open-Falcon agent.
#
# NOTE(review): this module was reconstructed from a garbled web scrape in
# which every bare numeric literal had been stripped.  Values were restored
# from the surviving inline comments and the MongoDB serverStatus /
# replSetGetStatus documentation -- confirm against the original project.
import sys
import os


class mongodbMonitor(object):
    """Collects monitoring metrics from a local mongod / mongos instance."""

    def mongodb_connect(self, host=None, port=None, user=None, password=None):
        """Connect to a MongoDB instance.

        Returns (0, connection) on success, or (exception_type, None) when
        the server is unreachable -- callers test ``err != 0``.
        """
        try:
            # Local import keeps this module importable when pymongo is absent.
            from pymongo import MongoClient
            # 2s server selection timeout -- TODO confirm original value.
            conn = MongoClient(host, port, serverSelectionTimeoutMS=2000)
            if user and password:
                db_admin = conn["admin"]
                if not db_admin.authenticate(user, password):
                    pass
            # Force a round-trip so a dead server raises here.
            conn.server_info()
        except Exception:
            e = sys.exc_info()[0]
            return e, None
        return 0, conn

    def get_mongo_role(self, conn):
        """Classify the node.

        Returns 1 for a data node (standalone, replica-set primary or
        secondary), 2 for a mongos router, 3 for a config server.
        """
        mongo_role = 1
        conn.server_info()
        if conn.is_mongos:
            mongo_role = 2
        elif "chunks" in conn.get_database("config").collection_names():
            # Not a mongos but has config.chunks -> it's a config server.
            mongo_role = 3
        return mongo_role

    def get_mongo_monitor_data(self, conn):
        """Gather all metrics appropriate for this node's role into one dict."""
        mongo_monitor_dict = {}
        # Liveness metric, reported for every node type.
        mongo_monitor_dict["mongo_local_alive"] = 1
        mongo_role = self.get_mongo_role(conn)
        if mongo_role == 1:  # data node
            mongodb_role, serverStatus_dict = self.serverStatus(conn)
            mongo_monitor_dict.update(serverStatus_dict)
            if mongodb_role == "master" or mongodb_role == "secondary":
                mongo_monitor_dict.update(self.repl_status(conn))
            else:
                print("this is standalone node")
        elif mongo_role == 2:  # mongos
            mongo_monitor_dict.update(self.shard_status(conn))
        return mongo_monitor_dict

    def serverStatus(self, connection):
        """Run the serverStatus command and flatten it into falcon metrics.

        Returns (role, metrics) where role is "master", "secondary" or
        "standalone" and metrics maps falcon metric names to values.
        """
        # The string form avoids the pymongo SON helper the original used.
        serverStatus = connection.admin.command("serverStatus")
        mongodb_server_dict = {}
        mongo_version = serverStatus["version"]

        # uptime metric
        mongodb_server_dict["uptime"] = int(serverStatus["uptime"])

        # "asserts" section metrics
        mongo_asserts = serverStatus["asserts"]
        for asserts_key in mongo_asserts:
            mongodb_server_dict["asserts_" + asserts_key] = mongo_asserts[asserts_key]

        # "extra_info" section: page_faults (falcon COUNTER type)
        if "extra_info" in serverStatus:
            mongodb_server_dict["page_faults"] = serverStatus["extra_info"]["page_faults"]

        # "connections" section metrics
        current_conn = serverStatus["connections"]["current"]
        available_conn = serverStatus["connections"]["available"]
        mongodb_server_dict["connections_current"] = current_conn
        mongodb_server_dict["connections_available"] = available_conn
        # Multiply before dividing: the original's int division always gave 0
        # under Python 2.
        mongodb_server_dict["connections_used_percent"] = int(
            current_conn * 100 / (current_conn + available_conn))
        # Total created since mongod start (COUNTER metric).
        mongodb_server_dict["connections_totalCreated"] = serverStatus["connections"]["totalCreated"]

        # "globalLock" currentQueue
        current_queue = serverStatus["globalLock"]["currentQueue"]
        mongodb_server_dict["globalLock_currentQueue_total"] = current_queue["total"]
        mongodb_server_dict["globalLock_currentQueue_readers"] = current_queue["readers"]
        mongodb_server_dict["globalLock_currentQueue_writers"] = current_queue["writers"]

        # "locks" section -- layout changed in MongoDB 3.0.
        # NOTE(review): lexicographic version compare; wrong for >= 10.x.
        lock_type_names = {"R": "Slock", "W": "Xlock", "r": "ISlock", "w": "IXlock"}
        if "locks" in serverStatus and mongo_version > "3.0":
            for lock_scope in serverStatus["locks"]:  # Global, Database, Collection, oplog
                for lock_metric in serverStatus["locks"][lock_scope]:
                    for lock_type, lock_value in serverStatus["locks"][lock_scope][lock_metric].items():
                        lock_name = lock_type_names.get(lock_type)
                        if lock_name is None:
                            # Original raised NameError on unknown lock modes.
                            continue
                        lock_metric_key = "locks_" + lock_scope + "_" + lock_metric + "_" + lock_name
                        mongodb_server_dict[lock_metric_key] = lock_value

        # "network" section: bytesIn, bytesOut, numRequests (COUNTER type)
        if "network" in serverStatus:
            for network_metric in serverStatus["network"]:
                mongodb_server_dict["network_" + network_metric] = serverStatus["network"][network_metric]

        # "opcounters" section: insert/query/update/delete/getmore/command (COUNTER)
        if "opcounters" in serverStatus:
            for opcounters_metric in serverStatus["opcounters"]:
                mongodb_server_dict["opcounters_" + opcounters_metric] = serverStatus["opcounters"][opcounters_metric]

        # "opcountersRepl" section (COUNTER).  Bug fix: the original read these
        # values from serverStatus["opcounters"] instead of "opcountersRepl".
        if "opcountersRepl" in serverStatus:
            for opcountersRepl_metric in serverStatus["opcountersRepl"]:
                mongodb_server_dict["opcountersRepl_" + opcountersRepl_metric] = \
                    serverStatus["opcountersRepl"][opcountersRepl_metric]

        # "mem" section: everything except bits/supported is in MB -> bytes.
        if "mem" in serverStatus:
            for mem_metric in serverStatus["mem"]:
                mem_metric_key = "mem_" + mem_metric
                if mem_metric in ["bits", "supported"]:
                    mongodb_server_dict[mem_metric_key] = serverStatus["mem"][mem_metric]
                else:
                    mongodb_server_dict[mem_metric_key] = serverStatus["mem"][mem_metric] * 1024 * 1024

        # "dur" (journaling) section, MB -> bytes.
        if "dur" in serverStatus:
            mongodb_server_dict["dur_journaledBytes"] = serverStatus["dur"]["journaledMB"] * 1024 * 1024
            mongodb_server_dict["dur_writeToDataFilesBytes"] = serverStatus["dur"]["writeToDataFilesMB"] * 1024 * 1024
            mongodb_server_dict["dur_commitsInWriteLock"] = serverStatus["dur"]["commitsInWriteLock"]

        # "repl" section -> node role.
        mongodb_role = ""
        if "repl" in serverStatus and "secondary" in serverStatus["repl"]:
            if serverStatus["repl"]["ismaster"]:
                mongodb_role = "master"
            if serverStatus["repl"]["secondary"]:
                mongodb_role = "secondary"
        else:  # not replica-set mode
            mongodb_role = "standalone"

        # "backgroundFlushing" section, only present with MMAPv1.
        if "backgroundFlushing" in serverStatus:
            for bgFlush_metric in serverStatus["backgroundFlushing"]:
                if bgFlush_metric != "last_finished":  # discard last_finished
                    mongodb_server_dict["backgroundFlushing_" + bgFlush_metric] = \
                        serverStatus["backgroundFlushing"][bgFlush_metric]

        # cursor metrics from the "metrics" section.
        if "metrics" in serverStatus and "cursor" in serverStatus["metrics"]:
            cursor_status = serverStatus["metrics"]["cursor"]
            mongodb_server_dict["cursor_timedOut"] = cursor_status["timedOut"]
            mongodb_server_dict["cursor_open_noTimeout"] = cursor_status["open"]["noTimeout"]
            mongodb_server_dict["cursor_open_pinned"] = cursor_status["open"]["pinned"]
            mongodb_server_dict["cursor_open_total"] = cursor_status["open"]["total"]

        # "wiredTiger" engine section.
        if "wiredTiger" in serverStatus:
            serverStatus_wt = serverStatus["wiredTiger"]
            # cache
            wt_cache = serverStatus_wt["cache"]
            mongodb_server_dict["wt_cache_used_total_bytes"] = wt_cache["bytes currently in the cache"]
            mongodb_server_dict["wt_cache_dirty_bytes"] = wt_cache["tracked dirty bytes in the cache"]
            mongodb_server_dict["wt_cache_readinto_bytes"] = wt_cache["bytes read into cache"]
            mongodb_server_dict["wt_cache_writtenfrom_bytes"] = wt_cache["bytes written from cache"]
            # concurrentTransactions (available read/write tickets)
            wt_concurrentTransactions = serverStatus_wt["concurrentTransactions"]
            mongodb_server_dict["wt_concurrentTransactions_write"] = wt_concurrentTransactions["write"]["available"]
            mongodb_server_dict["wt_concurrentTransactions_read"] = wt_concurrentTransactions["read"]["available"]
            # block-manager
            wt_block_manager = serverStatus_wt["block-manager"]
            mongodb_server_dict["wt_bm_bytes_read"] = wt_block_manager["bytes read"]
            mongodb_server_dict["wt_bm_bytes_written"] = wt_block_manager["bytes written"]
            mongodb_server_dict["wt_bm_blocks_read"] = wt_block_manager["blocks read"]
            mongodb_server_dict["wt_bm_blocks_written"] = wt_block_manager["blocks written"]

        # "rocksdb" engine section.
        if "rocksdb" in serverStatus:
            serverStatus_rocksdb = serverStatus["rocksdb"]
            mongodb_server_dict["rocksdb_num_immutable_mem_table"] = serverStatus_rocksdb["num-immutable-mem-table"]
            mongodb_server_dict["rocksdb_mem_table_flush_pending"] = serverStatus_rocksdb["mem-table-flush-pending"]
            mongodb_server_dict["rocksdb_compaction_pending"] = serverStatus_rocksdb["compaction-pending"]
            mongodb_server_dict["rocksdb_background_errors"] = serverStatus_rocksdb["background-errors"]
            mongodb_server_dict["rocksdb_num_entries_active_mem_table"] = serverStatus_rocksdb["num-entries-active-mem-table"]
            mongodb_server_dict["rocksdb_num_entries_imm_mem_tables"] = serverStatus_rocksdb["num-entries-imm-mem-tables"]
            mongodb_server_dict["rocksdb_num_snapshots"] = serverStatus_rocksdb["num-snapshots"]
            mongodb_server_dict["rocksdb_oldest_snapshot_time"] = serverStatus_rocksdb["oldest-snapshot-time"]
            mongodb_server_dict["rocksdb_num_live_versions"] = serverStatus_rocksdb["num-live-versions"]
            mongodb_server_dict["rocksdb_total_live_recovery_units"] = serverStatus_rocksdb["total-live-recovery-units"]

        # "PerconaFT" engine section.
        if "PerconaFT" in serverStatus:
            serverStatus_PerconaFT = serverStatus["PerconaFT"]
            mongodb_server_dict["PerconaFT_log_count"] = serverStatus_PerconaFT["log"]["count"]
            mongodb_server_dict["PerconaFT_log_time"] = serverStatus_PerconaFT["log"]["time"]
            mongodb_server_dict["PerconaFT_log_bytes"] = serverStatus_PerconaFT["log"]["bytes"]
            mongodb_server_dict["PerconaFT_fsync_count"] = serverStatus_PerconaFT["fsync"]["count"]
            mongodb_server_dict["PerconaFT_fsync_time"] = serverStatus_PerconaFT["fsync"]["time"]
            # cachetable
            PerconaFT_cachetable = serverStatus_PerconaFT["cachetable"]
            mongodb_server_dict["PerconaFT_cachetable_size_current"] = PerconaFT_cachetable["size"]["current"]
            mongodb_server_dict["PerconaFT_cachetable_size_writing"] = PerconaFT_cachetable["size"]["writing"]
            mongodb_server_dict["PerconaFT_cachetable_size_limit"] = PerconaFT_cachetable["size"]["limit"]
            # checkpoint
            PerconaFT_checkpoint = serverStatus_PerconaFT["checkpoint"]
            mongodb_server_dict["PerconaFT_checkpoint_count"] = PerconaFT_checkpoint["count"]
            mongodb_server_dict["PerconaFT_checkpoint_time"] = PerconaFT_checkpoint["time"]
            mongodb_server_dict["PerconaFT_checkpoint_write_nonleaf_count"] = PerconaFT_checkpoint["write"]["nonleaf"]["count"]
            mongodb_server_dict["PerconaFT_checkpoint_write_nonleaf_time"] = PerconaFT_checkpoint["write"]["nonleaf"]["time"]
            mongodb_server_dict["PerconaFT_checkpoint_write_nonleaf_bytes_compressed"] = PerconaFT_checkpoint["write"]["nonleaf"]["bytes"]["compressed"]
            mongodb_server_dict["PerconaFT_checkpoint_write_nonleaf_bytes_uncompressed"] = PerconaFT_checkpoint["write"]["nonleaf"]["bytes"]["uncompressed"]
            mongodb_server_dict["PerconaFT_checkpoint_write_leaf_count"] = PerconaFT_checkpoint["write"]["leaf"]["count"]
            mongodb_server_dict["PerconaFT_checkpoint_write_leaf_time"] = PerconaFT_checkpoint["write"]["leaf"]["time"]
            mongodb_server_dict["PerconaFT_checkpoint_write_leaf_bytes_compressed"] = PerconaFT_checkpoint["write"]["leaf"]["bytes"]["compressed"]
            mongodb_server_dict["PerconaFT_checkpoint_write_leaf_bytes_uncompressed"] = PerconaFT_checkpoint["write"]["leaf"]["bytes"]["uncompressed"]
            # serializeTime
            for serializeTime_item in serverStatus_PerconaFT["serializeTime"]:
                prefix = "PerconaFT_serializeTime_" + serializeTime_item
                for serializeTime_key in serverStatus_PerconaFT["serializeTime"][serializeTime_item]:
                    key_name = prefix + "_" + serializeTime_key
                    mongodb_server_dict[key_name] = serverStatus_PerconaFT["serializeTime"][serializeTime_item][serializeTime_key]
            # compressionRatio
            for compressionRatio_item in serverStatus_PerconaFT["compressionRatio"]:
                key_name = "PerconaFT_compressionRatio_" + compressionRatio_item
                mongodb_server_dict[key_name] = serverStatus_PerconaFT["compressionRatio"][compressionRatio_item]

        return (mongodb_role, mongodb_server_dict)

    def repl_status(self, connection):
        """Replica-set metrics: state, health, optime, lag, oplog window."""
        replStatus = connection.admin.command("replSetGetStatus")
        repl_status_dict = {}
        # myState: 1 = PRIMARY, 2 = SECONDARY (other states possible).
        repl_status_dict["repl_myState"] = replStatus["myState"]
        repl_status_members = replStatus["members"]
        master_optime = 0  # primary's oplog timestamp (seconds)
        myself_optime = 0  # this member's oplog timestamp (seconds)
        for repl_member in repl_status_members:
            if repl_member.get("self"):
                repl_status_dict["repl_health"] = repl_member["health"]
                repl_status_dict["repl_optime"] = repl_member["optime"]["ts"].time
                # Bug fix: the original tested has_key("repl_electionTime") /
                # has_key("repl_configVersion") but read the un-prefixed keys.
                if "electionTime" in repl_member:
                    repl_status_dict["repl_electionTime"] = repl_member["electionTime"].time
                if "configVersion" in repl_member:
                    repl_status_dict["repl_configVersion"] = repl_member["configVersion"]
                myself_optime = repl_member["optime"]["ts"].time
            if replStatus["myState"] == 2 and repl_member["state"] == 1:
                # We are a SECONDARY: remember the PRIMARY's optime for lag.
                master_optime = repl_member["optime"]["ts"].time
        if replStatus["myState"] == 2:
            repl_status_dict["repl_lag"] = master_optime - myself_optime
        # Oplog window in hours.  1/-1 are pymongo.ASCENDING/DESCENDING.
        oplog_collection = connection["local"]["oplog.rs"]
        oplog_tFirst = oplog_collection.find({}, {"ts": 1}).sort('$natural', 1).limit(1).next()
        oplog_tLast = oplog_collection.find({}, {"ts": 1}).sort('$natural', -1).limit(1).next()
        oplogrs_collstats = connection["local"].command("collstats", "oplog.rs")
        window_multiple = 1  # 1 when oplog.rs is already full
        if "maxSize" in oplogrs_collstats:
            window_multiple = oplogrs_collstats["maxSize"] / (
                oplogrs_collstats["count"] * oplogrs_collstats["avgObjSize"])
        else:
            window_multiple = oplogrs_collstats["storageSize"] / (
                oplogrs_collstats["count"] * oplogrs_collstats["avgObjSize"])
        # oplog_window in x.xx hours, scaled up while the oplog is not full.
        oplog_window = round((oplog_tLast["ts"].time - oplog_tFirst["ts"].time) / 3600.0, 2) * window_multiple
        repl_status_dict["repl_oplog_window"] = oplog_window
        return repl_status_dict

    def shard_status(self, conn):
        """Sharding metrics read from the config database (mongos only)."""
        config_db = conn["config"]
        settings_col = config_db["settings"]
        balancer_doc = settings_col.find_one({'_id': 'balancer'})
        shards_dict = {}
        # shards_BalancerState: 1 = enabled (the default when no doc exists),
        # 0 = stopped.
        if balancer_doc is None:
            shards_dict["shards_BalancerState"] = 1
        elif balancer_doc["stopped"]:
            shards_dict["shards_BalancerState"] = 0
        else:
            shards_dict["shards_BalancerState"] = 1
        # shards_activeWindow: 0 = no window configured, 1 = configured.
        # start/stop reported as e.g. "23:30" -> 23.30.
        if balancer_doc is None:
            shards_dict["shards_activeWindow"] = 0
        elif "activeWindow" in balancer_doc:
            shards_dict["shards_activeWindow"] = 1
            if "start" in balancer_doc["activeWindow"]:
                window_start = balancer_doc["activeWindow"]["start"]
                shards_dict["shards_activeWindow_start"] = window_start.replace(":", ".")
            if "stop" in balancer_doc["activeWindow"]:
                window_stop = balancer_doc["activeWindow"]["stop"]
                shards_dict["shards_activeWindow_stop"] = window_stop.replace(":", ".")
        else:
            # Robustness: always report the metric, even when the balancer doc
            # exists without an activeWindow.
            shards_dict["shards_activeWindow"] = 0
        # shards_chunkSize metric.
        chunksize_doc = settings_col.find_one({"_id": "chunksize"})
        if chunksize_doc is not None:
            shards_dict["shards_chunkSize"] = chunksize_doc["value"]
        # shards_isBalancerRunning metric.
        locks_col = config_db["locks"]
        balancer_lock_doc = locks_col.find_one({'_id': 'balancer'})
        if balancer_lock_doc is None:
            print("config.locks collection empty or missing. be sure you are connected to a mongos")
            shards_dict["shards_isBalancerRunning"] = 0
        elif balancer_lock_doc["state"] > 0:
            shards_dict["shards_isBalancerRunning"] = 1
        else:
            shards_dict["shards_isBalancerRunning"] = 0
        # shards_size / shards_mongosSize metrics.
        shards_dict["shards_size"] = config_db["shards"].count()
        shards_dict["shards_mongosSize"] = config_db["mongos"].count()
        return shards_dict

mongodb_monitor.py

#! /bin/env python
# -*- coding:utf8 -*-
# Push the metrics collected by mongodbMonitor to the local Open-Falcon agent.
#
# NOTE(review): reconstructed from a garbled scrape in which numeric literals
# (step, alive value, err sentinel) were stripped; values restored from the
# inline comments -- confirm against the original project.
import sys
import os
import time
import datetime
import socket
import json

import yaml
import requests

from mongodb_server import mongodbMonitor

falcon_client = "http://127.0.0.1:1988/v1/push"
step = 60  # falcon reporting period in seconds -- TODO confirm agent config
ts = int(time.time())

# Metrics reported with falcon counterType COUNTER; everything else is GAUGE.
mongodb_counter_metric = ["uptime",
                          "asserts_msg",
                          "asserts_regular",
                          "asserts_rollovers",
                          "asserts_user",
                          "asserts_warning",
                          "page_faults",
                          "connections_totalCreated",
                          "locks_Global_acquireCount_ISlock",
                          "locks_Global_acquireCount_IXlock",
                          "locks_Global_acquireCount_Slock",
                          "locks_Global_acquireCount_Xlock",
                          "locks_Global_acquireWaitCount_ISlock",
                          "locks_Global_acquireWaitCount_IXlock",
                          "locks_Global_timeAcquiringMicros_ISlock",
                          "locks_Global_timeAcquiringMicros_IXlock",
                          "locks_Database_acquireCount_ISlock",
                          "locks_Database_acquireCount_IXlock",
                          "locks_Database_acquireCount_Slock",
                          "locks_Database_acquireCount_Xlock",
                          "locks_Collection_acquireCount_ISlock",
                          "locks_Collection_acquireCount_IXlock",
                          "locks_Collection_acquireCount_Xlock",
                          "opcounters_command",
                          "opcounters_insert",
                          "opcounters_delete",
                          "opcounters_update",
                          "opcounters_query",
                          "opcounters_getmore",
                          "opcountersRepl_command",
                          "opcountersRepl_insert",
                          "opcountersRepl_delete",
                          "opcountersRepl_update",
                          "opcountersRepl_query",
                          "opcountersRepl_getmore",
                          "network_bytesIn",
                          "network_bytesOut",
                          "network_numRequests",
                          "backgroundFlushing_flushes",
                          "backgroundFlushing_last_ms",
                          "cursor_timedOut",
                          "wt_cache_readinto_bytes",
                          "wt_cache_writtenfrom_bytes",
                          "wt_bm_bytes_read",
                          "wt_bm_bytes_written",
                          "wt_bm_blocks_read",
                          "wt_bm_blocks_written"
                          ]
# Set for O(1) membership tests in the per-metric loop below.
mongodb_counter_set = set(mongodb_counter_metric)

# Endpoint hostname comes from the agent's cfg.json.
with open('../../cfg.json') as f:
    data = f.read().replace('\n', '')
jsonlist = json.loads(data)
mongodb_hostname = jsonlist['hostname']

# Monitored instances (port/user/password) come from mongomon.conf.
with open("../conf/mongomon.conf") as f:
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; prefer yaml.safe_load if the config is not trusted.
    y = yaml.load(f)
mongodb_items = y["items"]

for mongodb_ins in mongodb_items:
    mongodb_monitor = mongodbMonitor()
    mongodb_tag = "mongo=" + str(mongodb_ins["port"])
    err, conn = mongodb_monitor.mongodb_connect(host="127.0.0.1",
                                                port=mongodb_ins["port"],
                                                user=mongodb_ins["user"],
                                                password=mongodb_ins["password"])
    mongodb_upate_list = []
    if err != 0:
        # The instance is dead: upload mongo_local_alive=0, then continue.
        key_item_dict = {"endpoint": mongodb_hostname,
                         "metric": "mongo_local_alive",
                         "tags": mongodb_tag,
                         "timestamp": ts,
                         "value": 0,
                         "step": step,
                         "counterType": "GAUGE"}
        mongodb_upate_list.append(key_item_dict)
        requests.post(falcon_client, data=json.dumps(mongodb_upate_list))
        continue
    mongodb_dict = mongodb_monitor.get_mongo_monitor_data(conn)
    for mongodb_metric, metric_value in mongodb_dict.items():
        counter_type = "COUNTER" if mongodb_metric in mongodb_counter_set else "GAUGE"
        mongodb_upate_list.append({"endpoint": mongodb_hostname,
                                   "metric": mongodb_metric,
                                   "tags": mongodb_tag,
                                   "timestamp": ts,
                                   "value": metric_value,
                                   "step": step,
                                   "counterType": counter_type})
    r = requests.post(falcon_client, data=json.dumps(mongodb_upate_list))
    print(r)

mongodb监控脚本的更多相关文章

  1. nginx响应时间监控脚本

    最近我们服务的使用方总是反应说我们接口超时,于是做了一个监控脚本,统计最近五分钟的响应情况,并对异常情况发送邮件报警. #!/bin/bash function define(){ ori_log_p ...

  2. Linux系统性能统计工具Sar和实时系统性能监控脚本

    sar(System Activity Reporter系统活动情况报告)是目前 Linux 上最为全面的系统性能分析工具之一,可以从多方面对系统的活动进行报告,包括:文件的读写情况.系统调用的使用情 ...

  3. [工具开发] keepalived使用nagios监控脚本

    最近在做开发和办公环境的高可用,采用的是keepalived:keepalived基于Linux内核支持的LVS,既能实现高可用,又能实现负载均衡,非常实用. keepalived监控服务状态时可以用 ...

  4. 关于mysql和Apache以及nginx的监控脚本怎么写会比较好的记录

    最近,自己业务进行上线,上线后,需要考虑的是对各种服务进行监控,包括(httpd服务,mysqld服务等),现在想以mysqld服务为例总结下那种方式的脚本最为专业和合理: (1).根据mysql的端 ...

  5. MySQL慢日志监控脚本实例剖析

    公司线上的 MySQL 慢日志,之前一直没有做好监控.趁着上周空闲,我就把监控脚本写了下,今天特地把代码发出来与51博友分享一下. 针对脚本的注解和整体构思,我会放到脚本之后为大家详解. 1 2 3 ...

  6. centos shell编程6一些工作中实践脚本 nagios监控脚本 自定义zabbix脚本 mysql备份脚本 zabbix错误日志 直接送给bc做计算 gzip innobackupex/Xtrabackup 第四十节课

    centos   shell编程6一些工作中实践脚本   nagios监控脚本 自定义zabbix脚本 mysql备份脚本 zabbix错误日志  直接送给bc做计算  gzip  innobacku ...

  7. oracle监控脚本

    简单命令 1.显示服务器上的可用实例:ps -ef | grep smon2.显示服务器上的可用监听器:ps -ef | grep -i listener | grep -v grep3.查看Orac ...

  8. linux服务监控脚本

    配置需要监控的服务器 数组定义:host_ports=(host_name=host_port=uri_path)host_name为容易识别的服务器名称host_port为服务器ip和服务端口uri ...

  9. 【不积跬步,无以致千里】五个常用的Linux监控脚本代码

    为大家提供五个常用Linux监控脚本(查看主机网卡流量.系统状况监控.监控主机的磁盘空间,当使用空间超过90%就通过发mail来发警告.监控CPU和内存的使用情况.全方位监控主机),有需要的朋友不妨看 ...

随机推荐

  1. linux下将不同线程绑定到不同core和cpu上——pthread_setaffinity_np

    =============================================================== linux下的单进程多线程的程序,要实现每个线程平均分配到多核cpu,主 ...

  2. Android_ListView简单例子

    ListView是Android软件开发中非常重要组件之一,基本上是个软件基本都会使用ListView ,今天我通过一个demo来教大家怎么样使用ListView组件 activity_main.xm ...

  3. web请求的拦截与处理

    1,特定请求的拦截:spring或struct2的拦截器,指定拦截模式和处理的servlet: 2,非特定的恶意非法请求,web.xml的error-page元素可以接受tomcat返回的错误代码,并 ...

  4. TMS 例子63 分组,子node

    procedure TForm1.InitGrid; begin advstringgrid1.Grouping.MergeHeader := true; //这个什么作用没有是 advstringg ...

  5. 如何设置游戏分辨率(C++)

  6. HDU3333 Turing Tree(线段树)

    题目 Source http://acm.hdu.edu.cn/showproblem.php?pid=3333 Description After inventing Turing Tree, 3x ...

  7. Employment Planning[HDU1158]

    Employment Planning Time Limit: 2000/1000 MS (Java/Others) Memory Limit: 65536/32768 K (Java/Others) ...

  8. Android入门(七):Spinner下拉式菜单组件

    对于手机和平板电脑的应用程序来说,打字是非常不方便的操作方式,比较好的方式就是列出一组选项让用户挑选,这样就可以避免打字的麻烦.使用Spinner下拉菜单组件需要完成以下几个步骤: 1.建立选项列表, ...

  9. 【CodeVS2800】 送外卖 最短路+状压DP

    首先求出各点之间的最短路,floyed即可,注意是0-n. 然后考虑状压,f[i][j]表示状态为i时访问j点时的最短路和,1表示访问,0表示未访问,然后第j个点所在的位置就是(1<<j) ...

  10. python3 抓取网页资源的 N 种方法

    1. 最简单 import urllib.request response = urllib.request.urlopen('http://python.org/') html = response ...