1.    MongoDB Sharding + Replica Sets

A robust cluster architecture

Multiple config servers, multiple mongos routers, every shard backed by a replica set, and the write concern (w) set correctly.

Architecture diagram

Notes:

1.   In this lab, all mongod instances run on a single machine, separated by different ports and dbpaths.

2.   There are 9 mongod instances in total, organized into three replica sets named shard1, shard2 and shard3, each with 1 primary and 2 secondaries.

3.   The number of mongos processes is not limited. It is recommended to run a mongos on every application server, so each application server talks to its own local mongos; if one server stops working, the other application servers can still reach their own mongos instances.

4.   This lab simulates 2 application servers (2 mongos services).

5.   In production every shard should be a replica set, so that the loss of a single server does not make the whole shard unavailable.
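To illustrate notes 3 and 5 together with "setting w correctly" (a minimal sketch, not part of the original lab): an application connects to the mongos on its own host and issues writes with an explicit write concern, so that an acknowledged write has reached a majority of the target shard's replica set. Port 40000 matches the first mongos planned below; the collection and document are only examples.

[root@Master cluster2]# mongo --port 40000
mongos> use test
mongos> // ask a majority of the shard's replica set members to acknowledge, waiting at most 5 seconds
mongos> db.testcoll.insert({ "msg" : "hello" }, { writeConcern: { w: "majority", wtimeout: 5000 } })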

部署环境

创建相关目录

[root@Master cluster2]# mkdir -p shard{1,2,3}/node{1,2,3}
[root@Master cluster2]# mkdir -p shard{1,2,3}/logs
[root@Master cluster2]# ls shard*
shard1:
logs  node1  node2  node3

shard2:
logs  node1  node2  node3

shard3:
logs  node1  node2  node3
[root@Master cluster2]# mkdir -p config/logs
[root@Master cluster2]# mkdir -p config/node{1,2,3}
[root@Master cluster2]# ls config/
logs  node1  node2  node3
[root@Master cluster2]# mkdir -p mongos/logs

Start the config servers

Config server planning (dbpath / logpath / port):

/data/mongodb/config/node1    /data/mongodb/config/logs/node1.log    10000
/data/mongodb/config/node2    /data/mongodb/config/logs/node2.log    20000
/data/mongodb/config/node3    /data/mongodb/config/logs/node3.log    30000

#Start the 3 config servers as planned: the same as starting a single config server, just repeated 3 times

[root@Master cluster2]# mongod --dbpath config/node1 --logpath config/logs/node1.log --logappend --fork --port 10000
[root@Master cluster2]# mongod --dbpath config/node2 --logpath config/logs/node2.log --logappend --fork --port 20000
[root@Master cluster2]# mongod --dbpath config/node3 --logpath config/logs/node3.log --logappend --fork --port 30000
[root@Master cluster2]# ps -ef|grep mongod|grep -v grep
mongod   ...   /usr/bin/mongod -f /etc/mongod.conf
root     ...   mongod --dbpath config/node1 --logpath config/logs/node1.log --logappend --fork --port 10000
root     ...   mongod --dbpath config/node2 --logpath config/logs/node2.log --logappend --fork --port 20000
root     ...   mongod --dbpath config/node3 --logpath config/logs/node3.log --logappend --fork --port 30000
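A quick way to confirm that all three config servers are responding (a sketch, not from the original write-up) is to ping each one through the mongo shell; each call should return { "ok" : 1 }.

[root@Master cluster2]# for p in 10000 20000 30000; do
>   mongo --port $p --quiet --eval 'printjson(db.adminCommand({ ping: 1 }))'
> done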

Start the routing service (mongos)

Mongos server planning (logpath / port; a mongos needs no dbpath):

/data/mongodb/mongos/logs/node1.log    40000
/data/mongodb/mongos/logs/node2.log    50000

#The number of mongos processes is not limited; usually one mongos runs on each application server

[root@Master cluster2]# mongos --port 40000 --configdb localhost:10000,localhost:20000,localhost:30000 --logpath mongos/logs/mongos1.log --logappend --fork
[root@Master cluster2]# mongos --port 50000 --configdb localhost:10000,localhost:20000,localhost:30000 --logpath mongos/logs/mongos2.log --logappend --fork
[root@Master cluster2]# ps -ef|grep mongos|grep -v grep
root     ...   mongos --port 40000 --configdb localhost:10000,localhost:20000,localhost:30000 --logpath mongos/logs/mongos1.log --logappend --fork
root     ...   mongos --port 50000 --configdb localhost:10000,localhost:20000,localhost:30000 --logpath mongos/logs/mongos2.log --logappend --fork

Configure the replica sets

As planned, configure and start the three replica sets shard1, shard2 and shard3

#shard1 is used below to illustrate the configuration steps

#Start the three mongod processes

[root@Master cluster2]# mongod --replSet shard1 --dbpath shard1/node1 --logpath shard1/logs/node1.log --logappend --fork --port 10001
[root@Master cluster2]# mongod --replSet shard1 --dbpath shard1/node2 --logpath shard1/logs/node2.log --logappend --fork --port 10002
[root@Master cluster2]# mongod --replSet shard1 --dbpath shard1/node3 --logpath shard1/logs/node3.log --logappend --fork --port 10003

#Initialize the replica set shard1

[root@Master cluster2]# mongo --port 10001
MongoDB shell version: 3.0.
connecting to: 127.0.0.1:10001/test
> use admin
switched to db admin
> rsconf={"_id" : "shard1","members" : [{"_id" : 0, "host" : "localhost:10001"}]}
{
    "_id" : "shard1",
    "members" : [
        {
            "_id" : 0,
            "host" : "localhost:10001"
        }
    ]
}
> rs.initiate(rsconf)
{ "ok" : 1 }
shard1:OTHER> rs.add("localhost:10002")
{ "ok" : 1 }
shard1:PRIMARY> rs.add("localhost:10003")
{ "ok" : 1 }
shard1:PRIMARY> rs.conf()
{
    "_id" : "shard1",
    "version" : 3,
    "members" : [
        {
            "_id" : 0,
            "host" : "localhost:10001",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 1,
            "host" : "localhost:10002",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 2,
            "host" : "localhost:10003",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        }
    ],
    "settings" : {
        "chainingAllowed" : true,
        "heartbeatTimeoutSecs" : 10,
        "getLastErrorModes" : { },
        "getLastErrorDefaults" : {
            "w" : 1,
            "wtimeout" : 0
        }
    }
}
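At this point it is worth verifying that the set is healthy (a quick check, not shown in the original): rs.status() should report one member in state PRIMARY and the other two in state SECONDARY.

shard1:PRIMARY> rs.status().members.forEach(function(m) { print(m.name + " -> " + m.stateStr) })
// expected: localhost:10001 -> PRIMARY, the other two members -> SECONDARY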

Configure shard2 and shard3 as replica sets in the same way as shard1

[root@Master cluster2]# mongod --replSet shard2 --dbpath shard2/node1 --logpath shard2/logs/node1.log --logappend --fork --port 20001
[root@Master cluster2]# mongod --replSet shard2 --dbpath shard2/node2 --logpath shard2/logs/node2.log --logappend --fork --port 20002
[root@Master cluster2]# mongod --replSet shard2 --dbpath shard2/node3 --logpath shard2/logs/node3.log --logappend --fork --port 20003
[root@Master cluster2]# mongo --port 20001
> use admin
> rsconf={"_id" : "shard2","members" : [{"_id" : 0, "host" : "localhost:20001"}]}
> rs.initiate(rsconf)
shard2:OTHER> rs.add("localhost:20002")
shard2:PRIMARY> rs.add("localhost:20003")
shard2:PRIMARY> rs.conf()
{
    "_id" : "shard2",
    "version" : 3,
    "members" : [
        {
            "_id" : 0,
            "host" : "localhost:20001",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 1,
            "host" : "localhost:20002",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 2,
            "host" : "localhost:20003",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        }
    ],
    "settings" : {
        "chainingAllowed" : true,
        "heartbeatTimeoutSecs" : 10,
        "getLastErrorModes" : { },
        "getLastErrorDefaults" : {
            "w" : 1,
            "wtimeout" : 0
        }
    }
}

  

[root@Master cluster2]# mongod --replSet shard3 --dbpath shard3/node1 --logpath shard3/logs/node1.log --logappend --fork --port 30001
[root@Master cluster2]# mongod --replSet shard3 --dbpath shard3/node2 --logpath shard3/logs/node2.log --logappend --fork --port 30002
[root@Master cluster2]# mongod --replSet shard3 --dbpath shard3/node3 --logpath shard3/logs/node3.log --logappend --fork --port 30003
[root@Master cluster2]# mongo --port 30001
connecting to: 127.0.0.1:30001/test
> use admin
> rsconf={"_id" : "shard3","members" : [{"_id" : 0, "host" : "localhost:30001"}]}
> rs.initiate(rsconf)
shard3:OTHER> rs.add("localhost:30002")
shard3:PRIMARY> rs.add("localhost:30003")
shard3:PRIMARY> rs.conf()
{
    "_id" : "shard3",
    "version" : 3,
    "members" : [
        {
            "_id" : 0,
            "host" : "localhost:30001",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 1,
            "host" : "localhost:30002",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 2,
            "host" : "localhost:30003",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        }
    ],
    "settings" : {
        "chainingAllowed" : true,
        "heartbeatTimeoutSecs" : 10,
        "getLastErrorModes" : { },
        "getLastErrorDefaults" : {
            "w" : 1,
            "wtimeout" : 0
        }
    }
}

Add the (replica set) shards

#Connect to mongos and switch to admin; this must be done on a router node

[root@Master cluster2]# mongo --port 40000
MongoDB shell version: 3.0.
connecting to: 127.0.0.1:40000/test
mongos> use admin
switched to db admin
mongos> db.runCommand({"addShard":"shard1/localhost:10001"})
{ "shardAdded" : "shard1", "ok" : 1 }
mongos> db.runCommand({"addShard":"shard2/localhost:20001"})
{ "shardAdded" : "shard2", "ok" : 1 }
mongos> db.runCommand({"addShard":"shard3/localhost:30001"})
{ "shardAdded" : "shard3", "ok" : 1 }
mongos> db.runCommand({listshards:1})
{
    "shards" : [
        {
            "_id" : "shard1",
            "host" : "shard1/localhost:10001,localhost:10002,localhost:10003"
        },
        {
            "_id" : "shard2",
            "host" : "shard2/localhost:20001,localhost:20002,localhost:20003"
        },
        {
            "_id" : "shard3",
            "host" : "shard3/localhost:30001,localhost:30002,localhost:30003"
        }
    ],
    "ok" : 1
}

Enable sharding for databases and collections

Enable sharding on a database with the command:

> db.runCommand( { enablesharding : "<database name>" } );

Running this command allows the database to be spread across shards; without it, the database stays on a single shard.

Once sharding is enabled on a database, its different collections can be placed on different shards,

but each collection itself still lives on a single shard. To shard an individual collection, an extra step is required.

#Example: enable sharding for the test database; connect to a mongos process

[root@Master cluster2]# mongo --port 40000
MongoDB shell version: 3.0.
connecting to: 127.0.0.1:40000/test
mongos> use admin
switched to db admin
mongos> db.runCommand({"enablesharding":"test"})
{ "ok" : 1 }

To shard an individual collection, it must be given a shard key, using the following command:

> db.runCommand( { shardcollection : "<collection name>", key : { "<field name>" : 1 } });

Note:  a. For a sharded collection, the system automatically creates an index on the shard key (the user can also create it in advance)

b. A sharded collection can have only one unique index, and it must be on the shard key; other unique indexes are not allowed
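For example (a sketch using a hypothetical collection and field, not part of the original lab), to shard a collection on a field other than _id you could create the shard-key index first and then run shardcollection:

mongos> use test
mongos> db.orders.createIndex({ "uid" : 1 })   // hypothetical collection and shard-key field
mongos> use admin
mongos> db.runCommand({ "shardcollection" : "test.orders", "key" : { "uid" : 1 } })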

#Shard the collection test.yujx

mongos> db.runCommand({"shardcollection":"test.yujx","key":{"_id":1}})
{ "collectionsharded" : "test.yujx", "ok" : 1 }

Generate test data

mongos> use test
switched to db test
mongos> for(var i=1;i<=100000;i++) db.yujx.save({"id":i,"a":i,"b":i,"c":i})
WriteResult({ "nInserted" : 1 })
mongos> db.yujx.count()
100000
Check how the collection is sharded

mongos> db.yujx.stats()
{
"sharded" : true,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"ns" : "test.yujx",
"count" : ,
"numExtents" : ,
"size" : ,
"storageSize" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" :
},
"avgObjSize" : ,
"nindexes" : ,
"nchunks" : ,
"shards" : {
"shard1" : {
"ns" : "test.yujx",
"count" : ,
"size" : ,
"avgObjSize" : ,
"numExtents" : ,
"storageSize" : ,
"lastExtentSize" : ,
"paddingFactor" : ,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"nindexes" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" :
},
"ok" : ,
"$gleStats" : {
"lastOpTime" : Timestamp(, ),
"electionId" : ObjectId("55d15366716d7504d5d74c4c")
}
},
"shard2" : {
"ns" : "test.yujx",
"count" : ,
"size" : ,
"avgObjSize" : ,
"numExtents" : ,
"storageSize" : ,
"lastExtentSize" : ,
"paddingFactor" : ,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"nindexes" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" :
},
"ok" : ,
"$gleStats" : {
"lastOpTime" : Timestamp(, ),
"electionId" : ObjectId("55d1543eabed7d6d4a71d25e")
}
},
"shard3" : {
"ns" : "test.yujx",
"count" : ,
"size" : ,
"avgObjSize" : ,
"numExtents" : ,
"storageSize" : ,
"lastExtentSize" : ,
"paddingFactor" : ,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"nindexes" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" :
},
"ok" : ,
"$gleStats" : {
"lastOpTime" : Timestamp(, ),
"electionId" : ObjectId("55d155346f36550e3c5f062c")
}
}
},
"ok" :
}

Check how the database is sharded

mongos> db.printShardingStatus()
--- Sharding Status ---
sharding version: {
"_id" : ,
"minCompatibleVersion" : ,
"currentVersion" : ,
"clusterId" : ObjectId("55d152a35348652fbc726a10")
}
shards:
{ "_id" : "shard1", "host" : "shard1/localhost:10001,localhost:10002,localhost:10003" }
{ "_id" : "shard2", "host" : "shard2/localhost:20001,localhost:20002,localhost:20003" }
{ "_id" : "shard3", "host" : "shard3/localhost:30001,localhost:30002,localhost:30003" }
balancer:
Currently enabled: yes
Currently running: yes
Balancer lock taken at Sun Aug :: GMT- (PDT) by Master.Hadoop::::Balancer:
Failed balancer rounds in last attempts:
Migration Results for the last hours:
: Success
: Failed with error 'could not acquire collection lock for test.yujx to migrate chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for migrating chunk [{ : MinKey }, { : MaxKey }) in test.yujx is taken.', from shard1 to shard2
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : true, "primary" : "shard1" }
test.yujx
shard key: { "_id" : 1 }
chunks:
shard1
shard2
shard3
{ "_id" : { "$minKey" : } } -->> { "_id" : ObjectId("55d157cca0c90140e33a9342") } on : shard3 Timestamp(, )
{ "_id" : ObjectId("55d157cca0c90140e33a9342") } -->> { "_id" : ObjectId("55d157cca0c90140e33a934a") } on : shard1 Timestamp(, )
{ "_id" : ObjectId("55d157cca0c90140e33a934a") } -->> { "_id" : { "$maxKey" : } } on : shard2 Timestamp(, )

#Alternatively, query the config database through mongos

mongos> use config
switched to db config
mongos> db.shards.find()
{ "_id" : "shard1", "host" : "shard1/localhost:10001,localhost:10002,localhost:10003" }
{ "_id" : "shard2", "host" : "shard2/localhost:20001,localhost:20002,localhost:20003" }
{ "_id" : "shard3", "host" : "shard3/localhost:30001,localhost:30002,localhost:30003" }
mongos> db.databases.find()
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : true, "primary" : "shard1" }
mongos> db.chunks.find()
{ "_id" : "test.yujx-_id_MinKey", "lastmod" : Timestamp(, ), "lastmodEpoch" : ObjectId("55d15738679c4d5f9108eba0"), "ns" : "test.yujx", "min" : { "_id" : { "$minKey" : } }, "max" : { "_id" : ObjectId("55d157cca0c90140e33a9342") }, "shard" : "shard3" }
{ "_id" : "test.yujx-_id_ObjectId('55d157cca0c90140e33a9342')", "lastmod" : Timestamp(, ), "lastmodEpoch" : ObjectId("55d15738679c4d5f9108eba0"), "ns" : "test.yujx", "min" : { "_id" : ObjectId("55d157cca0c90140e33a9342") }, "max" : { "_id" : ObjectId("55d157cca0c90140e33a934a") }, "shard" : "shard1" }
{ "_id" : "test.yujx-_id_ObjectId('55d157cca0c90140e33a934a')", "lastmod" : Timestamp(, ), "lastmodEpoch" : ObjectId("55d15738679c4d5f9108eba0"), "ns" : "test.yujx", "min" : { "_id" : ObjectId("55d157cca0c90140e33a934a") }, "max" : { "_id" : { "$maxKey" : } }, "shard" : "shard2" }

Hashed sharding

MongoDB 2.4 and later support hash-based sharding

mongos> use admin
mongos> db.runCommand({"enablesharding":"mydb"})
mongos> db.runCommand({"shardcollection":"mydb.mycollection","key":{"_id":"hashed"}})
mongos> use mydb
switched to db mydb
mongos> for(i=0;i<100000;i++){ db.mycollection.insert({"Uid":i,"Name":"zhanjindong2","Age":21,"Date":new Date()}); }
WriteResult({ "nInserted" : 1 })
mongos> db.mycollection.stats()
{
"sharded" : true,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"ns" : "mydb.mycollection",
"count" : ,
"numExtents" : ,
"size" : ,
"storageSize" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" : ,
"_id_hashed" :
},
"avgObjSize" : ,
"nindexes" : ,
"nchunks" : ,
"shards" : {
"shard1" : {
"ns" : "mydb.mycollection",
"count" : ,
"size" : ,
"avgObjSize" : ,
"numExtents" : ,
"storageSize" : ,
"lastExtentSize" : ,
"paddingFactor" : ,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"nindexes" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" : ,
"_id_hashed" :
},
"ok" : ,
"$gleStats" : {
"lastOpTime" : Timestamp(, ),
"electionId" : ObjectId("55d15366716d7504d5d74c4c")
}
},
"shard2" : {
"ns" : "mydb.mycollection",
"count" : ,
"size" : ,
"avgObjSize" : ,
"numExtents" : ,
"storageSize" : ,
"lastExtentSize" : ,
"paddingFactor" : ,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"nindexes" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" : ,
"_id_hashed" :
},
"ok" : ,
"$gleStats" : {
"lastOpTime" : Timestamp(, ),
"electionId" : ObjectId("55d1543eabed7d6d4a71d25e")
}
},
"shard3" : {
"ns" : "mydb.mycollection",
"count" : ,
"size" : ,
"avgObjSize" : ,
"numExtents" : ,
"storageSize" : ,
"lastExtentSize" : ,
"paddingFactor" : ,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"nindexes" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" : ,
"_id_hashed" :
},
"ok" : ,
"$gleStats" : {
"lastOpTime" : Timestamp(, ),
"electionId" : ObjectId("55d155346f36550e3c5f062c")
}
}
},
"ok" :
}
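Rather than reading through the full stats() output, the shell helper below (not shown in the original) prints a per-shard summary of document counts and data size, which makes it easy to check whether the hashed shard key spread the data evenly across shard1, shard2 and shard3.

mongos> db.mycollection.getShardDistribution()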

Single-point-of-failure analysis

Since this lab was done to get started with MongoDB, and simulating every failure takes a lot of time, the failure scenarios are not listed one by one here. For an analysis of failure scenarios, see:
http://blog.itpub.net/27000195/viewspace-1404402/
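As one quick example of what such a test looks like (a sketch, not actually performed in this lab): stop shard1's current primary and confirm that one of the surviving members is elected primary, after which reads and writes through mongos continue to work.

# stop shard1's current primary (localhost:10001)
[root@Master cluster2]# mongo --port 10001
shard1:PRIMARY> use admin
switched to db admin
shard1:PRIMARY> db.shutdownServer()
# check the election result from another member; after a short delay one member should report PRIMARY
[root@Master cluster2]# mongo --port 10002
shard1:SECONDARY> rs.status().members.forEach(function(m) { print(m.name + " -> " + m.stateStr) })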
