mongodb2
db.adminCommand() db.getWriteConcern()
db.aggregate() db.grantPrivilegesToRole()
db.auth() db.grantRolesToRole()
db.changeUserPassword() db.grantRolesToUser()
db.cloneCollection() db.group()
db.cloneDatabase() db.groupcmd()
db.commandHelp() db.groupeval()
db.constructor db.hasOwnProperty
db.copyDatabase() db.help()
db.createCollection() 创建一个表 db.hostInfo()
db.createRole() db.isMaster()
db.createUser() db.killOP()
db.createView() db.killOp()
db.currentOP() db.listCommands()
db.currentOp() db.loadServerScripts()
db.dbEval() db.logout()
db.disableFreeMonitoring() db.printCollectionStats()
db.dropAllRoles() db.printReplicationInfo()
db.dropAllUsers() db.printShardingStatus()
db.dropDatabase() db.printSlaveReplicationInfo()
db.dropRole() db.propertyIsEnumerable
db.dropUser() 删除user库 db.prototype
db.enableFreeMonitoring() db.removeUser()
db.eval() db.repairDatabase()
db.forceError() db.resetError()
db.fsyncLock() db.revokePrivilegesFromRole()
db.fsyncUnlock() db.revokeRolesFromRole()
db.getCollection() db.revokeRolesFromUser()
db.getCollectionInfos() db.runCommand()
db.getCollectionNames() db.runCommandWithMetadata()
db.getFreeMonitoringStatus() db.runReadCommand()
db.getLastError() db.serverBits()
db.getLastErrorCmd() db.serverBuildInfo()
db.getLastErrorObj() db.serverCmdLineOpts()
db.getLogComponents() db.serverStatus()
db.getMongo() db.setLogLevel()
db.getName() db.setProfilingLevel()
db.getPrevError() db.setSlaveOk()
db.getProfilingLevel() db.setWriteConcern()
db.getProfilingStatus() db.shutdownServer()
db.getQueryOptions() db.stats()
db.getReplicationInfo() db.toLocaleString
db.getRole() db.toString()
db.getRoles() db.tojson()
db.getSession() db.unsetWriteConcern()
db.getSiblingDB() db.updateRole()
db.getSisterDB() db.updateUser()
db.getSlaveOk() db.user
db.getUser() db.valueOf
db.getUsers() db.version()
db.getUsers()
db.user.addIdIfNeeded() db.user.getCollection() db.user.mapReduce()
db.user.aggregate() db.user.getDB() db.user.propertyIsEnumerable
db.user.bulkWrite() db.user.getFullName() db.user.prototype
db.user.constructor db.user.getIndexKeys() db.user.reIndex()
db.user.convertToCapped() db.user.getIndexSpecs() db.user.remove({gender:'m'},true)
# remove(查询表达式, 选项): 选项不加则匹配到的都会删;例 ({_id:001},{justOne:true}) 只删一条
db.user.convertToSingleObject() db.user.getIndexes() db.user.renameCollection()
db.user.copyTo() db.user.getIndices() db.user.replaceOne()
db.user.count() db.user.getMongo() db.user.runCommand()
db.user.countDocuments() db.user.getName() db.user.runReadCommand()
db.user.createIndex() db.user.getPlanCache() db.user.save()
db.user.createIndexes() db.user.getQueryOptions() db.user.setSlaveOk()
db.user.dataSize() db.user.getShardDistribution() db.user.setWriteConcern()
db.user.deleteMany() db.user.getShardVersion() db.user.shellPrint()
db.user.deleteOne() db.user.getSlaveOk() db.user.stats()
db.user.distinct() db.user.getSplitKeysForChunks() db.user.storageSize()
db.user.drop() 删除 db.user.getWriteConcern() db.user.toLocaleString
db.user.dropIndex() db.user.group() db.user.toString()
db.user.dropIndexes() db.user.groupcmd() db.user.tojson()
db.user.ensureIndex() db.user.hasOwnProperty db.user.totalIndexSize()
db.user.estimatedDocumentCount() db.user.hashAllDocs() db.user.totalSize()
db.user.exists() db.user.help() db.user.unsetWriteConcern()
db.user.explain() db.user.initializeOrderedBulkOp() db.user.update({},{})#整体文档替换
({name:'poxi'},
{
$set:{name:'wudalang'},
$unset:{jingshu:1},
$rename:{sex:'gender'},
$inc:{age:16},
$multi:{},
$setOnInsert:{'gender':'未知'},# 当upsert为true时,并且发生了insert操作时,可以补充的字段2.4版本之后新家的
},
{
multi:true, # 默认只改一行,即使匹配到多行;默认 false
upsert:true, # 没找到就直接创建;默认 false
}
)$局部替换 $set 修改某列值 $unset 删除某列值 $rename重命名某列值 $inc 增长某个列:自增16
db.user.find() 查 db.user.initializeUnorderedBulkOp() db.user.updateMany()
db.user.findAndModify() db.user.insert({json格式的对象}) db.user.updateOne() # ({hobby:6},{$set:{'hobby.$':'六'}})
db.user.findOne() db.user.insertMany() db.user.validate()
db.user.findOneAndDelete() db.user.insertOne() db.user.valueOf()
db.user.findOneAndReplace() db.user.isCapped() db.user.verify()
db.user.findOneAndUpdate() db.user.latencyStats()
查找
find,findOne
db.collection.find(查询表达式,查询的列)
db.stu.find({},{gender:1,_id:0}) # 显示所有的gender属性 # 默认id列是都会显示的,无论查什么
db.stu.find().count()
.find({cat_id:{$ne:3}},{cat_id:1,goods_name:1,_id:0}) # cat_id不是3的 同一个{}中的是且{},{}也是且
$gt 大于
$lt 小于
$lte 小于等于
$gte 大于等于
$nin:[] not in
$in:[] in {'name':{$in:['xx',]}}
$all ({hobby:{$all:[1,2,3]}}) # hobby列表中的元素,全部包含指定的值
.find(
{$and:[{shop_price:{$gte:100}},{shop_price:{$lte:500}}]},
{shop_price:1,goods_name:1,_id:0}
)
.find(
{$nor:[{shop_price:3},{shop_price:{$ne:500}}]},# 都不成立,才显示
{shop_price:1,goods_name:1,_id:0}
)
.find(
{$or:[]}, 列表中的{} 为或
)
$mod求余数
.find(
{goods_id:{$mod:[5,0]},}, #余数为0的
)
$exists 有某一个属性
.find(
{age:{$exists:1}},{}
)
$type #是某种类型
.find(
{age:{$type:1}},{}
)
| Double | 1 |
| String | 2 |
| Object | 3 |
| Array | 4 |
| Binary data | 5 |
| Undefined | 6 |
| Object id | 7 |
| Boolean | 8 |
| Date | 9 |
| Null | 10 |
| Regular Expression | 11 |
| JavaScript | 12 |
$all都有
.find(
{hobby:{$all:['a','b']}},
{}
)
$where 转换为json对象后比较,效率低,而别的都是以二进制的形式比较
.find({goods_name:{$regex:/^农机呀.*/}}) 效率一样低
.insert({dtime:new Date().valueOf()})显示的是时间戳
query selector之js运算符 Name Description $inc 增长 {$inc:{age:2}} $rename 重命名列 $setOnInsert 当upsert时,设置字段的值 $set 设置字段的新值 $unset 删除指定的列 {$unset:{name:1}}
$push 给array追加新元素 {$push:{hobby:10}}
$pull 删除array指定值 {$pull:{hobby:9}}
$pop 从后删 {$pop:{hobby:1}}#正数从后往前 ;反之
Array() UUID() getBuildInfo() propertyIsEnumerable()
BinData() WriteCommandError() getHostName() prototype
Boolean() WriteConcern() getMemInfo() pwd()
BulkWriteError() WriteError() getOwnPropertyDescriptor rawMongoProgramOutput()
BulkWriteResult() WriteResult() getOwnPropertyNames reconnect()
CountDownLatch allocatePort() getPrototypeOf removeFile()
DBCommandCursor() allocatePorts() hasOwnProperty() replSetMemberStatePrompt()
DBExplainQuery() assert() help() resetAllocatedPorts()
DBPointer() authutil hex_md5() resetDbpath()
Date() benchFinish() hostname() retryOnNetworkError()
DriverSession() benchRun() indentStr() rs()
ErrorCodeStrings benchRunSync() interpreterVersion() run()
ErrorCodes benchStart() isExtensible runMongoProgram()
Explainable() bsonsize() isFinite() runNonMongoProgram()
Geo cat() isFrozen runProgram()
HexData() cd() isMasterStatePrompt() seal
ISODate() chatty() isNaN() setJsTestOption()
Infinity checkProgram() isNetworkError() setVerboseShell()
JSON clearRawMongoProgramOutput()isNumber() sh()
MD5() compare() isObject() shellAutocomplete()
MR compareOn() isSealed shellHelper()
Map() computeSHA256Block() isString() shellPrint()
MapReduceResult() connect() jsTest shellPrintHelper()
Math constructor jsTestLog() sortDoc()
MaxKey convertShardKeyToHashed() jsTestName() startMongoProgram()
MinKey copyDbpath() jsTestOptions() startMongoProgramNoConnect()
Mongo() copyFile() keys startParallelShell()
MongoBridge() create listFiles() stopMongoProgramByPid()
MongoRunner() db load() testingReplication
NaN decodeURI() ls() timestampCmp()
Number() decodeURIComponent() md5sumFile() toLocaleString()
NumberDecimal() defaultPrompt() mkdir() toString()
NumberInt() defineProperties module tojson()
NumberLong() defineProperty myPort() tojsonObject()
show dbs ;show databases;
use user ; # 隐式创建库
show tables ;show collections
db.user.insert({name:"lisi",age:22});
db.user.find(); #查看该表下的所有数据
db.user.insert({_id:2,name:"lisi",age:22})
db.user.insert({_id:3,name:"lisi",age:22,hobby:["asd","as"],
intro:{title:"my_intro",content:"im from china"}
})
db.goods.insert({name:"xas"})
collection 在mongodb称为集合 /表
for (var i =0;i<10000;i++){ db.bar.insert({_id:i+1,title:"hello world"}) }
游标
游标不是查询结果本身,而是指向结果集的一个接口,可以逐条读取结果
var mycursor = db.stu.find()
mycursor.hasNext()
print(mycursor.next()) #[object bson_object]
printjson(mycursor.next())
var mycursor = db.stu.find(); while(mycursor.hasNext()){ printjson(mycursor.next()) }
var mycursor = db.stu.find(); mycursor.forEach( function(obj){ printjson(obj) } )
游标在分页中的应用
mysql(limit ,offset,n)
var mycursor = db.stu.find().skip(9000); #查询结果中跳过前9000行
var mycursor = db.stu.find().skip(9000).limit(10); #取第901页,每页10条
mycursor.forEach(
function(obj){
printjson(obj)
}
)
mycursor.toArray()# 所有的行立即以对象形式组织到内存里;当然这么用丧失了使用游标的意义
或者
db.stu.find().skip(9000).limit(10)
索引
- 索引可以提高查询速度,但会降低写入速度;权衡常用的查询字段,不必在太多列上建索引
- 在mongodb中索引可以按字段升序、降序来创建,便于排序
- 默认是用btree来组织索引文件,2.4以后允许建立hash索引
for (i=0;i<1000;i++){ db.stu.insert({sn:i,name:"xzcz"}) } db.stu.find({sn:99}).explain() { cursor:BasicCursor, # 没有使用索引 nscannedObjects : 1000 # 实际扫描的文档数 }
加索引:
db.stu.ensureIndex({sn:1}) #1正序,-1倒序
db.stu.find({sn:99}).explain()
{
cursor:BtreeCursor sn_1, # 使用了sn列上的btree索引
nscannedObjects : 1 # 实际扫描的文档数
}
查看索引
db.stu.getIndexes()
删除索引
db.stu.dropIndex({name:-1})
db.stu.dropIndexes() # 删除全部索引
多列索引
db.stu.ensureIndex({name:1,sn:1})
子文档索引
db.stu.ensureIndex({"spc.area":1})
子文档查询
db.stu.find({'spc.area':'taiwan'})
唯一索引
db.stu.ensureIndex({email:1},{unique:true})
稀疏索引 :当该文档没有所建立索引的属性,则不建立索引;与之相对,普通索引会把该属性的值认为是null并建立索引
db.stu.ensureIndex({email:1},{sparse:true})
哈希索引 :相邻的数据,存储位置散列,顺序查找吃亏
db.stu.ensureIndex({email:'hashed'})
重建索引:数据修改后,产生空洞,提高效率,减少碎片
db.stu.reIndex()
用户管理
默认没有用户
use admin 进入超级用户管理模式
mongo的用户是以数据库为单位来建立的,每个数据库都有自己的管理员
use test
db.addUser("zhangsan","password",false)# 是否只读
程序在启动时 添加 --auth参数启动认证功能
----------在那个库下,就是给哪个库建管理员
进入以后
show tables
db.auth("zhangsan","password")
db.changeUserPassword("zhangsan",'123')
db.removeUser('zhangsan')
如果需要给用户添加更多的权限,可以用json结构来传递用户参数
use test
db.addUser({user:'guan',pwd:'12312',roles:['readWrite','dbAdmin']})
备份与恢复
./bin/mongoexport -h host -p port -u username -p password -d database 要导出的库 -c collection 要导出的collection -f 要导出的列 多个用,分割 # 默认id会导出 -q 查询条件(要用引号抱起来) '{sn:{$gte:124}}' -o 导出的文件名 --csv 导出csv格式 #便于和传统数据库交换数据
./bin/mongoimport -h host -p port -u username -p passwd -d database 要导入的库 -c collection 要导入的collection --type csv/json(默认) --file 待导入文件 --headerline 跳过第1行 # csv使用,跳过第一行的列名称
./bin/mongoimport -d test -c bird --type csv -f sn,name --file ./test.csv --headerline
二进制导入导出,速度快,也有索引,若仅仅是数据交换,用上边的方式
mongodump 导出二进制bson结构的数据及其索引信息 -d 库名 -c 表名 # 不写,导出所有 -f 列明
默认导出到mongo下的dump目录以数据名的文件夹下 一个*.bson 一个*.metadata.json
恢复数据
./bin/mongorestore -d test dump/test/ # 目录为mongodump时的备份目录
repilcation set 复制集
replication set 多台服务器维护相同的数据副本,提高服务器的可用性
mkdir /home/m17 /home/m18 /home/m19 /home/log
声明实例都属于rs0复制集
mongod --port 27017 --dbpath /home/m17 --logpath /home/log/m17.log --fork --replSet rs0
mongod --port 27018 --dbpath /home/m18 --logpath /home/log/m18.log --fork --replSet rs0
mongod --port 27019 --dbpath /home/m19 --logpath /home/log/m19.log --fork --replSet rs0
配置信息
./bin/mongo # 连上其中一个 管理replication set use admin var rsconf ={ _id:'rs0', members:[ {_id:0,host:'192.168.1.202:27017'}, {_id:1,host:'192.168.1.202:27018'}, {_id:2,host:'192.168.1.202:27019'}, ] } printjson(rsconf) rs.initiate(rsconf) # 根据配置初始化---> # rs.reconfig(rsconf) rs0:STARTUP0> rs.status() # 一个复制集 后期 进入主PRIMARY rs0:PRIMARY> rs.help()
#----------------------------------------
#rs.add('rs0/124.1.2.1:8010') # 添加复制集
#---------------------------------------- 添加一个member rs.add('192.168.1.202:27020') 删除一个member rs.remove('192.168.1.202:27019') 当主添加了数据 从查看数据:例子 show tables error:{'$err':'not master and slaveok=false','code':13435} 出现上述错误是因为slave默认不许读写 rs.slaveOk()
当master荡掉以后
rs0:PRIMARY>db.shutdownServer() shutdown command only works with the admin database;try 'use admin' rs0:PRIMARY> use admin
rs0:PRIMARY>db.shutdownServer()
其后其中一个slave会变成master
db.isMaster()
#!/bin/bash
# Helper script for a 3-member mongod replica set (rs0) on one host.
#   ./start.sh reset    -> kill all mongod processes and wipe data dirs
#   ./start.sh restart  -> recreate dirs, start 3 mongod members, init the set
IP=192.168.1.202
NA=rs0

# NOTE: the original used `[ $1=='reset']` (no spaces), which bash evaluates
# as a single non-empty string — always true. Quote $1 and space the operator.
if [ "$1" == "reset" ]; then
    pkill -9 mongo
    rm -rf /home/m*
    exit 0          # bash uses `exit 0`, not `exit(0)`
fi

if [ "$1" == "restart" ]; then   # second branch handles restart (caller runs `sh start.sh restart`)
    mkdir -p /home/m17 /home/m18 /home/m19 /home/log
    mongod --port 27017 --dbpath /home/m17 --logpath /home/log/m17.log --fork --smallfiles --replSet ${NA}
    mongod --port 27018 --dbpath /home/m18 --logpath /home/log/m18.log --fork --smallfiles --replSet ${NA}
    mongod --port 27019 --dbpath /home/m19 --logpath /home/log/m19.log --fork --smallfiles --replSet ${NA}
    # Initialise the replica set; member ports must match the --port values above
    # (the original config listed 21017-21019, which no mongod was listening on).
    mongo <<EOF
use admin
var rsconf = {
    _id: '${NA}',
    members: [
        {_id: 0, host: '${IP}:27017'},
        {_id: 1, host: '${IP}:27018'},
        {_id: 2, host: '${IP}:27019'}
    ]
}
rs.initiate(rsconf);
EOF
    exit 0
fi
sh start.sh restart
shard分片
------> mongos路由器 ----- > configsvr -----> shard1
|_________> shard2
configsvr不是存储真正的数据,存储的meta信息,即‘某条数据在哪个片上’的信息
mongos查询某条数据时,要先找configsvr,询问得到该数据在哪个shard上
总结:分片要有如下要素
1、要有N>2个mongod服务做片节点
2、要有configsvr维护meta信息
3、要设定好数据的分片规则(configsvr才能维护)
4、要启动mongos路由
mkdir -p /home/m17 /home/m18 /home/m20
启动两个分片
./bin/mongod --dbpath /home/m17/ --logpath /home/mlog/m17/m17.log --fork --port 27017 --smallfiles
./bin/mongod --dbpath /home/m18/ --logpath /home/mlog/m18/m18.log --fork --port 27018 --smallfiles
启动配置服务器
./bin/mongod --dbpath /home/m17/ --logpath /home/mlog/m17/m17.log --fork --port 27020 --smallfiles --configsvr
启动mongos
./bin/mongos --logpath /home/mlog/m30.log --port 30000 --configdb 192.168.1.202:27020 --fork
./bin/mongo --port 30000
sh.addShard("rs0/192.168.1.202:27017")
sh.addShard("192.168.1.202:27017")
sh.addShard("192.168.1.202:27018")
# 设定数据的分片规则
sh.status()
sh.enableShard("shop") # 添加待分片的库
sh.enableSharding('shop') # 拆分数据库 db.runCommand({enableSharding:database})
sh.shardCollection('dbname.collectionname',{field:1}) # 添加待分片的表
field是一个
mongodb不是以单个文档的级别绝对平均地散落在各个片上,而是N条文档形成一个chunk,优先放在某个片上;当这个片上的chunk数与另一个片的chunk数差别比较大时(chunk数差>=3),会把本片上的chunk移到另一个片上,维护片之间的数据均衡。 为什么插入了100000条数据才创建了2个chunk:因为chunk比较大,默认64MB
use config db.settings.find() db.settings.save({_id:"chunksize", value:4}) # 改为4MB
既然优先往某个片上插入,当chunk失衡时,在移动chunk
自然随着数据的增多,shard的实例之间有chunk来回移动的现象,这将带来什么现象?
接上问:能否自定义一个规则,某N条数据形成一个块,预先分配M个chunk,M个chunk预先分配在不同片上,
以后的数据直接存入各自分配好的chunk,不再来回移动
答:能,手动预先分片
sh.shardCollection('shop.user',{userid:1}); //user表用userid做shard key
for (var i=1;i<=40;i++){
sh.splitAt('shop.user',{userid:i*10000000}) # 一千万条 一片,预先切好块,虽然chunk是空的,
这些chunk会均匀移动到各片上
}
通过mongos添加user数据,数据会添加到预先分配好的chunk上,chunk就不会来回移动了
分片与复制集的 组合应用
聚合与map reduce
分组统计 group() # 不能跨shard
简单聚合 aggregate()
强大统计 mapreduce() # 可以跨shard ,暴力服务
查询每个栏目下age大于10的商品个数
db.collection.group(document)
以下是document的格式
{ key:{key1:1,}, cond:{age:{$gt:10}},# 过滤条件 reduce: function(curr,result) {
# curr 每一行,result每一个分组的结果
result.total+=1 }, initial:{
total:0,
}, finalize:function() {
#对result进行最后一次操作 } }
key: 分组字段
cond:查询条件
reduce:聚合函数
initial:初始化
finalize:统计一组后的回调函数
group需要我们手写聚合函数的业务逻辑 group不支持shard cluster,无法分布式运算 分布式可以使用 aggregate() version2.2 mapReduce() version2.4
# 查询每个栏目的最贵的商品数量 max()操作 { key:{cat_id:1}, cond:{}, reduce:function(curr,result){ if (curr.shop_price > result.max){ result.max = curr.shop_price; } }, initial:{max:0} }
# 查询每个栏目下商品的平均价格 { key:{cat_Id:1}, cond:{}, reduce:function(curr,result){ result.cnt+=1; result.sum +=curr.shop_price; }, initial:{sum:0,cnt:0}, finalize:function(result){ result.avg=result.sum/result.cnt; } }
聚合框架
db.collection.aggregate(document);
[
{$group:{_id:'$cat_id',total:{$sum:1}}}
] # $代指每个值,,而不是一个字段 _id指定按谁分组,null不分组
WHERE $match
GROUP BY $group
HAVING $match
SELECT $project
ORDER BY $sort
LIMIT $limit
SUM() $sum
COUNT() $sum
一定要按照顺序
# 查询每个栏目下,价格大于50元的商品个数
# 并筛选出“满足条件的商品个数”大于等于3的数目
[
{$match:{shop_price:{$gt:50}}},
{$group:{_id:"$cat_id",total:{$sum:1}}},
{$match:{total:{$gte:3}}},
]
#查询每个栏目下的库存量,并按库存量排序,1升-1降
[
{$group:{_id:"$cat_id",total:{$sum:"$goods_number"}}},
结果{$sort:{total:1}},
{$limit:3}
]
#查询每个栏目的商品平均价格,并按平均价格由高到低排序
[
{$group:{_id:"$cat_id",avg:{$avg:"$shop_price"}}},
{$sort:{avg:-1}},
]
$addToSet
$first
$last
$max
$min
$avg
$push
$sum
mapreduce
随着大数据概念而流行,从功能上来说,相当于RDBMS的group操作
真正强项在于分布式:当数据量非常大时,像google那样有N多数据中心,数据不都在地球的一端,用group力所不及。
group既然不支持分布式,单台服务器的运算能力必然是有限的。
而mapRecure支持分布式,支持大量的服务器同时工作,用蛮力来统计,
重剑无锋(不是提高单个效率,而是让基数增大)
map->映射 先把属于同一个组的数据,映射到一个数组上,cat_id-3 [23,2,6,7]
reduce->归约 把数组(同一 组数据)的数据,进行运算。
用mapReduce 计算每个栏目的库存总量
var map = function(){ emit(this.cat_id,this.shop_price) } var reduce = function(cat_id,all_price){ return Array.sum(all_price) } db.goods.mapReduce(map,reduce,{out:'res'})
db.serverStatus() { "host" : "localhost.localdomain:1245", "version" : "4.0.4", "process" : "mongod", "pid" : NumberLong(4323), "uptime" : 25, #每秒更新 "uptimeMillis" : NumberLong(24606), "uptimeEstimate" : NumberLong(24), "localTime" : ISODate("2018-11-28T03:55:09.086Z"), "asserts" : { "regular" : 0, "warning" : 0, "msg" : 0, "user" : 1, "rollovers" : 0 }, "connections" : { "current" : 1, "available" : 818, "totalCreated" : 1 }, "extra_info" : { "note" : "fields vary by platform", "page_faults" : 3 }, "globalLock" : { "totalTime" : NumberLong(24600000), "currentQueue" : { "total" : 0, "readers" : 0, "writers" : 0 }, "activeClients" : { "total" : 10, "readers" : 0, "writers" : 0 } }, "locks" : { "Global" : { "acquireCount" : { "r" : NumberLong(124), "w" : NumberLong(9), "W" : NumberLong(5) } }, "Database" : { "acquireCount" : { "r" : NumberLong(41), "w" : NumberLong(2), "R" : NumberLong(4), "W" : NumberLong(7) } }, "Collection" : { "acquireCount" : { "r" : NumberLong(37), "w" : NumberLong(1) } } }, "logicalSessionRecordCache" : { "activeSessionsCount" : 1, "sessionsCollectionJobCount" : 1, "lastSessionsCollectionJobDurationMillis" : 0, "lastSessionsCollectionJobTimestamp" : ISODate("2018-11-28T03:54:45.874Z"), "lastSessionsCollectionJobEntriesRefreshed" : 0, "lastSessionsCollectionJobEntriesEnded" : 0, "lastSessionsCollectionJobCursorsClosed" : 0, "transactionReaperJobCount" : 0, "lastTransactionReaperJobDurationMillis" : 0, "lastTransactionReaperJobTimestamp" : ISODate("2018-11-28T03:54:45.874Z"), "lastTransactionReaperJobEntriesCleanedUp" : 0 }, "network" : { "bytesIn" : NumberLong(1678), "bytesOut" : NumberLong(6088), "physicalBytesIn" : NumberLong(1678), "physicalBytesOut" : NumberLong(6088), "numRequests" : NumberLong(14), "compression" : { "snappy" : { "compressor" : { "bytesIn" : NumberLong(0), "bytesOut" : NumberLong(0) }, "decompressor" : { "bytesIn" : NumberLong(0), "bytesOut" : NumberLong(0) } } }, "serviceExecutorTaskStats" : { "executor" : "passthrough", 
"threadsRunning" : 1 } }, "opLatencies" : { "reads" : { "latency" : NumberLong(0), "ops" : NumberLong(0) }, "writes" : { "latency" : NumberLong(0), "ops" : NumberLong(0) }, "commands" : { "latency" : NumberLong(29705), "ops" : NumberLong(13) }, "transactions" : { "latency" : NumberLong(0), "ops" : NumberLong(0) } }, "opcounters" : { "insert" : 0, "query" : 1, "update" : 0, "delete" : 0, "getmore" : 0,# 每秒查询游标 "command" : 15 }, "opcountersRepl" : { "insert" : 0, "query" : 0, "update" : 0, "delete" : 0, "getmore" : 0,# 每秒查询游标 "command" : 0 }, "storageEngine" : { "name" : "wiredTiger", "supportsCommittedReads" : true, "supportsSnapshotReadConcern" : true, "readOnly" : false, "persistent" : true }, "tcmalloc" : { "generic" : { "current_allocated_bytes" : 67860992, "heap_size" : 71118848 }, "tcmalloc" : { "pageheap_free_bytes" : 2318336, "pageheap_unmapped_bytes" : 0, "max_total_thread_cache_bytes" : 494927872, "current_total_thread_cache_bytes" : 460096, "total_free_bytes" : 939520, "central_cache_free_bytes" : 218816, "transfer_cache_free_bytes" : 260608, "thread_cache_free_bytes" : 460096, "aggressive_memory_decommit" : 0, "pageheap_committed_bytes" : 71118848, "pageheap_scavenge_count" : 0, "pageheap_commit_count" : 46, "pageheap_total_commit_bytes" : 71118848, "pageheap_decommit_count" : 0, "pageheap_total_decommit_bytes" : 0, "pageheap_reserve_count" : 46, "pageheap_total_reserve_bytes" : 71118848, "spinlock_total_delay_ns" : 0, "formattedString" : "------------------------------------------------\nMALLOC: 67861568 ( 64.7 MiB) Bytes in use by application\nMALLOC: + 2318336 ( 2.2 MiB) Bytes in page heap freelist\nMALLOC: + 218816 ( 0.2 MiB) Bytes in central cache freelist\nMALLOC: + 260608 ( 0.2 MiB) Bytes in transfer cache freelist\nMALLOC: + 459520 ( 0.4 MiB) Bytes in thread cache freelists\nMALLOC: + 1335552 ( 1.3 MiB) Bytes in malloc metadata\nMALLOC: ------------\nMALLOC: = 72454400 ( 69.1 MiB) Actual memory used (physical + swap)\nMALLOC: + 0 ( 0.0 MiB) Bytes 
released to OS (aka unmapped)\nMALLOC: ------------\nMALLOC: = 72454400 ( 69.1 MiB) Virtual address space used\nMALLOC:\nMALLOC: 525 Spans in use\nMALLOC: 17 Thread heaps in use\nMALLOC: 4096 Tcmalloc page size\n------------------------------------------------\nCall ReleaseFreeMemory() to release freelist memory to the OS (via madvise()).\nBytes released to the OS take up virtual address space but no physical memory.\n" } }, "transactions" : { "retriedCommandsCount" : NumberLong(0), "retriedStatementsCount" : NumberLong(0), "transactionsCollectionWriteCount" : NumberLong(0), "currentActive" : NumberLong(0), "currentInactive" : NumberLong(0), "currentOpen" : NumberLong(0), "totalAborted" : NumberLong(0), "totalCommitted" : NumberLong(0), "totalStarted" : NumberLong(0) }, "wiredTiger" : { "uri" : "statistics:", "LSM" : { "application work units currently queued" : 0, "merge work units currently queued" : 0, "rows merged in an LSM tree" : 0, "sleep for LSM checkpoint throttle" : 0, "sleep for LSM merge throttle" : 0, "switch work units currently queued" : 0, "tree maintenance operations discarded" : 0, "tree maintenance operations executed" : 0, "tree maintenance operations scheduled" : 0, "tree queue hit maximum" : 0 }, "async" : { "current work queue length" : 0, "maximum work queue length" : 0, "number of allocation state races" : 0, "number of flush calls" : 0, "number of operation slots viewed for allocation" : 0, "number of times operation allocation failed" : 0, "number of times worker found no work" : 0, "total allocations" : 0, "total compact calls" : 0, "total insert calls" : 0, "total remove calls" : 0, "total search calls" : 0, "total update calls" : 0 }, "block-manager" : { "blocks pre-loaded" : 10, "blocks read" : 31, "blocks written" : 12, "bytes read" : 139264, "bytes written" : 61440, "bytes written for checkpoint" : 61440, "mapped blocks read" : 0, "mapped bytes read" : 0 }, "cache" : { "application threads page read from disk to cache count" : 7, 
"application threads page read from disk to cache time (usecs)" : 359, "application threads page write from cache to disk count" : 0, "application threads page write from cache to disk time (usecs)" : 0, "bytes belonging to page images in the cache" : 38233, "bytes belonging to the cache overflow table in the cache" : 182, "bytes currently in the cache" : 49407, "bytes not belonging to page images in the cache" : 11174, "bytes read into cache" : 50094, "bytes written from cache" : 32282, "cache overflow cursor application thread wait time (usecs)" : 0, "cache overflow cursor internal thread wait time (usecs)" : 0, "cache overflow score" : 0, "cache overflow table entries" : 0, "cache overflow table insert calls" : 0, "cache overflow table remove calls" : 0, "checkpoint blocked page eviction" : 0, "eviction calls to get a page" : 5, "eviction calls to get a page found queue empty" : 5, "eviction calls to get a page found queue empty after locking" : 0, "eviction currently operating in aggressive mode" : 0, "eviction empty score" : 0, "eviction passes of a file" : 0, "eviction server candidate queue empty when topping up" : 0, "eviction server candidate queue not empty when topping up" : 0, "eviction server evicting pages" : 0, "eviction server slept, because we did not make progress with eviction" : 0, "eviction server unable to reach eviction goal" : 0, "eviction state" : 32, "eviction walk target pages histogram - 0-9" : 0, "eviction walk target pages histogram - 10-31" : 0, "eviction walk target pages histogram - 128 and higher" : 0, "eviction walk target pages histogram - 32-63" : 0, "eviction walk target pages histogram - 64-128" : 0, "eviction walks abandoned" : 0, "eviction walks gave up because they restarted their walk twice" : 0, "eviction walks gave up because they saw too many pages and found no candidates" : 0, "eviction walks gave up because they saw too many pages and found too few candidates" : 0, "eviction walks reached end of tree" : 0, "eviction 
walks started from root of tree" : 0, "eviction walks started from saved location in tree" : 0, "eviction worker thread active" : 4, "eviction worker thread created" : 0, "eviction worker thread evicting pages" : 0, "eviction worker thread removed" : 0, "eviction worker thread stable number" : 0, "failed eviction of pages that exceeded the in-memory maximum count" : 0, "failed eviction of pages that exceeded the in-memory maximum time (usecs)" : 0, "files with active eviction walks" : 0, "files with new eviction walks started" : 0, "force re-tuning of eviction workers once in a while" : 0, "hazard pointer blocked page eviction" : 0, "hazard pointer check calls" : 0, "hazard pointer check entries walked" : 0, "hazard pointer maximum array length" : 0, "in-memory page passed criteria to be split" : 0, "in-memory page splits" : 0, "internal pages evicted" : 0, "internal pages split during eviction" : 0, "leaf pages split during eviction" : 0, "maximum bytes configured" : 1442840576, "maximum page size at eviction" : 0, "modified pages evicted" : 0, "modified pages evicted by application threads" : 0, "operations timed out waiting for space in cache" : 0, "overflow pages read into cache" : 0, "page split during eviction deepened the tree" : 0, "page written requiring cache overflow records" : 0, "pages currently held in the cache" : 18, "pages evicted because they exceeded the in-memory maximum count" : 0, "pages evicted because they exceeded the in-memory maximum time (usecs)" : 0, "pages evicted because they had chains of deleted items count" : 0, "pages evicted because they had chains of deleted items time (usecs)" : 0, "pages evicted by application threads" : 0, "pages queued for eviction" : 0, "pages queued for urgent eviction" : 0, "pages queued for urgent eviction during walk" : 0, "pages read into cache" : 20, "pages read into cache after truncate" : 0, "pages read into cache after truncate in prepare state" : 0, "pages read into cache requiring cache overflow 
entries" : 0, "pages read into cache requiring cache overflow for checkpoint" : 0, "pages read into cache skipping older cache overflow entries" : 0, "pages read into cache with skipped cache overflow entries needed later" : 0, "pages read into cache with skipped cache overflow entries needed later by checkpoint" : 0, "pages requested from the cache" : 299, "pages seen by eviction walk" : 0, "pages selected for eviction unable to be evicted" : 0, "pages walked for eviction" : 0, "pages written from cache" : 6, "pages written requiring in-memory restoration" : 0, "percentage overhead" : 8, "tracked bytes belonging to internal pages in the cache" : 2282, "tracked bytes belonging to leaf pages in the cache" : 47125, "tracked dirty bytes in the cache" : 43301, "tracked dirty pages in the cache" : 3, "unmodified pages evicted" : 0 }, "connection" : { "auto adjusting condition resets" : 13, "auto adjusting condition wait calls" : 171, "detected system time went backwards" : 0, "files currently open" : 13, "memory allocations" : 3163, "memory frees" : 2297, "memory re-allocations" : 273, "pthread mutex condition wait calls" : 423, "pthread mutex shared lock read-lock calls" : 352, "pthread mutex shared lock write-lock calls" : 133, "total fsync I/Os" : 29, "total read I/Os" : 915, "total write I/Os" : 32 }, "cursor" : { "cursor create calls" : 50, "cursor insert calls" : 8, "cursor modify calls" : 0, "cursor next calls" : 107, "cursor operation restarted" : 0, "cursor prev calls" : 5, "cursor remove calls" : 2, "cursor reserve calls" : 0, "cursor reset calls" : 281, "cursor search calls" : 269, "cursor search near calls" : 11, "cursor sweep buckets" : 150, "cursor sweep cursors closed" : 0, "cursor sweep cursors examined" : 2, "cursor sweeps" : 25, "cursor update calls" : 0, "cursors cached on close" : 13, "cursors reused from cache" : 3, "truncate calls" : 0 }, "data-handle" : { "connection data handles currently active" : 21, "connection sweep candidate became 
referenced" : 0, "connection sweep dhandles closed" : 0, "connection sweep dhandles removed from hash list" : 5, "connection sweep time-of-death sets" : 23, "connection sweeps" : 7, "session dhandles swept" : 0, "session sweep attempts" : 26 }, "lock" : { "checkpoint lock acquisitions" : 11, "checkpoint lock application thread wait time (usecs)" : 0, "checkpoint lock internal thread wait time (usecs)" : 0, "commit timestamp queue lock application thread time waiting (usecs)" : 0, "commit timestamp queue lock internal thread time waiting (usecs)" : 0, "commit timestamp queue read lock acquisitions" : 0, "commit timestamp queue write lock acquisitions" : 0, "dhandle lock application thread time waiting (usecs)" : 0, "dhandle lock internal thread time waiting (usecs)" : 0, "dhandle read lock acquisitions" : 113, "dhandle write lock acquisitions" : 33, "metadata lock acquisitions" : 1, "metadata lock application thread wait time (usecs)" : 0, "metadata lock internal thread wait time (usecs)" : 0, "read timestamp queue lock application thread time waiting (usecs)" : 0, "read timestamp queue lock internal thread time waiting (usecs)" : 0, "read timestamp queue read lock acquisitions" : 0, "read timestamp queue write lock acquisitions" : 0, "schema lock acquisitions" : 26, "schema lock application thread wait time (usecs)" : 0, "schema lock internal thread wait time (usecs)" : 0, "table lock application thread time waiting for the table lock (usecs)" : 0, "table lock internal thread time waiting for the table lock (usecs)" : 0, "table read lock acquisitions" : 0, "table write lock acquisitions" : 13, "txn global lock application thread time waiting (usecs)" : 0, "txn global lock internal thread time waiting (usecs)" : 0, "txn global read lock acquisitions" : 12, "txn global write lock acquisitions" : 6 }, "log" : { "busy returns attempting to switch slots" : 0, "force archive time sleeping (usecs)" : 0, "log bytes of payload data" : 2289, "log bytes written" : 4608, "log 
files manually zero-filled" : 0, "log flush operations" : 231, "log force write operations" : 262, "log force write operations skipped" : 261, "log records compressed" : 2, "log records not compressed" : 0, "log records too small to compress" : 15, "log release advances write LSN" : 13, "log scan operations" : 5, "log scan records requiring two reads" : 4, "log server thread advances write LSN" : 1, "log server thread write LSN walk skipped" : 997, "log sync operations" : 14, "log sync time duration (usecs)" : 28650, "log sync_dir operations" : 1, "log sync_dir time duration (usecs)" : 1, "log write operations" : 17, "logging bytes consolidated" : 4096, "maximum log file size" : 104857600, "number of pre-allocated log files to create" : 2, "pre-allocated log files not ready and missed" : 1, "pre-allocated log files prepared" : 2, "pre-allocated log files used" : 0, "records processed by log scan" : 41, "slot close lost race" : 0, "slot close unbuffered waits" : 0, "slot closures" : 14, "slot join atomic update races" : 0, "slot join calls atomic updates raced" : 0, "slot join calls did not yield" : 17, "slot join calls found active slot closed" : 0, "slot join calls slept" : 0, "slot join calls yielded" : 0, "slot join found active slot closed" : 0, "slot joins yield time (usecs)" : 0, "slot transitions unable to find free slot" : 0, "slot unbuffered writes" : 0, "total in-memory size of compressed records" : 2530, "total log buffer size" : 33554432, "total size of compressed records" : 1978, "written slots coalesced" : 0, "yields waiting for previous log file close" : 0 }, "perf" : { "file system read latency histogram (bucket 1) - 10-49ms" : 3, "file system read latency histogram (bucket 2) - 50-99ms" : 0, "file system read latency histogram (bucket 3) - 100-249ms" : 0, "file system read latency histogram (bucket 4) - 250-499ms" : 0, "file system read latency histogram (bucket 5) - 500-999ms" : 0, "file system read latency histogram (bucket 6) - 1000ms+" : 0, 
"file system write latency histogram (bucket 1) - 10-49ms" : 0, "file system write latency histogram (bucket 2) - 50-99ms" : 0, "file system write latency histogram (bucket 3) - 100-249ms" : 0, "file system write latency histogram (bucket 4) - 250-499ms" : 0, "file system write latency histogram (bucket 5) - 500-999ms" : 0, "file system write latency histogram (bucket 6) - 1000ms+" : 0, "operation read latency histogram (bucket 1) - 100-249us" : 0, "operation read latency histogram (bucket 2) - 250-499us" : 0, "operation read latency histogram (bucket 3) - 500-999us" : 0, "operation read latency histogram (bucket 4) - 1000-9999us" : 0, "operation read latency histogram (bucket 5) - 10000us+" : 0, "operation write latency histogram (bucket 1) - 100-249us" : 2, "operation write latency histogram (bucket 2) - 250-499us" : 1, "operation write latency histogram (bucket 3) - 500-999us" : 0, "operation write latency histogram (bucket 4) - 1000-9999us" : 0, "operation write latency histogram (bucket 5) - 10000us+" : 0 }, "reconciliation" : { "fast-path pages deleted" : 0, "page reconciliation calls" : 6, "page reconciliation calls for eviction" : 0, "pages deleted" : 0, "split bytes currently awaiting free" : 0, "split objects currently awaiting free" : 0 }, "session" : { "open cursor count" : 21, "open session count" : 18, "session query timestamp calls" : 0, "table alter failed calls" : 0, "table alter successful calls" : 10, "table alter unchanged and skipped" : 30, "table compact failed calls" : 0, "table compact successful calls" : 0, "table create failed calls" : 0, "table create successful calls" : 1, "table drop failed calls" : 0, "table drop successful calls" : 0, "table rebalance failed calls" : 0, "table rebalance successful calls" : 0, "table rename failed calls" : 0, "table rename successful calls" : 0, "table salvage failed calls" : 0, "table salvage successful calls" : 0, "table truncate failed calls" : 0, "table truncate successful calls" : 0, "table verify 
failed calls" : 0, "table verify successful calls" : 0 }, "thread-state" : { "active filesystem fsync calls" : 0, "active filesystem read calls" : 0, "active filesystem write calls" : 0 }, "thread-yield" : { "application thread time evicting (usecs)" : 0, "application thread time waiting for cache (usecs)" : 0, "connection close blocked waiting for transaction state stabilization" : 0, "connection close yielded for lsm manager shutdown" : 0, "data handle lock yielded" : 0, "get reference for page index and slot time sleeping (usecs)" : 0, "log server sync yielded for log write" : 0, "page access yielded due to prepare state change" : 0, "page acquire busy blocked" : 0, "page acquire eviction blocked" : 0, "page acquire locked blocked" : 0, "page acquire read blocked" : 0, "page acquire time sleeping (usecs)" : 0, "page delete rollback time sleeping for state change (usecs)" : 0, "page reconciliation yielded due to child modification" : 0 }, "transaction" : { "Number of prepared updates" : 0, "Number of prepared updates added to cache overflow" : 0, "Number of prepared updates resolved" : 0, "commit timestamp queue entries walked" : 0, "commit timestamp queue insert to empty" : 0, "commit timestamp queue inserts to head" : 0, "commit timestamp queue inserts total" : 0, "commit timestamp queue length" : 0, "number of named snapshots created" : 0, "number of named snapshots dropped" : 0, "prepared transactions" : 0, "prepared transactions committed" : 0, "prepared transactions currently active" : 0, "prepared transactions rolled back" : 0, "query timestamp calls" : 1, "read timestamp queue entries walked" : 0, "read timestamp queue insert to empty" : 0, "read timestamp queue inserts to head" : 0, "read timestamp queue inserts total" : 0, "read timestamp queue length" : 0, "rollback to stable calls" : 0, "rollback to stable updates aborted" : 0, "rollback to stable updates removed from cache overflow" : 0, "set timestamp calls" : 0, "set timestamp commit calls" : 0, 
"set timestamp commit updates" : 0, "set timestamp oldest calls" : 0, "set timestamp oldest updates" : 0, "set timestamp stable calls" : 0, "set timestamp stable updates" : 0, "transaction begins" : 5, "transaction checkpoint currently running" : 0, "transaction checkpoint generation" : 2, "transaction checkpoint max time (msecs)" : 5, "transaction checkpoint min time (msecs)" : 5, "transaction checkpoint most recent time (msecs)" : 5, "transaction checkpoint scrub dirty target" : 0, "transaction checkpoint scrub time (msecs)" : 0, "transaction checkpoint total time (msecs)" : 5, "transaction checkpoints" : 1, "transaction checkpoints skipped because database was clean" : 0, "transaction failures due to cache overflow" : 0, "transaction fsync calls for checkpoint after allocating the transaction ID" : 1, "transaction fsync duration for checkpoint after allocating the transaction ID (usecs)" : 1502, "transaction range of IDs currently pinned" : 0, "transaction range of IDs currently pinned by a checkpoint" : 0, "transaction range of IDs currently pinned by named snapshots" : 0, "transaction range of timestamps currently pinned" : 0, "transaction range of timestamps pinned by a checkpoint" : 0, "transaction range of timestamps pinned by the oldest timestamp" : 0, "transaction sync calls" : 0, "transactions committed" : 1, "transactions rolled back" : 4, "update conflicts" : 0 }, "concurrentTransactions" : { "write" : { "out" : 0, "available" : 128, "totalTickets" : 128 }, "read" : { "out" : 1, "available" : 127, "totalTickets" : 128 } } }, "mem" : { "bits" : 64, "resident" : 51, "virtual" : 960, "supported" : true, "mapped" : 0, "mappedWithJournal" : 0 }, "metrics" : { "commands" : { "buildInfo" : { "failed" : NumberLong(0), "total" : NumberLong(2) }, "createIndexes" : { "failed" : NumberLong(0), "total" : NumberLong(1) }, "find" : { "failed" : NumberLong(0), "total" : NumberLong(1) }, "getFreeMonitoringStatus" : { "failed" : NumberLong(0), "total" : NumberLong(1) }, 
"getLog" : { "failed" : NumberLong(0), "total" : NumberLong(1) }, "isMaster" : { "failed" : NumberLong(0), "total" : NumberLong(3) }, "listCollections" : { "failed" : NumberLong(0), "total" : NumberLong(4) }, "replSetGetStatus" : { "failed" : NumberLong(1), "total" : NumberLong(1) }, "serverStatus" : { "failed" : NumberLong(0), "total" : NumberLong(1) }, "whatsmyuri" : { "failed" : NumberLong(0), "total" : NumberLong(1) } }, "cursor" : { "timedOut" : NumberLong(0), "open" : { "noTimeout" : NumberLong(0), "pinned" : NumberLong(0), "total" : NumberLong(0) } }, "document" : { "deleted" : NumberLong(0), "inserted" : NumberLong(0), "returned" : NumberLong(0), "updated" : NumberLong(0) }, "getLastError" : { "wtime" : { "num" : 0, "totalMillis" : 0 }, "wtimeouts" : NumberLong(0) }, "operation" : { "scanAndOrder" : NumberLong(0), "writeConflicts" : NumberLong(0) }, "queryExecutor" : { "scanned" : NumberLong(0), "scannedObjects" : NumberLong(0) }, "record" : { "moves" : NumberLong(0) }, "repl" : { "executor" : { "pool" : { "inProgressCount" : 0 }, "queues" : { "networkInProgress" : 0, "sleepers" : 0 }, "unsignaledEvents" : 0, "shuttingDown" : false, "networkInterface" : "DEPRECATED: getDiagnosticString is deprecated in NetworkInterfaceTL" }, "apply" : { "attemptsToBecomeSecondary" : NumberLong(0), "batches" : { "num" : 0, "totalMillis" : 0 }, "ops" : NumberLong(0) }, "buffer" : { "count" : NumberLong(0), "maxSizeBytes" : NumberLong(0), "sizeBytes" : NumberLong(0) }, "initialSync" : { "completed" : NumberLong(0), "failedAttempts" : NumberLong(0), "failures" : NumberLong(0) }, "network" : { "bytes" : NumberLong(0), "getmores" : { "num" : 0, "totalMillis" : 0 }, "ops" : NumberLong(0), "readersCreated" : NumberLong(0) }, "preload" : { "docs" : { "num" : 0, "totalMillis" : 0 }, "indexes" : { "num" : 0, "totalMillis" : 0 } } }, "storage" : { "freelist" : { "search" : { "bucketExhausted" : NumberLong(0), "requests" : NumberLong(0), "scanned" : NumberLong(0) } } }, "ttl" : { 
"deletedDocuments" : NumberLong(0), "passes" : NumberLong(0) } }, "ok" : 1 }

浙公网安备 33010602011771号