速度慢得令人无法接受的带索引的 MongoDB $lte + $gte 查询
我正在笔记本电脑上运行 Community MongoDB 3.4.9,内存为 64 GB。我的集合中有超过 1200 万份文档。每个文档至少有 Int64 类型的 from 和 to 字段。from-to 构成唯一的范围,没有范围重叠的文档。集合上有一个索引,如下所示:
{
"v" : NumberInt(1),
"unique" : true,
"key" : {
"from" : NumberInt(1),
"to" : NumberInt(1)
},
"name" : "range",
"ns" : "db.location",
"background" : true
}
服务器/数据库处于空闲状态,没有客户端连接。我反复运行下面的查询,得到了大约 21 秒的恒定执行时间:
db.location.find({from:{$lte:NumberLong(3682093364)},to:{$gte:NumberLong(3682093364)}}).limit(1)
反转 $and 中两个条件的顺序对执行时间没有影响。explain 命令显示以下内容:
{
"queryPlanner" : {
"plannerVersion" : 1.0,
"namespace" : "db.location",
"indexFilterSet" : false,
"parsedQuery" : {
"$and" : [
{
"from" : {
"$lte" : NumberLong(3682093364)
}
},
{
"to" : {
"$gte" : NumberLong(3682093364)
}
}
]
},
"winningPlan" : {
"stage" : "LIMIT",
"limitAmount" : 1.0,
"inputStage" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"from" : 1.0,
"to" : 1.0
},
"indexName" : "range",
"isMultiKey" : false,
"multiKeyPaths" : {
"from" : [
],
"to" : [
]
},
"isUnique" : true,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1.0,
"direction" : "forward",
"indexBounds" : {
"from" : [
"[-inf.0, 3682093364]"
],
"to" : [
"[3682093364, inf.0]"
]
}
}
}
},
"rejectedPlans" : [
]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 1.0,
"executionTimeMillis" : 21526.0,
"totalKeysExamined" : 12284007.0,
"totalDocsExamined" : 1.0,
"executionStages" : {
"stage" : "LIMIT",
"nReturned" : 1.0,
"executionTimeMillisEstimate" : 20945.0,
"works" : 12284008.0,
"advanced" : 1.0,
"needTime" : 12284006.0,
"needYield" : 0.0,
"saveState" : 96299.0,
"restoreState" : 96299.0,
"isEOF" : 1.0,
"invalidates" : 0.0,
"limitAmount" : 1.0,
"inputStage" : {
"stage" : "FETCH",
"nReturned" : 1.0,
"executionTimeMillisEstimate" : 20714.0,
"works" : 12284007.0,
"advanced" : 1.0,
"needTime" : 12284006.0,
"needYield" : 0.0,
"saveState" : 96299.0,
"restoreState" : 96299.0,
"isEOF" : 0.0,
"invalidates" : 0.0,
"docsExamined" : 1.0,
"alreadyHasObj" : 0.0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 1.0,
"executionTimeMillisEstimate" : 20357.0,
"works" : 12284007.0,
"advanced" : 1.0,
"needTime" : 12284006.0,
"needYield" : 0.0,
"saveState" : 96299.0,
"restoreState" : 96299.0,
"isEOF" : 0.0,
"invalidates" : 0.0,
"keyPattern" : {
"from" : 1.0,
"to" : 1.0
},
"indexName" : "range",
"isMultiKey" : false,
"multiKeyPaths" : {
"from" : [
],
"to" : [
]
},
"isUnique" : true,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1.0,
"direction" : "forward",
"indexBounds" : {
"from" : [
"[-inf.0, 3682093364]"
],
"to" : [
"[3682093364, inf.0]"
]
},
"keysExamined" : 12284007.0,
"seeks" : 12284007.0,
"dupsTested" : 0.0,
"dupsDropped" : 0.0,
"seenInvalidated" : 0.0
}
}
},
"allPlansExecution" : [
]
},
"serverInfo" : {
"host" : "LAPTOP-Q96TVSN8",
"port" : 27017.0,
"version" : "3.4.9",
"gitVersion" : "876ebee8c7dd0e2d992f36a848ff4dc50ee6603e"
},
"ok" : 1.0
}
提供一个索引提示(hint)并没有什么区别。explain 似乎表明已经使用了正确的(也是唯一的)索引,但大部分执行时间(约 20 秒)都花在了 IXSCAN 阶段。MongoDB 日志显示,扫描了大量索引项,但只读取并返回了一个文档。考虑到数据库上没有并发操作,日志中还显示了数量惊人的锁获取和让步(yield)。底层引擎是 SSD 磁盘上的 WiredTiger,MongoDB 的内存使用量为 7 GB。
2017-10-10T10:06:14.456+0200 I COMMAND [conn33] command db.location appName: "MongoDB Shell" command: explain { explain: { find: "location", filter: { from: { $lte: 3682093364 }, to: { $gte: 3682093364 } }, limit: 1.0, singleBatch: false }, verbosity: "allPlansExecution" } numYields:96299 reslen:1944 locks:{ Global: { acquireCount: { r: 192600 } }, Database: { acquireCount: { r: 96300 } }, Collection: { acquireCount: { r: 96300 } } } protocol:op_command 21526ms
考虑到我的范围从不重叠,有没有更好的方法来组织文档以便更快地查找?有没有什么明显的地方表明我做错了?
更新:
当我删除索引后,查询改用 COLLSCAN,文档在 8-9 秒内被一致地找到。我本不想回答自己的问题,但我很高兴找到了一个解决方案。
尽管创建这样一个复合索引看起来是有意义的,但考虑到非重叠范围的具体情况,索引扫描的范围太广了。查询的数字越大,查找结果所需的时间就越长,因为越来越多的索引项满足 from 上的 $lte 边界条件。基于这些信息,我找到了一个类似的问题,这非常有用。但请帮我澄清一下:是否应该一个索引用于 $lte,另一个索引用于 $gte?——这是很久以前的事了,但根据迄今为止所写的一切,我会说:是的!确实很有帮助。