Mongodb 平衡器未将数据移动到新的分片

Mongodb 平衡器未将数据移动到新的分片,mongodb,sharding,Mongodb,Sharding,最初我尝试使用3个分片进行MongoDB水平分片,我看到数据在各分片之间平均分配。我使用了哈希(hashed)作为分片键。现在,我已经将第四个分片添加到这个现有设置中。但我发现 balancer 并没有将数据块移动到第四个分片;即使我又插入了超过100万个文档,数据块仍未移动到第四个分片,也就是说第四个分片仍是空的。请查看我执行的命令。配置服务器:mongod --configsvr --port 27019 --replSet rs0 --dbpath data1 --bind_ip localhost

最初我尝试使用3个分片进行MongoDB水平分片,我看到数据在各分片之间平均分配。我使用了哈希(hashed)作为分片键。

现在,我已经将第四个分片添加到这个现有设置中。但我发现 balancer 并没有将数据块移动到第四个分片;即使我又插入了超过100万个文档,数据块仍未移动到第四个分片,也就是说第四个分片仍是空的。

请查找我执行的命令

配置服务器


mongod --configsvr --port 27019 --replSet rs0 --dbpath data1 --bind_ip localhost
mongod --configsvr --port 27020 --replSet rs0 --dbpath data2 --bind_ip localhost
mongod --configsvr --port 27021 --replSet rs0 --dbpath data3 --bind_ip localhost


mongod --configsvr --replSet rs0 --dbpath data1 --bind_ip localhost


rs.initiate(
  {
    _id: "rs0",
    configsvr: true,
    members: [
      { _id : 0, host : "localhost:27019" },
      { _id : 1, host : "localhost:27020" },
      { _id : 2, host : "localhost:27021" }
    ]
  }
)
mongod --shardsvr --port 37019 --replSet rp1  --dbpath data1 --bind_ip localhost
mongod --shardsvr --port 37020 --replSet rp1  --dbpath data2 --bind_ip localhost
mongod --shardsvr --port 37021 --replSet rp1  --dbpath data3 --bind_ip localhost
rs.initiate( { _id : "rp1",  members: [   { _id: 0, host: "localhost:37019" },   { _id: 1, host: "localhost:37020" },   { _id: 2, host: "localhost:37021" } ] })

mongod --shardsvr --port 47019 --replSet rp2  --dbpath data1 --bind_ip localhost
mongod --shardsvr --port 47020 --replSet rp2  --dbpath data2 --bind_ip localhost
mongod --shardsvr --port 47021 --replSet rp2  --dbpath data3 --bind_ip localhost
rs.initiate( { _id : "rp2",  members: [   { _id: 0, host: "localhost:47019" },   { _id: 1, host: "localhost:47020" },   { _id: 2, host: "localhost:47021" } ] })

mongod --shardsvr --port 57019 --replSet rp3  --dbpath data1 --bind_ip localhost
mongod --shardsvr --port 57020 --replSet rp3  --dbpath data2 --bind_ip localhost
mongod --shardsvr --port 57021 --replSet rp3  --dbpath data3 --bind_ip localhost
rs.initiate( { _id : "rp3",  members: [   { _id: 0, host: "localhost:57019" },   { _id: 1, host: "localhost:57020" },   { _id: 2, host: "localhost:57021" } ] })

sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("6052f2e74f97982edd469896")
  }
  shards:
        {  "_id" : "rp1",  "host" : "rp1/localhost:37019,localhost:37020,localhost:37021",  "state" : 1 }
        {  "_id" : "rp2",  "host" : "rp2/localhost:47019,localhost:47020,localhost:47021",  "state" : 1 }
        {  "_id" : "rp3",  "host" : "rp3/localhost:57019,localhost:57020,localhost:57021",  "state" : 1 }
        {  "_id" : "rp4",  "host" : "rp4/localhost:57119,localhost:57120,localhost:57121",  "state" : 1 }
  active mongoses:
        "4.2.7" : 1
  autosplit:
        Currently enabled: yes
  balancer:
        Currently enabled:  yes
        Currently running:  no
        Failed balancer rounds in last 5 attempts:  0
        Migration Results for the last 24 hours:
                No recent migrations
  databases:
        {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                config.system.sessions
                        shard key: { "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                rp1 256
                                rp2 256
                                rp3 256
                                rp4 256
                        too many chunks to print, use verbose if you want to force print
        {  "_id" : "demodb",  "primary" : "rp2",  "partitioned" : true,  "version" : {  "uuid" : UUID("0034d13f-d46f-4248-9bec-021b8949336b"),  "lastMod" : 1 } }
                demodb.collection
                        shard key: { "timeStamp" : "hashed" }
                        unique: false
                        balancing: true
                        chunks:
                                rp1 2
                                rp2 2
                                rp3 2
                        { "timeStamp" : { "$minKey" : 1 } } -->> { "timeStamp" : NumberLong("-6148914691236517204") } on : rp1 Timestamp(1, 0)
                        { "timeStamp" : NumberLong("-6148914691236517204") } -->> { "timeStamp" : NumberLong("-3074457345618258602") } on : rp1 Timestamp(1, 1)
                        { "timeStamp" : NumberLong("-3074457345618258602") } -->> { "timeStamp" : NumberLong(0) } on : rp2 Timestamp(1, 2)
                        { "timeStamp" : NumberLong(0) } -->> { "timeStamp" : NumberLong("3074457345618258602") } on : rp2 Timestamp(1, 3)
                        { "timeStamp" : NumberLong("3074457345618258602") } -->> { "timeStamp" : NumberLong("6148914691236517204") } on : rp3 Timestamp(1, 4)
                        { "timeStamp" : NumberLong("6148914691236517204") } -->> { "timeStamp" : { "$maxKey" : 1 } } on : rp3 Timestamp(1, 5)
三个分片


mongod --configsvr --port 27019 --replSet rs0 --dbpath data1 --bind_ip localhost
mongod --configsvr --port 27020 --replSet rs0 --dbpath data2 --bind_ip localhost
mongod --configsvr --port 27021 --replSet rs0 --dbpath data3 --bind_ip localhost


mongod --configsvr --replSet rs0 --dbpath data1 --bind_ip localhost


rs.initiate(
  {
    _id: "rs0",
    configsvr: true,
    members: [
      { _id : 0, host : "localhost:27019" },
      { _id : 1, host : "localhost:27020" },
      { _id : 2, host : "localhost:27021" }
    ]
  }
)
mongod --shardsvr --port 37019 --replSet rp1  --dbpath data1 --bind_ip localhost
mongod --shardsvr --port 37020 --replSet rp1  --dbpath data2 --bind_ip localhost
mongod --shardsvr --port 37021 --replSet rp1  --dbpath data3 --bind_ip localhost
rs.initiate( { _id : "rp1",  members: [   { _id: 0, host: "localhost:37019" },   { _id: 1, host: "localhost:37020" },   { _id: 2, host: "localhost:37021" } ] })

mongod --shardsvr --port 47019 --replSet rp2  --dbpath data1 --bind_ip localhost
mongod --shardsvr --port 47020 --replSet rp2  --dbpath data2 --bind_ip localhost
mongod --shardsvr --port 47021 --replSet rp2  --dbpath data3 --bind_ip localhost
rs.initiate( { _id : "rp2",  members: [   { _id: 0, host: "localhost:47019" },   { _id: 1, host: "localhost:47020" },   { _id: 2, host: "localhost:47021" } ] })

mongod --shardsvr --port 57019 --replSet rp3  --dbpath data1 --bind_ip localhost
mongod --shardsvr --port 57020 --replSet rp3  --dbpath data2 --bind_ip localhost
mongod --shardsvr --port 57021 --replSet rp3  --dbpath data3 --bind_ip localhost
rs.initiate( { _id : "rp3",  members: [   { _id: 0, host: "localhost:57019" },   { _id: 1, host: "localhost:57020" },   { _id: 2, host: "localhost:57021" } ] })

sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("6052f2e74f97982edd469896")
  }
  shards:
        {  "_id" : "rp1",  "host" : "rp1/localhost:37019,localhost:37020,localhost:37021",  "state" : 1 }
        {  "_id" : "rp2",  "host" : "rp2/localhost:47019,localhost:47020,localhost:47021",  "state" : 1 }
        {  "_id" : "rp3",  "host" : "rp3/localhost:57019,localhost:57020,localhost:57021",  "state" : 1 }
        {  "_id" : "rp4",  "host" : "rp4/localhost:57119,localhost:57120,localhost:57121",  "state" : 1 }
  active mongoses:
        "4.2.7" : 1
  autosplit:
        Currently enabled: yes
  balancer:
        Currently enabled:  yes
        Currently running:  no
        Failed balancer rounds in last 5 attempts:  0
        Migration Results for the last 24 hours:
                No recent migrations
  databases:
        {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                config.system.sessions
                        shard key: { "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                rp1 256
                                rp2 256
                                rp3 256
                                rp4 256
                        too many chunks to print, use verbose if you want to force print
        {  "_id" : "demodb",  "primary" : "rp2",  "partitioned" : true,  "version" : {  "uuid" : UUID("0034d13f-d46f-4248-9bec-021b8949336b"),  "lastMod" : 1 } }
                demodb.collection
                        shard key: { "timeStamp" : "hashed" }
                        unique: false
                        balancing: true
                        chunks:
                                rp1 2
                                rp2 2
                                rp3 2
                        { "timeStamp" : { "$minKey" : 1 } } -->> { "timeStamp" : NumberLong("-6148914691236517204") } on : rp1 Timestamp(1, 0)
                        { "timeStamp" : NumberLong("-6148914691236517204") } -->> { "timeStamp" : NumberLong("-3074457345618258602") } on : rp1 Timestamp(1, 1)
                        { "timeStamp" : NumberLong("-3074457345618258602") } -->> { "timeStamp" : NumberLong(0) } on : rp2 Timestamp(1, 2)
                        { "timeStamp" : NumberLong(0) } -->> { "timeStamp" : NumberLong("3074457345618258602") } on : rp2 Timestamp(1, 3)
                        { "timeStamp" : NumberLong("3074457345618258602") } -->> { "timeStamp" : NumberLong("6148914691236517204") } on : rp3 Timestamp(1, 4)
                        { "timeStamp" : NumberLong("6148914691236517204") } -->> { "timeStamp" : { "$maxKey" : 1 } } on : rp3 Timestamp(1, 5)
启用分片

mongos --port 40000 --configdb rs0/localhost:27019,localhost:27020,localhost:27021

mongo --port 40000
sh.addShard( "rp1/localhost:37019,localhost:37020,localhost:37021");
sh.addShard( "rp2/localhost:47019,localhost:47020,localhost:47021");
sh.addShard( "rp3/localhost:57019,localhost:57020,localhost:57021");


sh.enableSharding("demodb");

sh.shardCollection("demodb.collection", { timeStamp : "hashed" } )
现在我运行脚本将数据插入 demodb.collection,我可以看到数据分布在上述三个分片上。

然后我使用下面的命令添加了第四个分片:

mongod --shardsvr --port 57119 --replSet rp4  --dbpath data1 --bind_ip localhost
mongod --shardsvr --port 57120 --replSet rp4  --dbpath data2 --bind_ip localhost
mongod --shardsvr --port 57121 --replSet rp4  --dbpath data3 --bind_ip localhost
rs.initiate( { _id : "rp4",  members: [   { _id: 0, host: "localhost:57119" },   { _id: 1, host: "localhost:57120" },   { _id: 2, host: "localhost:57121" } ] })

mongo --port 40000
sh.addShard( "rp4/localhost:57119,localhost:57120,localhost:57121");

现在,我又向 demodb.collection 中插入了一些数据,原以为 balancer 会把数据块迁移到第四个分片,从而在全部 4 个分片之间平衡数据,但它并没有生效。

以下是分片状态(sh.status())的输出:


mongod --configsvr --port 27019 --replSet rs0 --dbpath data1 --bind_ip localhost
mongod --configsvr --port 27020 --replSet rs0 --dbpath data2 --bind_ip localhost
mongod --configsvr --port 27021 --replSet rs0 --dbpath data3 --bind_ip localhost


mongod --configsvr --replSet rs0 --dbpath data1 --bind_ip localhost


rs.initiate(
  {
    _id: "rs0",
    configsvr: true,
    members: [
      { _id : 0, host : "localhost:27019" },
      { _id : 1, host : "localhost:27020" },
      { _id : 2, host : "localhost:27021" }
    ]
  }
)
mongod --shardsvr --port 37019 --replSet rp1  --dbpath data1 --bind_ip localhost
mongod --shardsvr --port 37020 --replSet rp1  --dbpath data2 --bind_ip localhost
mongod --shardsvr --port 37021 --replSet rp1  --dbpath data3 --bind_ip localhost
rs.initiate( { _id : "rp1",  members: [   { _id: 0, host: "localhost:37019" },   { _id: 1, host: "localhost:37020" },   { _id: 2, host: "localhost:37021" } ] })

mongod --shardsvr --port 47019 --replSet rp2  --dbpath data1 --bind_ip localhost
mongod --shardsvr --port 47020 --replSet rp2  --dbpath data2 --bind_ip localhost
mongod --shardsvr --port 47021 --replSet rp2  --dbpath data3 --bind_ip localhost
rs.initiate( { _id : "rp2",  members: [   { _id: 0, host: "localhost:47019" },   { _id: 1, host: "localhost:47020" },   { _id: 2, host: "localhost:47021" } ] })

mongod --shardsvr --port 57019 --replSet rp3  --dbpath data1 --bind_ip localhost
mongod --shardsvr --port 57020 --replSet rp3  --dbpath data2 --bind_ip localhost
mongod --shardsvr --port 57021 --replSet rp3  --dbpath data3 --bind_ip localhost
rs.initiate( { _id : "rp3",  members: [   { _id: 0, host: "localhost:57019" },   { _id: 1, host: "localhost:57020" },   { _id: 2, host: "localhost:57021" } ] })

sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("6052f2e74f97982edd469896")
  }
  shards:
        {  "_id" : "rp1",  "host" : "rp1/localhost:37019,localhost:37020,localhost:37021",  "state" : 1 }
        {  "_id" : "rp2",  "host" : "rp2/localhost:47019,localhost:47020,localhost:47021",  "state" : 1 }
        {  "_id" : "rp3",  "host" : "rp3/localhost:57019,localhost:57020,localhost:57021",  "state" : 1 }
        {  "_id" : "rp4",  "host" : "rp4/localhost:57119,localhost:57120,localhost:57121",  "state" : 1 }
  active mongoses:
        "4.2.7" : 1
  autosplit:
        Currently enabled: yes
  balancer:
        Currently enabled:  yes
        Currently running:  no
        Failed balancer rounds in last 5 attempts:  0
        Migration Results for the last 24 hours:
                No recent migrations
  databases:
        {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                config.system.sessions
                        shard key: { "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                rp1 256
                                rp2 256
                                rp3 256
                                rp4 256
                        too many chunks to print, use verbose if you want to force print
        {  "_id" : "demodb",  "primary" : "rp2",  "partitioned" : true,  "version" : {  "uuid" : UUID("0034d13f-d46f-4248-9bec-021b8949336b"),  "lastMod" : 1 } }
                demodb.collection
                        shard key: { "timeStamp" : "hashed" }
                        unique: false
                        balancing: true
                        chunks:
                                rp1 2
                                rp2 2
                                rp3 2
                        { "timeStamp" : { "$minKey" : 1 } } -->> { "timeStamp" : NumberLong("-6148914691236517204") } on : rp1 Timestamp(1, 0)
                        { "timeStamp" : NumberLong("-6148914691236517204") } -->> { "timeStamp" : NumberLong("-3074457345618258602") } on : rp1 Timestamp(1, 1)
                        { "timeStamp" : NumberLong("-3074457345618258602") } -->> { "timeStamp" : NumberLong(0) } on : rp2 Timestamp(1, 2)
                        { "timeStamp" : NumberLong(0) } -->> { "timeStamp" : NumberLong("3074457345618258602") } on : rp2 Timestamp(1, 3)
                        { "timeStamp" : NumberLong("3074457345618258602") } -->> { "timeStamp" : NumberLong("6148914691236517204") } on : rp3 Timestamp(1, 4)
                        { "timeStamp" : NumberLong("6148914691236517204") } -->> { "timeStamp" : { "$maxKey" : 1 } } on : rp3 Timestamp(1, 5)

请让我知道我遗漏了什么。

评论:这是否回答了您的问题?——正如上面链接所说,问题在于你的集合数据块(chunk)数量不够多,balancer 只有在各分片间块数差异达到迁移阈值时才会开始迁移。提问者回复:谢谢大家,让我看一下上面的链接。