Validation Failed: 1: this action would add [1] total shards, but this cluster currently has [1000]/[1000] maximum shards open

Error message:

{"type": "server", "timestamp": "2020-02-17T13:49:39,538+08:00", "level": "WARN", "component": "o.e.x.m.e.l.LocalExporter", "cluster.name": "docker-cluster", "node.name": "node-1", "message": "unexpected error while indexing monitoring document", "cluster.uuid": "fD3_MqLzT8KTeBVsXMwMQw", "node.id": "XICr4VuwS1uiMTwgnhInZA" , 
"stacktrace": ["org.elasticsearch.xpack.monitoring.exporter.ExportException: org.elasticsearch.common.ValidationException: Validation Failed: 1: this action would add [1] total shards, but this cluster currently has [1000]/[1000] maximum shards open;",
"at org.elasticsearch.xpack.monitoring.exporter.local.LocalBulk.lambda$throwExportException$2(LocalBulk.java:125) ~[x-pack-monitoring-7.4.2.jar:7.4.2]",
"at java.util.stream.ReferencePipeline$3$1.accept(ReferencePipeline.java:195) ~[?:?]",
"at java.util.stream.ReferencePipeline$2$1.accept(ReferencePipeline.java:177) ~[?:?]",
"at java.util.Spliterators$ArraySpliterator.forEachRemaining(Spliterators.java:948) ~[?:?]",
"at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:484) ~[?:?]",
"at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:474) ~[?:?]",
"at java.util.stream.ForEachOps$ForEachOp.evaluateSequential(ForEachOps.java:150) ~[?:?]",
"at java.util.stream.ForEachOps$ForEachOp$OfRef.evaluateSequential(ForEachOps.java:173) ~[?:?]",
"at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) ~[?:?]",
"at java.util.stream.ReferencePipeline.forEach(ReferencePipeline.java:497) ~[?:?]",
"at org.elasticsearch.xpack.monitoring.exporter.local.LocalBulk.throwExportException(LocalBulk.java:126) [x-pack-monitoring-7.4.2.jar:7.4.2]",
"at org.elasticsearch.xpack.monitoring.exporter.local.LocalBulk.lambda$doFlush$0(LocalBulk.java:108) [x-pack-monitoring-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:62) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.support.ContextPreservingActionListener.onResponse(ContextPreservingActionListener.java:43) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.support.TransportAction$1.onResponse(TransportAction.java:70) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.support.TransportAction$1.onResponse(TransportAction.java:64) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.ActionListener.lambda$map$2(ActionListener.java:145) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:62) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:62) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.bulk.TransportBulkAction$BulkOperation.doRun(TransportBulkAction.java:421) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.bulk.TransportBulkAction.executeBulk(TransportBulkAction.java:551) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.bulk.TransportBulkAction$1.onFailure(TransportBulkAction.java:287) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.support.TransportAction$1.onFailure(TransportAction.java:79) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction.lambda$doStart$2(TransportMasterNodeAction.java:161) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.ActionListener$2.onFailure(ActionListener.java:93) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.ActionListener$1.onFailure(ActionListener.java:70) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.action.ActionListener$1.onFailure(ActionListener.java:70) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.AckedClusterStateUpdateTask.onFailure(AckedClusterStateUpdateTask.java:79) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService$IndexCreationTask.onFailure(MetaDataCreateIndexService.java:617) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.service.MasterService$SafeClusterStateTaskListener.onFailure(MasterService.java:513) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.service.MasterService$TaskOutputs.notifyFailedTasks(MasterService.java:446) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:220) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.service.MasterService.access$000(MasterService.java:73) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:151) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:703) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252) [elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215) [elasticsearch-7.4.2.jar:7.4.2]",
"at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?]",
"at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?]",
"at java.lang.Thread.run(Thread.java:830) [?:?]",
"Caused by: org.elasticsearch.common.ValidationException: Validation Failed: 1: this action would add [1] total shards, but this cluster currently has [1000]/[1000] maximum shards open;",
"at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.checkShardLimit(MetaDataCreateIndexService.java:651) ~[elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService$IndexCreationTask.checkShardLimit(MetaDataCreateIndexService.java:607) ~[elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService$IndexCreationTask.execute(MetaDataCreateIndexService.java:446) ~[elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.ClusterStateUpdateTask.execute(ClusterStateUpdateTask.java:47) ~[elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.service.MasterService.executeTasks(MasterService.java:702) ~[elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.service.MasterService.calculateTaskOutputs(MasterService.java:324) ~[elasticsearch-7.4.2.jar:7.4.2]",
"at org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:219) ~[elasticsearch-7.4.2.jar:7.4.2]",
"... 10 more"] }


Problem:

  The cluster's open shard count has reached the limit of 1000, so creating a new index (and the shard it needs) is rejected.

Cause:

    In Elasticsearch 7 and later, the cluster setting cluster.max_shards_per_node defaults to 1000 open shards per data node; the effective cluster-wide limit is this value multiplied by the number of data nodes. The error above shows the cluster already has [1000]/[1000] shards open, so any action that would add another shard fails validation.
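
To see the current shard usage and the effective limit before changing anything, the two requests below can be run from Kibana Dev Tools (a minimal sketch; filter_path only trims the response and may be omitted):

# current shard counts for the whole cluster
GET _cluster/health?filter_path=active_shards,active_primary_shards,unassigned_shards

# effective value of the limit (defaults to 1000 per data node)
GET _cluster/settings?include_defaults=true&filter_path=*.cluster.max_shards_per_node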

Solution:

Temporarily raise the limit with a transient cluster setting from Kibana Dev Tools, using the request below.

Code:


PUT /_cluster/settings
{
  "transient": {
    "cluster": {
      "max_shards_per_node": 10000
    }
  }
}
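
Transient settings are lost after a full cluster restart. If the raised limit should survive restarts, the equivalent persistent form can be used instead (a sketch for the same setting; the flat key "cluster.max_shards_per_node" is interchangeable with the nested form above):

PUT /_cluster/settings
{
  "persistent": {
    "cluster.max_shards_per_node": 10000
  }
}

Running GET _cluster/settings afterwards should show the new value, and the monitoring exporter warnings should stop once new documents can be indexed again.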


Reposted from blog.csdn.net/weixin_42697074/article/details/104357248