All values show 0 B when running hadoop dfsadmin -report

Copyright notice: @抛物线 https://blog.csdn.net/qq_28513801/article/details/90743647

When running hadoop dfsadmin -report, every value in the report comes back as 0 B.

As shown below (the first attempt mistypes the command as fsadmin):

[root@master hadoop]# hadoop fsadmin -report
Error: Could not find or load main class fsadmin
[root@master hadoop]# hadoop dfsadmin -report
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.

Configured Capacity: 0 (0 B)
Present Capacity: 0 (0 B)
DFS Remaining: 0 (0 B)
DFS Used: 0 (0 B)
DFS Used%: NaN%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0

-------------------------------------------------
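As the DEPRECATED notice says, hadoop dfsadmin is the legacy entry point; the current equivalent is:

hdfs dfsadmin -report

Both forms produce the same report.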

The fix is to delete the hdfs and tmp directories under the Hadoop installation directory and then reformat. (Warning: this wipes all HDFS data, so it is only suitable for a test cluster or a freshly built one.)


[root@master hadoop]# cd /
[root@master /]# cd /opt/
[root@master opt]# ls
anaconda-ks.cfg  hadoop-2.7.6  hadoop-2.7.6.tar.gz  jdk1.8.0_171  jdk-8u171-linux-x64.tar.gz
[root@master opt]# cd hadoop-2.7.6
[root@master hadoop-2.7.6]# ls
bin  hdfs     lib      LICENSE.txt  NOTICE.txt  sbin   tmp
etc  include  libexec  logs         README.txt  share
[root@master hadoop-2.7.6]# rm -rf hdfs/
[root@master hadoop-2.7.6]# rm -rf tmp/
[root@master hadoop-2.7.6]# ls
bin  etc  include  lib  libexec  LICENSE.txt  logs  NOTICE.txt  README.txt  sbin  share
[root@master hadoop-2.7.6]# 

Then shut down Hadoop:

[root@master hadoop-2.7.6]# stop-all.sh 
This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh
Stopping namenodes on [master]
master: stopping namenode
slaver1: stopping datanode
slaver2: stopping datanode
master: stopping datanode

Stopping secondary namenodes [0.0.0.0]
0.0.0.0: stopping secondarynamenode
stopping yarn daemons
stopping resourcemanager
slaver1: stopping nodemanager
slaver2: stopping nodemanager
master: stopping nodemanager
slaver1: nodemanager did not stop gracefully after 5 seconds: killing with kill -9
slaver2: nodemanager did not stop gracefully after 5 seconds: killing with kill -9
no proxyserver to stop

Then reformat the NameNode:

[root@master hadoop-2.7.6]# hadoop namenode -format
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.

19/06/02 07:56:12 INFO namenode.NameNode: STARTUP_MSG: 
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = master/192.168.100.10
STARTUP_MSG:   args = [-format]
STARTUP_MSG:   version = 2.7.6
STARTUP_MSG:   classpath = /opt/hadoop-2.7.6/etc/hadoop:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-compress-1.4.1.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-cli-1.2.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jettison-1.1.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/curator-framework-2.7.1.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/java-xmlbuilder-0.4.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/slf4j-api-1.7.10.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-digester-1.8.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/httpclient-4.2.5.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/api-asn1-api-1.0.0-M20.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jersey-server-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/mockito-all-1.8.5.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-httpclient-3.1.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jersey-core-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/xmlenc-0.52.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jackson-mapper-asl-1.9.13.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jersey-json-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/curator-client-2.7.1.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/avro-1.7.4.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-net-3.1.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jackson-xc-1.9.13.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/log4j-1.2.17.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/gson-2.2.4.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/hamcrest-core-1.3.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-io-2.4.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-configuration-1.6.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/activation-1.1.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/api-util-1.0.0-M20.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jets3t-0.9.0.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/apacheds-i18n-2.0.0-M15.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jetty-util-6.1.26.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-collections-3.2.2.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/zookeeper-3.4.6.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jackson-core-asl-1.9.13.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jsch-0.1.54.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-math3-3.1.1.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/hadoop-auth-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/servlet-api-2.5.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-logging-1.1.3.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jsr305-3.0.0.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/xz-1.0.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jaxb-api-2.2.2.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jetty-sslengine-6.1.26.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/curator-recipes-2.7.1.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/snappy-java-1.0.4.1.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/guava-11.0.2.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/hadoop-annotations-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/httpcore-4.2.5.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/junit-4.11.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jackson-jaxrs-1.9.13.jar:/opt/hadoop-2.7.6/share
/hadoop/common/lib/paranamer-2.3.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/netty-3.6.2.Final.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jsp-api-2.1.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/asm-3.2.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/stax-api-1.0-2.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/apacheds-kerberos-codec-2.0.0-M15.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-codec-1.4.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/jetty-6.1.26.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/htrace-core-3.1.0-incubating.jar:/opt/hadoop-2.7.6/share/hadoop/common/lib/commons-lang-2.6.jar:/opt/hadoop-2.7.6/share/hadoop/common/hadoop-common-2.7.6-tests.jar:/opt/hadoop-2.7.6/share/hadoop/common/hadoop-common-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/common/hadoop-nfs-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/xml-apis-1.3.04.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/commons-cli-1.2.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/netty-all-4.0.23.Final.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/jersey-server-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/jersey-core-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/jackson-mapper-asl-1.9.13.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/leveldbjni-all-1.8.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/log4j-1.2.17.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/commons-io-2.4.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/jetty-util-6.1.26.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/jackson-core-asl-1.9.13.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/xercesImpl-2.9.1.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/commons-logging-1.1.3.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/jsr305-3.0.0.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/guava-11.0.2.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/commons-daemon-1.0.13.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/asm-3.2.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/commons-codec-1.4.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/jetty-6.1.26.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/htrace-core-3.1.0-incubating.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/lib/commons-lang-2.6.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/hadoop-hdfs-nfs-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/hadoop-hdfs-2.7.6-tests.jar:/opt/hadoop-2.7.6/share/hadoop/hdfs/hadoop-hdfs-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/guice-3.0.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/commons-cli-1.2.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jettison-1.1.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jersey-server-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jersey-core-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jackson-mapper-asl-1.9.13.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jersey-json-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jackson-xc-1.9.13.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/leveldbjni-all-1.8.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/log4j-1.2.17.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/commons-io-2.4.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/activation-1.1.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jetty-util-6.1.26.jar:/opt/hadoop
-2.7.6/share/hadoop/yarn/lib/commons-collections-3.2.2.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/zookeeper-3.4.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jackson-core-asl-1.9.13.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jaxb-impl-2.2.3-1.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/javax.inject-1.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jersey-client-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/servlet-api-2.5.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/commons-logging-1.1.3.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jsr305-3.0.0.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/xz-1.0.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jaxb-api-2.2.2.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/guava-11.0.2.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/zookeeper-3.4.6-tests.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jackson-jaxrs-1.9.13.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/netty-3.6.2.Final.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/asm-3.2.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/stax-api-1.0-2.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/aopalliance-1.0.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/commons-codec-1.4.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/jetty-6.1.26.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/lib/commons-lang-2.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-server-common-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-server-applicationhistoryservice-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-server-tests-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-common-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-client-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-registry-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-server-sharedcachemanager-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/yarn/hadoop-yarn-api-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/guice-3.0.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/commons-io-2.4.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/javax.inject-1.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/xz-1.0.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/hadoop-annotations-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/l
ib/junit-4.11.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/asm-3.2.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.6-tests.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.7.6.jar:/opt/hadoop-2.7.6/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.7.6.jar:/contrib/capacity-scheduler/*.jar:/contrib/capacity-scheduler/*.jar
STARTUP_MSG:   build = https://shv@git-wip-us.apache.org/repos/asf/hadoop.git -r 085099c66cf28be31604560c376fa282e69282b8; compiled by 'kshvachk' on 2018-04-18T01:33Z
STARTUP_MSG:   java = 1.8.0_171
************************************************************/
19/06/02 07:56:12 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
19/06/02 07:56:12 INFO namenode.NameNode: createNameNode [-format]
19/06/02 07:56:14 WARN common.Util: Path /opt/hadoop-2.7.6/hdfs/name should be specified as a URI in configuration files. Please update hdfs configuration.
19/06/02 07:56:14 WARN common.Util: Path /opt/hadoop-2.7.6/hdfs/name should be specified as a URI in configuration files. Please update hdfs configuration.
Formatting using clusterid: CID-c48ac603-6037-4947-b57f-ff6c3c0c4a36
19/06/02 07:56:14 INFO namenode.FSNamesystem: No KeyProvider found.
19/06/02 07:56:14 INFO namenode.FSNamesystem: fsLock is fair: true
19/06/02 07:56:14 INFO namenode.FSNamesystem: Detailed lock hold time metrics enabled: false
19/06/02 07:56:14 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit=1000
19/06/02 07:56:14 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true
19/06/02 07:56:14 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
19/06/02 07:56:14 INFO blockmanagement.BlockManager: The block deletion will start around 2019 Jun 02 07:56:14
19/06/02 07:56:14 INFO util.GSet: Computing capacity for map BlocksMap
19/06/02 07:56:14 INFO util.GSet: VM type       = 64-bit
19/06/02 07:56:14 INFO util.GSet: 2.0% max memory 966.7 MB = 19.3 MB
19/06/02 07:56:14 INFO util.GSet: capacity      = 2^21 = 2097152 entries
19/06/02 07:56:14 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false
19/06/02 07:56:14 INFO blockmanagement.BlockManager: defaultReplication         = 1
19/06/02 07:56:14 INFO blockmanagement.BlockManager: maxReplication             = 512
19/06/02 07:56:14 INFO blockmanagement.BlockManager: minReplication             = 1
19/06/02 07:56:14 INFO blockmanagement.BlockManager: maxReplicationStreams      = 2
19/06/02 07:56:14 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000
19/06/02 07:56:14 INFO blockmanagement.BlockManager: encryptDataTransfer        = false
19/06/02 07:56:14 INFO blockmanagement.BlockManager: maxNumBlocksToLog          = 1000
19/06/02 07:56:14 INFO namenode.FSNamesystem: fsOwner             = root (auth:SIMPLE)
19/06/02 07:56:14 INFO namenode.FSNamesystem: supergroup          = supergroup
19/06/02 07:56:14 INFO namenode.FSNamesystem: isPermissionEnabled = true
19/06/02 07:56:14 INFO namenode.FSNamesystem: HA Enabled: false
19/06/02 07:56:14 INFO namenode.FSNamesystem: Append Enabled: true
19/06/02 07:56:15 INFO util.GSet: Computing capacity for map INodeMap
19/06/02 07:56:15 INFO util.GSet: VM type       = 64-bit
19/06/02 07:56:15 INFO util.GSet: 1.0% max memory 966.7 MB = 9.7 MB
19/06/02 07:56:15 INFO util.GSet: capacity      = 2^20 = 1048576 entries
19/06/02 07:56:15 INFO namenode.FSDirectory: ACLs enabled? false
19/06/02 07:56:15 INFO namenode.FSDirectory: XAttrs enabled? true
19/06/02 07:56:15 INFO namenode.FSDirectory: Maximum size of an xattr: 16384
19/06/02 07:56:15 INFO namenode.NameNode: Caching file names occuring more than 10 times
19/06/02 07:56:15 INFO util.GSet: Computing capacity for map cachedBlocks
19/06/02 07:56:15 INFO util.GSet: VM type       = 64-bit
19/06/02 07:56:15 INFO util.GSet: 0.25% max memory 966.7 MB = 2.4 MB
19/06/02 07:56:15 INFO util.GSet: capacity      = 2^18 = 262144 entries
19/06/02 07:56:15 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033
19/06/02 07:56:15 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 0
19/06/02 07:56:15 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension     = 30000
19/06/02 07:56:15 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.window.num.buckets = 10
19/06/02 07:56:15 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.num.users = 10
19/06/02 07:56:15 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
19/06/02 07:56:15 INFO namenode.FSNamesystem: Retry cache on namenode is enabled
19/06/02 07:56:15 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
19/06/02 07:56:16 INFO util.GSet: Computing capacity for map NameNodeRetryCache
19/06/02 07:56:16 INFO util.GSet: VM type       = 64-bit
19/06/02 07:56:16 INFO util.GSet: 0.029999999329447746% max memory 966.7 MB = 297.0 KB
19/06/02 07:56:16 INFO util.GSet: capacity      = 2^15 = 32768 entries
19/06/02 07:56:16 INFO namenode.FSImage: Allocated new BlockPoolId: BP-905566702-192.168.100.10-1559476576058
19/06/02 07:56:16 INFO common.Storage: Storage directory /opt/hadoop-2.7.6/hdfs/name has been successfully formatted.
19/06/02 07:56:16 INFO namenode.FSImageFormatProtobuf: Saving image file /opt/hadoop-2.7.6/hdfs/name/current/fsimage.ckpt_0000000000000000000 using no compression
19/06/02 07:56:16 INFO namenode.FSImageFormatProtobuf: Image file /opt/hadoop-2.7.6/hdfs/name/current/fsimage.ckpt_0000000000000000000 of size 321 bytes saved in 0 seconds.
19/06/02 07:56:16 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0
19/06/02 07:56:16 INFO util.ExitUtil: Exiting with status 0
19/06/02 07:56:16 INFO namenode.NameNode: SHUTDOWN_MSG: 
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.100.10
************************************************************/
[root@master hadoop-2.7.6]# 
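As an aside, the two common.Util WARN lines above can be silenced by writing the name directory as a URI in hdfs-site.xml. A sketch, assuming this cluster sets the standard dfs.namenode.name.dir property to /opt/hadoop-2.7.6/hdfs/name:

<property>
  <name>dfs.namenode.name.dir</name>
  <value>file:///opt/hadoop-2.7.6/hdfs/name</value>
</property>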

Then restart Hadoop:

[root@master hadoop-2.7.6]# start-all.sh 
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [master]
master: starting namenode, logging to /opt/hadoop-2.7.6/logs/hadoop-root-namenode-master.out
slaver2: starting datanode, logging to /opt/hadoop-2.7.6/logs/hadoop-root-datanode-slaver2.out
slaver1: starting datanode, logging to /opt/hadoop-2.7.6/logs/hadoop-root-datanode-slaver1.out
master: starting datanode, logging to /opt/hadoop-2.7.6/logs/hadoop-root-datanode-master.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /opt/hadoop-2.7.6/logs/hadoop-root-secondarynamenode-master.out
starting yarn daemons
starting resourcemanager, logging to /opt/hadoop-2.7.6/logs/yarn-root-resourcemanager-master.out
slaver1: starting nodemanager, logging to /opt/hadoop-2.7.6/logs/yarn-root-nodemanager-slaver1.out
slaver2: starting nodemanager, logging to /opt/hadoop-2.7.6/logs/yarn-root-nodemanager-slaver2.out
master: starting nodemanager, logging to /opt/hadoop-2.7.6/logs/yarn-root-nodemanager-master.out
[root@master hadoop-2.7.6]# 
[root@master hadoop-2.7.6]# jps
5729 ResourceManager
5284 NameNode
5880 Jps
5833 NodeManager
5386 DataNode
5549 SecondaryNameNode
[root@master hadoop-2.7.6]# 
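jps on the master shows all five daemons up. To confirm that the DataNodes and NodeManagers on the slave nodes also started, the same check can be run over SSH (a quick sketch, relying on the passwordless SSH that start-all.sh already uses and assuming jps is on the remote PATH):

ssh slaver1 jps
ssh slaver2 jps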


Finally, run hadoop dfsadmin -report again to check the state of the cluster:

[root@master hadoop-2.7.6]# hadoop dfsadmin -report
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.

Configured Capacity: 30335164416 (28.25 GB)
Present Capacity: 28264038400 (26.32 GB)
DFS Remaining: 28264034304 (26.32 GB)
DFS Used: 4096 (4 KB)
DFS Used%: 0.00%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0

-------------------------------------------------
Live datanodes (1):

Name: 192.168.100.10:50010 (master)
Hostname: master
Decommission Status : Normal
Configured Capacity: 30335164416 (28.25 GB)
DFS Used: 4096 (4 KB)
Non DFS Used: 2071126016 (1.93 GB)
DFS Remaining: 28264034304 (26.32 GB)
DFS Used%: 0.00%
DFS Remaining%: 93.17%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Sun Jun 02 07:57:37 EDT 2019


[root@master hadoop-2.7.6]# 
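The same figures are also visible in the NameNode web UI, which listens on port 50070 by default in Hadoop 2.x; its Datanodes tab lists each live DataNode with its capacity and usage:

http://192.168.100.10:50070/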


Now all the values are populated. In my view, the cause of the original symptom is stale data in the tmp and hdfs directories under the Hadoop directory: after every format, opening these two directories shows leftover data inside:

[root@master hadoop-2.7.6]# ls
bin  etc  hdfs  include  lib  libexec  LICENSE.txt  logs  NOTICE.txt  README.txt  sbin  share  tmp
[root@master hadoop-2.7.6]# cd hdfs/
[root@master hdfs]# ls
data  name
[root@master hdfs]# cd ..
[root@master hadoop-2.7.6]# ls
bin  etc  hdfs  include  lib  libexec  LICENSE.txt  logs  NOTICE.txt  README.txt  sbin  share  tmp
[root@master hadoop-2.7.6]# cd tmp/
[root@master tmp]# ls
dfs  nm-local-dir
[root@master tmp]# cd dfs/
[root@master dfs]# ls
namesecondary
[root@master dfs]# cd namesecondary/
[root@master namesecondary]# ls
current  in_use.lock
[root@master namesecondary]# cd current/
[root@master current]# ls
edits_0000000000000000001-0000000000000000224  edits_0000000000000000806-0000000000000000808  edits_0000000000000001221-0000000000000001329
edits_0000000000000000225-0000000000000000226  edits_0000000000000000809-0000000000000000985  edits_0000000000000001330-0000000000000001330
edits_0000000000000000227-0000000000000000320  edits_0000000000000000986-0000000000000000995  edits_0000000000000001331-0000000000000001332
edits_0000000000000000321-0000000000000000330  edits_0000000000000000996-0000000000000001025  edits_0000000000000001333-0000000000000001334
edits_0000000000000000331-0000000000000000786  edits_0000000000000001026-0000000000000001027  edits_0000000000000001335-0000000000000001336
edits_0000000000000000787-0000000000000000788  edits_0000000000000001028-0000000000000001029  edits_0000000000000001337-0000000000000001338
edits_0000000000000000789-0000000000000000790  edits_0000000000000001030-0000000000000001031  edits_0000000000000001339-0000000000000001340
edits_0000000000000000791-0000000000000000792  edits_0000000000000001032-0000000000000001033  edits_0000000000000001341-0000000000000001342
edits_0000000000000000793-0000000000000000794  edits_0000000000000001034-0000000000000001035  fsimage_0000000000000001340
edits_0000000000000000795-0000000000000000796  edits_0000000000000001036-0000000000000001037  fsimage_0000000000000001340.md5
edits_0000000000000000797-0000000000000000798  edits_0000000000000001038-0000000000000001039  fsimage_0000000000000001342
edits_0000000000000000799-0000000000000000800  edits_0000000000000001040-0000000000000001041  fsimage_0000000000000001342.md5
edits_0000000000000000801-0000000000000000802  edits_0000000000000001042-0000000000000001043  VERSION
edits_0000000000000000803-0000000000000000804  edits_0000000000000001044-0000000000000001220
[root@master current]# 

These directories hold the working state of the NameNode and DataNodes, and hadoop namenode -format does not clear them. In particular, every format assigns a new clusterID (see the "Formatting using clusterid" line above), and a DataNode whose storage directory still carries the old clusterID will refuse to register with the freshly formatted NameNode; that is exactly the "everything 0 B" symptom. Consistent with this, the report above lists only one live DataNode: the master, the only node whose directories were cleaned. So before each hadoop namenode -format, delete the contents of these directories, or simply remove the whole tmp and hdfs directories, on every node in the cluster.
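To check the clusterID theory before wiping anything, the VERSION files can be compared (a sketch, assuming the name/data layout shown above):

cat /opt/hadoop-2.7.6/hdfs/name/current/VERSION
ssh slaver1 cat /opt/hadoop-2.7.6/hdfs/data/current/VERSION

If the clusterID lines differ, that DataNode will not register. Putting the whole procedure together, a minimal reset sketch for this three-node layout (WARNING: it destroys all HDFS data; hostnames and paths are the ones used in this post):

stop-all.sh
for host in master slaver1 slaver2; do
    ssh "$host" rm -rf /opt/hadoop-2.7.6/hdfs /opt/hadoop-2.7.6/tmp
done
hdfs namenode -format
start-all.sh
hdfs dfsadmin -report

Stopping the daemons before deleting their directories (rather than after, as in the session above) avoids leaving running daemons holding deleted files open.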
