hadoop命令

 hadoop fs -help rm  #查看某个命令的参数

hadoop@muhe221:~/test$ hadoop fs -mkdir /hehe
hadoop@muhe221:~/test$ hadoop fs -ls  /
Found 1 items
drwxr-xr-x   - hadoop supergroup          0 2019-02-26 09:12 /hehe
hadoop@muhe221:~/test$ vi maomei.txt
hello world
hadoop@muhe221:~/test$ hadoop fs -put maomei.txt /hehe
hadoop@muhe221:~/test$ hadoop fs -ls /hehe
Found 1 items
-rw-r--r--   1 hadoop supergroup         12 2019-02-26 09:21 /hehe/maomei.txt
hadoop@muhe221:~/test$ hadoop fs -cat /hehe/maomei.txt
hello world
hadoop@muhe221:~/test$ vi maomei.txt
hello world
123456
hadoop@muhe221:~/test$ hadoop fs -put -f maomei.txt /hehe/maomei.txt  #覆盖
hadoop@muhe221:~/test$ hadoop fs -cat /hehe/maomei.txt
hello world
123456
hadoop@muhe221:~/test$ hadoop fs -put test1.txt test2.txt /hehe  #拷贝多个本地文件到hdfs中
hadoop@muhe221:~/test$ hadoop fs -ls /hehe
Found 3 items
-rw-r--r--   1 hadoop supergroup         19 2019-02-26 09:23 /hehe/maomei.txt
-rw-r--r--   1 hadoop supergroup          4 2019-02-26 09:25 /hehe/test1.txt
-rw-r--r--   1 hadoop supergroup          7 2019-02-26 09:25 /hehe/test2.txt
hadoop@muhe221:~/test$ hadoop fs -rm  /hehe/test1.txt
Deleted /hehe/test1.txt
hadoop@muhe221:~/test$ hadoop fs -ls /hehe
Found 2 items
-rw-r--r--   1 hadoop supergroup         19 2019-02-26 09:23 /hehe/maomei.txt
-rw-r--r--   1 hadoop supergroup          7 2019-02-26 09:25 /hehe/test2.txt
hadoop@muhe221:~/test/st$ hadoop fs -rmdir /aa  #只能删除空的目录
hadoop@muhe221:~/test$ hadoop fs -rm -r /hehe  #删除文件夹/hehe(包含子目录)
Deleted /hehe

hadoop@muhe221:~/test/st$ hadoop fs -get /hehe/test1.txt .
hadoop@muhe221:~/test/st$ hadoop fs -get /hehe/test1.txt aa.txt


hadoop@muhe221:~/test$ hadoop fs -mkdir hdfs://10.121.63.240:9000/hehe # hadoop fs -mkdir /hehe
hadoop@muhe221:~/test$ hadoop fs -ls /
Found 1 items
drwxr-xr-x   - hadoop supergroup          0 2019-02-26 09:55 /hehe
hadoop@muhe221:~/test$ hadoop fs -put test1.txt /hehe
hadoop@muhe221:~/test$ hadoop fs -cat /hehe/test1.txt
345
hadoop@muhe221:~/test$ cat test2.txt
ksdjfl
#文件test2.txt附加(可以附加多个文件)到test1.txt
hadoop@muhe221:~/test$ hadoop fs -appendToFile test2.txt /hehe/test1.txt
hadoop@muhe221:~/test$ hadoop fs -cat /hehe/test1.txt
345
ksdjfl
#从stdin中输入，按Ctrl+C结束
hadoop@muhe221:~/test$ hadoop fs -appendToFile -  /hehe/test1.txt
789
369
^C
hadoop@muhe221:~/test$
hadoop@muhe221:~/test$ hadoop fs -cat /hehe/test1.txt
345
ksdjfl
789
369

hadoop@muhe221:~/test$ hadoop fs -cat /hehe/test1.txt
345
hadoop@muhe221:~/test$
hadoop@muhe221:~/test$ hadoop fs -cat /hehe/test2.txt
ksdjfl
#显示多个文件的内容,文件内容依次显示
hadoop@muhe221:~/test$ hadoop fs -cat /hehe/test1.txt /hehe/test2.txt
345
ksdjfl

#查看文件的checksum information
hadoop@muhe221:~/test$ hadoop fs -checksum /hehe/test1.txt
/hehe/test1.txt MD5-of-0MD5-of-512CRC32C        000002000000000000000000f17ed069c394011dd6be11fca77fff37

hadoop@muhe221:~/test/st$ hadoop fs -count  /hehe/test1.txt
   0 (目录个数)      1 (文件个数)      19 (大小byte) /hehe/test1.txt
hadoop@muhe221:~/test/st$ hadoop fs -count  /hehe/test2.txt
   0       1                  7 /hehe/test2.txt
hadoop@muhe221:~/test/st$ hadoop fs -count  /hehe/test1.txt /hehe/test2.txt
   0       1                 19 /hehe/test1.txt
   0       1                  7 /hehe/test2.txt
hadoop@muhe221:~/test/st$ hadoop fs -count  /hehe
   1       2                 26 /hehe

 

hadoop@muhe221:~/test/st$ hadoop fs -count  -q -h -v /hehe/test1.txt
QUOTA REM_QUOTA SPACE_QUOTA REM_SPACE_QUOTA DIR_COUNT FILE_COUNT CONTENT_SIZE PATHNAME
none     inf        none          inf           0         1           19     /hehe/test1.txt
hadoop@muhe221:~/test/st$ hadoop fs -count  -e /hehe/test1.txt
0            1                 19 Replicated /hehe/test1.txt

Usage: hadoop fs -count [-q] [-h] [-v] [-x] [-t [<storage type>]] [-u] [-e] <paths>
Count the number of directories, files and bytes under the paths that match the specified file pattern. Get the quota and the usage. The output columns with -count are: DIR_COUNT, FILE_COUNT, CONTENT_SIZE, PATHNAME
The -u and -q options control what columns the output contains. -q means show quotas, -u limits the output to show quotas and usage only.
The output columns with -count -q are: QUOTA, REMAINING_QUOTA, SPACE_QUOTA, REMAINING_SPACE_QUOTA, DIR_COUNT, FILE_COUNT, CONTENT_SIZE, PATHNAME
The output columns with -count -u are: QUOTA, REMAINING_QUOTA, SPACE_QUOTA, REMAINING_SPACE_QUOTA, PATHNAME
The -t option shows the quota and usage for each storage type. The -t option is ignored if -u or -q option is not given. The list of possible parameters that can be used in -t option(case insensitive except the parameter ""): "", "all", "ram_disk", "ssd", "disk" or "archive".
The -h option shows sizes in human readable format.
The -v option displays a header line.
The -x option excludes snapshots from the result calculation. Without the -x option (default), the result is always calculated from all INodes, including all snapshots under the given path. The -x option is ignored if -u or -q option is given.
The -e option shows the erasure coding policy for each file.

hadoop@muhe221:~/test/st$ hadoop fs -cp /hehe/test1.txt /hehe/aa.txt
hadoop@muhe221:~/test/st$ hadoop fs -cp /hehe/test1.txt /hehe/aa.txt /hehe/sd #拷贝多个文件到某个目录

hdfs Snapshot 

hadoop@muhe221:~/test$ hdfs dfs -createSnapshot /hehe  heheSnapshot
createSnapshot: Directory is not a snapshottable directory: /hehe
# Allowing snapshots of a directory to be created
hadoop@muhe221:~/test$ hdfs dfsadmin -allowSnapshot /hehe
Allowing snapshot on /hehe succeeded
hadoop@muhe221:~/test$ hdfs dfs -createSnapshot /hehe  heheSnapshot
Created snapshot /hehe/.snapshot/heheSnapshot
hadoop@muhe221:~/test$ hadoop fs -ls /hehe
Found 3 items
-rw-r--r--   1 hadoop supergroup         19 2019-02-26 11:12 /hehe/aa.txt
drwxr-xr-x   - hadoop supergroup          0 2019-02-26 11:17 /hehe/sd
-rw-r--r--   1 hadoop supergroup         19 2019-02-26 10:02 /hehe/test1.txt
hadoop@muhe221:~/test$ hdfs lsSnapshottableDir  #查看有快照的目录
drwxr-xr-x 0 hadoop supergroup 0 2019-02-26 14:01 1 65536 /hehe
hadoop@muhe221:~/test$ hadoop fs -rm -r /hehe #有快照的目录不能被删除
rm: The directory /hehe cannot be deleted since /hehe is snapshottable and already has snapshots
hadoop@muhe221:~/test$ hadoop fs -rm /hehe/aa.txt  #删除一个文件
Deleted /hehe/aa.txt
hadoop@muhe221:~/test$ hadoop fs -ls /hehe
Found 2 items
drwxr-xr-x   - hadoop supergroup          0 2019-02-26 11:17 /hehe/sd
-rw-r--r--   1 hadoop supergroup         19 2019-02-26 10:02 /hehe/test1.txt
hadoop@muhe221:~/test$ hdfs dfs -cp /hehe/.snapshot/heheSnapshot/aa.txt /hehe/ #从快照中恢复该文件
hadoop@muhe221:~/test$ hadoop fs -ls /hehe
Found 3 items
-rw-r--r--   1 hadoop supergroup         19 2019-02-26 14:22 /hehe/aa.txt
drwxr-xr-x   - hadoop supergroup          0 2019-02-26 11:17 /hehe/sd
-rw-r--r--   1 hadoop supergroup         19 2019-02-26 10:02 /hehe/test1.txt

$ hdfs dfs -deleteSnapshot <path> <snapshotName>
$ hdfs dfs -renameSnapshot <path> <oldName> <newName>
$ hdfs snapshotDiff <path> <fromSnapshot> <toSnapshot>

 

hadoop@muhe221:~/test$ hadoop fs -df /hehe  #Displays free space
Filesystem                         Size    Used     Available  Use%
hdfs://10.121.63.240:9000  975964258304  958464  432912715776    0%
hadoop@muhe221:~/test$ hadoop fs -df -h /hehe
Filesystem                    Size   Used  Available  Use%
hdfs://10.121.63.240:9000  908.9 G  936 K    403.2 G    0%
hadoop@muhe221:~/test$ hadoop fs -ls  /hehe
Found 3 items
-rw-r--r--   1 hadoop supergroup         19 2019-02-26 14:22 /hehe/aa.txt
drwxr-xr-x   - hadoop supergroup          0 2019-02-26 11:17 /hehe/sd
-rw-r--r--   1 hadoop supergroup         19 2019-02-26 10:02 /hehe/test1.txt
hadoop@muhe221:~/test$ hadoop fs -du  /hehe
19  19  /hehe/aa.txt
38  38  /hehe/sd
19  19  /hehe/test1.txt
hadoop@muhe221:~/test$ hadoop fs -find /hehe -name "test*.txt"
/hehe/sd/test1.txt
/hehe/test1.txt
# Displays first kilobyte of the file to stdout
hadoop@muhe221:~/test$ hadoop fs -head /hehe/test1.txt 
345
ksdjfl
789
369
hadoop@muhe221:~/test$ hadoop fs -mv /hehe/test1.txt /hehe/testx.txt
hadoop@muhe221:~/test$ hadoop fs -touch /hehe/1.txt  #创建一个文件

 

posted @ 2019-02-26 11:46  牧 天  阅读(345)  评论(0)    收藏  举报