hadoop2

虚拟机nn01

[root@nn01 ~]# cd /usr/local/hadoop/etc/hadoop/
[root@nn01 hadoop]# mv mapred-site.xml.template mapred-site.xml
[root@nn01 hadoop]# vim mapred-site.xml
19 <configuration>
20 <property> 追加20-23行
21 <name>mapreduce.framework.name</name>
22 <value>yarn</value>
23 </property> 追加20-23行
24 </configuration>


[root@nn01 hadoop]# vim yarn-site.xml
15 <configuration>
16
17 <!-- Site specific YARN configuration properties -->
18
19 <property> 追加19-26行
20 <name>yarn.resourcemanager.hostname</name>
21 <value>nn01</value>
22 </property>
23 <property>
24 <name>yarn.nodemanager.aux-services</name>
25 <value>mapreduce_shuffle</value>
26 </property> 追加19-26行
27 </configuration>


[root@nn01 hadoop]# for i in node{1..3};do rsync -aSH /usr/local/hadoop/etc $i:/usr/local/hadoop/ ;done
[root@nn01 hadoop]# cd /usr/local/hadoop/

启动服务
[root@nn01 hadoop]# ./sbin/start-yarn.sh
starting yarn daemons
starting resourcemanager, logging to /usr/local/hadoop/logs/yarn-root-resourcemanager-nn01.out
node1: starting nodemanager, logging to /usr/local/hadoop/logs/yarn-root-nodemanager-node1.out
node3: starting nodemanager, logging to /usr/local/hadoop/logs/yarn-root-nodemanager-node3.out
node2: starting nodemanager, logging to /usr/local/hadoop/logs/yarn-root-nodemanager-node2.out

验证
[root@nn01 hadoop]# jps
11952 NameNode
15904 Jps
15642 ResourceManager
12140 SecondaryNameNode
################################################################################################
其他虚拟机node1,node2,node3上,都能查到自己是NodeManager
[root@node1 hadoop]# jps
12373 Jps
12269 NodeManager
11471 DataNode
################################################################################################
虚拟机nn01

[root@nn01 hadoop]# ./bin/yarn node -list
19/01/03 10:28:36 INFO client.RMProxy: Connecting to ResourceManager at nn01/192.168.3.90:8032
Total Nodes:3
Node-Id Node-State Node-Http-Address Number-of-Running-Containers
node1:36485 RUNNING node1:8042 0
node2:39090 RUNNING node2:8042 0
node3:35644 RUNNING node3:8042 0
################################################################################################
真机浏览器访问

http://192.168.3.90:50070 #namenode页面
http://192.168.3.90:50090 #secondarynamenode页面
http://192.168.3.90:8088 #resourcemanager页面
http://192.168.3.91:50075 #datanode页面(node1,node2,node3)
http://192.168.3.91:8042 #nodemanager页面(node1,node2,node3)

################################################################################################
虚拟机nn01

格式一般是
# ./bin/hadoop fs -命令

[root@nn01 hadoop]# ./bin/hadoop fs -mkdir /abc
[root@nn01 hadoop]# ./bin/hadoop fs -ls /
Found 1 items
drwxr-xr-x - root supergroup 0 2019-01-03 10:52 /abc


[root@nn01 hadoop]# ls
bin etc include lib libexec LICENSE.txt logs NOTICE.txt README.txt sbin share
[root@nn01 hadoop]# ./bin/hadoop fs -put *.txt /abc


[root@nn01 hadoop]# cd /dev/shm/
[root@nn01 shm]# /usr/local/hadoop/bin/hadoop fs -get /abc/*.txt
[root@nn01 shm]# ls
LICENSE.txt NOTICE.txt README.txt


[root@nn01 shm]# /usr/local/hadoop/bin/hadoop fs -mkdir /xyz
[root@nn01 shm]# /usr/local/hadoop/bin/hadoop fs -touchz /xyz/x.txt
[root@nn01 shm]# /usr/local/hadoop/bin/hadoop fs -touchz /xyz/y.txt
[root@nn01 shm]# /usr/local/hadoop/bin/hadoop fs -touchz /xyz/z.txt

真机访问http://192.168.3.90:50070
点击页面右上角Utilities的下三角形选项,选择Browse the file system

先删除目录下的所有文件
[root@nn01 hadoop]# ./bin/hadoop fs -rm /xyz/*
显示如下:
19/01/03 15:56:08 INFO fs.TrashPolicyDefault: Namenode trash configuration: Deletion interval = 0 minutes, Emptier interval = 0 minutes.
Deleted /xyz/x.txt
19/01/03 15:56:08 INFO fs.TrashPolicyDefault: Namenode trash configuration: Deletion interval = 0 minutes, Emptier interval = 0 minutes.
Deleted /xyz/y.txt
19/01/03 15:56:08 INFO fs.TrashPolicyDefault: Namenode trash configuration: Deletion interval = 0 minutes, Emptier interval = 0 minutes.
Deleted /xyz/z.txt

再用-rmdir删除空目录
[root@nn01 hadoop]# ./bin/hadoop fs -rmdir /xyz
################################################################################################
[root@nn01 hadoop]# cd /usr/local/hadoop/
[root@nn01 hadoop]# ./bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.6.jar wordcount /abc /output


以下是命令的完整格式
[root@nn01 hadoop]# ./bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.6.jar wordcount hdfs://nn01:9000/abc hdfs://nn01:9000/output1

[root@nn01 hadoop]# /usr/local/hadoop/bin/hadoop fs -ls /output
Found 2 items
-rw-r--r-- 2 root supergroup 0 2019-01-03 11:12 /output/_SUCCESS
-rw-r--r-- 2 root supergroup 30290 2019-01-03 11:12 /output/part-r-00000


真机访问http://192.168.3.90:50070
点击页面右上角Utilities的下三角形选项,选择Browse the file system
会发现output的文件夹,相当于我们把结果写到了集群里。

点击output文件夹,看到里面有文件_SUCCESS和part-r-00000,
点击文件part-r-00000,跳出一个弹框,点击Download,显示无法访问网站,
当前网址是http://node2:50075/webhdfs/v1/output/part-r-00000?op=OPEN&namenoderpcaddress=nn01:9000&offset=0,
把域名node2改为对应的IP,192.168.3.92,
http://192.168.3.92:50075/webhdfs/v1/output/part-r-00000?op=OPEN&namenoderpcaddress=nn01:9000&offset=0
访问即可下载part-r-00000文件。

下载后查看,跟我们在命令行直接查看是一样的结果
[root@nn01 hadoop]# ./bin/hadoop fs -cat /output/part-r-00000
##############################################################################################
新增虚拟机2台
2G内存,2CPU,20G硬盘(扩容)

192.168.3.94 node4
192.168.3.95 nfsgw


配IP和主机名,扩容
[root@node4 ~]# LANG=en_US.UTF-8
[root@node4 ~]# growpart /dev/vda 1
[root@node4 ~]# xfs_growfs /
##############################################################################################
虚拟机nn01

[root@nn01 hadoop]# cat /etc/hosts
... ...
192.168.3.94 node4 追加这行
192.168.3.95 nfsgw 追加这行

把这个文件传给其他所有主机
##############################################################################################
虚拟机node4

[root@node4 ~]# rsync -aSH --delete 192.168.3.91:/root/.ssh /root/
[root@node4 ~]# yum -y install java-1.8.0-openjdk-devel
[root@node4 ~]# rsync -aSH --delete 192.168.3.90:/usr/local/hadoop /usr/local
[root@node4 ~]# cd /usr/local/hadoop
[root@node4 hadoop]# ./sbin/hadoop-daemon.sh start datanode
[root@node4 hadoop]# jps


[root@node4 hadoop]# ./bin/hdfs dfsadmin --help
显示如下:
... ...
[-setBalancerBandwidth <bandwidth in bytes per second>] 复制这个选项-setBalancerBandwidth
... ...


[root@node4 hadoop]# ./bin/hdfs dfsadmin -setBalancerBandwidth 60000000
显示如下:
Balancer bandwidth is set to 60000000


[root@node4 hadoop]# ./sbin/start-balancer.sh
显示如下:
starting balancer, logging to /usr/local/hadoop/logs/hadoop-root-balancer-node4.out
Time Stamp Iteration# Bytes Already Moved Bytes Left To Move Bytes Being Moved


[root@node4 hadoop]# ./bin/hdfs dfsadmin -report
显示如下:
Configured Capacity: 85853126656 (79.96 GB)
Present Capacity: 77409566720 (72.09 GB)
DFS Remaining: 77408378880 (72.09 GB)
DFS Used: 1187840 (1.13 MB)
DFS Used%: 0.00%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0

-------------------------------------------------
Live datanodes (4):

Name: 192.168.3.92:50010 (node2)
Hostname: node2
Decommission Status : Normal
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 364544 (356 KB)
Non DFS Used: 2230411264 (2.08 GB)
DFS Remaining: 19232505856 (17.91 GB)
DFS Used%: 0.00%
DFS Remaining%: 89.61%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 14:48:21 CST 2019


Name: 192.168.3.91:50010 (node1)
Hostname: node1
Decommission Status : Normal
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 393216 (384 KB)
Non DFS Used: 2230484992 (2.08 GB)
DFS Remaining: 19232403456 (17.91 GB)
DFS Used%: 0.00%
DFS Remaining%: 89.61%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 14:48:19 CST 2019


Name: 192.168.3.94:50010 (node4)
Hostname: node4
Decommission Status : Normal
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 8192 (8 KB)
Non DFS Used: 1752305664 (1.63 GB)
DFS Remaining: 19710967808 (18.36 GB)
DFS Used%: 0.00%
DFS Remaining%: 91.84%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 14:48:18 CST 2019


Name: 192.168.3.93:50010 (node3)
Hostname: node3
Decommission Status : Normal
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 421888 (412 KB)
Non DFS Used: 2230358016 (2.08 GB)
DFS Remaining: 19232501760 (17.91 GB)
DFS Used%: 0.00%
DFS Remaining%: 89.61%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 14:48:21 CST 2019
#################################################################################################
删除节点node4

[root@nn01 hadoop]# cd /usr/local/hadoop/etc/hadoop/
[root@nn01 hadoop]# vim hdfs-site.xml
32 <property> 追加32-35行
33 <name>dfs.hosts.exclude</name> exclude是排除的意思
34 <value>/usr/local/hadoop/etc/hadoop/exclude</value> 这个是要读的移除节点的文件
35 </property> 追加32-35行
36 </configuration>


[root@nn01 hadoop]# touch /usr/local/hadoop/etc/hadoop/exclude 创建要读的移除节点的文件
[root@nn01 hadoop]# vim /usr/local/hadoop/etc/hadoop/exclude 写要读的移除节点的文件
node4 要删除的节点的主机名


[root@nn01 hadoop]# cd /usr/local/hadoop/
[root@nn01 hadoop]# ./bin/hdfs dfsadmin -refreshNodes
显示如下:
Refresh nodes successful

[root@nn01 hadoop]# ./bin/hdfs dfsadmin -report
显示如下:
Configured Capacity: 66782453760 (62.20 GB)
Present Capacity: 60090949632 (55.96 GB)
DFS Remaining: 50975907840 (47.48 GB)
DFS Used: 9115041792 (8.49 GB)
DFS Used%: 15.17%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0

-------------------------------------------------
Live datanodes (4):

Name: 192.168.3.92:50010 (node2)
Hostname: node2
Decommission Status : Normal
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 2122420224 (1.98 GB) 当前node2使用了1.98G
Non DFS Used: 2230476800 (2.08 GB)
DFS Remaining: 17110384640 (15.94 GB)
DFS Used%: 9.89%
DFS Remaining%: 79.72%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 16:14:21 CST 2019


Name: 192.168.3.91:50010 (node1)
Hostname: node1
Decommission Status : Normal
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 2299990016 (2.14 GB) 当前node1使用了2.14G
Non DFS Used: 2230587392 (2.08 GB)
DFS Remaining: 16932704256 (15.77 GB)
DFS Used%: 10.72%
DFS Remaining%: 78.89%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 16:14:20 CST 2019


Name: 192.168.3.94:50010 (node4)
Hostname: node4
Decommission Status : Decommission in progress
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 2392608768 (2.23 GB) 当前node4使用了2.23G
Non DFS Used: 1752379392 (1.63 GB)
DFS Remaining: 17318293504 (16.13 GB)
DFS Used%: 11.15%
DFS Remaining%: 80.69%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 16:14:20 CST 2019


Name: 192.168.3.93:50010 (node3)
Hostname: node3
Decommission Status : Normal
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 2300022784 (2.14 GB) 当前node3使用了2.14G
Non DFS Used: 2230439936 (2.08 GB)
DFS Remaining: 16932818944 (15.77 GB)
DFS Used%: 10.72%
DFS Remaining%: 78.89%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 16:14:20 CST 2019


Decommissioning datanodes (1):

Name: 192.168.3.94:50010 (node4)
Hostname: node4
Decommission Status : Decommission in progress 显示Decommission in progress,代表正在移除这台主机的数据到其他主机
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 2392608768 (2.23 GB) 当前node4上要移除的数据是2.23G
Non DFS Used: 1752379392 (1.63 GB)
DFS Remaining: 17318293504 (16.13 GB)
DFS Used%: 11.15%
DFS Remaining%: 80.69%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 16:14:20 CST 2019


根据上面的显示,要移除node4的数据共2.23G
当前node1使用了2.14G
当前node2使用了1.98G
当前node3使用了2.14G


[root@nn01 hadoop]# ./bin/hdfs dfsadmin -report
Configured Capacity: 66782453760 (62.20 GB)
Present Capacity: 60225945698 (56.09 GB)
DFS Remaining: 48583229440 (45.25 GB)
DFS Used: 11642716258 (10.84 GB)
DFS Used%: 19.33%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0

-------------------------------------------------
Live datanodes (4):

Name: 192.168.3.92:50010 (node2)
Hostname: node2
Decommission Status : Normal
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 3069169678 (2.86 GB) 当前node2使用了2.86G
Non DFS Used: 2095386610 (1.95 GB)
DFS Remaining: 16298725376 (15.18 GB)
DFS Used%: 14.30%
DFS Remaining%: 75.94%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 16:22:09 CST 2019


Name: 192.168.3.91:50010 (node1)
Hostname: node1
Decommission Status : Normal
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 2798784540 (2.61 GB) 当前node1使用了2.61G
Non DFS Used: 2230607844 (2.08 GB)
DFS Remaining: 16433889280 (15.31 GB)
DFS Used%: 13.04%
DFS Remaining%: 76.57%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 16:22:09 CST 2019


Name: 192.168.3.94:50010 (node4)
Hostname: node4
Decommission Status : Decommissioned 显示Decommissioned,代表数据移除已完成
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 2392608768 (2.23 GB)
Non DFS Used: 1752383488 (1.63 GB)
DFS Remaining: 17318289408 (16.13 GB)
DFS Used%: 11.15%
DFS Remaining%: 80.69%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 16:22:08 CST 2019


Name: 192.168.3.93:50010 (node3)
Hostname: node3
Decommission Status : Normal
Configured Capacity: 21463281664 (19.99 GB)
DFS Used: 3382153272 (3.15 GB) 当前node3使用了3.15G
Non DFS Used: 2230513608 (2.08 GB)
DFS Remaining: 15850614784 (14.76 GB)
DFS Used%: 15.76%
DFS Remaining%: 73.85%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Jan 03 16:22:08 CST 2019
#####################################################################################
了解即可

nfs+rsync+inotify+keepalived ---->一般少于5G,少于10层,少于1万个文件?
nfs+drbd+heartbeat---->一般少于100G
#####################################################################################
虚拟机nfsgw

[root@nfsgw ~]# cat /etc/hosts
... ...
192.168.3.95 nfsgw
#####################################################################################
虚拟机nn01

[root@nn01 hadoop]# pwd
/usr/local/hadoop
[root@nn01 hadoop]# ./sbin/stop-all.sh
This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh
Stopping namenodes on [nn01]
nn01: stopping namenode
node4: stopping datanode
node1: stopping datanode
node2: stopping datanode
node3: stopping datanode
Stopping secondary namenodes [nn01]
nn01: stopping secondarynamenode
stopping yarn daemons
stopping resourcemanager
node4: stopping nodemanager
node2: stopping nodemanager
node1: stopping nodemanager
node3: stopping nodemanager
no proxyserver to stop


[root@nn01 hadoop]# cd logs/
[root@nn01 logs]# ls
hadoop-root-namenode-nn01.log SecurityAuth-root.audit
hadoop-root-namenode-nn01.out yarn-root-resourcemanager-nn01.log
hadoop-root-secondarynamenode-nn01.log yarn-root-resourcemanager-nn01.out
hadoop-root-secondarynamenode-nn01.out
[root@nn01 logs]# rm -rf *


[root@nn01 hadoop]# jps
19414 Jps
[root@nn01 hadoop]# id 700
id: 700: no such user
[root@nn01 hadoop]# groupadd -g 700 nsd1808
[root@nn01 hadoop]# useradd -u 700 -g 700 -r nsd1808


[root@nn01 hadoop]# vim core-site.xml
28 <property> 追加28-35行
29 <name>hadoop.proxyuser.nsd1808.groups</name> 对nfs的代理用户nsd1808授权
30 <value>*</value>
31 </property>
32 <property>
33 <name>hadoop.proxyuser.nsd1808.hosts</name> 对nfs的代理用户nsd1808授权
34 <value>*</value>
35 </property> 追加28-35行
36 </configuration>


[root@nn01 hadoop]# for i in 9{1..5};do rsync -aSH --delete /usr/local/hadoop/ 192.168.3.$i:/usr/local/hadoop/ ;done
注意:若目标机上 /usr/local/hadoop 目录已存在,用 scp -r /usr/local/hadoop/ 会在远端生成嵌套的 hadoop/hadoop 目录,改用 rsync(或把 scp 目标写成 /usr/local/)可避免此问题
[root@nn01 hadoop]# ./sbin/start-dfs.sh
[root@nn01 hadoop]# /usr/local/hadoop/bin/hdfs dfsadmin -report
##############################################################################################
虚拟机nfsgw

[root@nfsgw ~]# groupadd -g 700 nsd1808
[root@nfsgw ~]# useradd -u 700 -g 700 -r nsd1808
[root@nfsgw ~]# mkdir /var/hadoop
[root@nfsgw ~]# mkdir /var/nfstmp
[root@nfsgw ~]# chown nsd1808.nsd1808 /var/nfstmp
[root@nfsgw ~]# setfacl -m u:nsd1808:rwx /usr/local/hadoop/logs
[root@nfsgw ~]# vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml

[root@nfsgw ~]# su - nsd1808
su: 警告:无法更改到 /home/nsd1808 目录: 没有那个文件或目录
-bash-4.2$ cd /var/nfstmp/
-bash-4.2$ pwd
/var/nfstmp
-bash-4.2$ touch 1
-bash-4.2$ ls
1

-bash-4.2$ cd /usr/local/hadoop/logs/
-bash-4.2$ touch 2
-bash-4.2$ ls
2 hadoop-root-namenode-nn01.out hadoop-root-secondarynamenode-nn01.out
hadoop-root-namenode-nn01.log hadoop-root-secondarynamenode-nn01.log SecurityAuth-root.audit
-bash-4.2$ rm -rf 2
-bash-4.2$ exit

[root@nfsgw ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh --script ./bin/hdfs start portmap
starting portmap, logging to /usr/local/hadoop/logs/hadoop-root-portmap-nfsgw.out

[root@nfsgw ~]# jps
11536 Portmap
11582 Jps

[root@nfsgw ~]# su - nsd1808
上一次登录:五 1月 4 13:34:54 CST 2019pts/0 上
su: 警告:无法更改到 /home/nsd1808 目录: 没有那个文件或目录
-bash-4.2$ cd /usr/local/hadoop/

-bash-4.2$ ./sbin/hadoop-daemon.sh --script ./bin/hdfs start nfs3
starting nfs3, logging to /usr/local/hadoop/logs/hadoop-nsd1808-nfs3-nfsgw.out

-bash-4.2$ jps
11683 Jps
11630 Nfs3

-bash-4.2$ exit
登出

[root@nfsgw ~]# jps
11536 Portmap
11630 Nfs3
11695 Jps
##############################################################################################
客户端(可以用真机做客户端)

[root@localhost ~]# yum -y install nfs-utils
[root@localhost ~]# mount -t nfs -o vers=3,proto=tcp,nolock,noatime,sync,noacl 192.168.3.95:/ /mnt/

[root@localhost ~]# cd /mnt/
[root@localhost mnt]# ls
abc output tmp

[root@localhost mnt]# cd
[root@localhost ~]# umount /mnt/

[root@localhost ~]# vim /etc/fstab
... ...
192.168.3.95:/ /mnt nfs vers=3,proto=tcp,nolock,noatime,sync,noacl,_netdev 0 0

[root@localhost ~]# mount -a

[root@localhost ~]# cd /mnt/
[root@localhost mnt]# touch 1
[root@localhost mnt]# ls
1 abc output tmp

[root@localhost mnt]# rm -rf 1
[root@localhost mnt]# ls
abc output tmp

[root@localhost ~]# df -h
文件系统 容量 已用 可用 已用% 挂载点
... ...
192.168.3.95:/ 80G 7.9G 73G 10% /mnt


[root@localhost ~]# rpcinfo -p 192.168.3.95
program vers proto port service
100005 3 udp 4242 mountd
100005 1 tcp 4242 mountd
100000 2 udp 111 portmapper
100000 2 tcp 111 portmapper
100005 3 tcp 4242 mountd
100005 2 tcp 4242 mountd
100003 3 tcp 2049 nfs
100005 2 udp 4242 mountd
100005 1 udp 4242 mountd

 

posted @ 2019-04-30 22:58  安于夏  阅读(160)  评论(0编辑  收藏  举报