Linux System Administration Lab Assignment 10
System administration:
1. View process information using both option styles of the ps command (BSD-style aux and UNIX-style -elf)
[root@localhost ~]# ps aux
USER        PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root          1  0.0  0.3  60652  7916 ?        Ss   01:07   0:03 /usr/lib/syste
root          2  0.0  0.0      0     0 ?        S    01:07   0:00 [kthreadd]
root          3  0.0  0.0      0     0 ?        S    01:07   0:04 [ksoftirqd/0]
root          5  0.0  0.0      0     0 ?        S<   01:07   0:00 [kworker/0:0H]
root          7  0.0  0.0      0     0 ?        S    01:07   0:00 [migration/0]
root          8  0.0  0.0      0     0 ?        S    01:07   0:00 [rcu_bh]
root          9  0.0  0.0      0     0 ?        S    01:07   0:00 [rcuob/0]
root         10  0.0  0.0      0     0 ?        S    01:07   0:00 [rcuob/1]
root         11  0.0  0.0      0     0 ?        S    01:07   0:00 [rcuob/2]
........
root      92001  0.0  0.0  22652  1956 ?        Ss   10:43   0:00 /usr/lib/syste
root      92014  0.0  0.1 116248  2884 pts/0    Ss+  10:43   0:00 /bin/bash
root      92055  0.2  0.2 135348  4904 ?        Ss   10:44   0:00 sshd: root@pts
root      92059  0.0  0.1 116008  2760 pts/3    Ss   10:44   0:00 -bash
root      92099  0.0  0.0 107892   612 ?        S    10:45   0:00 sleep 60
root      92100  0.0  0.0 123372  1396 pts/3    R+   10:45   0:00 ps aux
[root@localhost ~]# ps -elf
F S UID         PID   PPID  C PRI  NI ADDR SZ WCHAN  STIME TTY          TIME CMD
4 S root          1      0  0  80   0 - 15163 ep_pol 01:07 ?        00:00:03 /us
1 S root          2      0  0  80   0 -     0 kthrea 01:07 ?        00:00:00 [kt
1 S root          3      2  0  80   0 -     0 smpboo 01:07 ?        00:00:04 [ks
1 S root          5      2  0  60 -20 -     0 worker 01:07 ?        00:00:00 [kw
1 S root          7      2  0 -40   - -     0 smpboo 01:07 ?        00:00:00 [mi
1 S root          8      2  0  80   0 -     0 rcu_gp 01:07 ?        00:00:00 [rc
1 S root          9      2  0  80   0 -     0 rcu_no 01:07 ?        00:00:00 [rc
1 S root         10      2  0  80   0 -     0 rcu_no 01:07 ?        00:00:00 [rc
1 S root         11      2  0  80   0 -     0 rcu_no 01:07 ?        00:00:00 [rc
.......
4 S root      92001      1  0  80   0 -  5663 poll_s 10:43 ?        00:00:00 /us
4 S root      92014  90177  0  80   0 - 29062 n_tty_ 10:43 pts/0    00:00:00 /bi
4 S root      92055   1407  0  80   0 - 33837 poll_s 10:44 ?        00:00:00 ssh
4 S root      92059  92055  0  80   0 - 29002 wait   10:44 pts/3    00:00:00 -ba
1 S root      92109      2  0  80   0 -     0 worker 10:46 ?        00:00:00 [kw
0 S root      92117    943  0  80   0 - 26973 hrtime 10:47 ?        00:00:00 sle
0 R root      92118  92059  0  80   0 - 30843 -      10:47 pts/3    00:00:00 ps 
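ps can also sort and trim its output directly; as an optional sketch (not part of the captured output above, assuming the procps-ng ps shipped with CentOS 7):
ps aux --sort=-%mem | head -n 5    # five processes using the most memory, highest first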
2. View processes with the top command
[root@localhost ~]# top
top - 10:50:12 up  9:42,  5 users,  load average: 0.07, 0.09, 0.06
Tasks: 434 total,   3 running, 431 sleeping,   0 stopped,   0 zombie
%Cpu(s):  0.0 us,  0.3 sy,  0.0 ni, 99.7 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
KiB Mem :  2035352 total,   146668 free,   730760 used,  1157924 buff/cache
KiB Swap:  2097148 total,  2096660 free,      488 used.  1047584 avail Mem 
   PID USER      PR  NI    VIRT    RES    SHR S %CPU %MEM     TIME+ COMMAND                                                                                                       
   883 root      20   0  269016   3612   2756 S  0.3  0.2   0:50.29 vmtoolsd                                                                                                      
  2264 root      20   0  343988  11944   8704 R  0.3  0.6   0:51.19 vmtoolsd                                                                                                      
 92138 root      20   0  130288   2116   1268 R  0.3  0.1   0:00.43 top                                                                                                           
     1 root      20   0   60652   7916   2536 S  0.0  0.4   0:03.15 systemd                                                                                                       
     2 root      20   0       0      0      0 S  0.0  0.0   0:00.05 kthreadd                                                                                                      
     3 root      20   0       0      0      0 S  0.0  0.0   0:04.68 ksoftirqd/0                                                                                                   
     5 root       0 -20       0      0      0 S  0.0  0.0   0:00.00 kworker/0:0H                                                                                                  
     7 root      rt   0       0      0      0 S  0.0  0.0   0:00.00 migration/0                                                                                                   
     8 root      20   0       0      0      0 S  0.0  0.0   0:00.00 rcu_bh                                                                                                        
     9 root      20   0       0      0      0 S  0.0  0.0   0:00.00 rcuob/0                                                                                                       
    10 root      20   0       0      0      0 S  0.0  0.0   0:00.00 rcuob/1                                                                                                       
    11 root      20   0       0      0      0 S  0.0  0.0   0:00.00 rcuob/2                                                                                                       
    12 root      20   0       0      0      0 S  0.0  0.0   0:00.00 rcuob/3                                                                                                       
    13 root      20   0       0      0      0 S  0.0  0.0   0:00.00 rcuob/4                                                                                                       
    14 root      20   0       0      0      0 S  0.0  0.0   0:00.00 rcuob/5    
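top can also be run non-interactively for scripting; a minimal sketch, assuming the same procps-ng top:
top -b -n 1 | head -n 15    # batch mode, a single iteration, first 15 lines only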
3. Use the pgrep command to find the process IDs of the sshd service
[root@localhost ~]# pgrep -l sshd
1407 sshd
90218 sshd
92055 sshd
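pgrep can additionally filter by the owning user; an optional sketch:
pgrep -u root -l sshd    # list only the sshd processes owned by root, with their names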
4. View the system process tree
[root@localhost ~]# pstree -aup
systemd,1 --switched-root --system --deserialize 21
  ├─ModemManager,897
  │   ├─{ModemManager},927
  │   └─{ModemManager},954
  ├─NetworkManager,1039 --no-daemon
  │   ├─dhclient,90705 -d -q -sf /usr/libexec/nm-dhcp-helper -pf /var/run/dhclient-eno16777736.pid -lf...
  │   ├─{NetworkManager},1057
  │   ├─{NetworkManager},1059
  │   └─{NetworkManager},1061
  ├─abrt-watch-log,889 -F BUG: WARNING: at WARNING: CPU: INFO: possible recursive locking detected ernel BUG at list_del corruption list_add corruption ...
  ├─abrt-watch-log,891 -F Backtrace /var/log/Xorg.0.log -- /usr/bin/abrt-dump-xorg -xD
  ├─abrtd,888 -d -s
  ├─accounts-daemon,920
  │   ├─{accounts-daemon},931
  │   └─{accounts-daemon},948
  ├─alsactl,878 -s -n 19 -c -E ALSA_CONFIG_PATH=/etc/alsa/alsactl.conf --initfile=/lib/alsa/init/00main rdaemon
  ├─at-spi-bus-laun,2037
  │   ├─dbus-daemon,2041 --config-file=/etc/at-spi2/accessibility.conf --nofork --print-address 3
  │   │   └─{dbus-daemon},2042
  │   ├─{at-spi-bus-laun},2038
  │   ├─{at-spi-bus-laun},2040
  │   └─{at-spi-bus-laun},2043
  ├─at-spi2-registr,2046 --use-gnome-session
  │   └─{at-spi2-registr},2049
  ├─atd,1416 -f
  ├─auditd,854 -n
  │   ├─audispd,861
  │   │   ├─sedispatch,865
  │   │   └─{audispd},866
  │   └─{auditd},860
  ├─avahi-daemon,900,avahi
  │   └─avahi-daemon,915
  ├─bluetoothd,879 -n
  ├─chronyd,893,chrony -u chrony
  ├─colord,1779,colord
  │   ├─{colord},1781
  │   └─{colord},1783
  ├─crond,1415 -n
  ├─cupsd,2127 -f
  ├─dbus-daemon,1891 --fork --print-pid 4 --print-address 6 --session
  │   └─{dbus-daemon},1892
  ├─dbus-daemon,924,dbus --system --address=systemd: --nofork --nopidfile --systemd-activation
  │   └─{dbus-daemon},940
  ├─dbus-launch,1890 --sh-syntax --exit-with-session
  ├─dconf-service,2142
  │   ├─{dconf-service},2145
  │   └─{dconf-service},2146
  ├─evolution-addre,2221
  │   ├─{evolution-addre},2226
  │   ├─{evolution-addre},2244
  │   ├─{evolution-addre},2251
  │   └─{evolution-addre},2252
  ├─evolution-calen,2247
  │   ├─{evolution-calen},2282
  │   ├─{evolution-calen},2286
  │   ├─{evolution-calen},2296
  │   └─{evolution-calen},2297
  ├─evolution-sourc,2178
  │   ├─{evolution-sourc},2179
  │   └─{evolution-sourc},2188
  ├─gconfd-2,2219
  ├─gdm,1418
  │   ├─gdm-simple-slav,1441 --display-id /org/gnome/DisplayManager/Displays/_0
  │   │   ├─Xorg,1464 :0 -background none -verbose -auth /run/gdm/auth-for-gdm-z9CyMo/database -seat seat0 -nolisten tcp vt1
  │   │   ├─gdm-session-wor,1871
  │   │   │   ├─gdm-session-wor,89917
  │   │   │   │   ├─{gdm-session-wor},89918
  │   │   │   │   └─{gdm-session-wor},89920
  │   │   │   ├─gnome-session,1882 --session gnome-classic
  │   │   │   │   ├─abrt-applet,2276
  │   │   │   │   │   └─{abrt-applet},2285
  │   │   │   │   ├─gnome-settings-,2059
  │   │   │   │   │   ├─{gnome-settings-},2067
  │   │   │   │   │   ├─{gnome-settings-},2073
  │   │   │   │   │   ├─{gnome-settings-},2078
  │   │   │   │   │   └─{gnome-settings-},2084
  │   │   │   │   ├─gnome-shell,2126
  │   │   │   │   │   ├─{gnome-shell},2131
  │   │   │   │   │   ├─{gnome-shell},2139
  │   │   │   │   │   ├─{gnome-shell},2141
  │   │   │   │   │   ├─{gnome-shell},2143
  │   │   │   │   │   └─{gnome-shell},2165
  │   │   │   │   ├─seapplet,2237
  │   │   │   │   ├─ssh-agent,2025 /bin/sh -c exec -l /bin/bash -c "env GNOME_SHELL_SESSION_MODE=classic gnome-session --session gnome-classic"
  │   │   │   │   ├─tracker-miner-f,2267
  │   │   │   │   │   ├─{tracker-miner-f},2271
  │   │   │   │   │   ├─{tracker-miner-f},2274
  │   │   │   │   │   └─{tracker-miner-f},2278
  │   │   │   │   ├─{gnome-session},2045
  │   │   │   │   ├─{gnome-session},2047
  │   │   │   │   └─{gnome-session},2048
  │   │   │   ├─{gdm-session-wor},1872
  │   │   │   └─{gdm-session-wor},1873
  │   │   ├─{gdm-simple-slav},1453
  │   │   └─{gdm-simple-slav},1456
  │   ├─{gdm},1433
  │   └─{gdm},1438
  ├─gnome-keyring-d,1880 --daemonize --login
  │   ├─{gnome-keyring-d},1881
  │   ├─{gnome-keyring-d},2069
  │   ├─{gnome-keyring-d},2071
  │   └─{gnome-keyring-d},2075
  ├─gnome-shell-cal,2167
  │   ├─{gnome-shell-cal},2172
  │   ├─{gnome-shell-cal},2173
  │   ├─{gnome-shell-cal},2174
  │   └─{gnome-shell-cal},2235
  ├─gnome-terminal-,90177
  │   ├─bash,90184
  │   ├─bash,92014
  │   ├─gnome-pty-helpe,90183
  │   ├─{gnome-terminal-},90178
  │   ├─{gnome-terminal-},90179
  │   └─{gnome-terminal-},90180
  ├─goa-daemon,2108
  │   ├─{goa-daemon},2109
  │   ├─{goa-daemon},2111
  │   └─{goa-daemon},2112
  ├─goa-identity-se,2116
  │   ├─{goa-identity-se},2119
  │   └─{goa-identity-se},2123
  ├─gsd-printer,2133
  │   └─{gsd-printer},2136
  ├─gvfs-afc-volume,2118
  │   ├─{gvfs-afc-volume},2120
  │   └─{gvfs-afc-volume},2122
  ├─gvfs-goa-volume,2105
  │   └─{gvfs-goa-volume},2106
  ├─gvfs-gphoto2-vo,2097
  │   └─{gvfs-gphoto2-vo},2099
  ├─gvfs-mtp-volume,2101
  │   └─{gvfs-mtp-volume},2103
  ├─gvfs-udisks2-vo,2086
  │   ├─{gvfs-udisks2-vo},2087
  │   └─{gvfs-udisks2-vo},2094
  ├─gvfsd,1946
  │   └─{gvfsd},1947
  ├─gvfsd-burn,2284 --spawner :1.3 /org/gtk/gvfs/exec_spaw/1
  │   └─{gvfsd-burn},2295
  ├─gvfsd-fuse,1955 /run/user/0/gvfs -f -o big_writes
  │   ├─{gvfsd-fuse},1965
  │   ├─{gvfsd-fuse},1969
  │   ├─{gvfsd-fuse},1971
  │   └─{gvfsd-fuse},1974
  ├─gvfsd-trash,2242 --spawner :1.3 /org/gtk/gvfs/exec_spaw/0
  │   ├─{gvfsd-trash},2250
  │   └─{gvfsd-trash},2259
  ├─httpd,89753 -k start
  │   ├─httpd,89754,daemon -k start
  │   ├─httpd,89755,daemon -k start
  │   ├─httpd,89756,daemon -k start
  │   ├─httpd,89757,daemon -k start
  │   └─httpd,89758,daemon -k start
  ├─ibus-daemon,2149 --replace --xim --panel disable
  │   ├─ibus-dconf,2153
  │   │   ├─{ibus-dconf},2155
  │   │   ├─{ibus-dconf},2157
  │   │   └─{ibus-dconf},2159
  │   ├─ibus-engine-sim,2215
  │   │   ├─{ibus-engine-sim},2220
  │   │   └─{ibus-engine-sim},2223
  │   ├─{ibus-daemon},2150
  │   └─{ibus-daemon},2151
  ├─ibus-x11,2156 --kill-daemon
  │   ├─{ibus-x11},2160
  │   └─{ibus-x11},2164
  ├─ksmtuned,943 /usr/sbin/ksmtuned
  │   └─sleep,92249 60
  ├─libvirtd,1405
  │   ├─{libvirtd},1422
  │   ├─{libvirtd},1423
  │   ├─{libvirtd},1424
  │   ├─{libvirtd},1425
  │   ├─{libvirtd},1426
  │   ├─{libvirtd},1427
  │   ├─{libvirtd},1428
  │   ├─{libvirtd},1429
  │   ├─{libvirtd},1430
  │   └─{libvirtd},1431
  ├─lsmd,882,libstoragemgmt -d
  ├─lvmetad,740 -f
  ├─master,1624 -w
  │   ├─pickup,91699,postfix -l -t unix -u
  │   └─qmgr,1627,postfix -l -t unix -u
  ├─mission-control,2169
  │   ├─{mission-control},2175
  │   └─{mission-control},2176
  ├─nautilus,2186 --no-default-window
  │   ├─{nautilus},2200
  │   ├─{nautilus},2209
  │   └─{nautilus},2222
  ├─nm-dispatcher,92222
  │   ├─{nm-dispatcher},92223
  │   └─{nm-dispatcher},92224
  ├─polkitd,955,polkitd --no-debug
  │   ├─{polkitd},968
  │   ├─{polkitd},969
  │   ├─{polkitd},970
  │   ├─{polkitd},971
  │   └─{polkitd},972
  ├─pulseaudio,2065 --start
  │   ├─{pulseaudio},2070
  │   └─{pulseaudio},2072
  ├─rngd,906 -f
  ├─rsyslogd,895 -n
  │   ├─{rsyslogd},917
  │   └─{rsyslogd},918
  ├─rtkit-daemon,923,rtkit
  │   ├─{rtkit-daemon},951
  │   └─{rtkit-daemon},952
  ├─smartd,910 -n -q never
  ├─sshd,1407 -D
  │   ├─sshd,90218    
  │   │   └─bash,90222
  │   └─sshd,92055    
  │       └─bash,92059
  │           └─pstree,92250 -aup
  ├─systemd-journal,734
  ├─systemd-logind,914
  ├─systemd-udevd,746
  ├─tracker-store,2241
  │   ├─{tracker-store},2248
  │   ├─{tracker-store},2249
  │   ├─{tracker-store},2253
  │   ├─{tracker-store},2254
  │   ├─{tracker-store},2255
  │   ├─{tracker-store},2256
  │   └─{tracker-store},2257
  ├─tuned,896 -Es /usr/sbin/tuned -l -P
  │   ├─{tuned},986
  │   ├─{tuned},987
  │   ├─{tuned},991
  │   └─{tuned},993
  ├─udisksd,2088 --no-debug
  │   ├─{udisksd},2089
  │   ├─{udisksd},2091
  │   ├─{udisksd},2092
  │   └─{udisksd},2093
  ├─upowerd,1720
  │   ├─{upowerd},1722
  │   └─{upowerd},1723
  ├─vmtoolsd,883
  │   └─{vmtoolsd},937
  └─vmtoolsd,2264 -n vmusr
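pstree also accepts a PID and then prints only that subtree; for example, using the sshd master PID 1407 seen above:
pstree -aup 1407    # arguments, PIDs and owners for the sshd branch only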
5. Run the command dd if=/dev/zero of=/root/file bs=1M count=8190 in the foreground
[root@localhost ~]# dd if=/dev/zero of=/root/file bs=1M count=8190
6. Move the command from task 5 to the background and suspend it
^Z
[1]+  Stopped                 dd if=/dev/zero of=/root/file bs=1M count=8190
7. Run the command dd if=/dev/zero of=/root/file2 bs=1M count=1024 in the background
[root@localhost ~]# dd if=/dev/zero of=/root/file2 bs=1M count=1024 &
[2] 92387
8. View the list of background jobs
[root@localhost ~]# jobs -l
[1]+ 92317 Stopped                 dd if=/dev/zero of=/root/file bs=1M count=8190
[2]- 92387 Running                 dd if=/dev/zero of=/root/file2 bs=1M count=1024 &
[root@localhost ~]# 1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB) copied, 5.4253 s, 198 MB/s
[2]-  Done                    dd if=/dev/zero of=/root/file2 bs=1M count=1024
9. Resume dd if=/dev/zero of=/root/file bs=1M count=8190 so that it continues running in the background
[root@localhost ~]# bg 1
[1]+ dd if=/dev/zero of=/root/file bs=1M count=8190 &
[root@localhost ~]# 8190+0 records in
8190+0 records out
8587837440 bytes (8.6 GB) copied, 495.078 s, 17.3 MB/s
[1]+  Done                    dd if=/dev/zero of=/root/file bs=1M count=8190
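A job can equally be pulled back to the foreground by its job spec; a brief sketch:
fg %1    # resume job 1 in the foreground (bg %1 resumes it in the background instead)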
10. Find the process of the dd if=/dev/zero of=/root/file bs=1M count=8190 command and kill it with kill (the command is rerun below with count=819000 so that it runs long enough to be located and killed)
[root@localhost ~]# dd if=/dev/zero of=/root/file bs=1M count=819000
[root@localhost ~]# ps aux |grep dd
root      92453 14.6  0.0 108956  1672 pts/3    D+   11:12   0:20 dd if=/dev/zero of=/root/file bs=1M count=819000
[root@localhost ~]# kill -9 92453
[root@localhost ~]# dd if=/dev/zero of=/root/file bs=1M count=819000
Killed
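pkill combines the lookup and the kill into one step; an optional sketch (it matches by process name, so use with care):
pkill -9 -x dd    # send SIGKILL to every process whose command name is exactly 'dd'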
11. Set a one-time scheduled task to shut down the system at 18:00, and view the task information
[root@localhost ~]# service atd status
Redirecting to /bin/systemctl status  atd.service
atd.service - Job spooling tools
   Loaded: loaded (/usr/lib/systemd/system/atd.service; enabled)
   Active: active (running) since Fri 2019-08-02 12:10:52 CST; 5 days ago
 Main PID: 1416 (atd)
   CGroup: /system.slice/atd.service
           └─1416 /usr/sbin/atd -f
Aug 02 12:10:52 localhost.localdomain systemd[1]: Starting Job spooling tool...
Aug 02 12:10:52 localhost.localdomain systemd[1]: Started Job spooling tools.
Hint: Some lines were ellipsized, use -l to show in full.
[root@localhost ~]# at 18:00 2019-8-8
at> init 0
at> <EOT>
job 2 at Thu Aug  8 18:00:00 2019
[root@localhost ~]# atq
2	Thu Aug  8 18:00:00 2019 a root
[root@localhost ~]# atrm 2
[root@localhost ~]# atq
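at also accepts relative time specifications, and a queued job can be inspected before it runs; a short sketch:
at now + 5 minutes    # opens the at> prompt for a job relative to the current time
at -c 2               # would print the environment and commands of job 2 (before it was removed with atrm)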
12. As root, set up recurring scheduled tasks:
a) Every night at midnight (24:00), archive and compress /etc/passwd /etc/shadow /etc/group /etc/gshadow into file.tar.gz
b) Every Monday, list disk usage every five minutes
c) Every day at 8:30, synchronize the time with the Internet time server pool.ntp.org
[root@localhost ~]# crontab -e
no crontab for root - using an empty one
0  0  *  *  *  tar -zcf file.tar.gz /etc/passwd /etc/shadow /etc/group /etc/gshadow
*/5  *  *  *  1  df -i /dev/sd*
30  8  *  *  *  ntpdate pool.ntp.org
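For reference, the five time fields of each crontab entry are, from left to right:
# minute (0-59)  hour (0-23)  day-of-month (1-31)  month (1-12)  day-of-week (0-7, 0 and 7 = Sunday)
# e.g. '30 8 * * *' runs every day at 08:30, and '*/5 * * * 1' runs every five minutes on Mondays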
13. View root's scheduled tasks with the crontab command, then use a file-viewing tool to list the contents of the corresponding file under /var/spool/cron
[root@localhost ~]# crontab -l
0  0  *  *  *  tar -zcf file.tar.gz /etc/passwd /etc/shadow /etc/group /etc/gshadow
*/5  *  *  *  1  df -i /dev/sd*
30  8  *  *  *  ntpdate pool.ntp.org
[root@localhost ~]# cat /var/spool/cron/root
0  0  *  *  *  tar -zcf file.tar.gz /etc/passwd /etc/shadow /etc/group /etc/gshadow
*/5  *  *  *  1  df -i /dev/sd*
30  8  *  *  *  ntpdate pool.ntp.org
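These entries only take effect while the crond service is running; an optional check, in the same style as the atd check in task 11:
systemctl status crond    # verify that the cron daemon is active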