Lab environment: CentOS 7

ansible: an automation tool written in Python. It performs batch system configuration, batch software deployment, batch command execution, and so on. All of this batch work is carried out through Ansible modules.

First install ansible on the control/bastion host (Ansible is agentless, so the managed hosts only need SSH access and Python):

[root@~ localhost]#yum -y install ansible
#Configure the inventory (/etc/ansible/hosts by default) on the control/bastion host:
[root@ansible localhost]#vim hosts
## db-[99:101]-node.example.com
[web]      #group name
172.16.252.143     #the bastion host itself (this machine)
172.16.254.47
172.16.253.177
172.16.252.182
#Set up authentication:
[root@ansible localhost]#ssh-keygen -t rsa 
#generate an RSA key pair with no passphrase
[root@ansible localhost]#ls /root/.ssh
id_rsa  id_rsa.pub  known_hosts
#id_rsa is the private key and id_rsa.pub the public key; copy the public key to every managed host, including this machine itself

[root@.ssh localhost]#ssh-copy-id root@172.16.253.177

[root@.ssh localhost]#ls
authorized_keys known_hosts

#On the remote host, authorized_keys now contains the public key that was just copied over
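#Repeating ssh-copy-id by hand for every host gets tedious; a simple loop over the IPs in the [web] group (a sketch, not part of the recorded session) does the same job:
[root@.ssh localhost]#for ip in 172.16.252.143 172.16.254.47 172.16.253.177 172.16.252.182; do ssh-copy-id root@$ip; done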

 

[root@ansible localhost]#vim ansible.cfg 
host_key_checking = False
#Enable this option to skip SSH host key checking and avoid prompts such as:
#The authenticity of host '172.16.253.177 (172.16.253.177)' can't be established.

#Built-in help:
[root@ansible localhost]#ansible -h
#'web' is the host pattern and can be replaced with a single IP; -m selects the module
[root@.ssh localhost]#ansible web -m ping
172.16.254.47 | SUCCESS => { "changed": false, "ping": "pong" }
172.16.252.182 | SUCCESS => { "changed": false, "ping": "pong" }
172.16.253.177 | SUCCESS => { "changed": false, "ping": "pong" }
172.16.252.143 | SUCCESS => { "changed": false, "ping": "pong" }
#List the hosts in the group

[root@.ssh localhost]#ansible web --list-hosts
hosts (4):
172.16.252.143
172.16.254.47
172.16.253.177
172.16.252.182

 

 

#Per-module documentation; for example, the command module:
[root@.ssh localhost]#ansible-doc command

#setup module: gather hardware and software facts about a system


#Show only the CPU count of the control host itself:
[root@ansible master-80]#ansible 172.16.252.143 -m setup |grep vcpus
        "ansible_processor_vcpus": 1, 
#ansible_processor_vcpus is one of the facts (variables) gathered by setup
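#The setup module also accepts a filter argument, which avoids the grep (a sketch):
[root@ansible master-80]#ansible 172.16.252.143 -m setup -a 'filter=ansible_processor_vcpus'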

 

 

#command module
#It does not handle shell operators such as "$", "<", ">", "|", ";" and "&"; commands that need them require the shell module
#Confirm that the user tom does not exist yet:
[root@.ssh localhost]#cat /etc/passwd |grep tom
Syntax:  ansible <host-pattern> [-m module_name] [-a args] [options]
#Add the user tom on all hosts
[root@.ssh localhost]#ansible web -m command -a  'useradd tom'
172.16.253.177 | SUCCESS | rc=0 >>
172.16.254.47 | SUCCESS | rc=0 >>
172.16.252.143 | SUCCESS | rc=0 >>
172.16.252.182 | SUCCESS | rc=0 >>
[root@.ssh localhost]#cat /etc/passwd |grep tom
tom:x:1001:1001::/home/tom:/bin/bash

#shell module:
[root@.ssh localhost]#ansible web -m shell -a 'echo "123456" |passwd --stdin tom'
172.16.253.177 | SUCCESS | rc=0 >>
Changing password for user tom.
passwd: all authentication tokens updated successfully.

172.16.254.47 | SUCCESS | rc=0 >>
Changing password for user tom.
passwd: all authentication tokens updated successfully.

172.16.252.182 | SUCCESS | rc=0 >>
Changing password for user tom.
passwd: all authentication tokens updated successfully.

172.16.252.143 | SUCCESS | rc=0 >>
Changing password for user tom.
passwd: all authentication tokens updated successfully.

#To use sudo, every managed host must first be granted sudo rights, configured as follows:
[root@.ssh localhost]#visudo
## Same thing without a password
#Uncomment this line
%wheel  ALL=(ALL)       NOPASSWD: ALL
#Add tom to the supplementary group wheel (do this on every managed host)
[root@.ssh localhost]#usermod -G wheel tom
[root@.ssh localhost]#ansible web -m ping -u tom -b --become-user root
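#The same defaults can be set once in ansible.cfg instead of passing -u/-b/--become-user on every run; a sketch (values illustrative):
[defaults]
remote_user = tom

[privilege_escalation]
become = True
become_method = sudo
become_user = root
become_ask_pass = False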

#copy module:

[root@.ssh localhost]#ansible web -m copy -a 'src=/etc/fstab dest=/tmp/ backup=yes mode=0644 owner=tom group=tom'
#src: path of the source file on the control host
#owner: owner of the file; group: group of the file (both must already exist on the target)
#dest: destination path; mode: file permissions; backup: whether to back up an existing file before overwriting
172.16.254.47 | SUCCESS => {
    "changed": true, 
    "checksum": "e1c1246c3358f74ffb2090c9b4c9172974306084", 
    "dest": "/tmp/fstab", 
    "gid": 1001, 
    "group": "tom", 
    "md5sum": "f3e046a46def8356721f867aedfd97ad", 
    "mode": "0644", 
    "owner": "tom", 
    "secontext": "unconfined_u:object_r:admin_home_t:s0", 
    "size": 595, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497749953.26-144906199640828/source", 
    "state": "file", 
    "uid": 1001
}
172.16.253.177 | SUCCESS => {
    "changed": true, 
    "checksum": "e1c1246c3358f74ffb2090c9b4c9172974306084", 
    "dest": "/tmp/fstab", 
    "gid": 1001, 
    "group": "tom", 
    "md5sum": "f3e046a46def8356721f867aedfd97ad", 
    "mode": "0644", 
    "owner": "tom", 
    "secontext": "unconfined_u:object_r:admin_home_t:s0", 
    "size": 595, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497749953.1-86517078793254/source", 
    "state": "file", 
    "uid": 1001
}
172.16.252.182 | SUCCESS => {
    "changed": true, 
    "checksum": "e1c1246c3358f74ffb2090c9b4c9172974306084", 
    "dest": "/tmp/fstab", 
    "gid": 1001, 
    "group": "tom", 
    "md5sum": "f3e046a46def8356721f867aedfd97ad", 
    "mode": "0644", 
    "owner": "tom", 
    "secontext": "unconfined_u:object_r:admin_home_t:s0", 
    "size": 595, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497749953.52-203914743845832/source", 
    "state": "file", 
    "uid": 1001
}
172.16.252.143 | SUCCESS => {
    "changed": true, 
    "checksum": "e1c1246c3358f74ffb2090c9b4c9172974306084", 
    "dest": "/tmp/fstab", 
    "gid": 1001, 
    "group": "tom", 
    "md5sum": "f3e046a46def8356721f867aedfd97ad", 
    "mode": "0644", 
    "owner": "tom", 
    "size": 595, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497749952.25-164901742964830/source", 
    "state": "file", 
    "uid": 1001
}
[root@ansible localhost]#ll /tmp/
-rw-r--r--  1 tom  tom   595 Jun 18 09:39 fstab
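#Besides src, copy can also write inline content directly to the target hosts (a sketch; the file name is illustrative):
[root@.ssh localhost]#ansible web -m copy -a 'content="managed by ansible\n" dest=/tmp/notice.txt mode=0644'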

#fetch module: pull files from the managed hosts back to the control host

[root@.ssh localhost]#ansible web -m fetch -a 'src=/etc/fstab dest=/tmp/'
172.16.253.177 | SUCCESS => {
    "changed": true, 
    "checksum": "329f9159f37f8b2ab90d60e9da9025a7f1bb4a7c", 
    "dest": "/tmp/172.16.253.177/etc/fstab", 
    "md5sum": "6fc34eb32e5312f4b7631570eeb25fae", 
    "remote_checksum": "329f9159f37f8b2ab90d60e9da9025a7f1bb4a7c", 
    "remote_md5sum": null
}
172.16.252.182 | SUCCESS => {
    "changed": true, 
    "checksum": "055d967377645aee6c1956fe7109c293aa961251", 
    "dest": "/tmp/172.16.252.182/etc/fstab", 
    "md5sum": "5b681acdbe19e886c1532b97d79d7acb", 
    "remote_checksum": "055d967377645aee6c1956fe7109c293aa961251", 
    "remote_md5sum": null
}
172.16.254.47 | SUCCESS => {
    "changed": true, 
    "checksum": "167915186001b6ae17b5878f7ec50162635835d1", 
    "dest": "/tmp/172.16.254.47/etc/fstab", 
    "md5sum": "98caa9822182c0662a2153e066bb9542", 
    "remote_checksum": "167915186001b6ae17b5878f7ec50162635835d1", 
    "remote_md5sum": null
}
172.16.252.143 | SUCCESS => {
    "changed": true, 
    "checksum": "e1c1246c3358f74ffb2090c9b4c9172974306084", 
    "dest": "/tmp/172.16.252.143/etc/fstab", 
    "md5sum": "f3e046a46def8356721f867aedfd97ad", 
    "remote_checksum": "e1c1246c3358f74ffb2090c9b4c9172974306084", 
    "remote_md5sum": null
}
[root@.ssh localhost]#ls /tmp/
172.16.252.143
172.16.252.182
172.16.253.177
172.16.254.47
#Fetching multiple files: first create a file named test under /tmp on every host
#Archive the files first, then fetch the archive (-cf produces a plain tar despite the .gz name; use -czf for actual gzip compression)
[root@~ localhost]#ansible web -m shell -a 'tar -cf fstab.gz /tmp/fstab /tmp/test'
172.16.252.143 | SUCCESS | rc=0 >>
tar: Removing leading `/' from member names

172.16.254.47 | SUCCESS | rc=0 >>
tar: Removing leading `/' from member names

172.16.253.177 | SUCCESS | rc=0 >>
tar: Removing leading `/' from member names

172.16.252.182 | SUCCESS | rc=0 >>
tar: Removing leading `/' from member names

[root@~ localhost]#ls
anaconda-ks.cfg  fstab.gz  hellodb.sql
[root@~ localhost]#ansible web -m fetch -a 'src=/root/fstab.gz dest=/tmp/'
172.16.252.143 | SUCCESS => {
    "changed": true, 
    "checksum": "e6fb8cdf9ef285c5955dca6f698c57ab58708c6b", 
    "dest": "/tmp/172.16.252.143/root/fstab.gz", 
    "md5sum": "2891f8e9b46aaedde7fe40deeb845085", 
    "remote_checksum": "e6fb8cdf9ef285c5955dca6f698c57ab58708c6b", 
    "remote_md5sum": null
}
172.16.254.47 | SUCCESS => {
    "changed": true, 
    "checksum": "166689817e5fbc45a9cad526c5fb36f4ee44550b", 
    "dest": "/tmp/172.16.254.47/root/fstab.gz", 
    "md5sum": "8effda0c3008f5ffec7b819c1a690c53", 
    "remote_checksum": "166689817e5fbc45a9cad526c5fb36f4ee44550b", 
    "remote_md5sum": null
}
172.16.252.182 | SUCCESS => {
    "changed": true, 
    "checksum": "a138abc36510beb3c8d12ced6004da666f3245c8", 
    "dest": "/tmp/172.16.252.182/root/fstab.gz", 
    "md5sum": "f7fc951a1631e50fe223f8edf08c5c89", 
    "remote_checksum": "a138abc36510beb3c8d12ced6004da666f3245c8", 
    "remote_md5sum": null
}
172.16.253.177 | SUCCESS => {
    "changed": true, 
    "checksum": "bb056a73f27d78daf06887b14184917e5c4cb9c7", 
    "dest": "/tmp/172.16.253.177/root/fstab.gz", 
    "md5sum": "6793b0ccba71b4c75a0d254f1898f5c3", 
    "remote_checksum": "bb056a73f27d78daf06887b14184917e5c4cb9c7", 
    "remote_md5sum": null
}
[root@~ localhost]#cd /tmp/172.16.252.143
[root@172.16.252.143 localhost]#ls
etc  root
[root@172.16.252.143 localhost]#cd root
[root@root localhost]#ls
fstab.gz

#file module:

#Create a symbolic link on all hosts
[root@root localhost]#ansible web -m file -a 'src=/tmp/fstab dest=/tmp/fstab.link state=link'
172.16.252.143 | SUCCESS => {
    "changed": true, 
    "dest": "/tmp/fstab.link", 
    "gid": 0, 
    "group": "root", 
    "mode": "0777", 
    "owner": "root", 
    "size": 10, 
    "src": "/tmp/fstab", 
    "state": "link", 
    "uid": 0
}
172.16.253.177 | SUCCESS => {
    "changed": true, 
    "dest": "/tmp/fstab.link", 
    "gid": 0, 
    "group": "root", 
    "mode": "0777", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 10, 
    "src": "/tmp/fstab", 
    "state": "link", 
    "uid": 0
}
172.16.254.47 | SUCCESS => {
    "changed": true, 
    "dest": "/tmp/fstab.link", 
    "gid": 0, 
    "group": "root", 
    "mode": "0777", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 10, 
    "src": "/tmp/fstab", 
    "state": "link", 
    "uid": 0
}
172.16.252.182 | SUCCESS => {
    "changed": true, 
    "dest": "/tmp/fstab.link", 
    "gid": 0, 
    "group": "root", 
    "mode": "0777", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 10, 
    "src": "/tmp/fstab", 
    "state": "link", 
    "uid": 0
}
[root@~ localhost]#ll /tmp
-rw-r--r--. 1 tom  tom  595 Jul  6 21:52 fstab
lrwxrwxrwx. 1 root root  10 Jul  6 22:20 fstab.link -> /tmp/fstab
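#The same module removes a file or link with state=absent (a sketch):
[root@root localhost]#ansible web -m file -a 'path=/tmp/fstab.link state=absent'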

#Create a directory on all hosts: state=directory
[root@root localhost]#ansible web -m file -a 'path=/tmp/tomtest/ state=directory mode=0755'
172.16.252.143 | SUCCESS => {
    "changed": true, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/tomtest/", 
    "size": 4096, 
    "state": "directory", 
    "uid": 0
}
172.16.254.47 | SUCCESS => {
    "changed": true, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/tomtest/", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 0
}
172.16.253.177 | SUCCESS => {
    "changed": true, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/tomtest/", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 0
}
172.16.252.182 | SUCCESS => {
    "changed": true, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/tomtest/", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 0
}
[root@~ localhost]#ll  /tmp
drwxr-xr-x. 2 root root   6 Jul  6 22:26 tomtest

#Create (touch) an empty file on all hosts: state=touch
[root@root localhost]#ansible web -m file -a 'path=/tmp/ansible mode=0644 state=touch'
172.16.252.143 | SUCCESS => {
    "changed": true, 
    "dest": "/tmp/ansible", 
    "gid": 0, 
    "group": "root", 
    "mode": "0644", 
    "owner": "root", 
    "size": 0, 
    "state": "file", 
    "uid": 0
}
172.16.254.47 | SUCCESS => {
    "changed": true, 
    "dest": "/tmp/ansible", 
    "gid": 0, 
    "group": "root", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 0, 
    "state": "file", 
    "uid": 0
}
172.16.252.182 | SUCCESS => {
    "changed": true, 
    "dest": "/tmp/ansible", 
    "gid": 0, 
    "group": "root", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 0, 
    "state": "file", 
    "uid": 0
}
172.16.253.177 | SUCCESS => {
    "changed": true, 
    "dest": "/tmp/ansible", 
    "gid": 0, 
    "group": "root", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 0, 
    "state": "file", 
    "uid": 0
}
[root@~ localhost]#ll /tmp
total 8
-rw-r--r--. 1 root root   0 Jul  6 22:30 ansible

#group module:

#state=present adds the group, state=absent removes it
- state
        Whether the group should be present or not on the remote hos
        (Choices: present, absent)[Default: present]

[root@root localhost]#ansible web -m group -a 'name=ansi gid=1200 state=present'
172.16.252.143 | SUCCESS => {
    "changed": true, 
    "gid": 1200, 
    "name": "ansi", 
    "state": "present", 
    "system": false
}
172.16.254.47 | SUCCESS => {
    "changed": true, 
    "gid": 1200, 
    "name": "ansi", 
    "state": "present", 
    "system": false
}
172.16.253.177 | SUCCESS => {
    "changed": true, 
    "gid": 1200, 
    "name": "ansi", 
    "state": "present", 
    "system": false
}
172.16.252.182 | SUCCESS => {
    "changed": true, 
    "gid": 1200, 
    "name": "ansi", 
    "state": "present", 
    "system": false
}
[root@~ localhost]#cat /etc/group|grep ansi
ansi:x:1200:
#Remove the group:
[root@root localhost]#ansible web -m group -a 'name=ansi  state=absent'
172.16.252.143 | SUCCESS => {
    "changed": true, 
    "name": "ansi", 
    "state": "absent"
}
172.16.253.177 | SUCCESS => {
    "changed": true, 
    "name": "ansi", 
    "state": "absent"
}
172.16.254.47 | SUCCESS => {
    "changed": true, 
    "name": "ansi", 
    "state": "absent"
}
172.16.252.182 | SUCCESS => {
    "changed": true, 
    "name": "ansi", 
    "state": "absent"
}
[root@~ localhost]#cat /etc/group|grep ansi

#hostname module:

#Define a per-host variable named hostname in the inventory, then change every hostname in one run
[root@ansible localhost]#cat hosts 
[web]
172.16.252.143  hostname=master
172.16.254.47   hostname=host1 
172.16.253.177  hostname=host2
172.16.252.182  hostname=host3
#note the spaces inside the braces around the hostname variable
[root@ansible localhost]#ansible web -m hostname -a 'name={{ hostname }}'
172.16.254.47 | SUCCESS => {
    "ansible_facts": {
        "ansible_domain": "", 
        "ansible_fqdn": "host1", 
        "ansible_hostname": "host1", 
        "ansible_nodename": "host1"
    }, 
    "changed": true, 
    "name": "host1"
}
172.16.253.177 | SUCCESS => {
    "ansible_facts": {
        "ansible_domain": "", 
        "ansible_fqdn": "host2", 
        "ansible_hostname": "host2", 
        "ansible_nodename": "host2"
    }, 
    "changed": true, 
    "name": "host2"
}
172.16.252.143 | SUCCESS => {
    "ansible_facts": {
        "ansible_domain": "", 
        "ansible_fqdn": "master", 
        "ansible_hostname": "master", 
        "ansible_nodename": "master"
    }, 
    "changed": true, 
    "name": "master"
}
172.16.252.182 | SUCCESS => {
    "ansible_facts": {
        "ansible_domain": "", 
        "ansible_fqdn": "host3", 
        "ansible_hostname": "host3", 
        "ansible_nodename": "host3"
    }, 
    "changed": true, 
    "name": "host3"
}

#Group-wide variables go in a [web:vars] section; they have lower precedence than host variables set directly under [web] (the debug sketch after the results below shows how to check which value wins)
[root@ansible localhost]#tail hosts 

## db-[99:101]-node.example.com
[web:vars]
http_port=808

[web]
172.16.252.143    hostname=master    http_port=80
172.16.254.47    hostname=host1    http_port=808
172.16.253.177    hostname=host2    http_port=8080
172.16.252.182    hostname=host3    http_port=8088
[root@ansible localhost]#ansible web -m hostname -a 'name={{ hostname }}-{{ http_port }}'
172.16.252.182 | SUCCESS => {
    "ansible_facts": {
        "ansible_domain": "", 
        "ansible_fqdn": "host3-8088", 
        "ansible_hostname": "host3-8088", 
        "ansible_nodename": "host3-8088"
    }, 
    "changed": true, 
    "name": "host3-8088"
}
172.16.253.177 | SUCCESS => {
    "ansible_facts": {
        "ansible_domain": "", 
        "ansible_fqdn": "host2-8080", 
        "ansible_hostname": "host2-8080", 
        "ansible_nodename": "host2-8080"
    }, 
    "changed": true, 
    "name": "host2-8080"
}
172.16.254.47 | SUCCESS => {
    "ansible_facts": {
        "ansible_domain": "", 
        "ansible_fqdn": "host1-808", 
        "ansible_hostname": "host1-808", 
        "ansible_nodename": "host1-808"
    }, 
    "changed": true, 
    "name": "host1-808"
}
172.16.252.143 | SUCCESS => {
    "ansible_facts": {
        "ansible_domain": "", 
        "ansible_fqdn": "master-80", 
        "ansible_hostname": "master-80", 
        "ansible_nodename": "master-80"
    }, 
    "changed": true, 
    "name": "master-80"
}
[root@~ localhost]#hostname
host3
[root@~ localhost]#hostname
host3-8088

[root@ansible localhost]#hostname
master-80
[root@tmp localhost]#hostname
host1-808
[root@tmp localhost]#hostname
host2-8080
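#To see which http_port value actually wins for each host without changing anything, the debug module can print the resolved variables (a sketch):
[root@ansible localhost]#ansible web -m debug -a 'msg="{{ hostname }} uses port {{ http_port }}"'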

#yum module

#Install Nginx on all hosts
[root@ansible localhost]#ansible web -m yum -a 'name=nginx state=latest'
[root@ansible localhost]#ansible web -m shell -a 'rpm -q nginx'
172.16.252.182 | SUCCESS | rc=0 >>
nginx-1.10.2-1.el7.x86_64

172.16.252.143 | SUCCESS | rc=0 >>
nginx-1.10.2-1.el7.x86_64

172.16.253.177 | SUCCESS | rc=0 >>
nginx-1.10.2-1.el7.x86_64

172.16.254.47 | SUCCESS | rc=0 >>
nginx-1.10.2-1.el7.x86_64
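#state=latest upgrades to the newest version, state=present only ensures the package is installed, and state=absent removes it; for example (a sketch):
[root@ansible localhost]#ansible web -m yum -a 'name=nginx state=absent'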

#service module:

#Start the service on all hosts:
[root@ansible localhost]#ansible web -m service -a 'name=nginx state=started'
#Check that it is running
[root@ansible localhost]#ansible web -m shell -a 'ps aux|grep nginx'
172.16.253.177 | SUCCESS | rc=0 >>
root      29223  0.0  0.2 122280  2072 ?        Ss   23:25   0:00 nginx: master process /usr/sbin/nginx
nginx     29224  0.0  0.3 122712  3104 ?        S    23:25   0:00 nginx: worker process
root      29266  0.0  0.1 113120  1192 pts/0    S+   23:26   0:00 /bin/sh -c ps aux|grep nginx
root      29268  0.0  0.0 112648   944 pts/0    S+   23:26   0:00 grep nginx

172.16.254.47 | SUCCESS | rc=0 >>
root      27782  0.0  0.2 122280  2072 ?        Ss   23:25   0:00 nginx: master process /usr/sbin/nginx
nginx     27783  0.0  0.3 122712  3104 ?        S    23:25   0:00 nginx: worker process
root      27826  0.0  0.1 113120  1196 pts/0    S+   23:26   0:00 /bin/sh -c ps aux|grep nginx
root      27828  0.0  0.0 112648   944 pts/0    S+   23:26   0:00 grep nginx

172.16.252.182 | SUCCESS | rc=0 >>
root      30744  0.0  0.2 122280  2068 ?        Ss   23:25   0:00 nginx: master process /usr/sbin/nginx
nginx     30745  0.0  0.3 122712  3100 ?        S    23:25   0:00 nginx: worker process
root      30787  0.0  0.1 113120  1196 pts/0    S+   23:26   0:00 /bin/sh -c ps aux|grep nginx
root      30789  0.0  0.0 112648   944 pts/0    S+   23:26   0:00 grep nginx

172.16.252.143 | SUCCESS | rc=0 >>
root      31512  0.0  0.2 122232  2068 ?        Ss   11:12   0:00 nginx: master process /usr/sbin/nginx
nginx     31513  0.0  0.3 122660  3092 ?        S    11:12   0:00 nginx: worker process
root      31526 54.0  2.5 341608 26008 pts/3    Rl+  11:13   0:02 /usr/bin/python2 /usr/bin/ansible web -m shell -a ps aux|grep nginx
root      31534 20.0  2.8 348404 28384 pts/3    S+   11:13   0:00 /usr/bin/python2 /usr/bin/ansible web -m shell -a ps aux|grep nginx
root      31610  0.0  0.1 113120  1200 pts/6    S+   11:13   0:00 /bin/sh -c ps aux|grep nginx
root      31612  0.0  0.0 112648   944 pts/6    R+   11:13   0:00 grep nginx

#Stop the service:
[root@ansible localhost]#ansible web -m service -a 'name=nginx state=stopped'
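#The service module can also enable the service at boot with enabled=yes (a sketch):
[root@ansible localhost]#ansible web -m service -a 'name=nginx state=started enabled=yes'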

#user module:

#Add a new user on all hosts
[root@ansible localhost]#ansible web -m user -a 'name=wang comment=wang uid=1100 group=wang'
[root@~ localhost]#id wang
uid=1100(wang) gid=1000(wang) groups=1000(wang)
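#To remove the user again, state=absent deletes the account and remove=yes also deletes its home directory (a sketch):
[root@ansible localhost]#ansible web -m user -a 'name=wang state=absent remove=yes'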

#script module:

#Run a local script on multiple hosts in one go:
[root@ansible localhost]#cat /tmp/echo.sh 
#!/bin/bash
touch /tmp/a.txt
[root@ansible localhost]#vim /tmp/echo.sh
[root@ansible localhost]#ansible web -m script -a '/tmp/echo.sh' 
172.16.252.143 | SUCCESS => {
    "changed": true, 
    "rc": 0, 
    "stderr": "Shared connection to 172.16.252.143 closed.\r\n", 
    "stdout": "", 
    "stdout_lines": []
}
172.16.253.177 | SUCCESS => {
    "changed": true, 
    "rc": 0, 
    "stderr": "Shared connection to 172.16.253.177 closed.\r\n", 
    "stdout": "", 
    "stdout_lines": []
}
172.16.252.182 | SUCCESS => {
    "changed": true, 
    "rc": 0, 
    "stderr": "Shared connection to 172.16.252.182 closed.\r\n", 
    "stdout": "", 
    "stdout_lines": []
}
172.16.254.47 | SUCCESS => {
    "changed": true, 
    "rc": 0, 
    "stderr": "Shared connection to 172.16.254.47 closed.\r\n", 
    "stdout": "", 
    "stdout_lines": []
}
[root@~ localhost]#ls /tmp
ansible
a.txt

#cron module:

#Scheduled tasks; this job runs at 1:00 every Friday (weekday 5)
[root@ansible localhost]#ansible web -m cron -a 'name=daycron minute="0" hour="1" weekday="5" job="ls /tmp >>/tmp/a.log"'
172.16.252.143 | SUCCESS => {
    "changed": true, 
    "envs": [], 
    "jobs": [
        "daycron"
    ]
}
172.16.254.47 | SUCCESS => {
    "changed": true, 
    "envs": [], 
    "jobs": [
        "daycron"
    ]
}
172.16.253.177 | SUCCESS => {
    "changed": true, 
    "envs": [], 
    "jobs": [
        "daycron"
    ]
}
172.16.252.182 | SUCCESS => {
    "changed": true, 
    "envs": [], 
    "jobs": [
        "daycron"
    ]
}

[root@ansible localhost]#crontab -l
#Ansible: daycron
0 1 * * 5 ls /tmp >>/tmp/a.log
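#The job is removed by name with state=absent (a sketch):
[root@ansible localhost]#ansible web -m cron -a 'name=daycron state=absent'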

 

#ansible-console: an interactive console for batch execution

[root@ansible localhost]#ansible-console
Welcome to the ansible console.
Type help or ? to list commands.
root@all (4)[f:5]$ pwd
172.16.253.177 | SUCCESS | rc=0 >>
/root

172.16.252.182 | SUCCESS | rc=0 >>
/root

172.16.254.47 | SUCCESS | rc=0 >>
/root

172.16.252.143 | SUCCESS | rc=0 >>
/root

root@all (4)[f:5]$ list
172.16.252.143
172.16.254.47
172.16.253.177
172.16.252.182

#ansible-galaxy install: download and install community roles

Official site: https://galaxy.ansible.com

 

[root@ansible localhost]#ansible-galaxy install DavidWittman.redis
- downloading role 'redis', owned by DavidWittman
- downloading role from https://github.com/DavidWittman/ansible-redis/archive/1.2.4.tar.gz
- extracting DavidWittman.redis to /etc/ansible/roles/DavidWittman.redis
- DavidWittman.redis was installed successfully
#After the download the role sits under roles/:

  [root@ansible localhost]#ls roles/
  DavidWittman.redis

#ansible-galaxy usage help

[root@ansible localhost]#ansible-galaxy -h
Usage: ansible-galaxy [delete|import|info|init|install|list|login|remove|search|setup] [--help] [options] ...

Options:
-h, --help show this help message and exit
-v, --verbose verbose mode (-vvv for more, -vvvv to enable connection
debugging)
--version show program's version number and exit


[root@ansible localhost]#ansible-galaxy list
- DavidWittman.redis, 1.2.4

#ansible-playbook:

#Install httpd on all hosts and push its configuration file
[root@ansible localhost]#vim web.yml 
---          #YAML document start marker

- hosts: web        #the leading '-' starts a list item, here a play
  remote_user: root
  tasks:
  - name: install httpd
    yum: name=httpd state=latest        #yum is the module being used

[root@ansible localhost]#ansible-playbook web.yml 

PLAY [web] *********************************************************************

TASK [setup] *******************************************************************
ok: [172.16.252.143]
ok: [172.16.252.182]
ok: [172.16.253.177]
ok: [172.16.254.47]

TASK [install httpd] ***********************************************************
changed: [172.16.252.182]
changed: [172.16.252.143]
changed: [172.16.253.177]
changed: [172.16.254.47]

PLAY RECAP *********************************************************************
172.16.252.143             : ok=2    changed=1    unreachable=0    failed=0   
172.16.252.182             : ok=2    changed=1    unreachable=0    failed=0   
172.16.253.177             : ok=2    changed=1    unreachable=0    failed=0   
172.16.254.47              : ok=2    changed=1    unreachable=0    failed=0   
[root@~ localhost]#rpm -q httpd
httpd-2.4.6-45.el7.centos.4.x86_64

#Push the configuration file (add another task to web.yml):
[root@ansible localhost]#vim web.yml 
  - name: configure httpd
    copy: src=/etc/ansible/files/httpd.conf dest=/etc/httpd/conf/ backup=yes    #uses the copy module
#Start the service (add one more task):
[root@ansible localhost]#vim web.yml 
  - name: start httpd
    service: name=httpd state=started

#Add a handler to the playbook:
#-C runs the play in check (dry-run) mode: it reports what would change without applying anything; --syntax-check (shown after the run below) only validates the YAML
[root@ansible localhost]#cat web.yml 
---

- hosts: web
  remote_user: root
  tasks:
  - name: install httpd
    yum: name=httpd state=latest
  - name: configure httpd
    copy: src=/etc/ansible/files/httpd.conf  dest=/etc/httpd/conf/ backup=yes
    notify: restart httpd
#notify must name a handler; the handler runs only when the 'configure httpd' task actually reports a change
  handlers:
  - name: restart httpd
    service: name=httpd state=restarted

[root@ansible localhost]#ansible-playbook -C web.yml 

PLAY [web] *********************************************************************

TASK [setup] *******************************************************************
ok: [172.16.252.143]
ok: [172.16.252.182]
ok: [172.16.253.177]
ok: [172.16.254.47]

TASK [install httpd] ***********************************************************
ok: [172.16.252.182]
ok: [172.16.253.177]
ok: [172.16.254.47]
ok: [172.16.252.143]

TASK [configure httpd] *********************************************************
ok: [172.16.253.177]
ok: [172.16.254.47]
ok: [172.16.252.182]
ok: [172.16.252.143]

PLAY RECAP *********************************************************************
172.16.252.143             : ok=3    changed=0    unreachable=0    failed=0   
172.16.252.182             : ok=3    changed=0    unreachable=0    failed=0   
172.16.253.177             : ok=3    changed=0    unreachable=0    failed=0   
172.16.254.47              : ok=3    changed=0    unreachable=0    failed=0   
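#The playbook can also be validated without contacting any host: --syntax-check only parses the YAML, and --list-tasks lists the tasks that would run (a sketch):
[root@ansible localhost]#ansible-playbook --syntax-check web.yml
[root@ansible localhost]#ansible-playbook --list-tasks web.yml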

#templates:

#templates: create the directory first; template files are Jinja2 templates (conventionally named .j2, .jz here)
[root@ansible master-80]#mkdir templates
[root@ansible master-80]#cp -a /etc/nginx/nginx.conf templates/nginx.conf.jz
[root@templates master-80]#cat nginx.conf.jz 
#Generate the configuration on every host: when http_port is not defined the listen line is omitted (nginx then falls back to port 80), and server_name is filled in by looping over the webs list
{% for node in webs %}
server {
        {% if http_port is defined  %}
                listen {{ http_port  }};
        {% endif %}
        server_name {{ node }};
        root /usr/share/nginx/html;

        location / {
        }
}
{% endfor %}

#Then write the playbook:
[root@ansible master-80]#vim web3.yml 
---

- hosts: web
  remote_user: root
  vars:
   - webs:
     - node1
     - node2
     - node3
     - node4
  tasks:
  - name: nginx conf sync
    template: src=/etc/ansible/templates/nginx.conf.jz dest=/etc/nginx/nginx.conf

[root@ansible master-80]#ansible-playbook web3.yml 
#Verify on a managed host:
[root@~ host3-8088]#cat /etc/nginx/nginx.conf

server {
                        listen 8088;
                server_name node1;
        root /usr/share/nginx/html;

        location / {
        }
}
server {
                        listen 8088;
                server_name node2;
        root /usr/share/nginx/html;

        location / {
        }
}
server {
                        listen 8088;
                server_name node3;
        root /usr/share/nginx/html;

        location / {
        }
}
server {
                        listen 8088;
                server_name node4;
        root /usr/share/nginx/html;

        location / {
        }
}
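#After pushing a new configuration, the service can be reloaded in the same ad-hoc style (a sketch):
[root@ansible master-80]#ansible web -m service -a 'name=nginx state=restarted'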

 

#Install several applications on all hosts:

#Usage of item with with_items:
[root@ansible master-80]#vim multi.yml 

---

- hosts: web
  remote_user: root
  tasks:
  - name: install app
    yum: name={{ item  }} state=latest
    with_items:
       - php
       - php-fpm
       - mariadb-server
[root@ansible master-80]#ansible-playbook multi.yml 
#Add users and groups on all hosts:
[root@ansible master-80]#vim useradd.yml

---

- hosts: web
  remote_user: root
  tasks:
  - name: groupadd multi
    group: name={{ item }}
    with_items:
    - group5
    - group6
    - group7
  - name: useradd multi
    user: name={{ item.username }} group={{ item.groupname }}
    with_items:
     - {  username: "user5", groupname: "group5" }
     - {  username: "user6", groupname: "group6" }
     - {  username: "user7", groupname: "group7" }
[root@ansible master-80]#ansible-playbook useradd.yml

#roles:

#Create the directory layout a role needs:
[root@roles master-80]#pwd
/etc/ansible/roles
[root@roles master-80]#mkdir nginx
[root@roles master-80]#cd nginx/
[root@nginx master-80]#mkdir {tasks,templates,files,vars,handlers,meta,default}
[root@nginx master-80]#ls
default  files  handlers  meta  tasks  templates  vars
#Install Nginx on all hosts with a role (note: the conventional directory name is defaults, not default); a sketch of how the role could be completed follows:
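#A minimal sketch (task names and the playbook filename are illustrative; the nginx.conf.jz template from the earlier example is assumed to be copied into the role's templates/ directory):
#/etc/ansible/roles/nginx/tasks/main.yml
---
- name: install nginx
  yum: name=nginx state=latest
- name: configure nginx
  template: src=nginx.conf.jz dest=/etc/nginx/nginx.conf
  notify: restart nginx
- name: start nginx
  service: name=nginx state=started enabled=yes

#/etc/ansible/roles/nginx/handlers/main.yml
---
- name: restart nginx
  service: name=nginx state=restarted

#/etc/ansible/nginx-role.yml applies the role to the web group and passes the webs variable the template expects:
---
- hosts: web
  remote_user: root
  vars:
    webs:
    - node1
    - node2
    - node3
    - node4
  roles:
  - nginx

[root@ansible master-80]#ansible-playbook nginx-role.yml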