Description
1. 变量定义 (group_vars/cn-sh-region.yaml)
---
# group_vars/cn-sh-region.yaml
# Region-wide defaults and the full node inventory for the cn-sh libvirt
# VM fleet; consumed by create-vm-sh.yaml via `loop: "{{ nodes }}"`.
base_image: "/var/lib/libvirt/images/openeuler-24-03.img"
gateway: "192.168.10.99"
dns_servers:
  - "8.8.8.8"
  - "8.8.4.4"

nodes:
  # openebs-01 cluster
  - name: openebs-01-b
    ip: 192.168.10.103
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-01
    cluster: openebs-01
    vcpus: 6
    memory: 20480  # MiB (virt-install --memory unit)
    disks:
      - size: 100G
      - size: 100G
      - size: 100G
      - size: 100G
  - name: openebs-01-c
    ip: 192.168.10.104
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-01
    cluster: openebs-01
    vcpus: 6
    memory: 20480
    disks:
      - size: 100G
      - size: 100G
      - size: 100G
      - size: 100G
  - name: openebs-01-d
    ip: 192.168.10.105
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-01
    cluster: openebs-01
    vcpus: 6
    memory: 20480
    disks:
      - size: 100G
      - size: 100G
      - size: 100G
      - size: 100G
  - name: openebs-01-e
    ip: 192.168.10.106
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-01
    cluster: openebs-01
    vcpus: 6
    memory: 20480
    disks:
      - size: 100G
      - size: 100G
      - size: 100G
      - size: 100G
  # minio-quay-01 cluster
  - name: minio-01-b
    ip: 192.168.10.107
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-02
    cluster: minio-quay-01
    vcpus: 6
    memory: 20480
    disks:
      - size: 100G
      - size: 100G
      - size: 100G
      - size: 100G
  - name: minio-01-c
    ip: 192.168.10.108
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-02
    cluster: minio-quay-01
    vcpus: 6
    memory: 20480
    disks:
      - size: 100G
      - size: 100G
      - size: 100G
      - size: 100G
  - name: minio-01-d
    ip: 192.168.10.109
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-02
    cluster: minio-quay-01
    vcpus: 6
    memory: 20480
    disks:
      - size: 100G
      - size: 100G
      - size: 100G
      - size: 100G
  - name: minio-01-e
    ip: 192.168.10.110
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-02
    cluster: minio-quay-01
    vcpus: 6
    memory: 20480
    disks:
      - size: 100G
      - size: 100G
      - size: 100G
      - size: 100G
  # juicefs-01 cluster
  - name: juicefs-01-b
    ip: 192.168.10.111
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-03
    cluster: juicefs-01
    vcpus: 6
    memory: 20480
    disks:
      - size: 100G
      - size: 100G
  - name: juicefs-01-c
    ip: 192.168.10.112
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-03
    cluster: juicefs-01
    vcpus: 6
    memory: 20480
    disks:
      - size: 100G
      - size: 100G
  - name: juicefs-01-d
    ip: 192.168.10.113
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-03
    cluster: juicefs-01
    vcpus: 6
    memory: 20480
    disks:
      - size: 100G
      - size: 100G
  # kamaji-manager-01 cluster
  - name: kamaji-manager-01-a
    ip: 192.168.10.114
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-04
    cluster: kamaji-manager-01
    vcpus: 4
    memory: 10240
    disks:
      - size: 100G
  - name: kamaji-manager-01-b
    ip: 192.168.10.115
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-04
    cluster: kamaji-manager-01
    vcpus: 10
    memory: 40960
    disks:
      - size: 100G
  - name: kamaji-manager-01-c
    ip: 192.168.10.116
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-04
    cluster: kamaji-manager-01
    vcpus: 10
    memory: 40960
    disks:
      - size: 100G
  - name: kamaji-manager-01-d
    ip: 192.168.10.117
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-04
    cluster: kamaji-manager-01
    vcpus: 10
    memory: 40960
    disks:
      - size: 100G
  # sveltos-member-01 cluster
  - name: sveltos-member-01-b
    ip: 192.168.10.118
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-05
    cluster: sveltos-member-01
    vcpus: 8
    memory: 61440
    disks:
      - size: 100G
  - name: sveltos-member-01-c
    ip: 192.168.10.119
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-05
    cluster: sveltos-member-01
    vcpus: 8
    memory: 61440
    disks:
      - size: 100G
  - name: sveltos-member-01-d
    ip: 192.168.10.120
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-05
    cluster: sveltos-member-01
    vcpus: 8
    memory: 61440
    disks:
      - size: 100G
  # sveltos-member-02 cluster
  - name: sveltos-member-02-b
    ip: 192.168.10.121
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-06
    cluster: sveltos-member-02
    vcpus: 8
    memory: 61440
    disks:
      - size: 100G
  - name: sveltos-member-02-c
    ip: 192.168.10.122
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-06
    cluster: sveltos-member-02
    vcpus: 8
    memory: 61440
    disks:
      - size: 100G
  - name: sveltos-member-02-d
    ip: 192.168.10.123
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-06
    cluster: sveltos-member-02
    vcpus: 8
    memory: 61440
    disks:
      - size: 100G
  # sveltos-member-03 cluster
  - name: sveltos-member-03-b
    ip: 192.168.10.124
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-07
    cluster: sveltos-member-03
    vcpus: 10
    memory: 102400
    disks:
      - size: 100G
  - name: sveltos-member-03-c
    ip: 192.168.10.125
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-07
    cluster: sveltos-member-03
    vcpus: 10
    memory: 102400
    disks:
      - size: 100G
  - name: sveltos-member-03-d
    ip: 192.168.10.126
    region: cn-sh
    zone: cn-sh-1b
    subzone: rack-07
    cluster: sveltos-member-03
    vcpus: 10
    memory: 102400
    disks:
      - size: 100G
  # sveltos-member-04 cluster
  - name: sveltos-member-04-b
    ip: 192.168.10.127
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-08
    cluster: sveltos-member-04
    vcpus: 10
    memory: 102400
    disks:
      - size: 100G
  - name: sveltos-member-04-c
    ip: 192.168.10.128
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-08
    cluster: sveltos-member-04
    vcpus: 10
    memory: 102400
    disks:
      - size: 100G
  - name: sveltos-member-04-d
    ip: 192.168.10.129
    region: cn-sh
    zone: cn-sh-1a
    subzone: rack-08
    cluster: sveltos-member-04
    vcpus: 10
    memory: 102400
    disks:
      - size: 100G

2. 模板文件
templates/metadata.j2
# cloud-init instance metadata, rendered once per node by Ansible.
instance-id: {{ item.name }}
local-hostname: {{ item.name }}

templates/user-data.j2
#cloud-config
# Rendered per node by Ansible. Creates login users, sets the hostname,
# then configures static networking and SSH access via runcmd on first boot.
users:
  - name: mark
    # SECURITY: plaintext password committed to the repo — replace with a
    # hashed password (or key-only auth) before real use.
    plain_text_passwd: M_ark123abc
    sudo: ALL=(ALL) NOPASSWD:ALL
    # NOTE(review): openEuler typically uses the `wheel` group for sudo —
    # confirm a `sudo` group exists on this image.
    groups: sudo
    shell: /bin/bash
    lock_passwd: false
    # Canonical key is ssh_authorized_keys; hyphenated form is deprecated.
    ssh_authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFxy8LShsTNFa0EjFeSDRpP/+uEPUC+gXbPCz5FWe3WD root@ubuntu
  - name: root
    lock_passwd: false
    plain_text_passwd: M_ark123abc
    ssh_authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFxy8LShsTNFa0EjFeSDRpP/+uEPUC+gXbPCz5FWe3WD root@ubuntu
hostname: {{ item.name }}
ssh_pwauth: true
# NOTE(review): disabling cloud-init networking normally has to live in
# /etc/cloud/cloud.cfg.d/ rather than user-data — verify this key actually
# takes effect on the openEuler image.
network:
  config: disabled
runcmd:
  - |
    # Write a static ifcfg profile for enp1s0
    cat > /etc/sysconfig/network-scripts/ifcfg-enp1s0 << EOF
    TYPE=Ethernet
    PROXY_METHOD=none
    BROWSER_ONLY=no
    BOOTPROTO=none
    DEFROUTE=yes
    IPV4_FAILURE_FATAL=no
    IPV6INIT=yes
    IPV6_AUTOCONF=yes
    IPV6_DEFROUTE=yes
    IPV6_FAILURE_FATAL=no
    IPV6_ADDR_GEN_MODE=stable-privacy
    NAME=enp1s0
    DEVICE=enp1s0
    ONBOOT=yes
    IPADDR={{ item.ip }}
    PREFIX=24
    GATEWAY={{ gateway }}
    DNS1={{ dns_servers[0] }}
    DNS2={{ dns_servers[1] }}
    EOF
  - |
    # Apply the new network profile
    systemctl restart NetworkManager
  - |
    # Install openssh-server (yum on openEuler, apt fallback)
    yum update -y || apt-get update
    yum install -y openssh-server || apt-get install -y openssh-server
  - |
    # Allow password and root SSH login, then restart sshd
    sed -i 's/^#\?PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
    sed -i 's/^#\?PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
    systemctl restart sshd
    systemctl enable sshd

templates/network-config.j2
# ifcfg-style static network profile, rendered once per node.
# NOTE(review): cloud-init's network-config datasource file expects
# network-config v1/v2 YAML, not ifcfg key=value format — and the playbook
# below never passes this file to cloud-localds (networking is instead
# written by runcmd in user-data.j2). Verify this template is still needed.
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=enp1s0
DEVICE=enp1s0
ONBOOT=yes
IPADDR={{ item.ip }}
PREFIX=24
GATEWAY={{ gateway }}
DNS1={{ dns_servers[0] }}
DNS2={{ dns_servers[1] }}

3. Ansible Playbook (create-vm-sh.yaml)
---
# create-vm-sh.yaml — batch-create the libvirt VMs declared in
# group_vars/cn-sh-region.yaml: render cloud-init configs, build a seed ISO
# and a qcow2 overlay disk per node, start the guests, then wait for SSH.
- name: 批量创建云虚拟机
  hosts: localhost
  gather_facts: false
  vars_files:
    - group_vars/cn-sh-region.yaml
  tasks:
    # BUG FIX: the original created /var/lib/libvirt/<zone>/, which no later
    # task uses — ISOs are written to /var/lib/libvirt/images/<zone>/ and
    # qcow2 disks to /var/lib/libvirt/disk/<zone>/, so create both per zone.
    - name: 创建存储目录
      ansible.builtin.file:
        path: "{{ item.1 }}/{{ item.0 }}"
        state: directory
        mode: '0755'
      loop: "{{ nodes | map(attribute='zone') | unique | list
                | product(['/var/lib/libvirt/images', '/var/lib/libvirt/disk'])
                | list }}"
      loop_control:
        label: "{{ item.1 }}/{{ item.0 }}"

    - name: 创建 cloud-init 配置目录
      ansible.builtin.file:
        path: "cloud-init/configs"
        state: directory
        mode: '0755'

    - name: 生成 metadata 配置
      ansible.builtin.template:
        src: "templates/metadata.j2"
        dest: "cloud-init/configs/{{ item.name }}-metadata.yaml"
      loop: "{{ nodes }}"
      loop_control:
        label: "{{ item.name }}"

    - name: 生成 user-data 配置
      ansible.builtin.template:
        src: "templates/user-data.j2"
        dest: "cloud-init/configs/{{ item.name }}-user-data.yaml"
      loop: "{{ nodes }}"
      loop_control:
        label: "{{ item.name }}"

    # NOTE(review): this rendered file is never passed to cloud-localds
    # below — confirm whether it is still required.
    - name: 生成 network 配置文件
      ansible.builtin.template:
        src: "templates/network-config.j2"
        dest: "cloud-init/configs/{{ item.name }}-network-config"
      loop: "{{ nodes }}"
      loop_control:
        label: "{{ item.name }}"

    # cloud-localds <output.iso> <user-data> [metadata]
    - name: 生成 cloud-init ISO
      ansible.builtin.command:
        cmd: >
          cloud-localds
          /var/lib/libvirt/images/{{ item.zone }}/{{ item.name }}-init.iso
          cloud-init/configs/{{ item.name }}-user-data.yaml
          cloud-init/configs/{{ item.name }}-metadata.yaml
        # skip on re-runs once the ISO exists
        creates: "/var/lib/libvirt/images/{{ item.zone }}/{{ item.name }}-init.iso"
      loop: "{{ nodes }}"
      loop_control:
        label: "{{ item.name }}"

    # Copy-on-write overlay on top of the shared base image. Only the first
    # declared disk is created; extra entries in item.disks are not handled.
    - name: 创建虚拟机磁盘
      ansible.builtin.command:
        cmd: >
          qemu-img create -f qcow2 -b {{ base_image }}
          -F qcow2 /var/lib/libvirt/disk/{{ item.zone }}/{{ item.name }}.qcow2
          {{ item.disks[0].size }}
        creates: "/var/lib/libvirt/disk/{{ item.zone }}/{{ item.name }}.qcow2"
      loop: "{{ nodes }}"
      loop_control:
        label: "{{ item.name }}"

    # BUG FIX: without --noautoconsole, virt-install with --graphics=none
    # attaches to the guest console and the task never returns.
    # NOTE(review): --os-type was removed in virt-install >= 4.0 — drop it
    # there; also confirm os-variant rocky9 is the closest osinfo match for
    # openEuler 24.03.
    - name: 创建虚拟机实例
      ansible.builtin.command:
        cmd: >
          virt-install --name={{ item.name }}
          --memory={{ item.memory }} --vcpus={{ item.vcpus }}
          --disk path=/var/lib/libvirt/disk/{{ item.zone }}/{{ item.name }}.qcow2,format=qcow2
          --disk path=/var/lib/libvirt/images/{{ item.zone }}/{{ item.name }}-init.iso,device=cdrom
          --network bridge=br0,model=virtio
          --os-type=linux --os-variant=rocky9
          --graphics=none --console=pty,target_type=serial
          --import --boot=hd --noautoconsole
      loop: "{{ nodes }}"
      loop_control:
        label: "{{ item.name }}"
      throttle: 20  # per-task concurrency cap

    - name: 等待虚拟机启动
      ansible.builtin.pause:
        minutes: 2

    - name: 验证虚拟机网络
      ansible.builtin.wait_for:
        host: "{{ item.ip }}"
        port: 22
        timeout: 300
        state: started
      loop: "{{ nodes }}"
      loop_control:
        label: "{{ item.name }}"
      ignore_errors: true  # report unreachable nodes without aborting the run

    - name: 显示虚拟机状态
      ansible.builtin.debug:
        msg: "虚拟机 {{ item.name }} ({{ item.ip }}) 创建完成"
      loop: "{{ nodes }}"

4. 执行部署
# Install hypervisor-host dependencies (Debian/Ubuntu host)
sudo apt-get install cloud-utils genisoimage libguestfs-tools
apt install ansible-core
apt-get install python3-lxml
# Pre-create the cloud-init config dir and per-zone storage directories
mkdir cloud-init/configs -p
mkdir /var/lib/libvirt/images/{cn-sh-1b,cn-sh-1a,cn-hz-1b,cn-hz-1a} -p
mkdir /var/lib/libvirt/disk/{cn-sh-1b,cn-sh-1a,cn-hz-1b,cn-hz-1a} -p
# Run the playbook
ansible-playbook create-vm-sh.yaml
[root@localhost ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host noprefixroute
valid_lft forever preferred_lft forever
2: enp1s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 52:54:00:05:2e:eb brd ff:ff:ff:ff:ff:ff
inet 192.168.10.83/24 brd 192.168.10.255 scope global dynamic noprefixroute enp1s0
valid_lft 84575sec preferred_lft 84575sec
inet6 2409:8a28:2cd4:3a41::9ac/128 scope global dynamic noprefixroute
valid_lft 1783sec preferred_lft 1783sec
inet6 2409:8a28:2cd4:3a41:5054:ff:fe05:2eeb/64 scope global dynamic noprefixroute
valid_lft 3584sec preferred_lft 3584sec
inet6 fe80::5054:ff:fe05:2eeb/64 scope link noprefixroute
valid_lft forever preferred_lft forever