From: cel@kernel.org
To: <kdevops@lists.linux.dev>
Cc: Chuck Lever <chuck.lever@oracle.com>
Subject: [RFC PATCH 5/5] guestfs: Convert part of scripts/bringup_guestfs.sh to Ansible
Date: Thu, 22 May 2025 09:31:37 -0400 [thread overview]
Message-ID: <20250522133137.989457-6-cel@kernel.org> (raw)
In-Reply-To: <20250522133137.989457-1-cel@kernel.org>
From: Chuck Lever <chuck.lever@oracle.com>
The part of bringup_guestfs.sh that provisions and starts up target
nodes is converted to Ansible. This parallelizes node bringup.
The new Ansible code takes a stab at being more idempotent than the
script was, as well.
The part of bringup_guestfs.sh that creates missing base images is
left in place for the moment.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
playbooks/roles/guestfs/defaults/main.yml | 1 +
playbooks/roles/guestfs/tasks/bringup.yml | 157 ++++++++++++++++++
playbooks/roles/guestfs/tasks/extra_disks.yml | 16 ++
playbooks/roles/guestfs/tasks/largeio.yml | 11 ++
playbooks/roles/guestfs/tasks/main.yml | 6 +
scripts/bringup_guestfs.sh | 76 +--------
scripts/guestfs.Makefile | 5 +
7 files changed, 197 insertions(+), 75 deletions(-)
create mode 100644 playbooks/roles/guestfs/tasks/bringup.yml
create mode 100644 playbooks/roles/guestfs/tasks/extra_disks.yml
create mode 100644 playbooks/roles/guestfs/tasks/largeio.yml
diff --git a/playbooks/roles/guestfs/defaults/main.yml b/playbooks/roles/guestfs/defaults/main.yml
index dc955d915d70..448ff1f0c6c5 100644
--- a/playbooks/roles/guestfs/defaults/main.yml
+++ b/playbooks/roles/guestfs/defaults/main.yml
@@ -2,3 +2,4 @@
---
libvirt_uri_system: false
distro_debian_based: false
+libvirt_enable_largeio: false
diff --git a/playbooks/roles/guestfs/tasks/bringup.yml b/playbooks/roles/guestfs/tasks/bringup.yml
new file mode 100644
index 000000000000..b871e2c82ffa
--- /dev/null
+++ b/playbooks/roles/guestfs/tasks/bringup.yml
@@ -0,0 +1,157 @@
+---
+- name: Check if target nodes are already defined
+ ansible.builtin.command:
+ cmd: "virsh domstate {{ inventory_hostname }}"
+ register: domstate_output
+ changed_when: false
+ failed_when: false
+
+- name: Ensure the target node is up
+ community.libvirt.virt:
+ name: "{{ inventory_hostname }}"
+ uri: "{{ libvirt_uri }}"
+ state: running
+ when:
+ - domstate_output.rc == 0
+
+- name: The target node is already defined
+ ansible.builtin.meta: end_host
+ when:
+ - domstate_output.rc == 0
+
+- name: Set the pathname of the ssh directory for the target node
+ ansible.builtin.set_fact:
+ ssh_key_dir: "{{ guestfs_path }}/{{ inventory_hostname }}/ssh"
+
+- name: Set the pathname of the ssh key for the target node
+ ansible.builtin.set_fact:
+ ssh_key: "{{ ssh_key_dir }}/id_ed25519"
+
+- name: Generate ssh keys for the target node
+ block:
+ - name: Create the ssh key directory on the control host
+ ansible.builtin.file:
+ path: "{{ ssh_key_dir }}"
+ state: directory
+ mode: "u=rwx"
+
+# - name: Destroy old keys for the target node
+# ansible.builtin.file:
+# path: "{{ item }}"
+# state: absent
+# loop:
+# - "{{ ssh_key }}"
+# - "{{ ssh_key }}.pub"
+
+ - name: Generate fresh keys for the target node
+ ansible.builtin.command:
+ cmd: 'ssh-keygen -q -t ed25519 -f {{ ssh_key }} -N ""'
+
+- name: Set the pathname of storage pool directory
+ ansible.builtin.set_fact:
+ storagedir: "{{ kdevops_storage_pool_path }}/guestfs"
+
+- name: Set the pathname of root image for the target node
+ ansible.builtin.set_fact:
+ rootimg: "{{ storagedir }}/{{ inventory_hostname }}/root.raw"
+
+- name: Set the pathname of the OS base image
+ ansible.builtin.set_fact:
+ base_image: "{{ storagedir }}/base_images/{{ virtbuilder_os_version }}.raw"
+
+- name: Create the storage pool directory for the target node
+ ansible.builtin.file:
+ path: "{{ storagedir }}/{{ inventory_hostname }}"
+ state: directory
+
+- name: Copy the base image
+ ansible.builtin.command:
+ cmd: "cp --reflink=auto {{ base_image }} {{ rootimg }}"
+
+- name: Get the timezone of the control host
+ ansible.builtin.command:
+ cmd: "timedatectl show -p Timezone --value"
+ register: host_timezone
+
+- name: Prep the boot image for the target node (as root)
+ become: true
+ become_method: ansible.builtin.sudo
+ ansible.builtin.command:
+ argv:
+ - "virt-sysprep"
+ - "-a"
+ - "{{ rootimg }}"
+ - "--hostname"
+ - "{{ inventory_hostname }}"
+ - "--ssh-inject"
+ - "kdevops:file:{{ ssh_key }}.pub"
+ - "--timezone"
+ - "{{ host_timezone.stdout }}"
+ when:
+ - libvirt_uri_system|bool
+
+- name: Prep the boot image for the target node (non-root)
+ ansible.builtin.command:
+ argv:
+ - "virt-sysprep"
+ - "-a"
+ - "{{ rootimg }}"
+ - "--hostname"
+ - "{{ inventory_hostname }}"
+ - "--ssh-inject"
+ - "kdevops:file:{{ ssh_key }}.pub"
+ - "--timezone"
+ - "{{ host_timezone.stdout }}"
+ when:
+ - not libvirt_uri_system|bool
+
+- name: Build largeio devices
+ ansible.builtin.include_tasks:
+ file: "{{ role_path }}/tasks/largeio.yml"
+ when:
+ - libvirt_enable_largeio|bool
+
+- name: Create extra disks
+ vars:
+ path: "{{ storagedir }}/{{ inventory_hostname }}/extra{{ item }}.{{ libvirt_extra_drive_format }}"
+ ansible.builtin.include_tasks:
+ file: "{{ role_path }}/tasks/extra_disks.yml"
+ loop: "{{ range(0, 4) | list }}"
+ when:
+ - not libvirt_enable_largeio|bool
+
+- name: Define the target nodes
+ vars:
+ xml_file: "{{ guestfs_path }}/{{ inventory_hostname }}/{{ inventory_hostname }}.xml"
+ community.libvirt.virt:
+ command: define
+ name: "{{ inventory_hostname }}"
+ xml: "{{ lookup('file', xml_file) }}"
+ uri: "{{ libvirt_uri }}"
+
+- name: Find PCIe passthrough devices
+ ansible.builtin.find:
+ paths: "{{ guestfs_path }}/{{ inventory_hostname }}"
+ file_type: file
+ patterns: "pcie_passthrough_*.xml"
+ register: passthrough_devices
+
+- name: Attach PCIe passthrough devices
+ ansible.builtin.command:
+ argv:
+ - "virsh"
+ - "attach-device"
+ - "{{ inventory_hostname }}"
+ - "{{ item }}"
+ - "--config"
+ loop: "{{ passthrough_devices.files }}"
+ loop_control:
+ label: "Doing PCI-E passthrough for device {{ item }}"
+ when:
+ - passthrough_devices.matched > 0
+
+- name: Boot the target nodes
+ community.libvirt.virt:
+ name: "{{ inventory_hostname }}"
+ uri: "{{ libvirt_uri }}"
+ state: running
diff --git a/playbooks/roles/guestfs/tasks/extra_disks.yml b/playbooks/roles/guestfs/tasks/extra_disks.yml
new file mode 100644
index 000000000000..c8a9bd63885f
--- /dev/null
+++ b/playbooks/roles/guestfs/tasks/extra_disks.yml
@@ -0,0 +1,16 @@
+---
+- name: Create the new drive image
+ ansible.builtin.command:
+ argv:
+ - "qemu-img"
+ - "create"
+ - "-f"
+ - "{{ libvirt_extra_drive_format }}"
+ - "{{ path }}"
+ - "100G"
+
+- name: Adjust the permission settings of the drive image file
+ ansible.builtin.file:
+ path: "{{ path }}"
+ group: "{{ libvirt_qemu_group }}"
+ mode: "g+rw,o-rw"
diff --git a/playbooks/roles/guestfs/tasks/largeio.yml b/playbooks/roles/guestfs/tasks/largeio.yml
new file mode 100644
index 000000000000..4246677d18d8
--- /dev/null
+++ b/playbooks/roles/guestfs/tasks/largeio.yml
@@ -0,0 +1,11 @@
+---
+- name: Compute the total number of devices to build
+ ansible.builtin.set_fact:
+ total_devices: "{{ libvirt_largeio_pow_limit * libvirt_largeio_drives_per_space }}"
+
+- name: Create largeio block devices
+ ansible.builtin.include_tasks:
+ file: "{{ role_path }}/tasks/extra_disks.yml"
+ vars:
+ path: "{{ storagedir }}/{{ inventory_hostname }}/extra{{ item }}.{{ libvirt_extra_drive_format }}"
+ loop: "{{ range(0, total_devices) | list }}"
diff --git a/playbooks/roles/guestfs/tasks/main.yml b/playbooks/roles/guestfs/tasks/main.yml
index bda91de79983..e63484b0229d 100644
--- a/playbooks/roles/guestfs/tasks/main.yml
+++ b/playbooks/roles/guestfs/tasks/main.yml
@@ -23,6 +23,12 @@
ansible.builtin.import_tasks:
file: "{{role_path }}/tasks/network.yml"
+- name: Bring up each target node
+ tags:
+ - bringup
+ ansible.builtin.import_tasks:
+ file: "{{role_path }}/tasks/bringup.yml"
+
- name: Set up target node console permissions
tags:
- console-permissions
diff --git a/scripts/bringup_guestfs.sh b/scripts/bringup_guestfs.sh
index 67f85a5fdb0a..be9ec3405037 100755
--- a/scripts/bringup_guestfs.sh
+++ b/scripts/bringup_guestfs.sh
@@ -322,78 +322,4 @@ if [ ! -f $BASE_IMAGE ]; then
fi
fi
-# FIXME: is there a yaml equivalent of jq?
-grep -e '^ - name: ' ${TOPDIR}/guestfs/kdevops_nodes.yaml | sed 's/^ - name: //' | while read name
-do
- #
- # If the guest is already defined, then just stop what we're doing
- # and plead to the developer to clean things up.
- #
- if virsh list --all | grep --quiet --word-regexp "$name"; then
- output_domstate=$(virsh domstate $name 2>/dev/null)
- echo "Domain $name is already defined. (state: $output_domstate)"
- if [ "$output_domstate" != "running" ]; then
- virsh start $name
- fi
- exit 0
- fi
-
- SSH_KEY_DIR="${GUESTFSDIR}/$name/ssh"
- SSH_KEY="${SSH_KEY_DIR}/id_ed25519"
-
- # Generate a new ssh key
- mkdir -p "$SSH_KEY_DIR"
- chmod 0700 "$SSH_KEY_DIR"
- rm -f $SSH_KEY $SSH_KEY.pub
- ssh-keygen -q -t ed25519 -f $SSH_KEY -N ""
-
- mkdir -p "$STORAGEDIR/$name"
-
- # Copy the base image and prep it
- ROOTIMG="$STORAGEDIR/$name/root.raw"
- cp --reflink=auto $BASE_IMAGE $ROOTIMG
- TZ="$(timedatectl show -p Timezone --value)"
- $USE_SUDO virt-sysprep -a $ROOTIMG --hostname $name --ssh-inject "kdevops:file:$SSH_KEY.pub" --timezone $TZ
-
- if [[ "${CONFIG_LIBVIRT_ENABLE_LARGEIO+x}" && \
- "$CONFIG_LIBVIRT_ENABLE_LARGEIO" == "y" ]]; then
- lbs_idx=0
- for i in $(seq 1 $(($CONFIG_QEMU_LARGEIO_MAX_POW_LIMIT+1))); do
- for x in $(seq 0 $CONFIG_QEMU_EXTRA_DRIVE_LARGEIO_NUM_DRIVES_PER_SPACE); do
- diskimg="$STORAGEDIR/$name/extra${lbs_idx}.${IMG_FMT}"
- rm -f $diskimg
- qemu-img create -f $IMG_FMT "$diskimg" 100G
- if [[ "$CONFIG_LIBVIRT_URI_SYSTEM" == "y" ]]; then
- chmod g+rw $diskimg
- chgrp $QEMU_GROUP $diskimg
- fi
- let lbs_idx=$lbs_idx+1
- done
- done
- else
- # build some extra disks
- for i in $(seq 0 3); do
- diskimg="$STORAGEDIR/$name/extra${i}.${IMG_FMT}"
- rm -f $diskimg
- qemu-img create -f $IMG_FMT "$STORAGEDIR/$name/extra${i}.$IMG_FMT" 100G
- if [[ "$CONFIG_LIBVIRT_URI_SYSTEM" == "y" ]]; then
- chmod g+rw $STORAGEDIR/$name/extra${i}.$IMG_FMT
- chgrp $QEMU_GROUP $STORAGEDIR/$name/extra${i}.$IMG_FMT
- fi
- done
- fi
-
- virsh define $GUESTFSDIR/$name/$name.xml
- XML_DEVICES_COUNT=$(find $GUESTFSDIR/$name/ -name pcie_passthrough_*.xml | wc -l)
- if [[ $XML_DEVICES_COUNT -gt 0 ]]; then
- for xml in $GUESTFSDIR/$name/pcie_passthrough_*.xml; do
- echo "Doing PCI-E passthrough for device $xml"
- virsh attach-device $name $xml --config
- done
- fi
- virsh start $name
- if [[ $? -ne 0 ]]; then
- echo "Failed to start $name"
- exit 1
- fi
-done
+exit 0
diff --git a/scripts/guestfs.Makefile b/scripts/guestfs.Makefile
index 290315ee9c9e..84cf99db982d 100644
--- a/scripts/guestfs.Makefile
+++ b/scripts/guestfs.Makefile
@@ -81,6 +81,11 @@ bringup_guestfs: $(GUESTFS_BRINGUP_DEPS)
--extra-vars=@./extra_vars.yaml \
--tags config-check,network,storage-pool-path
$(Q)$(TOPDIR)/scripts/bringup_guestfs.sh
+ $(Q)ansible-playbook $(ANSIBLE_VERBOSE) \
+ -i hosts \
+ playbooks/guestfs.yml \
+ --extra-vars=@./extra_vars.yaml \
+ --tags bringup
$(Q)ansible-playbook $(ANSIBLE_VERBOSE) \
--inventory localhost, \
playbooks/guestfs.yml \
--
2.49.0
next prev parent reply other threads:[~2025-05-22 13:31 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-05-22 13:31 [RFC PATCH 0/5] Convert bringup_guestfs to a single Ansible role cel
2025-05-22 13:31 ` [RFC PATCH 1/5] guestfs: Replace scripts/destroy_guestfs.sh with an Ansible playbook cel
2025-05-22 17:02 ` Luis Chamberlain
2025-05-22 17:03 ` Chuck Lever
2025-05-22 13:31 ` [RFC PATCH 2/5] Move the guestfs install-deps to the guestfs playbook cel
2025-05-22 17:07 ` Luis Chamberlain
2025-05-22 17:13 ` Chuck Lever
2025-05-22 17:16 ` Luis Chamberlain
2025-05-22 13:31 ` [RFC PATCH 3/5] guestfs: Move console-related steps to guestfs role cel
2025-05-22 17:09 ` Luis Chamberlain
2025-05-22 17:11 ` Chuck Lever
2025-05-22 17:15 ` Luis Chamberlain
2025-05-22 13:31 ` [RFC PATCH 4/5] guestfs: Move check-config, network, and storage-pool tags cel
2025-05-22 13:31 ` cel [this message]
2025-05-22 17:14 ` [RFC PATCH 5/5] guestfs: Convert part of scripts/bringup_guestfs.sh to Ansible Luis Chamberlain
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250522133137.989457-6-cel@kernel.org \
--to=cel@kernel.org \
--cc=chuck.lever@oracle.com \
--cc=kdevops@lists.linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox