CRIU (Checkpoint/Restore in Userspace) mailing list
 help / color / mirror / Atom feed
From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
To: criu@lists.linux.dev
Cc: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>,
	David Francis <David.Francis@amd.com>
Subject: [PATCH v2 12/23] plugins/amdgpu: Remove plugin_log_msg()
Date: Fri, 10 Apr 2026 19:55:03 +0100	[thread overview]
Message-ID: <20260410185514.51153-13-tvrtko.ursulin@igalia.com> (raw)
In-Reply-To: <20260410185514.51153-1-tvrtko.ursulin@igalia.com>

The extra debug-build-only logging helper does not seem very useful, and
some of its call sites do not even compile. Let's just remove it and
replace it with the standard pr_debug. If this turns out to be too
noisy, we can later re-evaluate and drop some of the less useful log
messages.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Reviewed-by: David Francis <David.Francis@amd.com>
---
 plugins/amdgpu/amdgpu_plugin.c      | 42 ++++++++++++++++-------------
 plugins/amdgpu/amdgpu_plugin_drm.c  |  5 ++--
 plugins/amdgpu/amdgpu_plugin_util.h |  9 -------
 3 files changed, 27 insertions(+), 29 deletions(-)

diff --git a/plugins/amdgpu/amdgpu_plugin.c b/plugins/amdgpu/amdgpu_plugin.c
index e01fc4b88abe..1bbd13f7396b 100644
--- a/plugins/amdgpu/amdgpu_plugin.c
+++ b/plugins/amdgpu/amdgpu_plugin.c
@@ -570,7 +570,7 @@ int sdma_copy_bo(int shared_fd, uint64_t size, FILE *storage_fp,
 	src_bo_size = (type == SDMA_OP_VRAM_WRITE) ? buffer_bo_size : size;
 	dst_bo_size = (type == SDMA_OP_VRAM_READ) ? buffer_bo_size : size;
 
-	plugin_log_msg("Enter %s\n", __func__);
+	pr_debug("Enter %s\n", __func__);
 
 	/* prepare src buffer */
 	switch (type) {
@@ -605,7 +605,8 @@ int sdma_copy_bo(int shared_fd, uint64_t size, FILE *storage_fp,
 		pr_perror("failed to GPU map the src BO");
 		goto err_src_bo_map;
 	}
-	plugin_log_msg("Source BO: GPU VA: %lx, size: %lx\n", gpu_addr_src, src_bo_size);
+	pr_debug("Source BO: GPU VA: %lx, size: %lx\n",
+		 gpu_addr_src, src_bo_size);
 
 	/* prepare dest buffer */
 	switch (type) {
@@ -640,7 +641,8 @@ int sdma_copy_bo(int shared_fd, uint64_t size, FILE *storage_fp,
 		pr_perror("failed to GPU map the dest BO");
 		goto err_dst_bo_map;
 	}
-	plugin_log_msg("Dest BO: GPU VA: %lx, size: %lx\n", gpu_addr_dst, dst_bo_size);
+	pr_debug("Dest BO: GPU VA: %lx, size: %lx\n",
+		 gpu_addr_dst, dst_bo_size);
 
 	/* prepare ring buffer/indirect buffer for command submission
 	 * each copy packet is 7 dwords so we need to alloc 28x size for ib
@@ -651,7 +653,8 @@ int sdma_copy_bo(int shared_fd, uint64_t size, FILE *storage_fp,
 		pr_perror("failed to allocate and map ib/rb");
 		goto err_ib_gpu_alloc;
 	}
-	plugin_log_msg("Indirect BO: GPU VA: %lx, size: %lx\n", gpu_addr_ib, packets_per_buffer * 28);
+	pr_debug("Indirect BO: GPU VA: %" PRIx64 ", size: %d\n",
+		 gpu_addr_ib, packets_per_buffer * 28);
 
 	resources[0] = h_bo_src;
 	resources[1] = h_bo_dst;
@@ -792,7 +795,7 @@ err_src_va:
 	err2 = amdgpu_bo_free(h_bo_src);
 	if (err2)
 		pr_perror("src bo free failed");
-	plugin_log_msg("Leaving sdma_copy_bo, err = %d\n", err);
+	pr_debug("Leaving sdma_copy_bo, err = %d\n", err);
 	return err;
 }
 
@@ -818,7 +821,7 @@ void *dump_bo_contents(void *_thread_data)
 		pr_perror("failed to initialize device");
 		goto exit;
 	}
-	plugin_log_msg("libdrm initialized successfully\n");
+	pr_debug("libdrm initialized successfully\n");
 
 	ret = amdgpu_query_gpu_info(h_dev, &gpu_info);
 	if (ret) {
@@ -911,7 +914,7 @@ void *restore_bo_contents(void *_thread_data)
 		pr_perror("failed to initialize device");
 		goto exit;
 	}
-	plugin_log_msg("libdrm initialized successfully\n");
+	pr_debug("libdrm initialized successfully\n");
 
 	ret = amdgpu_query_gpu_info(h_dev, &gpu_info);
 	if (ret) {
@@ -972,7 +975,8 @@ void *restore_bo_contents(void *_thread_data)
 			pr_err("Failed to fill the BO using sDMA: bo_buckets[%d]\n", i);
 			break;
 		}
-		plugin_log_msg("** Successfully filled the BO using sDMA: bo_buckets[%d] **\n", i);
+		pr_debug("** Successfully filled the BO using sDMA: bo_buckets[%d] **\n",
+			 i);
 	}
 
 exit:
@@ -1006,7 +1010,7 @@ int check_hsakmt_shared_mem(uint64_t *shared_mem_size, uint32_t *shared_mem_magi
 	if (ret)
 		pr_perror("Failed to read shared mem magic");
 	else
-		plugin_log_msg("Shared mem magic:0x%x\n", *shared_mem_magic);
+		pr_debug("Shared mem magic:0x%x\n", *shared_mem_magic);
 
 	return 0;
 }
@@ -1647,8 +1651,9 @@ static int restore_bos(struct kfd_ioctl_criu_args *args, CriuKfd *e)
 		bo_bucket->offset = bo_entry->offset;
 		bo_bucket->alloc_flags = bo_entry->alloc_flags;
 
-		plugin_log_msg("BO [%d] gpu_id:%x addr:%llx size:%llx offset:%llx\n", i, bo_bucket->gpu_id,
-			       bo_bucket->addr, bo_bucket->size, bo_bucket->offset);
+		pr_debug("BO [%d] gpu_id:%x addr:%" PRIx64 " size:%" PRIx64 " offset:%" PRIx64 "\n",
+			 i, bo_bucket->gpu_id, bo_bucket->addr, bo_bucket->size,
+			 bo_bucket->offset);
 	}
 
 	pr_info("Restore BOs Ok\n");
@@ -1716,9 +1721,9 @@ static int restore_bo_data(int id, struct kfd_criu_bo_bucket *bo_buckets, CriuKf
 			vma_md->new_pgoff = bo_bucket->restored_offset;
 			vma_md->fd = node_get_drm_render_device(tp_node);
 
-			plugin_log_msg("adding vma_entry:addr:0x%lx old-off:0x%lx "
-				       "new_off:0x%lx new_minor:%d\n",
-				       vma_md->vma_entry, vma_md->old_pgoff, vma_md->new_pgoff, tp_node->drm_render_minor);
+			pr_debug("adding vma_entry:addr:0x%lx old-off:0x%lx new_off:0x%lx new_minor:%d\n",
+				 vma_md->vma_entry, vma_md->old_pgoff,
+				 vma_md->new_pgoff, tp_node->drm_render_minor);
 
 			list_add_tail(&vma_md->list, &update_vma_info_list);
 		}
@@ -1990,7 +1995,7 @@ int amdgpu_plugin_restore_file(int id, bool *retry_needed)
 		return -1;
 	}
 
-	plugin_log_msg("read image file data\n");
+	pr_debug("read image file data\n");
 
 	/*
 	 * Initialize fd_next to be 1 greater than the biggest file descriptor in use by the target restore process.
@@ -2097,7 +2102,7 @@ int amdgpu_plugin_update_vmamap(const char *in_path, const uint64_t addr, const
 	if (plugin_disabled)
 		return -ENOTSUP;
 
-	plugin_log_msg("Enter %s\n", __func__);
+	pr_debug("Enter %s\n", __func__);
 
 	strncpy(path, in_path, sizeof(path));
 
@@ -2140,8 +2145,9 @@ int amdgpu_plugin_update_vmamap(const char *in_path, const uint64_t addr, const
 				*updated_fd = fd;
 			}
 
-			plugin_log_msg("old_pgoff=0x%lx new_pgoff=0x%lx fd=%d\n", vma_md->old_pgoff, vma_md->new_pgoff,
-				       *updated_fd);
+			pr_debug("old_pgoff=0x%lx new_pgoff=0x%lx fd=%d\n",
+				 vma_md->old_pgoff, vma_md->new_pgoff,
+				 *updated_fd);
 
 			return 1;
 		}
diff --git a/plugins/amdgpu/amdgpu_plugin_drm.c b/plugins/amdgpu/amdgpu_plugin_drm.c
index 30f5fbb38000..6dddc4beeb48 100644
--- a/plugins/amdgpu/amdgpu_plugin_drm.c
+++ b/plugins/amdgpu/amdgpu_plugin_drm.c
@@ -173,7 +173,7 @@ static int restore_bo_contents_drm(int drm_render_minor, CriuRenderNode *rd, int
 		pr_perror("failed to initialize device");
 		goto exit;
 	}
-	plugin_log_msg("libdrm initialized successfully\n");
+	pr_debug("libdrm initialized successfully\n");
 
 	ret = amdgpu_query_gpu_info(h_dev, &gpu_info);
 	if (ret) {
@@ -222,7 +222,8 @@ static int restore_bo_contents_drm(int drm_render_minor, CriuRenderNode *rd, int
 			pr_err("Failed to fill the BO using sDMA: bo_buckets[%d]\n", i);
 			break;
 		}
-		plugin_log_msg("** Successfully filled the BO using sDMA: bo_buckets[%d] **\n", i);
+		pr_debug("** Successfully filled the BO using sDMA: bo_buckets[%d] **\n",
+			 i);
 
 		if (bo_contents_fp)
 			fclose(bo_contents_fp);
diff --git a/plugins/amdgpu/amdgpu_plugin_util.h b/plugins/amdgpu/amdgpu_plugin_util.h
index 69b98a31c97a..2f4001b2ed68 100644
--- a/plugins/amdgpu/amdgpu_plugin_util.h
+++ b/plugins/amdgpu/amdgpu_plugin_util.h
@@ -25,15 +25,6 @@
 #endif
 #define LOG_PREFIX "amdgpu_plugin: "
 
-#ifdef DEBUG
-#define plugin_log_msg(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
-#else
-#define plugin_log_msg(fmt, ...) \
-	{                        \
-	}
-#endif
-
-
 /* Path where KFD device is surfaced */
 #define AMDGPU_KFD_DEVICE		"/dev/kfd"
 
-- 
2.52.0


  parent reply	other threads:[~2026-04-10 18:55 UTC|newest]

Thread overview: 27+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-10 18:54 [PATCH v2 00/23] Amdgpu plugin cleanups and fixes Tvrtko Ursulin
2026-04-10 18:54 ` [PATCH v2 01/23] plugins/amgdpu: Fix one error message Tvrtko Ursulin
2026-04-10 18:54 ` [PATCH v2 02/23] plugins/amdgpu: Remove unused current_pid global variable Tvrtko Ursulin
2026-04-10 18:54 ` [PATCH v2 03/23] plugins/amdgpu: Remove unused new_minor from struct vma_metadata Tvrtko Ursulin
2026-04-10 18:54 ` [PATCH v2 04/23] plugins/amdgpu: Fix drm pages size header Tvrtko Ursulin
2026-04-10 18:54 ` [PATCH v2 05/23] plugins/amdgpu: Fix logging of failures to open files during restore init Tvrtko Ursulin
2026-04-10 18:54 ` [PATCH v2 06/23] plugins/amdgpu: Propagate failure to save buffer object content Tvrtko Ursulin
2026-04-10 18:54 ` [PATCH v2 07/23] plugins/amdgpu: Close the directory when image probing fails Tvrtko Ursulin
2026-04-10 18:54 ` [PATCH v2 08/23] plugins/amdgpu: Close dma-buf image file if the read fails Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 09/23] plugins/amdgpu: Flatten amdgpu_restore_init a bit Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 10/23] plugins/amdgpu: Add error handling for seek operations Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 11/23] plugins/amdgpu: Consolidate vm_info collection Tvrtko Ursulin
2026-04-10 18:55 ` Tvrtko Ursulin [this message]
2026-04-10 18:55 ` [PATCH v2 13/23] plugins/amdgpu: Reduce amount of debug logging a little bit Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 14/23] plugins/amdgpu: Do not eat the errno in kmtIoctl Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 15/23] plugins/amdgpu: Fix open_drm_render_device() Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 16/23] plugins/amdgpu: Check sdma operation type early and once Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 17/23] plugins/amdgpu: Add plugin to inventory even if process has no vmas Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 18/23] plugins/amdgpu: Move drm file dump and restore into helpers Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 19/23] plugins/amdgpu: Use the load_img helper in drm file restore Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 20/23] plugins/amdgpu: Convert away from libc buffered file IO Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 21/23] plugins/amdgpu: Use save_vma_updates for all call sites Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 22/23] plugins/amdgpu: amdgpu_plugin_drm_restore_file() does not need to use libdrm Tvrtko Ursulin
2026-04-10 18:55 ` [PATCH v2 23/23] plugins/amdgpu: Fix remaining wrong usages of pr_perror Tvrtko Ursulin
2026-04-13 18:23 ` [PATCH v2 00/23] Amdgpu plugin cleanups and fixes Andrei Vagin
2026-04-13 19:47   ` Tvrtko Ursulin
2026-04-13 20:03     ` Andrei Vagin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260410185514.51153-13-tvrtko.ursulin@igalia.com \
    --to=tvrtko.ursulin@igalia.com \
    --cc=David.Francis@amd.com \
    --cc=criu@lists.linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox