public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Daniel Almeida <daniel.almeida@collabora.com>
To: liviu.dudau@arm.com, steven.price@arm.com,
	carsten.haitzler@arm.com, boris.brezillon@collabora.com,
	robh@kernel.org, faith.ekstrand@collabora.com
Cc: Daniel Almeida <daniel.almeida@collabora.com>,
	linux-kernel@vger.kernel.org
Subject: [PATCH v2 5/5] drm: panthor: allow dumping multiple jobs
Date: Tue, 13 Aug 2024 18:05:48 -0300	[thread overview]
Message-ID: <20240813210555.607641-7-daniel.almeida@collabora.com> (raw)
In-Reply-To: <20240813210555.607641-1-daniel.almeida@collabora.com>

When dumping successful jobs, it's useful to dump a given number of
them if needed. This is blocked by the fact that the devcoredump
mechanism will not create a new dump if an old one has not been read.

In particular, if we're dumping multiple jobs in sequence, there are
sections of the dump that we do not want to include again, since they
would be redundant.

Allow dumping multiple jobs by keeping a counter and a list. Job dumps
are appended to the list until the counter reaches zero, at which point
the whole list is dumped at once, calling into devcoredump only once.

This counter is controlled through a debugfs file.

Signed-off-by: Daniel Almeida <daniel.almeida@collabora.com>
---
 drivers/gpu/drm/panthor/panthor_dump.c  | 229 ++++++++++++++++--------
 drivers/gpu/drm/panthor/panthor_dump.h  |  15 ++
 drivers/gpu/drm/panthor/panthor_sched.c |  20 ++-
 3 files changed, 186 insertions(+), 78 deletions(-)

diff --git a/drivers/gpu/drm/panthor/panthor_dump.c b/drivers/gpu/drm/panthor/panthor_dump.c
index 7ec0e21dc7e9..d3b29359e13a 100644
--- a/drivers/gpu/drm/panthor/panthor_dump.c
+++ b/drivers/gpu/drm/panthor/panthor_dump.c
@@ -5,6 +5,7 @@
 #include <linux/iosys-map.h>
 #include <linux/devcoredump.h>
 #include <linux/err.h>
+#include <linux/list.h>
 #include <linux/vmalloc.h>
 #include <linux/types.h>
 #include <uapi/drm/panthor_drm.h>
@@ -152,22 +153,25 @@ static void count_queues(struct queue_count *count,
 }
 
 static int compute_dump_size(struct vm_dump_count *va_count,
-			     struct queue_count *group_and_q_cnt)
+			     struct queue_count *group_and_q_cnt,
+			     bool job_list_is_empty)
 {
 	int size = 0;
 	int i;
 
-	size += sizeof(struct drm_panthor_dump_header);
-	size += sizeof(struct drm_panthor_dump_version);
+	if (job_list_is_empty) {
+		size += sizeof(struct drm_panthor_dump_header);
+		size += sizeof(struct drm_panthor_dump_version);
 
-	size += sizeof(struct drm_panthor_dump_header);
-	size += sizeof(struct drm_panthor_gpu_info);
+		size += sizeof(struct drm_panthor_dump_header);
+		size += sizeof(struct drm_panthor_gpu_info);
 
-	size += sizeof(struct drm_panthor_dump_header);
-	size += sizeof(struct drm_panthor_csif_info);
+		size += sizeof(struct drm_panthor_dump_header);
+		size += sizeof(struct drm_panthor_csif_info);
 
-	size += sizeof(struct drm_panthor_dump_header);
-	size += sizeof(struct drm_panthor_fw_info);
+		size += sizeof(struct drm_panthor_dump_header);
+		size += sizeof(struct drm_panthor_fw_info);
+	}
 
 	for (i = 0; i < va_count->vas; i++) {
 		size += sizeof(struct drm_panthor_dump_header);
@@ -250,6 +254,58 @@ static int dump_group_info(struct dump_group_args *dump_group_args,
 	return ret;
 }
 
+/*
+ * clean_job_list() - Free every queued job dump and empty @joblist.
+ *
+ * Releases both the per-job dump buffer (vzalloc()ed in panthor_core_dump())
+ * and the list entry itself.
+ */
+static void clean_job_list(struct list_head *joblist)
+{
+	struct panthor_dump_job_entry *job, *tmp;
+
+	list_for_each_entry_safe(job, tmp, joblist, node) {
+		list_del(&job->node);
+		vfree(job->mem);
+		kfree(job);
+	}
+}
+
+/*
+ * append_job() - Queue one job's dump blob on args->job_list.
+ *
+ * On success, ownership of @mem (a vzalloc()ed buffer of @size bytes) is
+ * transferred to the list; it is released later by clean_job_list().
+ *
+ * Return: 0 on success, or -ENOMEM if the list entry cannot be allocated,
+ * in which case the caller keeps ownership of @mem.
+ */
+static int append_job(struct panthor_core_dump_args *args, void *mem,
+		      size_t size)
+{
+	struct panthor_dump_job_entry *job;
+
+	job = kzalloc(sizeof(*job), GFP_KERNEL);
+	if (!job)
+		return -ENOMEM;
+
+	job->mem = mem;
+	job->size = size;
+	list_add_tail(&job->node, args->job_list);
+	return 0;
+}
+
+/*
+ * copy_from_job_list() - Concatenate all queued job dumps into one buffer.
+ *
+ * On success, *out_mem points to a vzalloc()ed buffer of *out_size bytes
+ * that the caller owns (e.g. to hand over to dev_coredumpv()). The list
+ * entries themselves are left untouched.
+ *
+ * Return: 0 on success, -EINVAL if the list is empty, -E2BIG if the combined
+ * size overflows u32, or -ENOMEM on allocation failure.
+ */
+static int copy_from_job_list(struct list_head *job_list, void **out_mem,
+			      u32 *out_size)
+{
+	u32 total_size = 0;
+	u32 offset = 0;
+	struct panthor_dump_job_entry *entry;
+	void *mem;
+
+	list_for_each_entry(entry, job_list, node) {
+		/* Guard against u32 overflow of the combined dump size. */
+		if (entry->size > U32_MAX - total_size)
+			return -E2BIG;
+		total_size += entry->size;
+	}
+
+	/* vzalloc(0) returns NULL; don't misreport an empty list as OOM. */
+	if (!total_size)
+		return -EINVAL;
+
+	mem = vzalloc(total_size);
+	if (!mem)
+		return -ENOMEM;
+
+	list_for_each_entry(entry, job_list, node) {
+		memcpy(mem + offset, entry->mem, entry->size);
+		offset += entry->size;
+	}
+
+	*out_mem = mem;
+	*out_size = total_size;
+	return 0;
+}
+
 int panthor_core_dump(struct panthor_core_dump_args *args)
 {
 	u8 *mem;
@@ -273,7 +329,8 @@ int panthor_core_dump(struct panthor_core_dump_args *args)
 
 	count_queues(&group_and_q_cnt, &group_info);
 
-	dump_size = compute_dump_size(&va_count, &group_and_q_cnt);
+	dump_size = compute_dump_size(&va_count, &group_and_q_cnt,
+				      list_empty(args->job_list));
 
 	mem = vzalloc(dump_size);
 	if (!mem)
@@ -286,69 +343,73 @@ int panthor_core_dump(struct panthor_core_dump_args *args)
 		.capacity = dump_size,
 	};
 
-	hdr = alloc_header(&alloc, DRM_PANTHOR_DUMP_HEADER_TYPE_VERSION,
-			   sizeof(struct drm_panthor_dump_version));
-	if (IS_ERR(hdr)) {
-		ret = PTR_ERR(hdr);
-		goto free_valloc;
+	if (list_empty(args->job_list)) {
+		hdr = alloc_header(&alloc, DRM_PANTHOR_DUMP_HEADER_TYPE_VERSION,
+				   sizeof(struct drm_panthor_dump_version));
+		if (IS_ERR(hdr)) {
+			ret = PTR_ERR(hdr);
+			goto free_valloc;
+		}
+
+		version = alloc_bytes(&alloc, sizeof(*version));
+		if (IS_ERR(version)) {
+			ret = PTR_ERR(version);
+			goto free_valloc;
+		}
+
+		*version = (struct drm_panthor_dump_version){
+			.major = PANT_DUMP_MAJOR,
+			.minor = PANT_DUMP_MINOR,
+		};
+
+		hdr = alloc_header(&alloc,
+				   DRM_PANTHOR_DUMP_HEADER_TYPE_GPU_INFO,
+				   sizeof(args->ptdev->gpu_info));
+		if (IS_ERR(hdr)) {
+			ret = PTR_ERR(hdr);
+			goto free_valloc;
+		}
+
+		gpu_info = alloc_bytes(&alloc, sizeof(*gpu_info));
+		if (IS_ERR(gpu_info)) {
+			ret = PTR_ERR(gpu_info);
+			goto free_valloc;
+		}
+
+		*gpu_info = args->ptdev->gpu_info;
+
+		hdr = alloc_header(&alloc,
+				   DRM_PANTHOR_DUMP_HEADER_TYPE_CSIF_INFO,
+				   sizeof(args->ptdev->csif_info));
+		if (IS_ERR(hdr)) {
+			ret = PTR_ERR(hdr);
+			goto free_valloc;
+		}
+
+		csif_info = alloc_bytes(&alloc, sizeof(*csif_info));
+		if (IS_ERR(csif_info)) {
+			ret = PTR_ERR(csif_info);
+			goto free_valloc;
+		}
+
+		*csif_info = args->ptdev->csif_info;
+
+		hdr = alloc_header(&alloc, DRM_PANTHOR_DUMP_HEADER_TYPE_FW_INFO,
+				   sizeof(args->ptdev->fw_info));
+		if (IS_ERR(hdr)) {
+			ret = PTR_ERR(hdr);
+			goto free_valloc;
+		}
+
+		fw_info = alloc_bytes(&alloc, sizeof(*fw_info));
+		if (IS_ERR(fw_info)) {
+			ret = PTR_ERR(fw_info);
+			goto free_valloc;
+		}
+
+		*fw_info = args->ptdev->fw_info;
 	}
 
-	version = alloc_bytes(&alloc, sizeof(*version));
-	if (IS_ERR(version)) {
-		ret = PTR_ERR(version);
-		goto free_valloc;
-	}
-
-	*version = (struct drm_panthor_dump_version){
-		.major = PANT_DUMP_MAJOR,
-		.minor = PANT_DUMP_MINOR,
-	};
-
-	hdr = alloc_header(&alloc, DRM_PANTHOR_DUMP_HEADER_TYPE_GPU_INFO,
-			   sizeof(args->ptdev->gpu_info));
-	if (IS_ERR(hdr)) {
-		ret = PTR_ERR(hdr);
-		goto free_valloc;
-	}
-
-	gpu_info = alloc_bytes(&alloc, sizeof(*gpu_info));
-	if (IS_ERR(gpu_info)) {
-		ret = PTR_ERR(gpu_info);
-		goto free_valloc;
-	}
-
-	*gpu_info = args->ptdev->gpu_info;
-
-	hdr = alloc_header(&alloc, DRM_PANTHOR_DUMP_HEADER_TYPE_CSIF_INFO,
-			   sizeof(args->ptdev->csif_info));
-	if (IS_ERR(hdr)) {
-		ret = PTR_ERR(hdr);
-		goto free_valloc;
-	}
-
-	csif_info = alloc_bytes(&alloc, sizeof(*csif_info));
-	if (IS_ERR(csif_info)) {
-		ret = PTR_ERR(csif_info);
-		goto free_valloc;
-	}
-
-	*csif_info = args->ptdev->csif_info;
-
-	hdr = alloc_header(&alloc, DRM_PANTHOR_DUMP_HEADER_TYPE_FW_INFO,
-			   sizeof(args->ptdev->fw_info));
-	if (IS_ERR(hdr)) {
-		ret = PTR_ERR(hdr);
-		goto free_valloc;
-	}
-
-	fw_info = alloc_bytes(&alloc, sizeof(*fw_info));
-	if (IS_ERR(fw_info)) {
-		ret = PTR_ERR(fw_info);
-		goto free_valloc;
-	}
-
-	*fw_info = args->ptdev->fw_info;
-
 	dump_va_args.ptdev = args->ptdev;
 	dump_va_args.alloc = &alloc;
 	ret = panthor_vm_foreach_va(args->group_vm, dump_va_cb, &dump_va_args);
@@ -365,12 +426,34 @@ int panthor_core_dump(struct panthor_core_dump_args *args)
 			 "dump size mismatch: expected %d, got %zu\n",
 			 dump_size, alloc.pos);
 
-	dev_coredumpv(args->ptdev->base.dev, alloc.start, alloc.pos,
-		      GFP_KERNEL);
+	if (args->append) {
+		ret = append_job(args, alloc.start, alloc.pos);
+		if (ret)
+			goto free_valloc;
+	} else if (!list_empty(args->job_list)) {
+		void *mem;
+		u32 size;
+
+		/* Append our own dump so it joins the combined blob. */
+		ret = append_job(args, alloc.start, alloc.pos);
+		if (ret)
+			goto free_valloc;
+
+		ret = copy_from_job_list(args->job_list, &mem, &size);
+		if (ret) {
+			/*
+			 * alloc.start now belongs to the job list, so skip the
+			 * vfree() in free_valloc to avoid a double free.
+			 */
+			clean_job_list(args->job_list);
+			return ret;
+		}
+
+		dev_coredumpv(args->ptdev->base.dev, mem, size, GFP_KERNEL);
+		clean_job_list(args->job_list);
+	} else {
+		dev_coredumpv(args->ptdev->base.dev, alloc.start, alloc.pos,
+			      GFP_KERNEL);
+	}
 
 	return ret;
 
 free_valloc:
+	clean_job_list(args->job_list);
 	vfree(mem);
 	return ret;
 }
diff --git a/drivers/gpu/drm/panthor/panthor_dump.h b/drivers/gpu/drm/panthor/panthor_dump.h
index 2a02943a2dbd..f16051d7da21 100644
--- a/drivers/gpu/drm/panthor/panthor_dump.h
+++ b/drivers/gpu/drm/panthor/panthor_dump.h
@@ -10,10 +10,25 @@
 #include "panthor_device.h"
 #include "panthor_gem.h"
 
+/**
+ * struct panthor_dump_job_entry - One job's dump blob, queued until the
+ * whole batch is handed to devcoredump.
+ */
+struct panthor_dump_job_entry {
+	/** @mem: vmalloc-based dump data for one job. */
+	void *mem;
+	/** @size: Size of @mem in bytes. */
+	size_t size;
+	/** @node: Link in panthor_core_dump_args::job_list. */
+	struct list_head node;
+};
+
 struct panthor_core_dump_args {
 	struct panthor_device *ptdev;
 	struct panthor_vm *group_vm;
 	struct panthor_group *group;
+	/**
+	 * @job_list: List of per-job dumps, used if the dump contains more
+	 * than one job.
+	 *
+	 * Note that the default devcoredump behavior is to discard dumps when
+	 * a previous dump has not been read yet. There is also a limit on the
+	 * number of dumps that can be stored.
+	 */
+	struct list_head *job_list;
+	/** @append: whether to append the current job dump to job_list */
+	bool append;
 };
 
 int panthor_core_dump(struct panthor_core_dump_args *args);
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index ea2696c1075a..5f31a476866b 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -319,8 +319,10 @@ struct panthor_scheduler {
 		struct list_head stopped_groups;
 	} reset;
 
-	/** @dump_successful_jobs: whether to dump successful jobs through coredumpv */
-	bool dump_successful_jobs;
+	/** @dump_next_n_successful_jobs: How many upcoming successful jobs to dump through coredumpv */
+	u32 dump_next_n_successful_jobs;
+	/** @dump_job_list: List containing dump entries if multiple jobs are being dumped */
+	struct list_head dump_job_list;
 };
 
 /**
@@ -2950,11 +2952,15 @@ queue_run_job(struct drm_sched_job *sched_job)
 	queue->iface.input->extract = queue->iface.output->extract;
 	queue->iface.input->insert = job->ringbuf.end;
 
-	if (sched->dump_successful_jobs) {
+	if (sched->dump_next_n_successful_jobs > 0) {
 		struct panthor_core_dump_args core_dump_args = {
 			.ptdev = ptdev,
 			.group_vm = job->group->vm,
 			.group = job->group,
+			.job_list = &sched->dump_job_list,
+			/* Only the last job of the run flushes the list. */
+			.append = sched->dump_next_n_successful_jobs > 1,
 		};
+
+		sched->dump_next_n_successful_jobs--;
 
 		panthor_core_dump(&core_dump_args);
@@ -3014,6 +3020,7 @@ queue_timedout_job(struct drm_sched_job *sched_job)
 		.ptdev = ptdev,
 		.group_vm = job->group->vm,
 		.group = job->group,
+		.job_list = &sched->dump_job_list,
 	};
 
 	panthor_core_dump(&core_dump_args);
@@ -3509,6 +3516,7 @@ static void panthor_sched_fini(struct drm_device *ddev, void *res)
 	}
 
 	drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
+	drm_WARN_ON(ddev, !list_empty(&sched->dump_job_list));
 }
 
 int panthor_sched_init(struct panthor_device *ptdev)
@@ -3585,6 +3593,7 @@ int panthor_sched_init(struct panthor_device *ptdev)
 		return ret;
 
 	INIT_LIST_HEAD(&sched->reset.stopped_groups);
+	INIT_LIST_HEAD(&sched->dump_job_list);
 
 	/* sched->heap_alloc_wq will be used for heap chunk allocation on
 	 * tiler OOM events, which means we can't use the same workqueue for
@@ -3624,7 +3633,8 @@ void panthor_sched_debugfs_init(struct drm_minor *minor)
 		container_of(minor->dev, struct panthor_device, base);
 	struct panthor_scheduler *sched = ptdev->scheduler;
 
-	debugfs_create_bool("dump_successful_jobs", 0644, minor->debugfs_root,
-			    &sched->dump_successful_jobs);
+	debugfs_create_u32("dump_next_n_successful_jobs", 0644,
+			   minor->debugfs_root,
+			   &sched->dump_next_n_successful_jobs);
 }
 #endif /* CONFIG_DEBUG_FS */
-- 
2.45.2


  parent reply	other threads:[~2024-08-13 21:06 UTC|newest]

Thread overview: 56+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-07-10 22:50 [RFC PATCH] drm: panthor: add dev_coredumpv support Daniel Almeida
2024-07-11  0:01 ` Danilo Krummrich
2024-07-15  9:03   ` Daniel Vetter
2024-07-15 17:05     ` Daniel Almeida
2024-07-16  9:25       ` Daniel Vetter
2024-07-25 19:35         ` Lyude Paul
2024-07-26 13:40           ` Daniel Vetter
2024-07-29 18:34             ` Lyude Paul
2024-07-30  8:29               ` Daniel Vetter
2024-07-11 16:57 ` Liviu Dudau
2024-07-11 18:40   ` Daniel Almeida
2024-07-12  9:46 ` Steven Price
2024-07-12 14:35   ` Daniel Almeida
2024-07-12 14:53     ` Danilo Krummrich
2024-07-12 15:13       ` Daniel Almeida
2024-07-12 15:32         ` Danilo Krummrich
2024-07-13  0:48           ` Dave Airlie
2024-07-13  1:00             ` Daniel Almeida
2024-07-13  8:17             ` Miguel Ojeda
2024-07-15  9:12     ` Steven Price
2024-07-23  9:44       ` Alice Ryhl
2024-07-23 16:06       ` Boris Brezillon
2024-07-23 17:23         ` Daniel Almeida
2024-07-24  8:59         ` Steven Price
2024-07-24 10:44           ` Boris Brezillon
2024-07-24 12:37             ` Steven Price
2024-07-24 13:15           ` Rob Herring
2024-07-24 13:54             ` Steven Price
2024-07-24 14:27               ` Daniel Almeida
2024-07-24 14:35                 ` Steven Price
2024-07-24 14:38               ` Miguel Ojeda
2024-07-25 11:42               ` Carsten Haitzler
2024-07-25 11:45         ` Carsten Haitzler
2024-07-23  9:53 ` Alice Ryhl
2024-07-23 13:41   ` Daniel Almeida
2024-07-23 13:45     ` Alice Ryhl
2024-08-13 21:05 ` [PATCH v2 0/5] Panthor devcoredump support Daniel Almeida
2024-08-13 21:05   ` [PATCH v2 1/5] drm: panthor: expose some fw information through the query ioctl Daniel Almeida
2024-08-14 13:44     ` Boris Brezillon
2024-08-13 21:05   ` [PATCH v2 2/5] drm: panthor: add devcoredump support Daniel Almeida
2024-08-15  6:01     ` kernel test robot
2024-08-13 21:05   ` [PATCH v2 3/5] drm: panthor: add debugfs support in panthor_sched Daniel Almeida
2024-08-13 21:05   ` [PATCH v2 4/5] drm: panthor: add debugfs knob to dump successful jobs Daniel Almeida
2024-08-13 21:05   ` Daniel Almeida [this message]
2024-08-21 14:37 ` [PATCH v2 RESEND 0/5] Panthor devcoredump support Daniel Almeida
2024-08-21 14:37   ` [PATCH v2 RESEND 1/5] drm: panthor: expose some fw information through the query ioctl Daniel Almeida
2024-08-22 12:26     ` Adrian Larumbe
2024-08-23 13:15     ` Boris Brezillon
2024-09-20 12:57     ` Mihail Atanassov
2024-08-21 14:37   ` [PATCH v2 RESEND 2/5] drm: panthor: add devcoredump support Daniel Almeida
2024-08-22 12:27     ` Adrian Larumbe
2024-08-23 14:46     ` Boris Brezillon
2024-08-21 14:37   ` [PATCH v2 RESEND 3/5] drm: panthor: add debugfs support in panthor_sched Daniel Almeida
2024-08-21 14:37   ` [PATCH v2 RESEND 4/5] drm: panthor: add debugfs knob to dump successful jobs Daniel Almeida
2024-08-22 12:36     ` Adrian Larumbe
2024-08-21 14:37   ` [PATCH v2 RESEND 5/5] drm: panthor: allow dumping multiple jobs Daniel Almeida

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240813210555.607641-7-daniel.almeida@collabora.com \
    --to=daniel.almeida@collabora.com \
    --cc=boris.brezillon@collabora.com \
    --cc=carsten.haitzler@arm.com \
    --cc=faith.ekstrand@collabora.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=liviu.dudau@arm.com \
    --cc=robh@kernel.org \
    --cc=steven.price@arm.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox