From: "T.J. Mercier" <tjmercier-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org>
To: tjmercier-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org,
"David Airlie" <airlied-cv59FeDIM0c@public.gmane.org>,
"Daniel Vetter" <daniel-/w4YWyX8dFk@public.gmane.org>,
"Maarten Lankhorst"
<maarten.lankhorst-VuQAYsv1563Yd54FQh9/CA@public.gmane.org>,
"Maxime Ripard" <mripard-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>,
"Thomas Zimmermann" <tzimmermann-l3A5Bk7waGM@public.gmane.org>,
"Jonathan Corbet" <corbet-T1hC0tSOHrs@public.gmane.org>,
"Greg Kroah-Hartman"
<gregkh-hQyY1W1yCW8ekmWlsbkhG0B+6BGkLq7r@public.gmane.org>,
"Arve Hjønnevåg" <arve-z5hGa2qSFaRBDgjK7y7TUQ@public.gmane.org>,
"Todd Kjos" <tkjos-z5hGa2qSFaRBDgjK7y7TUQ@public.gmane.org>,
"Martijn Coenen" <maco-z5hGa2qSFaRBDgjK7y7TUQ@public.gmane.org>,
"Joel Fernandes"
<joel-QYYGw3jwrUn5owFQY34kdNi2O/JbrIOy@public.gmane.org>,
"Christian Brauner"
<brauner-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>,
"Hridya Valsaraju"
<hridya-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org>,
"Suren Baghdasaryan"
<surenb-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org>,
"Sumit Semwal"
<sumit.semwal-QSEj5FYQhm4dnm+yROfE0A@public.gmane.org>,
"Christian König" <christian.koenig-5C7GfCeVMHo@public.gmane.org>,
"Benjamin Gaignard"
<benjamin.gaignard-QSEj5FYQhm4dnm+yROfE0A@public.gmane.org>
Cc: kaleshsingh-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org,
Kenny.Ho-5C7GfCeVMHo@public.gmane.org,
mkoutny-IBi9RG/b67k@public.gmane.org,
skhan-hQyY1W1yCW8ekmWlsbkhG0B+6BGkLq7r@public.gmane.org,
dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,
linux-doc-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
linux-media-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
linaro-mm-sig-cunTk1MwBs8s++Sfvej+rw@public.gmane.org,
cgroups-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
linux-kselftest-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
Subject: [RFC v4 5/8] dmabuf: Add gpu cgroup charge transfer function
Date: Mon, 28 Mar 2022 03:59:44 +0000 [thread overview]
Message-ID: <20220328035951.1817417-6-tjmercier@google.com> (raw)
In-Reply-To: <20220328035951.1817417-1-tjmercier-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org>
From: Hridya Valsaraju <hridya-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org>
The dma_buf_charge_transfer function provides a way for processes to
transfer charge of a buffer to a different process. This is essential
for the cases where a central allocator process does allocations for
various subsystems, hands over the fd to the client who requested the
memory and drops all references to the allocated memory.
Signed-off-by: Hridya Valsaraju <hridya-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org>
Signed-off-by: T.J. Mercier <tjmercier-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org>
---
v4 changes
Adjust ordering of charge/uncharge during transfer to avoid potentially
hitting cgroup limit per Michal Koutný.
v3 changes
Use more common dual author commit message format per John Stultz.
v2 changes
Move dma-buf cgroup charge transfer from a dma_buf_op defined by every
heap to a single dma-buf function for all heaps per Daniel Vetter and
Christian König.
---
drivers/dma-buf/dma-buf.c | 49 +++++++++++++++++++++++++++++++
include/linux/cgroup_gpu.h | 12 ++++++++
include/linux/dma-buf.h | 2 ++
kernel/cgroup/gpu.c | 59 ++++++++++++++++++++++++++++++++++++++
4 files changed, 122 insertions(+)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 1ee5c60d3d6d..7748c3453b91 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1380,6 +1380,55 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
+/**
+ * dma_buf_transfer_charge - Change the GPU cgroup to which the provided dma_buf is charged.
+ * @dmabuf: [in] buffer whose charge will be migrated to a different GPU cgroup
+ * @gpucg: [in] the destination GPU cgroup for dmabuf's charge
+ *
+ * Only tasks that belong to the same cgroup the buffer is currently charged to
+ * may call this function, otherwise it will return -EPERM.
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int dma_buf_transfer_charge(struct dma_buf *dmabuf, struct gpucg *gpucg)
+{
+#ifdef CONFIG_CGROUP_GPU
+ struct gpucg *current_gpucg;
+ int ret;
+
+ /* If the source and destination cgroups are the same, don't do anything. */
+ current_gpucg = gpucg_get(current);
+ if (current_gpucg == gpucg) {
+ ret = 0;
+ goto skip_transfer;
+ }
+
+ /*
+ * Verify that the cgroup of the process requesting the transfer is the
+ * same as the one the buffer is currently charged to. Reuse the css
+ * reference taken above; a second gpucg_get() here would leak it.
+ */
+ mutex_lock(&dmabuf->lock);
+ if (current_gpucg != dmabuf->gpucg) {
+ ret = -EPERM;
+ goto err;
+ }
+
+ ret = gpucg_transfer_charge(current_gpucg, gpucg, dmabuf->gpucg_dev, dmabuf->size);
+ if (ret)
+ goto err;
+ dmabuf->gpucg = gpucg;
+err:
+ mutex_unlock(&dmabuf->lock);
+skip_transfer:
+ gpucg_put(current_gpucg);
+ return ret;
+#else
+ return 0;
+#endif /* CONFIG_CGROUP_GPU */
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_transfer_charge, DMA_BUF);
+
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
diff --git a/include/linux/cgroup_gpu.h b/include/linux/cgroup_gpu.h
index c90069719022..e30f15d5e9be 100644
--- a/include/linux/cgroup_gpu.h
+++ b/include/linux/cgroup_gpu.h
@@ -87,6 +87,10 @@ static inline struct gpucg *gpucg_parent(struct gpucg *cg)
int gpucg_try_charge(struct gpucg *gpucg, struct gpucg_device *device, u64 usage);
void gpucg_uncharge(struct gpucg *gpucg, struct gpucg_device *device, u64 usage);
+int gpucg_transfer_charge(struct gpucg *source,
+ struct gpucg *dest,
+ struct gpucg_device *device,
+ u64 usage);
void gpucg_register_device(struct gpucg_device *gpucg_dev, const char *name);
#else /* CONFIG_CGROUP_GPU */
@@ -121,6 +125,14 @@ static inline void gpucg_uncharge(struct gpucg *gpucg,
struct gpucg_device *device,
u64 usage) {}
+static inline int gpucg_transfer_charge(struct gpucg *source,
+ struct gpucg *dest,
+ struct gpucg_device *device,
+ u64 usage)
+{
+ return 0;
+}
+
static inline void gpucg_register_device(struct gpucg_device *gpucg_dev,
const char *name) {}
#endif /* CONFIG_CGROUP_GPU */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 742f29c3daaf..646827156213 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -646,4 +646,6 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
+
+int dma_buf_transfer_charge(struct dma_buf *dmabuf, struct gpucg *gpucg);
#endif /* __DMA_BUF_H__ */
diff --git a/kernel/cgroup/gpu.c b/kernel/cgroup/gpu.c
index ac4c470914b5..40531323d6da 100644
--- a/kernel/cgroup/gpu.c
+++ b/kernel/cgroup/gpu.c
@@ -247,6 +247,65 @@ void gpucg_uncharge(struct gpucg *gpucg, struct gpucg_device *device, u64 usage)
css_put_many(&gpucg->css, nr_pages);
}
+/**
+ * gpucg_transfer_charge - Transfer a GPU charge from one cgroup to another.
+ * @source: [in] The GPU cgroup the charge will be transferred from.
+ * @dest: [in] The GPU cgroup the charge will be transferred to.
+ * @device: [in] The GPU cgroup device corresponding to the charge.
+ * @usage: [in] The size of the memory in bytes.
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int gpucg_transfer_charge(struct gpucg *source,
+ struct gpucg *dest,
+ struct gpucg_device *device,
+ u64 usage)
+{
+ struct page_counter *counter;
+ u64 nr_pages;
+ struct gpucg_resource_pool *rp_source, *rp_dest;
+ int ret = 0;
+
+ nr_pages = PAGE_ALIGN(usage) >> PAGE_SHIFT;
+
+ mutex_lock(&gpucg_mutex);
+ rp_source = find_cg_rpool_locked(source, device);
+ if (unlikely(!rp_source)) {
+ ret = -ENOENT;
+ goto exit_early;
+ }
+
+ rp_dest = get_cg_rpool_locked(dest, device);
+ if (IS_ERR(rp_dest)) {
+ ret = PTR_ERR(rp_dest);
+ goto exit_early;
+ }
+
+ /*
+ * First uncharge from the pool it's currently charged to. This ordering avoids double
+ * charging while the transfer is in progress, which could cause us to hit a limit.
+ * If the try_charge fails for this transfer, we need to be able to reverse this uncharge,
+ * so we continue to hold the gpucg_mutex here.
+ */
+ page_counter_uncharge(&rp_source->total, nr_pages);
+ css_put_many(&source->css, nr_pages);
+
+ /* Now attempt the new charge */
+ if (page_counter_try_charge(&rp_dest->total, nr_pages, &counter)) {
+ css_get_many(&dest->css, nr_pages);
+ } else {
+ /* The new charge failed: report it and reverse the uncharge above.
+ * Re-charging source always succeeds while gpucg_mutex is held.
+ */
+ ret = -ENOMEM;
+ WARN_ON(!page_counter_try_charge(&rp_source->total, nr_pages, &counter));
+ css_get_many(&source->css, nr_pages);
+ }
+exit_early:
+ mutex_unlock(&gpucg_mutex);
+ return ret;
+}
+
/**
* gpucg_register_device - Registers a device for memory accounting using the
* GPU cgroup controller.
--
2.35.1.1021.g381101b075-goog
next prev parent reply other threads:[~2022-03-28 3:59 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-03-28 3:59 [RFC v4 0/8] Proposal for a GPU cgroup controller T.J. Mercier
2022-03-28 3:59 ` [RFC v4 1/8] gpu: rfc: " T.J. Mercier
2022-03-28 3:59 ` [RFC v4 2/8] cgroup: gpu: Add a cgroup controller for allocator attribution of GPU memory T.J. Mercier
2022-03-29 16:59 ` Tejun Heo
[not found] ` <YkM6/57mVxoNfSvm-NiLfg/pYEd1N0TnZuCh8vA@public.gmane.org>
2022-03-30 20:56 ` T.J. Mercier
2022-04-04 17:41 ` Tejun Heo
2022-04-04 17:49 ` T.J. Mercier
2022-03-28 3:59 ` [RFC v4 4/8] dmabuf: heaps: export system_heap buffers with GPU cgroup charging T.J. Mercier
[not found] ` <20220328035951.1817417-5-tjmercier-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org>
2022-03-28 14:36 ` Daniel Vetter
[not found] ` <YkHH/0Use7F30UUE-dv86pmgwkMBes7Z6vYuT8azUEOm+Xw19@public.gmane.org>
2022-03-28 18:28 ` T.J. Mercier
[not found] ` <CABdmKX01p6g_iHsB6dd4Wwh=8iLdYiUqdY6_yyA5ax2YNHt6tQ@mail.gmail.com>
[not found] ` <CABdmKX01p6g_iHsB6dd4Wwh=8iLdYiUqdY6_yyA5ax2YNHt6tQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2022-03-29 8:42 ` Daniel Vetter
2022-03-29 16:50 ` Tejun Heo
[not found] ` <YkLGbL5Z3HVCyVkK-dv86pmgwkMBes7Z6vYuT8azUEOm+Xw19@public.gmane.org>
2022-03-29 17:52 ` T.J. Mercier
[not found] ` <20220328035951.1817417-1-tjmercier-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org>
2022-03-28 3:59 ` [RFC v4 3/8] dmabuf: Use the GPU cgroup charge/uncharge APIs T.J. Mercier
2022-03-28 3:59 ` T.J. Mercier [this message]
2022-03-29 15:21 ` [RFC v4 5/8] dmabuf: Add gpu cgroup charge transfer function Michal Koutný
2022-04-01 18:41 ` T.J. Mercier
2022-04-05 12:12 ` Michal Koutný
2022-04-05 17:48 ` T.J. Mercier
2022-03-28 3:59 ` [RFC v4 6/8] binder: Add a buffer flag to relinquish ownership of fds T.J. Mercier
2022-03-28 3:59 ` [RFC v4 7/8] binder: use __kernel_pid_t and __kernel_uid_t for userspace T.J. Mercier
2022-03-28 3:59 ` [RFC v4 8/8] selftests: Add binder cgroup gpu memory transfer test T.J. Mercier
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220328035951.1817417-6-tjmercier@google.com \
--to=tjmercier-hpiqsd4aklfqt0dzr+alfa@public.gmane.org \
--cc=Kenny.Ho-5C7GfCeVMHo@public.gmane.org \
--cc=airlied-cv59FeDIM0c@public.gmane.org \
--cc=arve-z5hGa2qSFaRBDgjK7y7TUQ@public.gmane.org \
--cc=benjamin.gaignard-QSEj5FYQhm4dnm+yROfE0A@public.gmane.org \
--cc=brauner-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org \
--cc=cgroups-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
--cc=christian.koenig-5C7GfCeVMHo@public.gmane.org \
--cc=corbet-T1hC0tSOHrs@public.gmane.org \
--cc=daniel-/w4YWyX8dFk@public.gmane.org \
--cc=dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org \
--cc=gregkh-hQyY1W1yCW8ekmWlsbkhG0B+6BGkLq7r@public.gmane.org \
--cc=hridya-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org \
--cc=joel-QYYGw3jwrUn5owFQY34kdNi2O/JbrIOy@public.gmane.org \
--cc=kaleshsingh-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org \
--cc=linaro-mm-sig-cunTk1MwBs8s++Sfvej+rw@public.gmane.org \
--cc=linux-doc-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
--cc=linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
--cc=linux-kselftest-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
--cc=linux-media-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
--cc=maarten.lankhorst-VuQAYsv1563Yd54FQh9/CA@public.gmane.org \
--cc=maco-z5hGa2qSFaRBDgjK7y7TUQ@public.gmane.org \
--cc=mkoutny-IBi9RG/b67k@public.gmane.org \
--cc=mripard-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org \
--cc=skhan-hQyY1W1yCW8ekmWlsbkhG0B+6BGkLq7r@public.gmane.org \
--cc=sumit.semwal-QSEj5FYQhm4dnm+yROfE0A@public.gmane.org \
--cc=surenb-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org \
--cc=tkjos-z5hGa2qSFaRBDgjK7y7TUQ@public.gmane.org \
--cc=tzimmermann-l3A5Bk7waGM@public.gmane.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox